1 /*- 2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 
29 */ 30 31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <netinet/sctp_pcb.h> 38 #include <netinet/sctputil.h> 39 #include <netinet/sctp_var.h> 40 #include <netinet/sctp_sysctl.h> 41 #ifdef INET6 42 #endif 43 #include <netinet/sctp_header.h> 44 #include <netinet/sctp_output.h> 45 #include <netinet/sctp_uio.h> 46 #include <netinet/sctp_timer.h> 47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */ 48 #include <netinet/sctp_auth.h> 49 #include <netinet/sctp_asconf.h> 50 #include <netinet/sctp_cc_functions.h> 51 52 #define NUMBER_OF_MTU_SIZES 18 53 54 55 #if defined(__Windows__) && !defined(SCTP_LOCAL_TRACE_BUF) 56 #include "eventrace_netinet.h" 57 #include "sctputil.tmh" /* this is the file that will be auto 58 * generated */ 59 #else 60 #ifndef KTR_SCTP 61 #define KTR_SCTP KTR_SUBSYS 62 #endif 63 #endif 64 65 void 66 sctp_sblog(struct sockbuf *sb, 67 struct sctp_tcb *stcb, int from, int incr) 68 { 69 struct sctp_cwnd_log sctp_clog; 70 71 sctp_clog.x.sb.stcb = stcb; 72 sctp_clog.x.sb.so_sbcc = sb->sb_cc; 73 if (stcb) 74 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc; 75 else 76 sctp_clog.x.sb.stcb_sbcc = 0; 77 sctp_clog.x.sb.incr = incr; 78 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 79 SCTP_LOG_EVENT_SB, 80 from, 81 sctp_clog.x.misc.log1, 82 sctp_clog.x.misc.log2, 83 sctp_clog.x.misc.log3, 84 sctp_clog.x.misc.log4); 85 } 86 87 void 88 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc) 89 { 90 struct sctp_cwnd_log sctp_clog; 91 92 sctp_clog.x.close.inp = (void *)inp; 93 sctp_clog.x.close.sctp_flags = inp->sctp_flags; 94 if (stcb) { 95 sctp_clog.x.close.stcb = (void *)stcb; 96 sctp_clog.x.close.state = (uint16_t) stcb->asoc.state; 97 } else { 98 sctp_clog.x.close.stcb = 0; 99 sctp_clog.x.close.state = 0; 100 } 101 sctp_clog.x.close.loc = loc; 102 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 103 
SCTP_LOG_EVENT_CLOSE, 104 0, 105 sctp_clog.x.misc.log1, 106 sctp_clog.x.misc.log2, 107 sctp_clog.x.misc.log3, 108 sctp_clog.x.misc.log4); 109 } 110 111 112 void 113 rto_logging(struct sctp_nets *net, int from) 114 { 115 struct sctp_cwnd_log sctp_clog; 116 117 memset(&sctp_clog, 0, sizeof(sctp_clog)); 118 sctp_clog.x.rto.net = (void *)net; 119 sctp_clog.x.rto.rtt = net->prev_rtt; 120 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 121 SCTP_LOG_EVENT_RTT, 122 from, 123 sctp_clog.x.misc.log1, 124 sctp_clog.x.misc.log2, 125 sctp_clog.x.misc.log3, 126 sctp_clog.x.misc.log4); 127 128 } 129 130 void 131 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from) 132 { 133 struct sctp_cwnd_log sctp_clog; 134 135 sctp_clog.x.strlog.stcb = stcb; 136 sctp_clog.x.strlog.n_tsn = tsn; 137 sctp_clog.x.strlog.n_sseq = sseq; 138 sctp_clog.x.strlog.e_tsn = 0; 139 sctp_clog.x.strlog.e_sseq = 0; 140 sctp_clog.x.strlog.strm = stream; 141 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 142 SCTP_LOG_EVENT_STRM, 143 from, 144 sctp_clog.x.misc.log1, 145 sctp_clog.x.misc.log2, 146 sctp_clog.x.misc.log3, 147 sctp_clog.x.misc.log4); 148 149 } 150 151 void 152 sctp_log_nagle_event(struct sctp_tcb *stcb, int action) 153 { 154 struct sctp_cwnd_log sctp_clog; 155 156 sctp_clog.x.nagle.stcb = (void *)stcb; 157 sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight; 158 sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size; 159 sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue; 160 sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count; 161 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 162 SCTP_LOG_EVENT_NAGLE, 163 action, 164 sctp_clog.x.misc.log1, 165 sctp_clog.x.misc.log2, 166 sctp_clog.x.misc.log3, 167 sctp_clog.x.misc.log4); 168 } 169 170 171 void 172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from) 173 { 174 struct sctp_cwnd_log sctp_clog; 175 176 
sctp_clog.x.sack.cumack = cumack; 177 sctp_clog.x.sack.oldcumack = old_cumack; 178 sctp_clog.x.sack.tsn = tsn; 179 sctp_clog.x.sack.numGaps = gaps; 180 sctp_clog.x.sack.numDups = dups; 181 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 182 SCTP_LOG_EVENT_SACK, 183 from, 184 sctp_clog.x.misc.log1, 185 sctp_clog.x.misc.log2, 186 sctp_clog.x.misc.log3, 187 sctp_clog.x.misc.log4); 188 } 189 190 void 191 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from) 192 { 193 struct sctp_cwnd_log sctp_clog; 194 195 memset(&sctp_clog, 0, sizeof(sctp_clog)); 196 sctp_clog.x.map.base = map; 197 sctp_clog.x.map.cum = cum; 198 sctp_clog.x.map.high = high; 199 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 200 SCTP_LOG_EVENT_MAP, 201 from, 202 sctp_clog.x.misc.log1, 203 sctp_clog.x.misc.log2, 204 sctp_clog.x.misc.log3, 205 sctp_clog.x.misc.log4); 206 } 207 208 void 209 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, 210 int from) 211 { 212 struct sctp_cwnd_log sctp_clog; 213 214 memset(&sctp_clog, 0, sizeof(sctp_clog)); 215 sctp_clog.x.fr.largest_tsn = biggest_tsn; 216 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn; 217 sctp_clog.x.fr.tsn = tsn; 218 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 219 SCTP_LOG_EVENT_FR, 220 from, 221 sctp_clog.x.misc.log1, 222 sctp_clog.x.misc.log2, 223 sctp_clog.x.misc.log3, 224 sctp_clog.x.misc.log4); 225 226 } 227 228 229 void 230 sctp_log_mb(struct mbuf *m, int from) 231 { 232 struct sctp_cwnd_log sctp_clog; 233 234 sctp_clog.x.mb.mp = m; 235 sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m)); 236 sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m)); 237 sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0); 238 if (SCTP_BUF_IS_EXTENDED(m)) { 239 sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m); 240 sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m)); 241 } else { 242 sctp_clog.x.mb.ext = 0; 243 sctp_clog.x.mb.refcnt = 0; 244 } 245 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 246 SCTP_LOG_EVENT_MBUF, 247 
from, 248 sctp_clog.x.misc.log1, 249 sctp_clog.x.misc.log2, 250 sctp_clog.x.misc.log3, 251 sctp_clog.x.misc.log4); 252 } 253 254 255 void 256 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, 257 int from) 258 { 259 struct sctp_cwnd_log sctp_clog; 260 261 if (control == NULL) { 262 SCTP_PRINTF("Gak log of NULL?\n"); 263 return; 264 } 265 sctp_clog.x.strlog.stcb = control->stcb; 266 sctp_clog.x.strlog.n_tsn = control->sinfo_tsn; 267 sctp_clog.x.strlog.n_sseq = control->sinfo_ssn; 268 sctp_clog.x.strlog.strm = control->sinfo_stream; 269 if (poschk != NULL) { 270 sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn; 271 sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn; 272 } else { 273 sctp_clog.x.strlog.e_tsn = 0; 274 sctp_clog.x.strlog.e_sseq = 0; 275 } 276 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 277 SCTP_LOG_EVENT_STRM, 278 from, 279 sctp_clog.x.misc.log1, 280 sctp_clog.x.misc.log2, 281 sctp_clog.x.misc.log3, 282 sctp_clog.x.misc.log4); 283 284 } 285 286 void 287 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from) 288 { 289 struct sctp_cwnd_log sctp_clog; 290 291 sctp_clog.x.cwnd.net = net; 292 if (stcb->asoc.send_queue_cnt > 255) 293 sctp_clog.x.cwnd.cnt_in_send = 255; 294 else 295 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 296 if (stcb->asoc.stream_queue_cnt > 255) 297 sctp_clog.x.cwnd.cnt_in_str = 255; 298 else 299 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 300 301 if (net) { 302 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd; 303 sctp_clog.x.cwnd.inflight = net->flight_size; 304 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack; 305 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack; 306 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack; 307 } 308 if (SCTP_CWNDLOG_PRESEND == from) { 309 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd; 310 } 311 sctp_clog.x.cwnd.cwnd_augment = augment; 312 SCTP_CTR6(KTR_SCTP, 
"SCTP:%d[%d]:%x-%x-%x-%x", 313 SCTP_LOG_EVENT_CWND, 314 from, 315 sctp_clog.x.misc.log1, 316 sctp_clog.x.misc.log2, 317 sctp_clog.x.misc.log3, 318 sctp_clog.x.misc.log4); 319 320 } 321 322 void 323 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from) 324 { 325 struct sctp_cwnd_log sctp_clog; 326 327 memset(&sctp_clog, 0, sizeof(sctp_clog)); 328 if (inp) { 329 sctp_clog.x.lock.sock = (void *)inp->sctp_socket; 330 331 } else { 332 sctp_clog.x.lock.sock = (void *)NULL; 333 } 334 sctp_clog.x.lock.inp = (void *)inp; 335 if (stcb) { 336 sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx); 337 } else { 338 sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN; 339 } 340 if (inp) { 341 sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx); 342 sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx); 343 } else { 344 sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN; 345 sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN; 346 } 347 sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx)); 348 if (inp->sctp_socket) { 349 sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 350 sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 351 sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx)); 352 } else { 353 sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN; 354 sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN; 355 sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN; 356 } 357 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 358 SCTP_LOG_LOCK_EVENT, 359 from, 360 sctp_clog.x.misc.log1, 361 sctp_clog.x.misc.log2, 362 sctp_clog.x.misc.log3, 363 sctp_clog.x.misc.log4); 364 365 } 366 367 void 368 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from) 369 { 370 struct sctp_cwnd_log sctp_clog; 371 372 memset(&sctp_clog, 0, sizeof(sctp_clog)); 373 sctp_clog.x.cwnd.net = net; 374 sctp_clog.x.cwnd.cwnd_new_value = error; 375 
sctp_clog.x.cwnd.inflight = net->flight_size; 376 sctp_clog.x.cwnd.cwnd_augment = burst; 377 if (stcb->asoc.send_queue_cnt > 255) 378 sctp_clog.x.cwnd.cnt_in_send = 255; 379 else 380 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 381 if (stcb->asoc.stream_queue_cnt > 255) 382 sctp_clog.x.cwnd.cnt_in_str = 255; 383 else 384 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 385 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 386 SCTP_LOG_EVENT_MAXBURST, 387 from, 388 sctp_clog.x.misc.log1, 389 sctp_clog.x.misc.log2, 390 sctp_clog.x.misc.log3, 391 sctp_clog.x.misc.log4); 392 393 } 394 395 void 396 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead) 397 { 398 struct sctp_cwnd_log sctp_clog; 399 400 sctp_clog.x.rwnd.rwnd = peers_rwnd; 401 sctp_clog.x.rwnd.send_size = snd_size; 402 sctp_clog.x.rwnd.overhead = overhead; 403 sctp_clog.x.rwnd.new_rwnd = 0; 404 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 405 SCTP_LOG_EVENT_RWND, 406 from, 407 sctp_clog.x.misc.log1, 408 sctp_clog.x.misc.log2, 409 sctp_clog.x.misc.log3, 410 sctp_clog.x.misc.log4); 411 } 412 413 void 414 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval) 415 { 416 struct sctp_cwnd_log sctp_clog; 417 418 sctp_clog.x.rwnd.rwnd = peers_rwnd; 419 sctp_clog.x.rwnd.send_size = flight_size; 420 sctp_clog.x.rwnd.overhead = overhead; 421 sctp_clog.x.rwnd.new_rwnd = a_rwndval; 422 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 423 SCTP_LOG_EVENT_RWND, 424 from, 425 sctp_clog.x.misc.log1, 426 sctp_clog.x.misc.log2, 427 sctp_clog.x.misc.log3, 428 sctp_clog.x.misc.log4); 429 } 430 431 void 432 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt) 433 { 434 struct sctp_cwnd_log sctp_clog; 435 436 sctp_clog.x.mbcnt.total_queue_size = total_oq; 437 sctp_clog.x.mbcnt.size_change = book; 438 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q; 439 
sctp_clog.x.mbcnt.mbcnt_change = mbcnt; 440 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 441 SCTP_LOG_EVENT_MBCNT, 442 from, 443 sctp_clog.x.misc.log1, 444 sctp_clog.x.misc.log2, 445 sctp_clog.x.misc.log3, 446 sctp_clog.x.misc.log4); 447 448 } 449 450 void 451 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d) 452 { 453 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 454 SCTP_LOG_MISC_EVENT, 455 from, 456 a, b, c, d); 457 } 458 459 void 460 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from) 461 { 462 struct sctp_cwnd_log sctp_clog; 463 464 sctp_clog.x.wake.stcb = (void *)stcb; 465 sctp_clog.x.wake.wake_cnt = wake_cnt; 466 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count; 467 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt; 468 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt; 469 470 if (stcb->asoc.stream_queue_cnt < 0xff) 471 sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt; 472 else 473 sctp_clog.x.wake.stream_qcnt = 0xff; 474 475 if (stcb->asoc.chunks_on_out_queue < 0xff) 476 sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue; 477 else 478 sctp_clog.x.wake.chunks_on_oque = 0xff; 479 480 sctp_clog.x.wake.sctpflags = 0; 481 /* set in the defered mode stuff */ 482 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) 483 sctp_clog.x.wake.sctpflags |= 1; 484 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) 485 sctp_clog.x.wake.sctpflags |= 2; 486 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) 487 sctp_clog.x.wake.sctpflags |= 4; 488 /* what about the sb */ 489 if (stcb->sctp_socket) { 490 struct socket *so = stcb->sctp_socket; 491 492 sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff)); 493 } else { 494 sctp_clog.x.wake.sbflags = 0xff; 495 } 496 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 497 SCTP_LOG_EVENT_WAKE, 498 from, 499 sctp_clog.x.misc.log1, 500 sctp_clog.x.misc.log2, 501 sctp_clog.x.misc.log3, 
502 sctp_clog.x.misc.log4); 503 504 } 505 506 void 507 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen) 508 { 509 struct sctp_cwnd_log sctp_clog; 510 511 sctp_clog.x.blk.onsb = asoc->total_output_queue_size; 512 sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt); 513 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd; 514 sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt; 515 sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue; 516 sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024); 517 sctp_clog.x.blk.sndlen = sendlen; 518 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 519 SCTP_LOG_EVENT_BLOCK, 520 from, 521 sctp_clog.x.misc.log1, 522 sctp_clog.x.misc.log2, 523 sctp_clog.x.misc.log3, 524 sctp_clog.x.misc.log4); 525 526 } 527 528 int 529 sctp_fill_stat_log(void *optval, size_t *optsize) 530 { 531 /* May need to fix this if ktrdump does not work */ 532 return (0); 533 } 534 535 #ifdef SCTP_AUDITING_ENABLED 536 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2]; 537 static int sctp_audit_indx = 0; 538 539 static 540 void 541 sctp_print_audit_report(void) 542 { 543 int i; 544 int cnt; 545 546 cnt = 0; 547 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) { 548 if ((sctp_audit_data[i][0] == 0xe0) && 549 (sctp_audit_data[i][1] == 0x01)) { 550 cnt = 0; 551 SCTP_PRINTF("\n"); 552 } else if (sctp_audit_data[i][0] == 0xf0) { 553 cnt = 0; 554 SCTP_PRINTF("\n"); 555 } else if ((sctp_audit_data[i][0] == 0xc0) && 556 (sctp_audit_data[i][1] == 0x01)) { 557 SCTP_PRINTF("\n"); 558 cnt = 0; 559 } 560 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 561 (uint32_t) sctp_audit_data[i][1]); 562 cnt++; 563 if ((cnt % 14) == 0) 564 SCTP_PRINTF("\n"); 565 } 566 for (i = 0; i < sctp_audit_indx; i++) { 567 if ((sctp_audit_data[i][0] == 0xe0) && 568 (sctp_audit_data[i][1] == 0x01)) { 569 cnt = 0; 570 SCTP_PRINTF("\n"); 571 } else if (sctp_audit_data[i][0] == 
0xf0) { 572 cnt = 0; 573 SCTP_PRINTF("\n"); 574 } else if ((sctp_audit_data[i][0] == 0xc0) && 575 (sctp_audit_data[i][1] == 0x01)) { 576 SCTP_PRINTF("\n"); 577 cnt = 0; 578 } 579 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 580 (uint32_t) sctp_audit_data[i][1]); 581 cnt++; 582 if ((cnt % 14) == 0) 583 SCTP_PRINTF("\n"); 584 } 585 SCTP_PRINTF("\n"); 586 } 587 588 void 589 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 590 struct sctp_nets *net) 591 { 592 int resend_cnt, tot_out, rep, tot_book_cnt; 593 struct sctp_nets *lnet; 594 struct sctp_tmit_chunk *chk; 595 596 sctp_audit_data[sctp_audit_indx][0] = 0xAA; 597 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from; 598 sctp_audit_indx++; 599 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 600 sctp_audit_indx = 0; 601 } 602 if (inp == NULL) { 603 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 604 sctp_audit_data[sctp_audit_indx][1] = 0x01; 605 sctp_audit_indx++; 606 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 607 sctp_audit_indx = 0; 608 } 609 return; 610 } 611 if (stcb == NULL) { 612 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 613 sctp_audit_data[sctp_audit_indx][1] = 0x02; 614 sctp_audit_indx++; 615 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 616 sctp_audit_indx = 0; 617 } 618 return; 619 } 620 sctp_audit_data[sctp_audit_indx][0] = 0xA1; 621 sctp_audit_data[sctp_audit_indx][1] = 622 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 623 sctp_audit_indx++; 624 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 625 sctp_audit_indx = 0; 626 } 627 rep = 0; 628 tot_book_cnt = 0; 629 resend_cnt = tot_out = 0; 630 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 631 if (chk->sent == SCTP_DATAGRAM_RESEND) { 632 resend_cnt++; 633 } else if (chk->sent < SCTP_DATAGRAM_RESEND) { 634 tot_out += chk->book_size; 635 tot_book_cnt++; 636 } 637 } 638 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) { 639 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 640 sctp_audit_data[sctp_audit_indx][1] = 0xA1; 641 
sctp_audit_indx++; 642 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 643 sctp_audit_indx = 0; 644 } 645 SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n", 646 resend_cnt, stcb->asoc.sent_queue_retran_cnt); 647 rep = 1; 648 stcb->asoc.sent_queue_retran_cnt = resend_cnt; 649 sctp_audit_data[sctp_audit_indx][0] = 0xA2; 650 sctp_audit_data[sctp_audit_indx][1] = 651 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 652 sctp_audit_indx++; 653 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 654 sctp_audit_indx = 0; 655 } 656 } 657 if (tot_out != stcb->asoc.total_flight) { 658 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 659 sctp_audit_data[sctp_audit_indx][1] = 0xA2; 660 sctp_audit_indx++; 661 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 662 sctp_audit_indx = 0; 663 } 664 rep = 1; 665 SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out, 666 (int)stcb->asoc.total_flight); 667 stcb->asoc.total_flight = tot_out; 668 } 669 if (tot_book_cnt != stcb->asoc.total_flight_count) { 670 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 671 sctp_audit_data[sctp_audit_indx][1] = 0xA5; 672 sctp_audit_indx++; 673 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 674 sctp_audit_indx = 0; 675 } 676 rep = 1; 677 SCTP_PRINTF("tot_flt_book:%d\n", tot_book); 678 679 stcb->asoc.total_flight_count = tot_book_cnt; 680 } 681 tot_out = 0; 682 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 683 tot_out += lnet->flight_size; 684 } 685 if (tot_out != stcb->asoc.total_flight) { 686 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 687 sctp_audit_data[sctp_audit_indx][1] = 0xA3; 688 sctp_audit_indx++; 689 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 690 sctp_audit_indx = 0; 691 } 692 rep = 1; 693 SCTP_PRINTF("real flight:%d net total was %d\n", 694 stcb->asoc.total_flight, tot_out); 695 /* now corrective action */ 696 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 697 698 tot_out = 0; 699 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 700 if ((chk->whoTo == lnet) && 701 (chk->sent < SCTP_DATAGRAM_RESEND)) { 702 tot_out += 
chk->book_size; 703 } 704 } 705 if (lnet->flight_size != tot_out) { 706 SCTP_PRINTF("net:%x flight was %d corrected to %d\n", 707 (uint32_t) lnet, lnet->flight_size, 708 tot_out); 709 lnet->flight_size = tot_out; 710 } 711 } 712 } 713 if (rep) { 714 sctp_print_audit_report(); 715 } 716 } 717 718 void 719 sctp_audit_log(uint8_t ev, uint8_t fd) 720 { 721 722 sctp_audit_data[sctp_audit_indx][0] = ev; 723 sctp_audit_data[sctp_audit_indx][1] = fd; 724 sctp_audit_indx++; 725 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 726 sctp_audit_indx = 0; 727 } 728 } 729 730 #endif 731 732 /* 733 * a list of sizes based on typical mtu's, used only if next hop size not 734 * returned. 735 */ 736 static int sctp_mtu_sizes[] = { 737 68, 738 296, 739 508, 740 512, 741 544, 742 576, 743 1006, 744 1492, 745 1500, 746 1536, 747 2002, 748 2048, 749 4352, 750 4464, 751 8166, 752 17914, 753 32000, 754 65535 755 }; 756 757 void 758 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb) 759 { 760 struct sctp_association *asoc; 761 struct sctp_nets *net; 762 763 asoc = &stcb->asoc; 764 765 (void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer); 766 (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer); 767 (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); 768 (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer); 769 (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer); 770 (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer); 771 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 772 (void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer); 773 (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer); 774 } 775 } 776 777 int 778 find_next_best_mtu(int totsz) 779 { 780 int i, perfer; 781 782 /* 783 * if we are in here we must find the next best fit based on the 784 * size of the dg that failed to be sent. 
785 */ 786 perfer = 0; 787 for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) { 788 if (totsz < sctp_mtu_sizes[i]) { 789 perfer = i - 1; 790 if (perfer < 0) 791 perfer = 0; 792 break; 793 } 794 } 795 return (sctp_mtu_sizes[perfer]); 796 } 797 798 void 799 sctp_fill_random_store(struct sctp_pcb *m) 800 { 801 /* 802 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and 803 * our counter. The result becomes our good random numbers and we 804 * then setup to give these out. Note that we do no locking to 805 * protect this. This is ok, since if competing folks call this we 806 * will get more gobbled gook in the random store which is what we 807 * want. There is a danger that two guys will use the same random 808 * numbers, but thats ok too since that is random as well :-> 809 */ 810 m->store_at = 0; 811 (void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers, 812 sizeof(m->random_numbers), (uint8_t *) & m->random_counter, 813 sizeof(m->random_counter), (uint8_t *) m->random_store); 814 m->random_counter++; 815 } 816 817 uint32_t 818 sctp_select_initial_TSN(struct sctp_pcb *inp) 819 { 820 /* 821 * A true implementation should use random selection process to get 822 * the initial stream sequence number, using RFC1750 as a good 823 * guideline 824 */ 825 uint32_t x, *xp; 826 uint8_t *p; 827 int store_at, new_store; 828 829 if (inp->initial_sequence_debug != 0) { 830 uint32_t ret; 831 832 ret = inp->initial_sequence_debug; 833 inp->initial_sequence_debug++; 834 return (ret); 835 } 836 retry: 837 store_at = inp->store_at; 838 new_store = store_at + sizeof(uint32_t); 839 if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) { 840 new_store = 0; 841 } 842 if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) { 843 goto retry; 844 } 845 if (new_store == 0) { 846 /* Refill the random store */ 847 sctp_fill_random_store(inp); 848 } 849 p = &inp->random_store[store_at]; 850 xp = (uint32_t *) p; 851 x = *xp; 852 return (x); 853 } 854 855 uint32_t 856 sctp_select_a_tag(struct 
sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait) 857 { 858 u_long x, not_done; 859 struct timeval now; 860 861 (void)SCTP_GETTIME_TIMEVAL(&now); 862 not_done = 1; 863 while (not_done) { 864 x = sctp_select_initial_TSN(&inp->sctp_ep); 865 if (x == 0) { 866 /* we never use 0 */ 867 continue; 868 } 869 if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) { 870 not_done = 0; 871 } 872 } 873 return (x); 874 } 875 876 int 877 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb, 878 int for_a_init, uint32_t override_tag, uint32_t vrf_id) 879 { 880 struct sctp_association *asoc; 881 882 /* 883 * Anything set to zero is taken care of by the allocation routine's 884 * bzero 885 */ 886 887 /* 888 * Up front select what scoping to apply on addresses I tell my peer 889 * Not sure what to do with these right now, we will need to come up 890 * with a way to set them. We may need to pass them through from the 891 * caller in the sctp_aloc_assoc() function. 892 */ 893 int i; 894 895 asoc = &stcb->asoc; 896 /* init all variables to a known value. 
*/ 897 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE); 898 asoc->max_burst = m->sctp_ep.max_burst; 899 asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 900 asoc->cookie_life = m->sctp_ep.def_cookie_life; 901 asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off); 902 /* EY Init nr_sack variable */ 903 asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off); 904 /* JRS 5/21/07 - Init CMT PF variables */ 905 asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf); 906 asoc->sctp_frag_point = m->sctp_frag_point; 907 #ifdef INET 908 asoc->default_tos = m->ip_inp.inp.inp_ip_tos; 909 #else 910 asoc->default_tos = 0; 911 #endif 912 913 #ifdef INET6 914 asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo; 915 #else 916 asoc->default_flowlabel = 0; 917 #endif 918 asoc->sb_send_resv = 0; 919 if (override_tag) { 920 asoc->my_vtag = override_tag; 921 } else { 922 asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1); 923 } 924 /* Get the nonce tags */ 925 asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 926 asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 927 asoc->vrf_id = vrf_id; 928 929 if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) 930 asoc->hb_is_disabled = 1; 931 else 932 asoc->hb_is_disabled = 0; 933 934 #ifdef SCTP_ASOCLOG_OF_TSNS 935 asoc->tsn_in_at = 0; 936 asoc->tsn_out_at = 0; 937 asoc->tsn_in_wrapped = 0; 938 asoc->tsn_out_wrapped = 0; 939 asoc->cumack_log_at = 0; 940 asoc->cumack_log_atsnt = 0; 941 #endif 942 #ifdef SCTP_FS_SPEC_LOG 943 asoc->fs_index = 0; 944 #endif 945 asoc->refcnt = 0; 946 asoc->assoc_up_sent = 0; 947 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq = 948 sctp_select_initial_TSN(&m->sctp_ep); 949 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1; 950 /* we are optimisitic here */ 951 
asoc->peer_supports_pktdrop = 1; 952 asoc->peer_supports_nat = 0; 953 asoc->sent_queue_retran_cnt = 0; 954 955 /* for CMT */ 956 asoc->last_net_cmt_send_started = NULL; 957 958 /* This will need to be adjusted */ 959 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 960 asoc->last_acked_seq = asoc->init_seq_number - 1; 961 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 962 asoc->asconf_seq_in = asoc->last_acked_seq; 963 964 /* here we are different, we hold the next one we expect */ 965 asoc->str_reset_seq_in = asoc->last_acked_seq + 1; 966 967 asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max; 968 asoc->initial_rto = m->sctp_ep.initial_rto; 969 970 asoc->max_init_times = m->sctp_ep.max_init_times; 971 asoc->max_send_times = m->sctp_ep.max_send_times; 972 asoc->def_net_failure = m->sctp_ep.def_net_failure; 973 asoc->free_chunk_cnt = 0; 974 975 asoc->iam_blocking = 0; 976 /* ECN Nonce initialization */ 977 asoc->context = m->sctp_context; 978 asoc->def_send = m->def_send; 979 asoc->ecn_nonce_allowed = 0; 980 asoc->receiver_nonce_sum = 1; 981 asoc->nonce_sum_expect_base = 1; 982 asoc->nonce_sum_check = 1; 983 asoc->nonce_resync_tsn = 0; 984 asoc->nonce_wait_for_ecne = 0; 985 asoc->nonce_wait_tsn = 0; 986 asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 987 asoc->sack_freq = m->sctp_ep.sctp_sack_freq; 988 asoc->pr_sctp_cnt = 0; 989 asoc->total_output_queue_size = 0; 990 991 if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 992 struct in6pcb *inp6; 993 994 /* Its a V6 socket */ 995 inp6 = (struct in6pcb *)m; 996 asoc->ipv6_addr_legal = 1; 997 /* Now look at the binding flag to see if V4 will be legal */ 998 if (SCTP_IPV6_V6ONLY(inp6) == 0) { 999 asoc->ipv4_addr_legal = 1; 1000 } else { 1001 /* V4 addresses are NOT legal on the association */ 1002 asoc->ipv4_addr_legal = 0; 1003 } 1004 } else { 1005 /* Its a V4 socket, no - V6 */ 1006 asoc->ipv4_addr_legal = 1; 1007 asoc->ipv6_addr_legal = 0; 1008 } 1009 1010 asoc->my_rwnd = 
max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND); 1011 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket); 1012 1013 asoc->smallest_mtu = m->sctp_frag_point; 1014 #ifdef SCTP_PRINT_FOR_B_AND_M 1015 SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n", 1016 asoc->smallest_mtu); 1017 #endif 1018 asoc->minrto = m->sctp_ep.sctp_minrto; 1019 asoc->maxrto = m->sctp_ep.sctp_maxrto; 1020 1021 asoc->locked_on_sending = NULL; 1022 asoc->stream_locked_on = 0; 1023 asoc->ecn_echo_cnt_onq = 0; 1024 asoc->stream_locked = 0; 1025 1026 asoc->send_sack = 1; 1027 1028 LIST_INIT(&asoc->sctp_restricted_addrs); 1029 1030 TAILQ_INIT(&asoc->nets); 1031 TAILQ_INIT(&asoc->pending_reply_queue); 1032 TAILQ_INIT(&asoc->asconf_ack_sent); 1033 /* Setup to fill the hb random cache at first HB */ 1034 asoc->hb_random_idx = 4; 1035 1036 asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time; 1037 1038 /* 1039 * JRS - Pick the default congestion control module based on the 1040 * sysctl. 1041 */ 1042 switch (m->sctp_ep.sctp_default_cc_module) { 1043 /* JRS - Standard TCP congestion control */ 1044 case SCTP_CC_RFC2581: 1045 { 1046 stcb->asoc.congestion_control_module = SCTP_CC_RFC2581; 1047 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param; 1048 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack; 1049 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr; 1050 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout; 1051 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo; 1052 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 1053 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 1054 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer; 1055 break; 1056 } 1057 /* JRS - High Speed TCP congestion control 
(Floyd) */ 1058 case SCTP_CC_HSTCP: 1059 { 1060 stcb->asoc.congestion_control_module = SCTP_CC_HSTCP; 1061 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param; 1062 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack; 1063 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr; 1064 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout; 1065 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo; 1066 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 1067 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 1068 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer; 1069 break; 1070 } 1071 /* JRS - HTCP congestion control */ 1072 case SCTP_CC_HTCP: 1073 { 1074 stcb->asoc.congestion_control_module = SCTP_CC_HTCP; 1075 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param; 1076 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack; 1077 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr; 1078 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout; 1079 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo; 1080 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 1081 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 1082 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer; 1083 break; 1084 } 1085 /* JRS - By default, use RFC2581 */ 1086 default: 1087 { 1088 stcb->asoc.congestion_control_module = SCTP_CC_RFC2581; 1089 stcb->asoc.cc_functions.sctp_set_initial_cc_param = 
&sctp_set_initial_cc_param; 1090 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack; 1091 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr; 1092 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout; 1093 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo; 1094 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped; 1095 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output; 1096 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer; 1097 break; 1098 } 1099 } 1100 1101 /* 1102 * Now the stream parameters, here we allocate space for all streams 1103 * that we request by default. 1104 */ 1105 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams = 1106 m->sctp_ep.pre_open_stream_count; 1107 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *, 1108 asoc->streamoutcnt * sizeof(struct sctp_stream_out), 1109 SCTP_M_STRMO); 1110 if (asoc->strmout == NULL) { 1111 /* big trouble no memory */ 1112 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1113 return (ENOMEM); 1114 } 1115 for (i = 0; i < asoc->streamoutcnt; i++) { 1116 /* 1117 * inbound side must be set to 0xffff, also NOTE when we get 1118 * the INIT-ACK back (for INIT sender) we MUST reduce the 1119 * count (streamoutcnt) but first check if we sent to any of 1120 * the upper streams that were dropped (if some were). Those 1121 * that were dropped must be notified to the upper layer as 1122 * failed to send. 
1123 */ 1124 asoc->strmout[i].next_sequence_sent = 0x0; 1125 TAILQ_INIT(&asoc->strmout[i].outqueue); 1126 asoc->strmout[i].stream_no = i; 1127 asoc->strmout[i].last_msg_incomplete = 0; 1128 asoc->strmout[i].next_spoke.tqe_next = 0; 1129 asoc->strmout[i].next_spoke.tqe_prev = 0; 1130 } 1131 /* Now the mapping array */ 1132 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY; 1133 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size, 1134 SCTP_M_MAP); 1135 if (asoc->mapping_array == NULL) { 1136 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1137 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1138 return (ENOMEM); 1139 } 1140 memset(asoc->mapping_array, 0, asoc->mapping_array_size); 1141 /* EY - initialize the nr_mapping_array just like mapping array */ 1142 asoc->nr_mapping_array_size = SCTP_INITIAL_NR_MAPPING_ARRAY; 1143 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->nr_mapping_array_size, 1144 SCTP_M_MAP); 1145 if (asoc->nr_mapping_array == NULL) { 1146 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1147 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1148 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1149 return (ENOMEM); 1150 } 1151 memset(asoc->nr_mapping_array, 0, asoc->nr_mapping_array_size); 1152 1153 /* Now the init of the other outqueues */ 1154 TAILQ_INIT(&asoc->free_chunks); 1155 TAILQ_INIT(&asoc->out_wheel); 1156 TAILQ_INIT(&asoc->control_send_queue); 1157 TAILQ_INIT(&asoc->asconf_send_queue); 1158 TAILQ_INIT(&asoc->send_queue); 1159 TAILQ_INIT(&asoc->sent_queue); 1160 TAILQ_INIT(&asoc->reasmqueue); 1161 TAILQ_INIT(&asoc->resetHead); 1162 asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome; 1163 TAILQ_INIT(&asoc->asconf_queue); 1164 /* authentication fields */ 1165 asoc->authinfo.random = NULL; 1166 asoc->authinfo.active_keyid = 0; 1167 asoc->authinfo.assoc_key = NULL; 1168 asoc->authinfo.assoc_keyid = 0; 1169 asoc->authinfo.recv_key = NULL; 1170 asoc->authinfo.recv_keyid = 0; 1171 
	LIST_INIT(&asoc->shared_keys);
	/* Zero the per-timer-type expiry counters; bumped in sctp_timeout_handler(). */
	asoc->marked_retrans = 0;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}

/*
 * Grow asoc->mapping_array (and, if the peer supports NR-SACK, the
 * parallel asoc->nr_mapping_array) so that at least "needed" more TSN
 * bits can be tracked.  The new arrays are zeroed, the old contents are
 * copied over, and the old arrays are freed.
 *
 * Returns 0 on success, -1 if an allocation failed.  On failure the
 * previously installed array(s) are left untouched and still valid.
 */
int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array;
	uint32_t new_size;

	/* "needed" is in bits; round up to bytes and add a growth increment. */
	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
	if (new_array == NULL) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
		    new_size);
		return (-1);
	}
	memset(new_array, 0, new_size);
	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array;
	asoc->mapping_array_size = new_size;
	if (asoc->peer_supports_nr_sack) {
		/* Keep the NR-SACK mapping array in step with the main one. */
		new_size = asoc->nr_mapping_array_size + ((needed + 7) / 8 + SCTP_NR_MAPPING_ARRAY_INCR);
		SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
		if (new_array == NULL) {
			/*
			 * can't get more, forget it.  NOTE(review): the main
			 * mapping array was already grown above and is kept.
			 */
			SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
			    new_size);
			return (-1);
		}
		memset(new_array, 0, new_size);
		memcpy(new_array, asoc->nr_mapping_array, asoc->nr_mapping_array_size);
		SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
		asoc->nr_mapping_array = new_array;
		asoc->nr_mapping_array_size = new_size;
	}
	return (0);
}


#if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Run one iterator to completion: walk every endpoint whose pcb
 * flags/features match the iterator's filter and, within each, every
 * association matching asoc_state, invoking the registered callbacks
 * (function_inp, function_assoc, function_inp_end, function_atend).
 * Drops and re-takes the INP/TCB locks every SCTP_ITERATOR_MAX_AT_ONCE
 * associations so other threads can make progress.  Frees "it" when the
 * iteration is complete.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/* Downgrade: only read access is needed to walk the assoc list. */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			/* Hold a ref on the inp while all locks are released. */
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

/*
 * Drain the global iterator work queue, running each queued iterator
 * via sctp_iterator_work().
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it = NULL;

	/* This function is called with the WQ lock in place */

	SCTP_BASE_INFO(iterator_running) = 1;
again:
	it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
	while (it) {
		/* now lets work on this one */
		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
		/* Drop the WQ lock while the (long-running) iterator executes. */
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		sctp_iterator_work(it);
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
		it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
	}
	/* Work may have been queued while the lock was dropped; re-check. */
	if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
		goto again;
	}
	SCTP_BASE_INFO(iterator_running) = 0;
	return;
}

#endif


/*
 * Move every pending entry off the global address work queue (filled by
 * the rtsock address-change callbacks) onto a freshly allocated
 * sctp_asconf_iterator and kick off an iterator over all bound-all
 * endpoints to process them.  If no memory is available, re-arm the
 * ADDR_WQ timer and try again later.
 */
static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;
	SCTP_IPI_ITERATOR_WQ_LOCK();
	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
	while (wi != NULL) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
	}
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
	if (asc->cnt == 0) {
		/* Nothing was queued after all; nothing to iterate over. */
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL,	/* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
	}
}

/*
 * NOTE(review): file-scope scratch variables used by the SEND case in
 * sctp_timeout_handler() below; they are written there before being read.
 */
int retcode = 0;
int cur_oerr = 0;

/*
 * The expiry handler for all SCTP timer types.  "t" is the
 * struct sctp_timer that fired; its ep/tcb/net fields identify the
 * endpoint, association, and destination the timer belongs to.  After
 * validating the timer and taking the needed references/locks, the
 * per-type action is dispatched via the switch below.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct
	sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;
	struct sctp_iterator *it = NULL;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
		/* For iterator timers "ep" actually carries the iterator. */
		it = (struct sctp_iterator *)inp;
		inp = NULL;
	}
	/* Remember the type: tmr may be freed before the final SCTPDBG. */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association while we check its state. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* Ref taken above keeps stcb alive across this lock acquire. */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		SCTP_STAT_INCR(sctps_timoiterator);
		sctp_iterator_timer(it);
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timeout. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timeout. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			int abort_flag;

			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
				sctp_sack_check(stcb, 0, 0, &abort_flag);

			/*
			 * EY if nr_sacks used then send an nr-sack , a sack
			 * otherwise
			 */
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
				sctp_send_nr_sack(stcb);
			else
				sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf == 0) {
				/*
				 * NOTE(review): lnet is NULL here (the
				 * FOREACH above ran to completion) — confirm
				 * sctp_heartbeat_timer accepts a NULL net.
				 */
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* Socket lock must be taken before the TCB lock is re-taken. */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_DIRECTLY_NOCMPSET);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
}

/*
 * Arm the timer of the given type for the given endpoint/association/
 * destination.  The switch below selects which sctp_timer to use and
 * computes the timeout (to_ticks), typically derived from the net's RTO
 * or an endpoint default.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int to_ticks;
	struct sctp_timer *tmr;

	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	to_ticks = 0;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			struct sctp_iterator *it;

			/* For iterator timers "inp" actually carries the iterator. */
			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
			to_ticks = SCTP_ITERATOR_TICKS;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
1968 */ 1969 if ((stcb == NULL) || (net == NULL)) { 1970 return; 1971 } 1972 tmr = &net->rxt_timer; 1973 if (net->RTO == 0) { 1974 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1975 } else { 1976 to_ticks = MSEC_TO_TICKS(net->RTO); 1977 } 1978 break; 1979 case SCTP_TIMER_TYPE_RECV: 1980 /* 1981 * Here we use the Delayed-Ack timer value from the inp 1982 * ususually about 200ms. 1983 */ 1984 if (stcb == NULL) { 1985 return; 1986 } 1987 tmr = &stcb->asoc.dack_timer; 1988 to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack); 1989 break; 1990 case SCTP_TIMER_TYPE_SHUTDOWN: 1991 /* Here we use the RTO of the destination. */ 1992 if ((stcb == NULL) || (net == NULL)) { 1993 return; 1994 } 1995 if (net->RTO == 0) { 1996 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1997 } else { 1998 to_ticks = MSEC_TO_TICKS(net->RTO); 1999 } 2000 tmr = &net->rxt_timer; 2001 break; 2002 case SCTP_TIMER_TYPE_HEARTBEAT: 2003 /* 2004 * the net is used here so that we can add in the RTO. Even 2005 * though we use a different timer. We also add the HB timer 2006 * PLUS a random jitter. 
2007 */ 2008 if ((inp == NULL) || (stcb == NULL)) { 2009 return; 2010 } else { 2011 uint32_t rndval; 2012 uint8_t this_random; 2013 int cnt_of_unconf = 0; 2014 struct sctp_nets *lnet; 2015 2016 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 2017 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) && 2018 (lnet->dest_state & SCTP_ADDR_REACHABLE)) { 2019 cnt_of_unconf++; 2020 } 2021 } 2022 if (cnt_of_unconf) { 2023 net = lnet = NULL; 2024 (void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf); 2025 } 2026 if (stcb->asoc.hb_random_idx > 3) { 2027 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2028 memcpy(stcb->asoc.hb_random_values, &rndval, 2029 sizeof(stcb->asoc.hb_random_values)); 2030 stcb->asoc.hb_random_idx = 0; 2031 } 2032 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 2033 stcb->asoc.hb_random_idx++; 2034 stcb->asoc.hb_ect_randombit = 0; 2035 /* 2036 * this_random will be 0 - 256 ms RTO is in ms. 2037 */ 2038 if ((stcb->asoc.hb_is_disabled) && 2039 (cnt_of_unconf == 0)) { 2040 return; 2041 } 2042 if (net) { 2043 int delay; 2044 2045 delay = stcb->asoc.heart_beat_delay; 2046 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 2047 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) && 2048 ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) && 2049 (lnet->dest_state & SCTP_ADDR_REACHABLE)) { 2050 delay = 0; 2051 } 2052 } 2053 if (net->RTO == 0) { 2054 /* Never been checked */ 2055 to_ticks = this_random + stcb->asoc.initial_rto + delay; 2056 } else { 2057 /* set rto_val to the ms */ 2058 to_ticks = delay + net->RTO + this_random; 2059 } 2060 } else { 2061 if (cnt_of_unconf) { 2062 to_ticks = this_random + stcb->asoc.initial_rto; 2063 } else { 2064 to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto; 2065 } 2066 } 2067 /* 2068 * Now we must convert the to_ticks that are now in 2069 * ms to ticks. 
2070 */ 2071 to_ticks = MSEC_TO_TICKS(to_ticks); 2072 tmr = &stcb->asoc.hb_timer; 2073 } 2074 break; 2075 case SCTP_TIMER_TYPE_COOKIE: 2076 /* 2077 * Here we can use the RTO timer from the network since one 2078 * RTT was compelete. If a retran happened then we will be 2079 * using the RTO initial value. 2080 */ 2081 if ((stcb == NULL) || (net == NULL)) { 2082 return; 2083 } 2084 if (net->RTO == 0) { 2085 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2086 } else { 2087 to_ticks = MSEC_TO_TICKS(net->RTO); 2088 } 2089 tmr = &net->rxt_timer; 2090 break; 2091 case SCTP_TIMER_TYPE_NEWCOOKIE: 2092 /* 2093 * nothing needed but the endpoint here ususually about 60 2094 * minutes. 2095 */ 2096 if (inp == NULL) { 2097 return; 2098 } 2099 tmr = &inp->sctp_ep.signature_change; 2100 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2101 break; 2102 case SCTP_TIMER_TYPE_ASOCKILL: 2103 if (stcb == NULL) { 2104 return; 2105 } 2106 tmr = &stcb->asoc.strreset_timer; 2107 to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT); 2108 break; 2109 case SCTP_TIMER_TYPE_INPKILL: 2110 /* 2111 * The inp is setup to die. We re-use the signature_chage 2112 * timer since that has stopped and we are in the GONE 2113 * state. 2114 */ 2115 if (inp == NULL) { 2116 return; 2117 } 2118 tmr = &inp->sctp_ep.signature_change; 2119 to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT); 2120 break; 2121 case SCTP_TIMER_TYPE_PATHMTURAISE: 2122 /* 2123 * Here we use the value found in the EP for PMTU ususually 2124 * about 10 minutes. 
2125 */ 2126 if ((stcb == NULL) || (inp == NULL)) { 2127 return; 2128 } 2129 if (net == NULL) { 2130 return; 2131 } 2132 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2133 tmr = &net->pmtu_timer; 2134 break; 2135 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2136 /* Here we use the RTO of the destination */ 2137 if ((stcb == NULL) || (net == NULL)) { 2138 return; 2139 } 2140 if (net->RTO == 0) { 2141 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2142 } else { 2143 to_ticks = MSEC_TO_TICKS(net->RTO); 2144 } 2145 tmr = &net->rxt_timer; 2146 break; 2147 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2148 /* 2149 * Here we use the endpoints shutdown guard timer usually 2150 * about 3 minutes. 2151 */ 2152 if ((inp == NULL) || (stcb == NULL)) { 2153 return; 2154 } 2155 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2156 tmr = &stcb->asoc.shut_guard_timer; 2157 break; 2158 case SCTP_TIMER_TYPE_STRRESET: 2159 /* 2160 * Here the timer comes from the stcb but its value is from 2161 * the net's RTO. 2162 */ 2163 if ((stcb == NULL) || (net == NULL)) { 2164 return; 2165 } 2166 if (net->RTO == 0) { 2167 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2168 } else { 2169 to_ticks = MSEC_TO_TICKS(net->RTO); 2170 } 2171 tmr = &stcb->asoc.strreset_timer; 2172 break; 2173 2174 case SCTP_TIMER_TYPE_EARLYFR: 2175 { 2176 unsigned int msec; 2177 2178 if ((stcb == NULL) || (net == NULL)) { 2179 return; 2180 } 2181 if (net->flight_size > net->cwnd) { 2182 /* no need to start */ 2183 return; 2184 } 2185 SCTP_STAT_INCR(sctps_earlyfrstart); 2186 if (net->lastsa == 0) { 2187 /* Hmm no rtt estimate yet? 
*/ 2188 msec = stcb->asoc.initial_rto >> 2; 2189 } else { 2190 msec = ((net->lastsa >> 2) + net->lastsv) >> 1; 2191 } 2192 if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) { 2193 msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec); 2194 if (msec < SCTP_MINFR_MSEC_FLOOR) { 2195 msec = SCTP_MINFR_MSEC_FLOOR; 2196 } 2197 } 2198 to_ticks = MSEC_TO_TICKS(msec); 2199 tmr = &net->fr_timer; 2200 } 2201 break; 2202 case SCTP_TIMER_TYPE_ASCONF: 2203 /* 2204 * Here the timer comes from the stcb but its value is from 2205 * the net's RTO. 2206 */ 2207 if ((stcb == NULL) || (net == NULL)) { 2208 return; 2209 } 2210 if (net->RTO == 0) { 2211 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2212 } else { 2213 to_ticks = MSEC_TO_TICKS(net->RTO); 2214 } 2215 tmr = &stcb->asoc.asconf_timer; 2216 break; 2217 case SCTP_TIMER_TYPE_PRIM_DELETED: 2218 if ((stcb == NULL) || (net != NULL)) { 2219 return; 2220 } 2221 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2222 tmr = &stcb->asoc.delete_prim_timer; 2223 break; 2224 case SCTP_TIMER_TYPE_AUTOCLOSE: 2225 if (stcb == NULL) { 2226 return; 2227 } 2228 if (stcb->asoc.sctp_autoclose_ticks == 0) { 2229 /* 2230 * Really an error since stcb is NOT set to 2231 * autoclose 2232 */ 2233 return; 2234 } 2235 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2236 tmr = &stcb->asoc.autoclose_timer; 2237 break; 2238 default: 2239 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n", 2240 __FUNCTION__, t_type); 2241 return; 2242 break; 2243 }; 2244 if ((to_ticks <= 0) || (tmr == NULL)) { 2245 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n", 2246 __FUNCTION__, t_type, to_ticks, tmr); 2247 return; 2248 } 2249 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2250 /* 2251 * we do NOT allow you to have it already running. 
if it is 2252 * we leave the current one up unchanged 2253 */ 2254 return; 2255 } 2256 /* At this point we can proceed */ 2257 if (t_type == SCTP_TIMER_TYPE_SEND) { 2258 stcb->asoc.num_send_timers_up++; 2259 } 2260 tmr->stopped_from = 0; 2261 tmr->type = t_type; 2262 tmr->ep = (void *)inp; 2263 tmr->tcb = (void *)stcb; 2264 tmr->net = (void *)net; 2265 tmr->self = (void *)tmr; 2266 tmr->ticks = sctp_get_tick_count(); 2267 (void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr); 2268 return; 2269 } 2270 2271 void 2272 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2273 struct sctp_nets *net, uint32_t from) 2274 { 2275 struct sctp_timer *tmr; 2276 2277 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && 2278 (inp == NULL)) 2279 return; 2280 2281 tmr = NULL; 2282 if (stcb) { 2283 SCTP_TCB_LOCK_ASSERT(stcb); 2284 } 2285 switch (t_type) { 2286 case SCTP_TIMER_TYPE_ZERO_COPY: 2287 tmr = &inp->sctp_ep.zero_copy_timer; 2288 break; 2289 case SCTP_TIMER_TYPE_ZCOPY_SENDQ: 2290 tmr = &inp->sctp_ep.zero_copy_sendq_timer; 2291 break; 2292 case SCTP_TIMER_TYPE_ADDR_WQ: 2293 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2294 break; 2295 case SCTP_TIMER_TYPE_EARLYFR: 2296 if ((stcb == NULL) || (net == NULL)) { 2297 return; 2298 } 2299 tmr = &net->fr_timer; 2300 SCTP_STAT_INCR(sctps_earlyfrstop); 2301 break; 2302 case SCTP_TIMER_TYPE_ITERATOR: 2303 { 2304 struct sctp_iterator *it; 2305 2306 it = (struct sctp_iterator *)inp; 2307 tmr = &it->tmr; 2308 } 2309 break; 2310 case SCTP_TIMER_TYPE_SEND: 2311 if ((stcb == NULL) || (net == NULL)) { 2312 return; 2313 } 2314 tmr = &net->rxt_timer; 2315 break; 2316 case SCTP_TIMER_TYPE_INIT: 2317 if ((stcb == NULL) || (net == NULL)) { 2318 return; 2319 } 2320 tmr = &net->rxt_timer; 2321 break; 2322 case SCTP_TIMER_TYPE_RECV: 2323 if (stcb == NULL) { 2324 return; 2325 } 2326 tmr = &stcb->asoc.dack_timer; 2327 break; 2328 case SCTP_TIMER_TYPE_SHUTDOWN: 2329 if ((stcb == NULL) || (net == NULL)) { 2330 return; 2331 } 
2332 tmr = &net->rxt_timer; 2333 break; 2334 case SCTP_TIMER_TYPE_HEARTBEAT: 2335 if (stcb == NULL) { 2336 return; 2337 } 2338 tmr = &stcb->asoc.hb_timer; 2339 break; 2340 case SCTP_TIMER_TYPE_COOKIE: 2341 if ((stcb == NULL) || (net == NULL)) { 2342 return; 2343 } 2344 tmr = &net->rxt_timer; 2345 break; 2346 case SCTP_TIMER_TYPE_NEWCOOKIE: 2347 /* nothing needed but the endpoint here */ 2348 tmr = &inp->sctp_ep.signature_change; 2349 /* 2350 * We re-use the newcookie timer for the INP kill timer. We 2351 * must assure that we do not kill it by accident. 2352 */ 2353 break; 2354 case SCTP_TIMER_TYPE_ASOCKILL: 2355 /* 2356 * Stop the asoc kill timer. 2357 */ 2358 if (stcb == NULL) { 2359 return; 2360 } 2361 tmr = &stcb->asoc.strreset_timer; 2362 break; 2363 2364 case SCTP_TIMER_TYPE_INPKILL: 2365 /* 2366 * The inp is setup to die. We re-use the signature_chage 2367 * timer since that has stopped and we are in the GONE 2368 * state. 2369 */ 2370 tmr = &inp->sctp_ep.signature_change; 2371 break; 2372 case SCTP_TIMER_TYPE_PATHMTURAISE: 2373 if ((stcb == NULL) || (net == NULL)) { 2374 return; 2375 } 2376 tmr = &net->pmtu_timer; 2377 break; 2378 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2379 if ((stcb == NULL) || (net == NULL)) { 2380 return; 2381 } 2382 tmr = &net->rxt_timer; 2383 break; 2384 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2385 if (stcb == NULL) { 2386 return; 2387 } 2388 tmr = &stcb->asoc.shut_guard_timer; 2389 break; 2390 case SCTP_TIMER_TYPE_STRRESET: 2391 if (stcb == NULL) { 2392 return; 2393 } 2394 tmr = &stcb->asoc.strreset_timer; 2395 break; 2396 case SCTP_TIMER_TYPE_ASCONF: 2397 if (stcb == NULL) { 2398 return; 2399 } 2400 tmr = &stcb->asoc.asconf_timer; 2401 break; 2402 case SCTP_TIMER_TYPE_PRIM_DELETED: 2403 if (stcb == NULL) { 2404 return; 2405 } 2406 tmr = &stcb->asoc.delete_prim_timer; 2407 break; 2408 case SCTP_TIMER_TYPE_AUTOCLOSE: 2409 if (stcb == NULL) { 2410 return; 2411 } 2412 tmr = &stcb->asoc.autoclose_timer; 2413 break; 2414 default: 2415 
SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n", 2416 __FUNCTION__, t_type); 2417 break; 2418 }; 2419 if (tmr == NULL) { 2420 return; 2421 } 2422 if ((tmr->type != t_type) && tmr->type) { 2423 /* 2424 * Ok we have a timer that is under joint use. Cookie timer 2425 * per chance with the SEND timer. We therefore are NOT 2426 * running the timer that the caller wants stopped. So just 2427 * return. 2428 */ 2429 return; 2430 } 2431 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2432 stcb->asoc.num_send_timers_up--; 2433 if (stcb->asoc.num_send_timers_up < 0) { 2434 stcb->asoc.num_send_timers_up = 0; 2435 } 2436 } 2437 tmr->self = NULL; 2438 tmr->stopped_from = from; 2439 (void)SCTP_OS_TIMER_STOP(&tmr->timer); 2440 return; 2441 } 2442 2443 uint32_t 2444 sctp_calculate_len(struct mbuf *m) 2445 { 2446 uint32_t tlen = 0; 2447 struct mbuf *at; 2448 2449 at = m; 2450 while (at) { 2451 tlen += SCTP_BUF_LEN(at); 2452 at = SCTP_BUF_NEXT(at); 2453 } 2454 return (tlen); 2455 } 2456 2457 void 2458 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2459 struct sctp_association *asoc, uint32_t mtu) 2460 { 2461 /* 2462 * Reset the P-MTU size on this association, this involves changing 2463 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2464 * allow the DF flag to be cleared. 
*/
	struct sctp_tmit_chunk *chk;
	unsigned int eff_mtu, ovh;

#ifdef SCTP_PRINT_FOR_B_AND_M
	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
	    inp, asoc, mtu);
#endif
	asoc->smallest_mtu = mtu;
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ovh = SCTP_MIN_OVERHEAD;
	} else {
		ovh = SCTP_MIN_V4_OVERHEAD;
	}
	/* Effective per-packet payload once IP/SCTP overhead is removed. */
	eff_mtu = mtu - ovh;
	/*
	 * Flag every queued chunk that no longer fits into one packet so
	 * the output path is allowed to fragment it.
	 */
	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {

		if (chk->send_size > eff_mtu) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->send_size > eff_mtu) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
}


/*
 * given an association and starting time of the current RTT period return
 * RTO in number of msecs net should point to the current network
 */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;	/* measured RTT in ms */
	int o_calctime;		/* unmodified copy of the measured RTT */
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 (told may be misaligned) */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value (now - *old) in milliseconds */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/*
			 * impossible .. garbage in nothing out
			 * NOTE(review): this arm is unreachable - the three
			 * comparisons above (>, <, ==) are exhaustive.
			 */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? Skip the SRTT/RTTVAR update entirely. */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/* Record the raw RTT; on the garbage paths above net->rtt is untouched. */
	net->rtt = o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR (scaled integer form) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}

/*
 * return a pointer to a contiguous piece of data from the given mbuf chain
 * starting at 'off' for 'len' bytes.
If the desired piece spans more than 2627 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 2628 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 2629 */ 2630 caddr_t 2631 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr) 2632 { 2633 uint32_t count; 2634 uint8_t *ptr; 2635 2636 ptr = in_ptr; 2637 if ((off < 0) || (len <= 0)) 2638 return (NULL); 2639 2640 /* find the desired start location */ 2641 while ((m != NULL) && (off > 0)) { 2642 if (off < SCTP_BUF_LEN(m)) 2643 break; 2644 off -= SCTP_BUF_LEN(m); 2645 m = SCTP_BUF_NEXT(m); 2646 } 2647 if (m == NULL) 2648 return (NULL); 2649 2650 /* is the current mbuf large enough (eg. contiguous)? */ 2651 if ((SCTP_BUF_LEN(m) - off) >= len) { 2652 return (mtod(m, caddr_t)+off); 2653 } else { 2654 /* else, it spans more than one mbuf, so save a temp copy... */ 2655 while ((m != NULL) && (len > 0)) { 2656 count = min(SCTP_BUF_LEN(m) - off, len); 2657 bcopy(mtod(m, caddr_t)+off, ptr, count); 2658 len -= count; 2659 ptr += count; 2660 off = 0; 2661 m = SCTP_BUF_NEXT(m); 2662 } 2663 if ((m == NULL) && (len > 0)) 2664 return (NULL); 2665 else 2666 return ((caddr_t)in_ptr); 2667 } 2668 } 2669 2670 2671 2672 struct sctp_paramhdr * 2673 sctp_get_next_param(struct mbuf *m, 2674 int offset, 2675 struct sctp_paramhdr *pull, 2676 int pull_limit) 2677 { 2678 /* This just provides a typed signature to Peter's Pull routine */ 2679 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 2680 (uint8_t *) pull)); 2681 } 2682 2683 2684 int 2685 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 2686 { 2687 /* 2688 * add padlen bytes of 0 filled padding to the end of the mbuf. If 2689 * padlen is > 3 this routine will fail. 2690 */ 2691 uint8_t *dp; 2692 int i; 2693 2694 if (padlen > 3) { 2695 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 2696 return (ENOBUFS); 2697 } 2698 if (padlen <= M_TRAILINGSPACE(m)) { 2699 /* 2700 * The easy way. 
We hope the majority of the time we hit 2701 * here :) 2702 */ 2703 dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 2704 SCTP_BUF_LEN(m) += padlen; 2705 } else { 2706 /* Hard way we must grow the mbuf */ 2707 struct mbuf *tmp; 2708 2709 tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA); 2710 if (tmp == NULL) { 2711 /* Out of space GAK! we are in big trouble. */ 2712 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 2713 return (ENOSPC); 2714 } 2715 /* setup and insert in middle */ 2716 SCTP_BUF_LEN(tmp) = padlen; 2717 SCTP_BUF_NEXT(tmp) = NULL; 2718 SCTP_BUF_NEXT(m) = tmp; 2719 dp = mtod(tmp, uint8_t *); 2720 } 2721 /* zero out the pad */ 2722 for (i = 0; i < padlen; i++) { 2723 *dp = 0; 2724 dp++; 2725 } 2726 return (0); 2727 } 2728 2729 int 2730 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 2731 { 2732 /* find the last mbuf in chain and pad it */ 2733 struct mbuf *m_at; 2734 2735 m_at = m; 2736 if (last_mbuf) { 2737 return (sctp_add_pad_tombuf(last_mbuf, padval)); 2738 } else { 2739 while (m_at) { 2740 if (SCTP_BUF_NEXT(m_at) == NULL) { 2741 return (sctp_add_pad_tombuf(m_at, padval)); 2742 } 2743 m_at = SCTP_BUF_NEXT(m_at); 2744 } 2745 } 2746 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 2747 return (EFAULT); 2748 } 2749 2750 int sctp_asoc_change_wake = 0; 2751 2752 static void 2753 sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb, 2754 uint32_t error, void *data, int so_locked 2755 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 2756 SCTP_UNUSED 2757 #endif 2758 ) 2759 { 2760 struct mbuf *m_notify; 2761 struct sctp_assoc_change *sac; 2762 struct sctp_queued_to_read *control; 2763 2764 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2765 struct socket *so; 2766 2767 #endif 2768 2769 /* 2770 * For TCP model AND UDP connected sockets we will send an error up 2771 * when an ABORT comes in. 
*/
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		/*
		 * Map the failure onto a socket error: refused if we never
		 * got past COOKIE_WAIT, reset otherwise.
		 */
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* hold a ref across the lock juggle so stcb survives */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	/* Build the SCTP_ASSOC_CHANGE notification in a fresh mbuf. */
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}

/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification (address 'sa' moved to
 * 'state' with 'error') onto the socket's read queue, if the user
 * enabled SCTP_PCB_FLAGS_RECVPADDREVNT.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	switch (sa->sa_family) {
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}


/*
 * Queue an SCTP_SEND_FAILED notification for a chunk that was (or was
 * never) put on the wire; the user's data is handed back attached to the
 * notification.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event
not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* reported length: header + user data, minus the DATA chunk header */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}


/*
 * Same as sctp_notify_send_failed() but for data still sitting on the
 * stream queue (a stream_queue_pending entry) that never became a chunk.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* no chunk header to subtract here - sp->data is raw user data */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}



/*
 * Queue an SCTP_ADAPTATION_INDICATION notification carrying the peer's
 * adaptation-layer indication, if the user enabled the event.
 */
static void
sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
    uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_adaptation_event *sai;
	struct sctp_queued_to_read *control;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
		/* event not enabled */
		return;
	}
	/*
	 * NOTE(review): allocation is sized with the deprecated spelling
	 * 'struct sctp_adaption_event' while the buffer is filled as
	 * 'struct sctp_adaptation_event' - verify both declarations share
	 * an identical layout/size.
	 */
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	sai = mtod(m_notify, struct sctp_adaptation_event *);
	sai->sai_type = SCTP_ADAPTATION_INDICATION;
	sai->sai_flags = 0;
	sai->sai_length = sizeof(struct sctp_adaptation_event);
	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
	sai->sai_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb,
stcb->asoc.primary_destination, 3133 0, 0, 0, 0, 0, 0, 3134 m_notify); 3135 if (control == NULL) { 3136 /* no memory */ 3137 sctp_m_freem(m_notify); 3138 return; 3139 } 3140 control->length = SCTP_BUF_LEN(m_notify); 3141 control->spec_flags = M_NOTIFICATION; 3142 /* not that we need this */ 3143 control->tail_mbuf = m_notify; 3144 sctp_add_to_readq(stcb->sctp_ep, stcb, 3145 control, 3146 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3147 } 3148 3149 /* This always must be called with the read-queue LOCKED in the INP */ 3150 static void 3151 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3152 uint32_t val, int so_locked 3153 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3154 SCTP_UNUSED 3155 #endif 3156 ) 3157 { 3158 struct mbuf *m_notify; 3159 struct sctp_pdapi_event *pdapi; 3160 struct sctp_queued_to_read *control; 3161 struct sockbuf *sb; 3162 3163 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3164 /* event not enabled */ 3165 return; 3166 } 3167 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA); 3168 if (m_notify == NULL) 3169 /* no space left */ 3170 return; 3171 SCTP_BUF_LEN(m_notify) = 0; 3172 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3173 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3174 pdapi->pdapi_flags = 0; 3175 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3176 pdapi->pdapi_indication = error; 3177 pdapi->pdapi_stream = (val >> 16); 3178 pdapi->pdapi_seq = (val & 0x0000ffff); 3179 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3180 3181 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3182 SCTP_BUF_NEXT(m_notify) = NULL; 3183 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3184 0, 0, 0, 0, 0, 0, 3185 m_notify); 3186 if (control == NULL) { 3187 /* no memory */ 3188 sctp_m_freem(m_notify); 3189 return; 3190 } 3191 control->spec_flags = M_NOTIFICATION; 3192 
control->length = SCTP_BUF_LEN(m_notify); 3193 /* not that we need this */ 3194 control->tail_mbuf = m_notify; 3195 control->held_length = 0; 3196 control->length = 0; 3197 sb = &stcb->sctp_socket->so_rcv; 3198 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3199 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3200 } 3201 sctp_sballoc(stcb, sb, m_notify); 3202 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3203 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3204 } 3205 atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify)); 3206 control->end_added = 1; 3207 if (stcb->asoc.control_pdapi) 3208 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3209 else { 3210 /* we really should not see this case */ 3211 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3212 } 3213 if (stcb->sctp_ep && stcb->sctp_socket) { 3214 /* This should always be the case */ 3215 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3216 struct socket *so; 3217 3218 so = SCTP_INP_SO(stcb->sctp_ep); 3219 if (!so_locked) { 3220 atomic_add_int(&stcb->asoc.refcnt, 1); 3221 SCTP_TCB_UNLOCK(stcb); 3222 SCTP_SOCKET_LOCK(so, 1); 3223 SCTP_TCB_LOCK(stcb); 3224 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3225 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3226 SCTP_SOCKET_UNLOCK(so, 1); 3227 return; 3228 } 3229 } 3230 #endif 3231 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3232 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3233 if (!so_locked) { 3234 SCTP_SOCKET_UNLOCK(so, 1); 3235 } 3236 #endif 3237 } 3238 } 3239 3240 static void 3241 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3242 { 3243 struct mbuf *m_notify; 3244 struct sctp_shutdown_event *sse; 3245 struct sctp_queued_to_read *control; 3246 3247 /* 3248 * For TCP model AND UDP connected sockets we will send an error up 3249 * when an 
SHUTDOWN completes 3250 */ 3251 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3252 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3253 /* mark socket closed for read/write and wakeup! */ 3254 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3255 struct socket *so; 3256 3257 so = SCTP_INP_SO(stcb->sctp_ep); 3258 atomic_add_int(&stcb->asoc.refcnt, 1); 3259 SCTP_TCB_UNLOCK(stcb); 3260 SCTP_SOCKET_LOCK(so, 1); 3261 SCTP_TCB_LOCK(stcb); 3262 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3263 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3264 SCTP_SOCKET_UNLOCK(so, 1); 3265 return; 3266 } 3267 #endif 3268 socantsendmore(stcb->sctp_socket); 3269 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3270 SCTP_SOCKET_UNLOCK(so, 1); 3271 #endif 3272 } 3273 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3274 /* event not enabled */ 3275 return; 3276 } 3277 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA); 3278 if (m_notify == NULL) 3279 /* no space left */ 3280 return; 3281 sse = mtod(m_notify, struct sctp_shutdown_event *); 3282 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3283 sse->sse_flags = 0; 3284 sse->sse_length = sizeof(struct sctp_shutdown_event); 3285 sse->sse_assoc_id = sctp_get_associd(stcb); 3286 3287 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3288 SCTP_BUF_NEXT(m_notify) = NULL; 3289 3290 /* append to socket */ 3291 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3292 0, 0, 0, 0, 0, 0, 3293 m_notify); 3294 if (control == NULL) { 3295 /* no memory */ 3296 sctp_m_freem(m_notify); 3297 return; 3298 } 3299 control->spec_flags = M_NOTIFICATION; 3300 control->length = SCTP_BUF_LEN(m_notify); 3301 /* not that we need this */ 3302 control->tail_mbuf = m_notify; 3303 sctp_add_to_readq(stcb->sctp_ep, stcb, 3304 control, 3305 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3306 } 3307 3308 static 
void 3309 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3310 int so_locked 3311 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3312 SCTP_UNUSED 3313 #endif 3314 ) 3315 { 3316 struct mbuf *m_notify; 3317 struct sctp_sender_dry_event *event; 3318 struct sctp_queued_to_read *control; 3319 3320 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) { 3321 /* event not enabled */ 3322 return; 3323 } 3324 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA); 3325 if (m_notify == NULL) { 3326 /* no space left */ 3327 return; 3328 } 3329 SCTP_BUF_LEN(m_notify) = 0; 3330 event = mtod(m_notify, struct sctp_sender_dry_event *); 3331 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3332 event->sender_dry_flags = 0; 3333 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3334 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3335 3336 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3337 SCTP_BUF_NEXT(m_notify) = NULL; 3338 3339 /* append to socket */ 3340 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3341 0, 0, 0, 0, 0, 0, m_notify); 3342 if (control == NULL) { 3343 /* no memory */ 3344 sctp_m_freem(m_notify); 3345 return; 3346 } 3347 control->length = SCTP_BUF_LEN(m_notify); 3348 control->spec_flags = M_NOTIFICATION; 3349 /* not that we need this */ 3350 control->tail_mbuf = m_notify; 3351 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3352 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3353 } 3354 3355 3356 static void 3357 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag) 3358 { 3359 struct mbuf *m_notify; 3360 struct sctp_queued_to_read *control; 3361 struct sctp_stream_reset_event *strreset; 3362 int len; 3363 3364 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) { 3365 /* event not enabled */ 3366 return; 3367 } 3368 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, 
M_DONTWAIT, 1, MT_DATA); 3369 if (m_notify == NULL) 3370 /* no space left */ 3371 return; 3372 SCTP_BUF_LEN(m_notify) = 0; 3373 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3374 if (len > M_TRAILINGSPACE(m_notify)) { 3375 /* never enough room */ 3376 sctp_m_freem(m_notify); 3377 return; 3378 } 3379 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3380 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3381 strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag; 3382 strreset->strreset_length = len; 3383 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3384 strreset->strreset_list[0] = number_entries; 3385 3386 SCTP_BUF_LEN(m_notify) = len; 3387 SCTP_BUF_NEXT(m_notify) = NULL; 3388 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3389 /* no space */ 3390 sctp_m_freem(m_notify); 3391 return; 3392 } 3393 /* append to socket */ 3394 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3395 0, 0, 0, 0, 0, 0, 3396 m_notify); 3397 if (control == NULL) { 3398 /* no memory */ 3399 sctp_m_freem(m_notify); 3400 return; 3401 } 3402 control->spec_flags = M_NOTIFICATION; 3403 control->length = SCTP_BUF_LEN(m_notify); 3404 /* not that we need this */ 3405 control->tail_mbuf = m_notify; 3406 sctp_add_to_readq(stcb->sctp_ep, stcb, 3407 control, 3408 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3409 } 3410 3411 3412 static void 3413 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3414 int number_entries, uint16_t * list, int flag) 3415 { 3416 struct mbuf *m_notify; 3417 struct sctp_queued_to_read *control; 3418 struct sctp_stream_reset_event *strreset; 3419 int len; 3420 3421 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) { 3422 /* event not enabled */ 3423 return; 3424 } 3425 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 3426 if (m_notify == NULL) 3427 /* no space left */ 3428 return; 
3429 SCTP_BUF_LEN(m_notify) = 0; 3430 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3431 if (len > M_TRAILINGSPACE(m_notify)) { 3432 /* never enough room */ 3433 sctp_m_freem(m_notify); 3434 return; 3435 } 3436 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3437 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3438 if (number_entries == 0) { 3439 strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS; 3440 } else { 3441 strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST; 3442 } 3443 strreset->strreset_length = len; 3444 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3445 if (number_entries) { 3446 int i; 3447 3448 for (i = 0; i < number_entries; i++) { 3449 strreset->strreset_list[i] = ntohs(list[i]); 3450 } 3451 } 3452 SCTP_BUF_LEN(m_notify) = len; 3453 SCTP_BUF_NEXT(m_notify) = NULL; 3454 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3455 /* no space */ 3456 sctp_m_freem(m_notify); 3457 return; 3458 } 3459 /* append to socket */ 3460 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3461 0, 0, 0, 0, 0, 0, 3462 m_notify); 3463 if (control == NULL) { 3464 /* no memory */ 3465 sctp_m_freem(m_notify); 3466 return; 3467 } 3468 control->spec_flags = M_NOTIFICATION; 3469 control->length = SCTP_BUF_LEN(m_notify); 3470 /* not that we need this */ 3471 control->tail_mbuf = m_notify; 3472 sctp_add_to_readq(stcb->sctp_ep, stcb, 3473 control, 3474 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3475 } 3476 3477 3478 void 3479 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 3480 uint32_t error, void *data, int so_locked 3481 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3482 SCTP_UNUSED 3483 #endif 3484 ) 3485 { 3486 if ((stcb == NULL) || 3487 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3488 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3489 
(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3490 /* If the socket is gone we are out of here */ 3491 return; 3492 } 3493 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 3494 return; 3495 } 3496 if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) || 3497 (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) { 3498 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 3499 (notification == SCTP_NOTIFY_INTERFACE_UP) || 3500 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 3501 /* Don't report these in front states */ 3502 return; 3503 } 3504 } 3505 switch (notification) { 3506 case SCTP_NOTIFY_ASSOC_UP: 3507 if (stcb->asoc.assoc_up_sent == 0) { 3508 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked); 3509 stcb->asoc.assoc_up_sent = 1; 3510 } 3511 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 3512 sctp_notify_adaptation_layer(stcb, error); 3513 } 3514 if (stcb->asoc.peer_supports_auth == 0) { 3515 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 3516 NULL, so_locked); 3517 } 3518 break; 3519 case SCTP_NOTIFY_ASSOC_DOWN: 3520 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked); 3521 break; 3522 case SCTP_NOTIFY_INTERFACE_DOWN: 3523 { 3524 struct sctp_nets *net; 3525 3526 net = (struct sctp_nets *)data; 3527 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 3528 (struct sockaddr *)&net->ro._l_addr, error); 3529 break; 3530 } 3531 case SCTP_NOTIFY_INTERFACE_UP: 3532 { 3533 struct sctp_nets *net; 3534 3535 net = (struct sctp_nets *)data; 3536 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 3537 (struct sockaddr *)&net->ro._l_addr, error); 3538 break; 3539 } 3540 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 3541 { 3542 struct sctp_nets *net; 3543 3544 net = (struct sctp_nets *)data; 3545 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 3546 (struct sockaddr *)&net->ro._l_addr, error); 3547 break; 3548 } 3549 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 3550 
sctp_notify_send_failed2(stcb, error, 3551 (struct sctp_stream_queue_pending *)data, so_locked); 3552 break; 3553 case SCTP_NOTIFY_DG_FAIL: 3554 sctp_notify_send_failed(stcb, error, 3555 (struct sctp_tmit_chunk *)data, so_locked); 3556 break; 3557 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 3558 { 3559 uint32_t val; 3560 3561 val = *((uint32_t *) data); 3562 3563 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 3564 break; 3565 } 3566 case SCTP_NOTIFY_STRDATA_ERR: 3567 break; 3568 case SCTP_NOTIFY_ASSOC_ABORTED: 3569 if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) || 3570 ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) { 3571 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked); 3572 } else { 3573 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked); 3574 } 3575 break; 3576 case SCTP_NOTIFY_PEER_OPENED_STREAM: 3577 break; 3578 case SCTP_NOTIFY_STREAM_OPENED_OK: 3579 break; 3580 case SCTP_NOTIFY_ASSOC_RESTART: 3581 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked); 3582 if (stcb->asoc.peer_supports_auth == 0) { 3583 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 3584 NULL, so_locked); 3585 } 3586 break; 3587 case SCTP_NOTIFY_HB_RESP: 3588 break; 3589 case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK: 3590 sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR); 3591 break; 3592 case SCTP_NOTIFY_STR_RESET_ADD_OK: 3593 sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR); 3594 break; 3595 case SCTP_NOTIFY_STR_RESET_ADD_FAIL: 3596 sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR)); 3597 break; 3598 3599 case SCTP_NOTIFY_STR_RESET_SEND: 3600 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR); 3601 break; 3602 case SCTP_NOTIFY_STR_RESET_RECV: 3603 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR); 
3604 break; 3605 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 3606 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED)); 3607 break; 3608 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 3609 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED)); 3610 break; 3611 case SCTP_NOTIFY_ASCONF_ADD_IP: 3612 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 3613 error); 3614 break; 3615 case SCTP_NOTIFY_ASCONF_DELETE_IP: 3616 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 3617 error); 3618 break; 3619 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 3620 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 3621 error); 3622 break; 3623 case SCTP_NOTIFY_ASCONF_SUCCESS: 3624 break; 3625 case SCTP_NOTIFY_ASCONF_FAILED: 3626 break; 3627 case SCTP_NOTIFY_PEER_SHUTDOWN: 3628 sctp_notify_shutdown_event(stcb); 3629 break; 3630 case SCTP_NOTIFY_AUTH_NEW_KEY: 3631 sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error, 3632 (uint16_t) (uintptr_t) data, 3633 so_locked); 3634 break; 3635 case SCTP_NOTIFY_AUTH_FREE_KEY: 3636 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 3637 (uint16_t) (uintptr_t) data, 3638 so_locked); 3639 break; 3640 case SCTP_NOTIFY_NO_PEER_AUTH: 3641 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 3642 (uint16_t) (uintptr_t) data, 3643 so_locked); 3644 break; 3645 case SCTP_NOTIFY_SENDER_DRY: 3646 sctp_notify_sender_dry_event(stcb, so_locked); 3647 break; 3648 default: 3649 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 3650 __FUNCTION__, notification, notification); 3651 break; 3652 } /* end switch */ 3653 } 3654 3655 void 3656 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked 3657 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3658 SCTP_UNUSED 3659 #endif 3660 ) 3661 { 3662 struct sctp_association *asoc; 3663 struct sctp_stream_out *outs; 3664 struct sctp_tmit_chunk 
*chk; 3665 struct sctp_stream_queue_pending *sp; 3666 int i; 3667 3668 asoc = &stcb->asoc; 3669 3670 if (stcb == NULL) { 3671 return; 3672 } 3673 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3674 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3675 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3676 return; 3677 } 3678 /* now through all the gunk freeing chunks */ 3679 if (holds_lock == 0) { 3680 SCTP_TCB_SEND_LOCK(stcb); 3681 } 3682 /* sent queue SHOULD be empty */ 3683 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3684 chk = TAILQ_FIRST(&asoc->sent_queue); 3685 while (chk) { 3686 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 3687 asoc->sent_queue_cnt--; 3688 if (chk->data != NULL) { 3689 sctp_free_bufspace(stcb, asoc, chk, 1); 3690 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 3691 SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked); 3692 if (chk->data) { 3693 sctp_m_freem(chk->data); 3694 chk->data = NULL; 3695 } 3696 } 3697 sctp_free_a_chunk(stcb, chk); 3698 /* sa_ignore FREED_MEMORY */ 3699 chk = TAILQ_FIRST(&asoc->sent_queue); 3700 } 3701 } 3702 /* pending send queue SHOULD be empty */ 3703 if (!TAILQ_EMPTY(&asoc->send_queue)) { 3704 chk = TAILQ_FIRST(&asoc->send_queue); 3705 while (chk) { 3706 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 3707 asoc->send_queue_cnt--; 3708 if (chk->data != NULL) { 3709 sctp_free_bufspace(stcb, asoc, chk, 1); 3710 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 3711 SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked); 3712 if (chk->data) { 3713 sctp_m_freem(chk->data); 3714 chk->data = NULL; 3715 } 3716 } 3717 sctp_free_a_chunk(stcb, chk); 3718 /* sa_ignore FREED_MEMORY */ 3719 chk = TAILQ_FIRST(&asoc->send_queue); 3720 } 3721 } 3722 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3723 /* For each stream */ 3724 outs = &stcb->asoc.strmout[i]; 3725 /* clean up any sends there */ 3726 stcb->asoc.locked_on_sending = NULL; 3727 sp = TAILQ_FIRST(&outs->outqueue); 3728 while (sp) { 3729 stcb->asoc.stream_queue_cnt--; 3730 
TAILQ_REMOVE(&outs->outqueue, sp, next); 3731 sctp_free_spbufspace(stcb, asoc, sp); 3732 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 3733 SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked); 3734 if (sp->data) { 3735 sctp_m_freem(sp->data); 3736 sp->data = NULL; 3737 } 3738 if (sp->net) 3739 sctp_free_remote_addr(sp->net); 3740 sp->net = NULL; 3741 /* Free the chunk */ 3742 sctp_free_a_strmoq(stcb, sp); 3743 /* sa_ignore FREED_MEMORY */ 3744 sp = TAILQ_FIRST(&outs->outqueue); 3745 } 3746 } 3747 3748 if (holds_lock == 0) { 3749 SCTP_TCB_SEND_UNLOCK(stcb); 3750 } 3751 } 3752 3753 void 3754 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked 3755 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3756 SCTP_UNUSED 3757 #endif 3758 ) 3759 { 3760 3761 if (stcb == NULL) { 3762 return; 3763 } 3764 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3765 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3766 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3767 return; 3768 } 3769 /* Tell them we lost the asoc */ 3770 sctp_report_all_outbound(stcb, 1, so_locked); 3771 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 3772 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 3773 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 3774 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 3775 } 3776 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked); 3777 } 3778 3779 void 3780 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3781 struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err, 3782 uint32_t vrf_id, uint16_t port) 3783 { 3784 uint32_t vtag; 3785 3786 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3787 struct socket *so; 3788 3789 #endif 3790 3791 vtag = 0; 3792 if (stcb != NULL) { 3793 /* We have a TCB to abort, send notification too */ 3794 vtag = stcb->asoc.peer_vtag; 3795 sctp_abort_notification(stcb, 0, 
SCTP_SO_NOT_LOCKED); 3796 /* get the assoc vrf id and table id */ 3797 vrf_id = stcb->asoc.vrf_id; 3798 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED; 3799 } 3800 sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port); 3801 if (stcb != NULL) { 3802 /* Ok, now lets free it */ 3803 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3804 so = SCTP_INP_SO(inp); 3805 atomic_add_int(&stcb->asoc.refcnt, 1); 3806 SCTP_TCB_UNLOCK(stcb); 3807 SCTP_SOCKET_LOCK(so, 1); 3808 SCTP_TCB_LOCK(stcb); 3809 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3810 #endif 3811 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 3812 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3813 SCTP_SOCKET_UNLOCK(so, 1); 3814 #endif 3815 } else { 3816 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3817 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3818 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 3819 SCTP_CALLED_DIRECTLY_NOCMPSET); 3820 } 3821 } 3822 } 3823 } 3824 3825 #ifdef SCTP_ASOCLOG_OF_TSNS 3826 void 3827 sctp_print_out_track_log(struct sctp_tcb *stcb) 3828 { 3829 #ifdef NOSIY_PRINTS 3830 int i; 3831 3832 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 3833 SCTP_PRINTF("IN bound TSN log-aaa\n"); 3834 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 3835 SCTP_PRINTF("None rcvd\n"); 3836 goto none_in; 3837 } 3838 if (stcb->asoc.tsn_in_wrapped) { 3839 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 3840 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 3841 stcb->asoc.in_tsnlog[i].tsn, 3842 stcb->asoc.in_tsnlog[i].strm, 3843 stcb->asoc.in_tsnlog[i].seq, 3844 stcb->asoc.in_tsnlog[i].flgs, 3845 stcb->asoc.in_tsnlog[i].sz); 3846 } 3847 } 3848 if (stcb->asoc.tsn_in_at) { 3849 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 3850 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 3851 stcb->asoc.in_tsnlog[i].tsn, 3852 stcb->asoc.in_tsnlog[i].strm, 3853 stcb->asoc.in_tsnlog[i].seq, 3854 
stcb->asoc.in_tsnlog[i].flgs, 3855 stcb->asoc.in_tsnlog[i].sz); 3856 } 3857 } 3858 none_in: 3859 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 3860 if ((stcb->asoc.tsn_out_at == 0) && 3861 (stcb->asoc.tsn_out_wrapped == 0)) { 3862 SCTP_PRINTF("None sent\n"); 3863 } 3864 if (stcb->asoc.tsn_out_wrapped) { 3865 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 3866 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 3867 stcb->asoc.out_tsnlog[i].tsn, 3868 stcb->asoc.out_tsnlog[i].strm, 3869 stcb->asoc.out_tsnlog[i].seq, 3870 stcb->asoc.out_tsnlog[i].flgs, 3871 stcb->asoc.out_tsnlog[i].sz); 3872 } 3873 } 3874 if (stcb->asoc.tsn_out_at) { 3875 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 3876 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 3877 stcb->asoc.out_tsnlog[i].tsn, 3878 stcb->asoc.out_tsnlog[i].strm, 3879 stcb->asoc.out_tsnlog[i].seq, 3880 stcb->asoc.out_tsnlog[i].flgs, 3881 stcb->asoc.out_tsnlog[i].sz); 3882 } 3883 } 3884 #endif 3885 } 3886 3887 #endif 3888 3889 void 3890 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3891 int error, struct mbuf *op_err, 3892 int so_locked 3893 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3894 SCTP_UNUSED 3895 #endif 3896 ) 3897 { 3898 uint32_t vtag; 3899 3900 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3901 struct socket *so; 3902 3903 #endif 3904 3905 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3906 so = SCTP_INP_SO(inp); 3907 #endif 3908 if (stcb == NULL) { 3909 /* Got to have a TCB */ 3910 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3911 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3912 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 3913 SCTP_CALLED_DIRECTLY_NOCMPSET); 3914 } 3915 } 3916 return; 3917 } else { 3918 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED; 3919 } 3920 vtag = stcb->asoc.peer_vtag; 3921 /* notify the ulp */ 3922 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) 3923 sctp_abort_notification(stcb, error, 
so_locked); 3924 /* notify the peer */ 3925 #if defined(SCTP_PANIC_ON_ABORT) 3926 panic("aborting an association"); 3927 #endif 3928 sctp_send_abort_tcb(stcb, op_err, so_locked); 3929 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 3930 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 3931 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 3932 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 3933 } 3934 /* now free the asoc */ 3935 #ifdef SCTP_ASOCLOG_OF_TSNS 3936 sctp_print_out_track_log(stcb); 3937 #endif 3938 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3939 if (!so_locked) { 3940 atomic_add_int(&stcb->asoc.refcnt, 1); 3941 SCTP_TCB_UNLOCK(stcb); 3942 SCTP_SOCKET_LOCK(so, 1); 3943 SCTP_TCB_LOCK(stcb); 3944 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3945 } 3946 #endif 3947 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 3948 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3949 if (!so_locked) { 3950 SCTP_SOCKET_UNLOCK(so, 1); 3951 } 3952 #endif 3953 } 3954 3955 void 3956 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh, 3957 struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port) 3958 { 3959 struct sctp_chunkhdr *ch, chunk_buf; 3960 unsigned int chk_length; 3961 3962 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 3963 /* Generate a TO address for future reference */ 3964 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 3965 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3966 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 3967 SCTP_CALLED_DIRECTLY_NOCMPSET); 3968 } 3969 } 3970 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3971 sizeof(*ch), (uint8_t *) & chunk_buf); 3972 while (ch != NULL) { 3973 chk_length = ntohs(ch->chunk_length); 3974 if (chk_length < sizeof(*ch)) { 3975 /* break to abort land */ 3976 break; 3977 } 3978 switch (ch->chunk_type) { 3979 case SCTP_COOKIE_ECHO: 3980 /* We hit here only if the assoc is being freed */ 
3981 return; 3982 case SCTP_PACKET_DROPPED: 3983 /* we don't respond to pkt-dropped */ 3984 return; 3985 case SCTP_ABORT_ASSOCIATION: 3986 /* we don't respond with an ABORT to an ABORT */ 3987 return; 3988 case SCTP_SHUTDOWN_COMPLETE: 3989 /* 3990 * we ignore it since we are not waiting for it and 3991 * peer is gone 3992 */ 3993 return; 3994 case SCTP_SHUTDOWN_ACK: 3995 sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port); 3996 return; 3997 default: 3998 break; 3999 } 4000 offset += SCTP_SIZE32(chk_length); 4001 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4002 sizeof(*ch), (uint8_t *) & chunk_buf); 4003 } 4004 sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port); 4005 } 4006 4007 /* 4008 * check the inbound datagram to make sure there is not an abort inside it, 4009 * if there is return 1, else return 0. 4010 */ 4011 int 4012 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill) 4013 { 4014 struct sctp_chunkhdr *ch; 4015 struct sctp_init_chunk *init_chk, chunk_buf; 4016 int offset; 4017 unsigned int chk_length; 4018 4019 offset = iphlen + sizeof(struct sctphdr); 4020 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4021 (uint8_t *) & chunk_buf); 4022 while (ch != NULL) { 4023 chk_length = ntohs(ch->chunk_length); 4024 if (chk_length < sizeof(*ch)) { 4025 /* packet is probably corrupt */ 4026 break; 4027 } 4028 /* we seem to be ok, is it an abort? 
*/ 4029 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4030 /* yep, tell them */ 4031 return (1); 4032 } 4033 if (ch->chunk_type == SCTP_INITIATION) { 4034 /* need to update the Vtag */ 4035 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4036 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf); 4037 if (init_chk != NULL) { 4038 *vtagfill = ntohl(init_chk->init.initiate_tag); 4039 } 4040 } 4041 /* Nope, move to the next chunk */ 4042 offset += SCTP_SIZE32(chk_length); 4043 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4044 sizeof(*ch), (uint8_t *) & chunk_buf); 4045 } 4046 return (0); 4047 } 4048 4049 /* 4050 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4051 * set (i.e. it's 0) so, create this function to compare link local scopes 4052 */ 4053 #ifdef INET6 4054 uint32_t 4055 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4056 { 4057 struct sockaddr_in6 a, b; 4058 4059 /* save copies */ 4060 a = *addr1; 4061 b = *addr2; 4062 4063 if (a.sin6_scope_id == 0) 4064 if (sa6_recoverscope(&a)) { 4065 /* can't get scope, so can't match */ 4066 return (0); 4067 } 4068 if (b.sin6_scope_id == 0) 4069 if (sa6_recoverscope(&b)) { 4070 /* can't get scope, so can't match */ 4071 return (0); 4072 } 4073 if (a.sin6_scope_id != b.sin6_scope_id) 4074 return (0); 4075 4076 return (1); 4077 } 4078 4079 /* 4080 * returns a sockaddr_in6 with embedded scope recovered and removed 4081 */ 4082 struct sockaddr_in6 * 4083 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4084 { 4085 /* check and strip embedded scope junk */ 4086 if (addr->sin6_family == AF_INET6) { 4087 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4088 if (addr->sin6_scope_id == 0) { 4089 *store = *addr; 4090 if (!sa6_recoverscope(store)) { 4091 /* use the recovered scope */ 4092 addr = store; 4093 } 4094 } else { 4095 /* else, return the original "to" addr */ 4096 in6_clearscope(&addr->sin6_addr); 4097 } 4098 } 4099 } 4100 
return (addr); 4101 } 4102 4103 #endif 4104 4105 /* 4106 * are the two addresses the same? currently a "scopeless" check returns: 1 4107 * if same, 0 if not 4108 */ 4109 int 4110 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4111 { 4112 4113 /* must be valid */ 4114 if (sa1 == NULL || sa2 == NULL) 4115 return (0); 4116 4117 /* must be the same family */ 4118 if (sa1->sa_family != sa2->sa_family) 4119 return (0); 4120 4121 switch (sa1->sa_family) { 4122 #ifdef INET6 4123 case AF_INET6: 4124 { 4125 /* IPv6 addresses */ 4126 struct sockaddr_in6 *sin6_1, *sin6_2; 4127 4128 sin6_1 = (struct sockaddr_in6 *)sa1; 4129 sin6_2 = (struct sockaddr_in6 *)sa2; 4130 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4131 sin6_2)); 4132 } 4133 #endif 4134 case AF_INET: 4135 { 4136 /* IPv4 addresses */ 4137 struct sockaddr_in *sin_1, *sin_2; 4138 4139 sin_1 = (struct sockaddr_in *)sa1; 4140 sin_2 = (struct sockaddr_in *)sa2; 4141 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4142 } 4143 default: 4144 /* we don't do these... 
*/ 4145 return (0); 4146 } 4147 } 4148 4149 void 4150 sctp_print_address(struct sockaddr *sa) 4151 { 4152 #ifdef INET6 4153 char ip6buf[INET6_ADDRSTRLEN]; 4154 4155 ip6buf[0] = 0; 4156 #endif 4157 4158 switch (sa->sa_family) { 4159 #ifdef INET6 4160 case AF_INET6: 4161 { 4162 struct sockaddr_in6 *sin6; 4163 4164 sin6 = (struct sockaddr_in6 *)sa; 4165 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4166 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4167 ntohs(sin6->sin6_port), 4168 sin6->sin6_scope_id); 4169 break; 4170 } 4171 #endif 4172 case AF_INET: 4173 { 4174 struct sockaddr_in *sin; 4175 unsigned char *p; 4176 4177 sin = (struct sockaddr_in *)sa; 4178 p = (unsigned char *)&sin->sin_addr; 4179 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4180 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4181 break; 4182 } 4183 default: 4184 SCTP_PRINTF("?\n"); 4185 break; 4186 } 4187 } 4188 4189 void 4190 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh) 4191 { 4192 switch (iph->ip_v) { 4193 case IPVERSION: 4194 { 4195 struct sockaddr_in lsa, fsa; 4196 4197 bzero(&lsa, sizeof(lsa)); 4198 lsa.sin_len = sizeof(lsa); 4199 lsa.sin_family = AF_INET; 4200 lsa.sin_addr = iph->ip_src; 4201 lsa.sin_port = sh->src_port; 4202 bzero(&fsa, sizeof(fsa)); 4203 fsa.sin_len = sizeof(fsa); 4204 fsa.sin_family = AF_INET; 4205 fsa.sin_addr = iph->ip_dst; 4206 fsa.sin_port = sh->dest_port; 4207 SCTP_PRINTF("src: "); 4208 sctp_print_address((struct sockaddr *)&lsa); 4209 SCTP_PRINTF("dest: "); 4210 sctp_print_address((struct sockaddr *)&fsa); 4211 break; 4212 } 4213 #ifdef INET6 4214 case IPV6_VERSION >> 4: 4215 { 4216 struct ip6_hdr *ip6; 4217 struct sockaddr_in6 lsa6, fsa6; 4218 4219 ip6 = (struct ip6_hdr *)iph; 4220 bzero(&lsa6, sizeof(lsa6)); 4221 lsa6.sin6_len = sizeof(lsa6); 4222 lsa6.sin6_family = AF_INET6; 4223 lsa6.sin6_addr = ip6->ip6_src; 4224 lsa6.sin6_port = sh->src_port; 4225 bzero(&fsa6, sizeof(fsa6)); 4226 fsa6.sin6_len = sizeof(fsa6); 4227 fsa6.sin6_family = AF_INET6; 4228 
            fsa6.sin6_addr = ip6->ip6_dst;
            fsa6.sin6_port = sh->dest_port;
            SCTP_PRINTF("src: ");
            sctp_print_address((struct sockaddr *)&lsa6);
            SCTP_PRINTF("dest: ");
            sctp_print_address((struct sockaddr *)&fsa6);
            break;
        }
#endif
    default:
        /* TSNH */
        break;
    }
}

/*
 * Move every queued-to-read control block belonging to stcb from old_inp's
 * read queue to new_inp's (peeloff/accept), debiting the old socket's
 * receive-buffer accounting and crediting the new one's.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
    /*
     * go through our old INP and pull off any control structures that
     * belong to stcb and move them to the new inp.
     */
    struct socket *old_so, *new_so;
    struct sctp_queued_to_read *control, *nctl;
    struct sctp_readhead tmp_queue;
    struct mbuf *m;
    int error = 0;

    old_so = old_inp->sctp_socket;
    new_so = new_inp->sctp_socket;
    TAILQ_INIT(&tmp_queue);
    error = sblock(&old_so->so_rcv, waitflags);
    if (error) {
        /*
         * Gak, can't get sblock, we have a problem. data will be
         * left stranded.. and we don't dare look at it since the
         * other thread may be reading something. Oh well, its a
         * screwed up app that does a peeloff OR a accept while
         * reading from the main socket... actually its only the
         * peeloff() case, since I think read will fail on a
         * listening socket..
         */
        return;
    }
    /* lock the socket buffers */
    SCTP_INP_READ_LOCK(old_inp);
    control = TAILQ_FIRST(&old_inp->read_queue);
    /* Pull off all for our target stcb */
    while (control) {
        nctl = TAILQ_NEXT(control, next);
        if (control->stcb == stcb) {
            /* remove it we want it */
            TAILQ_REMOVE(&old_inp->read_queue, control, next);
            TAILQ_INSERT_TAIL(&tmp_queue, control, next);
            m = control->data;
            /* debit the old socket's receive buffer, mbuf by mbuf */
            while (m) {
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
                    sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
                }
                sctp_sbfree(control, stcb, &old_so->so_rcv, m);
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
                    sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
                }
                m = SCTP_BUF_NEXT(m);
            }
        }
        control = nctl;
    }
    SCTP_INP_READ_UNLOCK(old_inp);
    /* Remove the sb-lock on the old socket */

    sbunlock(&old_so->so_rcv);
    /* Now we move them over to the new socket buffer */
    control = TAILQ_FIRST(&tmp_queue);
    SCTP_INP_READ_LOCK(new_inp);
    while (control) {
        nctl = TAILQ_NEXT(control, next);
        TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
        m = control->data;
        /* credit the new socket's receive buffer */
        while (m) {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
                sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
            }
            sctp_sballoc(stcb, &new_so->so_rcv, m);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
                sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
            }
            m = SCTP_BUF_NEXT(m);
        }
        control = nctl;
    }
    SCTP_INP_READ_UNLOCK(new_inp);
}

/*
 * Queue a complete control block on the endpoint's read queue and charge
 * its mbuf chain to the socket receive buffer so select()/read() see it.
 * end != 0 marks the message complete; inp_read_lock_held tells us whether
 * the caller already owns the INP read lock.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
    /*
     * Here we must place the control on the end of the socket read
     * queue AND increment sb_cc so that select will work properly on
     * read.
     */
    struct mbuf *m, *prev = NULL;

    if (inp == NULL) {
        /* Gak, TSNH!!
*/ 4347 #ifdef INVARIANTS 4348 panic("Gak, inp NULL on add_to_readq"); 4349 #endif 4350 return; 4351 } 4352 if (inp_read_lock_held == 0) 4353 SCTP_INP_READ_LOCK(inp); 4354 if (!(control->spec_flags & M_NOTIFICATION)) { 4355 atomic_add_int(&inp->total_recvs, 1); 4356 if (!control->do_not_ref_stcb) { 4357 atomic_add_int(&stcb->total_recvs, 1); 4358 } 4359 } 4360 m = control->data; 4361 control->held_length = 0; 4362 control->length = 0; 4363 while (m) { 4364 if (SCTP_BUF_LEN(m) == 0) { 4365 /* Skip mbufs with NO length */ 4366 if (prev == NULL) { 4367 /* First one */ 4368 control->data = sctp_m_free(m); 4369 m = control->data; 4370 } else { 4371 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4372 m = SCTP_BUF_NEXT(prev); 4373 } 4374 if (m == NULL) { 4375 control->tail_mbuf = prev;; 4376 } 4377 continue; 4378 } 4379 prev = m; 4380 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4381 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4382 } 4383 sctp_sballoc(stcb, sb, m); 4384 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4385 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4386 } 4387 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4388 m = SCTP_BUF_NEXT(m); 4389 } 4390 if (prev != NULL) { 4391 control->tail_mbuf = prev; 4392 } else { 4393 /* Everything got collapsed out?? 
*/ 4394 if (inp_read_lock_held == 0) 4395 SCTP_INP_READ_UNLOCK(inp); 4396 return; 4397 } 4398 if (end) { 4399 control->end_added = 1; 4400 } 4401 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4402 if (inp_read_lock_held == 0) 4403 SCTP_INP_READ_UNLOCK(inp); 4404 if (inp && inp->sctp_socket) { 4405 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) { 4406 SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket); 4407 } else { 4408 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4409 struct socket *so; 4410 4411 so = SCTP_INP_SO(inp); 4412 if (!so_locked) { 4413 atomic_add_int(&stcb->asoc.refcnt, 1); 4414 SCTP_TCB_UNLOCK(stcb); 4415 SCTP_SOCKET_LOCK(so, 1); 4416 SCTP_TCB_LOCK(stcb); 4417 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4418 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4419 SCTP_SOCKET_UNLOCK(so, 1); 4420 return; 4421 } 4422 } 4423 #endif 4424 sctp_sorwakeup(inp, inp->sctp_socket); 4425 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4426 if (!so_locked) { 4427 SCTP_SOCKET_UNLOCK(so, 1); 4428 } 4429 #endif 4430 } 4431 } 4432 } 4433 4434 4435 int 4436 sctp_append_to_readq(struct sctp_inpcb *inp, 4437 struct sctp_tcb *stcb, 4438 struct sctp_queued_to_read *control, 4439 struct mbuf *m, 4440 int end, 4441 int ctls_cumack, 4442 struct sockbuf *sb) 4443 { 4444 /* 4445 * A partial delivery API event is underway. OR we are appending on 4446 * the reassembly queue. 4447 * 4448 * If PDAPI this means we need to add m to the end of the data. 4449 * Increase the length in the control AND increment the sb_cc. 4450 * Otherwise sb is NULL and all we need to do is put it at the end 4451 * of the mbuf chain. 4452 */ 4453 int len = 0; 4454 struct mbuf *mm, *tail = NULL, *prev = NULL; 4455 4456 if (inp) { 4457 SCTP_INP_READ_LOCK(inp); 4458 } 4459 if (control == NULL) { 4460 get_out: 4461 if (inp) { 4462 SCTP_INP_READ_UNLOCK(inp); 4463 } 4464 return (-1); 4465 } 4466 if (control->end_added) { 4467 /* huh this one is complete? 
         */
        goto get_out;
    }
    mm = m;
    if (mm == NULL) {
        goto get_out;
    }
    /* Drop zero-length mbufs, charge the rest to the sockbuf (if any). */
    while (mm) {
        if (SCTP_BUF_LEN(mm) == 0) {
            /* Skip mbufs with NO length */
            if (prev == NULL) {
                /* First one */
                m = sctp_m_free(mm);
                mm = m;
            } else {
                SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
                mm = SCTP_BUF_NEXT(prev);
            }
            continue;
        }
        prev = mm;
        len += SCTP_BUF_LEN(mm);
        if (sb) {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
                sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
            }
            sctp_sballoc(stcb, sb, mm);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
                sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
            }
        }
        mm = SCTP_BUF_NEXT(mm);
    }
    if (prev) {
        tail = prev;
    } else {
        /* Really there should always be a prev */
        if (m == NULL) {
            /* Huh nothing left? */
#ifdef INVARIANTS
            panic("Nothing left to add?");
#else
            goto get_out;
#endif
        }
        tail = m;
    }
    if (control->tail_mbuf) {
        /* append */
        SCTP_BUF_NEXT(control->tail_mbuf) = m;
        control->tail_mbuf = tail;
    } else {
        /* nothing there */
#ifdef INVARIANTS
        if (control->data != NULL) {
            panic("This should NOT happen");
        }
#endif
        control->data = m;
        control->tail_mbuf = tail;
    }
    atomic_add_int(&control->length, len);
    if (end) {
        /* message is complete */
        if (stcb && (control == stcb->asoc.control_pdapi)) {
            stcb->asoc.control_pdapi = NULL;
        }
        control->held_length = 0;
        control->end_added = 1;
    }
    if (stcb == NULL) {
        control->do_not_ref_stcb = 1;
    }
    /*
     * When we are appending in partial delivery, the cum-ack is used
     * for the actual pd-api highest tsn on this mbuf. The true cum-ack
     * is populated in the outbound sinfo structure from the true cumack
     * if the association exists...
     */
    control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
    if (inp) {
        SCTP_INP_READ_UNLOCK(inp);
    }
    /* Wake the reader (mirrors the tail of sctp_add_to_readq). */
    if (inp && inp->sctp_socket) {
        if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
            SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
        } else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
            struct socket *so;

            so = SCTP_INP_SO(inp);
            atomic_add_int(&stcb->asoc.refcnt, 1);
            SCTP_TCB_UNLOCK(stcb);
            SCTP_SOCKET_LOCK(so, 1);
            SCTP_TCB_LOCK(stcb);
            atomic_subtract_int(&stcb->asoc.refcnt, 1);
            if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
                SCTP_SOCKET_UNLOCK(so, 1);
                return (0);
            }
#endif
            sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
            SCTP_SOCKET_UNLOCK(so, 1);
#endif
        }
    }
    return (0);
}



/*************HOLD THIS COMMENT FOR PATCH FILE OF
 *************ALTERNATE ROUTING CODE
 */

/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
 *************ALTERNATE ROUTING CODE
 */

/*
 * Build an mbuf carrying a parameter header of type err (host order) with
 * no payload, used to report an invalid mandatory parameter. Returns NULL
 * if no mbuf could be allocated.
 */
struct mbuf *
sctp_generate_invmanparam(int err)
{
    /* Return a MBUF with a invalid mandatory parameter */
    struct mbuf *m;

    m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
    if (m) {
        struct sctp_paramhdr *ph;

        SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
        ph = mtod(m, struct sctp_paramhdr *);
        ph->param_length = htons(sizeof(struct sctp_paramhdr));
        ph->param_type = htons(err);
    }
    return (m);
}

#ifdef SCTP_MBCNT_LOGGING
/*
 * Release the output-queue space accounting held by chunk tp1 (this
 * variant is only compiled with SCTP_MBCNT_LOGGING).
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
    if (tp1->data == NULL) {
return; 4612 } 4613 asoc->chunks_on_out_queue -= chk_cnt; 4614 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 4615 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4616 asoc->total_output_queue_size, 4617 tp1->book_size, 4618 0, 4619 tp1->mbcnt); 4620 } 4621 if (asoc->total_output_queue_size >= tp1->book_size) { 4622 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4623 } else { 4624 asoc->total_output_queue_size = 0; 4625 } 4626 4627 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4628 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4629 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 4630 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 4631 } else { 4632 stcb->sctp_socket->so_snd.sb_cc = 0; 4633 4634 } 4635 } 4636 } 4637 4638 #endif 4639 4640 int 4641 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 4642 int reason, int so_locked 4643 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 4644 SCTP_UNUSED 4645 #endif 4646 ) 4647 { 4648 struct sctp_stream_out *strq; 4649 struct sctp_tmit_chunk *chk = NULL; 4650 struct sctp_stream_queue_pending *sp; 4651 uint16_t stream = 0, seq = 0; 4652 uint8_t foundeom = 0; 4653 int ret_sz = 0; 4654 int notdone; 4655 int do_wakeup_routine = 0; 4656 4657 stream = tp1->rec.data.stream_number; 4658 seq = tp1->rec.data.stream_seq; 4659 do { 4660 ret_sz += tp1->book_size; 4661 if (tp1->data != NULL) { 4662 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4663 sctp_flight_size_decrease(tp1); 4664 sctp_total_flight_decrease(stcb, tp1); 4665 } 4666 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 4667 stcb->asoc.peers_rwnd += tp1->send_size; 4668 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 4669 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked); 4670 if (tp1->data) { 4671 sctp_m_freem(tp1->data); 4672 tp1->data = NULL; 4673 } 4674 do_wakeup_routine = 1; 4675 if 
(PR_SCTP_BUF_ENABLED(tp1->flags)) { 4676 stcb->asoc.sent_queue_cnt_removeable--; 4677 } 4678 } 4679 tp1->sent = SCTP_FORWARD_TSN_SKIP; 4680 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 4681 SCTP_DATA_NOT_FRAG) { 4682 /* not frag'ed we ae done */ 4683 notdone = 0; 4684 foundeom = 1; 4685 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 4686 /* end of frag, we are done */ 4687 notdone = 0; 4688 foundeom = 1; 4689 } else { 4690 /* 4691 * Its a begin or middle piece, we must mark all of 4692 * it 4693 */ 4694 notdone = 1; 4695 tp1 = TAILQ_NEXT(tp1, sctp_next); 4696 } 4697 } while (tp1 && notdone); 4698 if (foundeom == 0) { 4699 /* 4700 * The multi-part message was scattered across the send and 4701 * sent queue. 4702 */ 4703 next_on_sent: 4704 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 4705 /* 4706 * recurse throught the send_queue too, starting at the 4707 * beginning. 4708 */ 4709 if ((tp1) && 4710 (tp1->rec.data.stream_number == stream) && 4711 (tp1->rec.data.stream_seq == seq) 4712 ) { 4713 /* 4714 * save to chk in case we have some on stream out 4715 * queue. If so and we have an un-transmitted one we 4716 * don't have to fudge the TSN. 4717 */ 4718 chk = tp1; 4719 ret_sz += tp1->book_size; 4720 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 4721 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked); 4722 if (tp1->data) { 4723 sctp_m_freem(tp1->data); 4724 tp1->data = NULL; 4725 } 4726 /* No flight involved here book the size to 0 */ 4727 tp1->book_size = 0; 4728 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 4729 foundeom = 1; 4730 } 4731 do_wakeup_routine = 1; 4732 tp1->sent = SCTP_FORWARD_TSN_SKIP; 4733 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 4734 /* 4735 * on to the sent queue so we can wait for it to be 4736 * passed by. 
4737 */ 4738 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 4739 sctp_next); 4740 stcb->asoc.send_queue_cnt--; 4741 stcb->asoc.sent_queue_cnt++; 4742 goto next_on_sent; 4743 } 4744 } 4745 if (foundeom == 0) { 4746 /* 4747 * Still no eom found. That means there is stuff left on the 4748 * stream out queue.. yuck. 4749 */ 4750 strq = &stcb->asoc.strmout[stream]; 4751 SCTP_TCB_SEND_LOCK(stcb); 4752 sp = TAILQ_FIRST(&strq->outqueue); 4753 while (sp->strseq <= seq) { 4754 /* Check if its our SEQ */ 4755 if (sp->strseq == seq) { 4756 sp->discard_rest = 1; 4757 /* 4758 * We may need to put a chunk on the queue 4759 * that holds the TSN that would have been 4760 * sent with the LAST bit. 4761 */ 4762 if (chk == NULL) { 4763 /* Yep, we have to */ 4764 sctp_alloc_a_chunk(stcb, chk); 4765 if (chk == NULL) { 4766 /* 4767 * we are hosed. All we can 4768 * do is nothing.. which 4769 * will cause an abort if 4770 * the peer is paying 4771 * attention. 4772 */ 4773 goto oh_well; 4774 } 4775 memset(chk, 0, sizeof(*chk)); 4776 chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG; 4777 chk->sent = SCTP_FORWARD_TSN_SKIP; 4778 chk->asoc = &stcb->asoc; 4779 chk->rec.data.stream_seq = sp->strseq; 4780 chk->rec.data.stream_number = sp->stream; 4781 chk->rec.data.payloadtype = sp->ppid; 4782 chk->rec.data.context = sp->context; 4783 chk->flags = sp->act_flags; 4784 chk->addr_over = sp->addr_over; 4785 chk->whoTo = sp->net; 4786 atomic_add_int(&chk->whoTo->ref_count, 1); 4787 chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 4788 stcb->asoc.pr_sctp_cnt++; 4789 chk->pr_sctp_on = 1; 4790 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 4791 stcb->asoc.sent_queue_cnt++; 4792 stcb->asoc.pr_sctp_cnt++; 4793 } else { 4794 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 4795 } 4796 oh_well: 4797 if (sp->data) { 4798 /* 4799 * Pull any data to free up the SB 4800 * and allow sender to "add more" 4801 * whilc we will throw away :-) 4802 */ 4803 sctp_free_spbufspace(stcb, 
&stcb->asoc, 4804 sp); 4805 ret_sz += sp->length; 4806 do_wakeup_routine = 1; 4807 sp->some_taken = 1; 4808 sctp_m_freem(sp->data); 4809 sp->length = 0; 4810 sp->data = NULL; 4811 sp->tail_mbuf = NULL; 4812 } 4813 break; 4814 } else { 4815 /* Next one please */ 4816 sp = TAILQ_NEXT(sp, next); 4817 } 4818 } /* End while */ 4819 SCTP_TCB_SEND_UNLOCK(stcb); 4820 } 4821 if (do_wakeup_routine) { 4822 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4823 struct socket *so; 4824 4825 so = SCTP_INP_SO(stcb->sctp_ep); 4826 if (!so_locked) { 4827 atomic_add_int(&stcb->asoc.refcnt, 1); 4828 SCTP_TCB_UNLOCK(stcb); 4829 SCTP_SOCKET_LOCK(so, 1); 4830 SCTP_TCB_LOCK(stcb); 4831 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4832 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4833 /* assoc was freed while we were unlocked */ 4834 SCTP_SOCKET_UNLOCK(so, 1); 4835 return (ret_sz); 4836 } 4837 } 4838 #endif 4839 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 4840 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4841 if (!so_locked) { 4842 SCTP_SOCKET_UNLOCK(so, 1); 4843 } 4844 #endif 4845 } 4846 return (ret_sz); 4847 } 4848 4849 /* 4850 * checks to see if the given address, sa, is one that is currently known by 4851 * the kernel note: can't distinguish the same address on multiple interfaces 4852 * and doesn't handle multiple addresses with different zone/scope id's note: 4853 * ifa_ifwithaddr() compares the entire sockaddr struct 4854 */ 4855 struct sctp_ifa * 4856 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 4857 int holds_lock) 4858 { 4859 struct sctp_laddr *laddr; 4860 4861 if (holds_lock == 0) { 4862 SCTP_INP_RLOCK(inp); 4863 } 4864 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4865 if (laddr->ifa == NULL) 4866 continue; 4867 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 4868 continue; 4869 if (addr->sa_family == AF_INET) { 4870 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 4871 
laddr->ifa->address.sin.sin_addr.s_addr) { 4872 /* found him. */ 4873 if (holds_lock == 0) { 4874 SCTP_INP_RUNLOCK(inp); 4875 } 4876 return (laddr->ifa); 4877 break; 4878 } 4879 } 4880 #ifdef INET6 4881 if (addr->sa_family == AF_INET6) { 4882 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 4883 &laddr->ifa->address.sin6)) { 4884 /* found him. */ 4885 if (holds_lock == 0) { 4886 SCTP_INP_RUNLOCK(inp); 4887 } 4888 return (laddr->ifa); 4889 break; 4890 } 4891 } 4892 #endif 4893 } 4894 if (holds_lock == 0) { 4895 SCTP_INP_RUNLOCK(inp); 4896 } 4897 return (NULL); 4898 } 4899 4900 uint32_t 4901 sctp_get_ifa_hash_val(struct sockaddr *addr) 4902 { 4903 if (addr->sa_family == AF_INET) { 4904 struct sockaddr_in *sin; 4905 4906 sin = (struct sockaddr_in *)addr; 4907 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 4908 } else if (addr->sa_family == AF_INET6) { 4909 struct sockaddr_in6 *sin6; 4910 uint32_t hash_of_addr; 4911 4912 sin6 = (struct sockaddr_in6 *)addr; 4913 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 4914 sin6->sin6_addr.s6_addr32[1] + 4915 sin6->sin6_addr.s6_addr32[2] + 4916 sin6->sin6_addr.s6_addr32[3]); 4917 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 4918 return (hash_of_addr); 4919 } 4920 return (0); 4921 } 4922 4923 struct sctp_ifa * 4924 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 4925 { 4926 struct sctp_ifa *sctp_ifap; 4927 struct sctp_vrf *vrf; 4928 struct sctp_ifalist *hash_head; 4929 uint32_t hash_of_addr; 4930 4931 if (holds_lock == 0) 4932 SCTP_IPI_ADDR_RLOCK(); 4933 4934 vrf = sctp_find_vrf(vrf_id); 4935 if (vrf == NULL) { 4936 stage_right: 4937 if (holds_lock == 0) 4938 SCTP_IPI_ADDR_RUNLOCK(); 4939 return (NULL); 4940 } 4941 hash_of_addr = sctp_get_ifa_hash_val(addr); 4942 4943 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 4944 if (hash_head == NULL) { 4945 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 4946 hash_of_addr, (uint32_t) 
vrf->vrf_addr_hashmark, 4947 (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark)); 4948 sctp_print_address(addr); 4949 SCTP_PRINTF("No such bucket for address\n"); 4950 if (holds_lock == 0) 4951 SCTP_IPI_ADDR_RUNLOCK(); 4952 4953 return (NULL); 4954 } 4955 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 4956 if (sctp_ifap == NULL) { 4957 #ifdef INVARIANTS 4958 panic("Huh LIST_FOREACH corrupt"); 4959 goto stage_right; 4960 #else 4961 SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n"); 4962 goto stage_right; 4963 #endif 4964 } 4965 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 4966 continue; 4967 if (addr->sa_family == AF_INET) { 4968 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 4969 sctp_ifap->address.sin.sin_addr.s_addr) { 4970 /* found him. */ 4971 if (holds_lock == 0) 4972 SCTP_IPI_ADDR_RUNLOCK(); 4973 return (sctp_ifap); 4974 break; 4975 } 4976 } 4977 #ifdef INET6 4978 if (addr->sa_family == AF_INET6) { 4979 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 4980 &sctp_ifap->address.sin6)) { 4981 /* found him. */ 4982 if (holds_lock == 0) 4983 SCTP_IPI_ADDR_RUNLOCK(); 4984 return (sctp_ifap); 4985 break; 4986 } 4987 } 4988 #endif 4989 } 4990 if (holds_lock == 0) 4991 SCTP_IPI_ADDR_RUNLOCK(); 4992 return (NULL); 4993 } 4994 4995 static void 4996 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock, 4997 uint32_t rwnd_req) 4998 { 4999 /* User pulled some data, do we need a rwnd update? 
     */
    int r_unlocked = 0;
    uint32_t dif, rwnd;
    struct socket *so = NULL;

    if (stcb == NULL)
        return;

    /* hold a reference on the assoc while we work on it */
    atomic_add_int(&stcb->asoc.refcnt, 1);

    if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
        SCTP_STATE_SHUTDOWN_RECEIVED |
        SCTP_STATE_SHUTDOWN_ACK_SENT)) {
        /* Pre-check If we are freeing no update */
        goto no_lock;
    }
    SCTP_INP_INCR_REF(stcb->sctp_ep);
    if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
        goto out;
    }
    so = stcb->sctp_socket;
    if (so == NULL) {
        goto out;
    }
    atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
    /* Have you freed enough to look */
    *freed_so_far = 0;
    /* Yep, its worth a look and the lock overhead */

    /* Figure out what the rwnd would be */
    rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
    if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
        dif = rwnd - stcb->asoc.my_last_reported_rwnd;
    } else {
        dif = 0;
    }
    if (dif >= rwnd_req) {
        /* window opened enough: send an updated SACK/NR-SACK now */
        if (hold_rlock) {
            SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
            r_unlocked = 1;
        }
        if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
            /*
             * One last check before we allow the guy possibly
             * to get in. There is a race, where the guy has not
             * reached the gate. In that case
             */
            goto out;
        }
        SCTP_TCB_LOCK(stcb);
        if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
            /* No reports here */
            SCTP_TCB_UNLOCK(stcb);
            goto out;
        }
        SCTP_STAT_INCR(sctps_wu_sacks_sent);
        /*
         * EY if nr_sacks used then send an nr-sack , a sack
         * otherwise
         */
        if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
            sctp_send_nr_sack(stcb);
        else
            sctp_send_sack(stcb);

        sctp_chunk_output(stcb->sctp_ep, stcb,
            SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
        /* make sure no timer is running */
        sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
        SCTP_TCB_UNLOCK(stcb);
    } else {
        /* Update how much we have pending */
        stcb->freed_by_sorcv_sincelast = dif;
    }
out:
    if (so && r_unlocked && hold_rlock) {
        /* re-take the read lock we dropped above */
        SCTP_INP_READ_LOCK(stcb->sctp_ep);
    }
    SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
    atomic_add_int(&stcb->asoc.refcnt, -1);
    return;
}

int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
    /*
     * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
     * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
     * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5098 * On the way out we may send out any combination of: 5099 * MSG_NOTIFICATION MSG_EOR 5100 * 5101 */ 5102 struct sctp_inpcb *inp = NULL; 5103 int my_len = 0; 5104 int cp_len = 0, error = 0; 5105 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5106 struct mbuf *m = NULL, *embuf = NULL; 5107 struct sctp_tcb *stcb = NULL; 5108 int wakeup_read_socket = 0; 5109 int freecnt_applied = 0; 5110 int out_flags = 0, in_flags = 0; 5111 int block_allowed = 1; 5112 uint32_t freed_so_far = 0; 5113 uint32_t copied_so_far = 0; 5114 int in_eeor_mode = 0; 5115 int no_rcv_needed = 0; 5116 uint32_t rwnd_req = 0; 5117 int hold_sblock = 0; 5118 int hold_rlock = 0; 5119 int slen = 0; 5120 uint32_t held_length = 0; 5121 int sockbuf_lock = 0; 5122 5123 if (uio == NULL) { 5124 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5125 return (EINVAL); 5126 } 5127 if (msg_flags) { 5128 in_flags = *msg_flags; 5129 if (in_flags & MSG_PEEK) 5130 SCTP_STAT_INCR(sctps_read_peeks); 5131 } else { 5132 in_flags = 0; 5133 } 5134 slen = uio->uio_resid; 5135 5136 /* Pull in and set up our int flags */ 5137 if (in_flags & MSG_OOB) { 5138 /* Out of band's NOT supported */ 5139 return (EOPNOTSUPP); 5140 } 5141 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5142 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5143 return (EINVAL); 5144 } 5145 if ((in_flags & (MSG_DONTWAIT 5146 | MSG_NBIO 5147 )) || 5148 SCTP_SO_IS_NBIO(so)) { 5149 block_allowed = 0; 5150 } 5151 /* setup the endpoint */ 5152 inp = (struct sctp_inpcb *)so->so_pcb; 5153 if (inp == NULL) { 5154 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5155 return (EFAULT); 5156 } 5157 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5158 /* Must be at least a MTU's worth */ 5159 if (rwnd_req < SCTP_MIN_RWND) 5160 rwnd_req = SCTP_MIN_RWND; 5161 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5162 if (SCTP_BASE_SYSCTL(sctp_logging_level) & 
SCTP_RECV_RWND_LOGGING_ENABLE) { 5163 sctp_misc_ints(SCTP_SORECV_ENTER, 5164 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid); 5165 } 5166 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5167 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5168 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid); 5169 } 5170 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0)); 5171 sockbuf_lock = 1; 5172 if (error) { 5173 goto release_unlocked; 5174 } 5175 restart: 5176 5177 5178 restart_nosblocks: 5179 if (hold_sblock == 0) { 5180 SOCKBUF_LOCK(&so->so_rcv); 5181 hold_sblock = 1; 5182 } 5183 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5184 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5185 goto out; 5186 } 5187 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5188 if (so->so_error) { 5189 error = so->so_error; 5190 if ((in_flags & MSG_PEEK) == 0) 5191 so->so_error = 0; 5192 goto out; 5193 } else { 5194 if (so->so_rcv.sb_cc == 0) { 5195 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5196 /* indicate EOF */ 5197 error = 0; 5198 goto out; 5199 } 5200 } 5201 } 5202 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) { 5203 /* we need to wait for data */ 5204 if ((so->so_rcv.sb_cc == 0) && 5205 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5206 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5207 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5208 /* 5209 * For active open side clear flags for 5210 * re-use passive open is blocked by 5211 * connect. 
5212 */ 5213 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5214 /* 5215 * You were aborted, passive side 5216 * always hits here 5217 */ 5218 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5219 error = ECONNRESET; 5220 /* 5221 * You get this once if you are 5222 * active open side 5223 */ 5224 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 5225 /* 5226 * Remove flag if on the 5227 * active open side 5228 */ 5229 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED; 5230 } 5231 } 5232 so->so_state &= ~(SS_ISCONNECTING | 5233 SS_ISDISCONNECTING | 5234 SS_ISCONFIRMING | 5235 SS_ISCONNECTED); 5236 if (error == 0) { 5237 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5238 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5239 error = ENOTCONN; 5240 } else { 5241 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED; 5242 } 5243 } 5244 goto out; 5245 } 5246 } 5247 error = sbwait(&so->so_rcv); 5248 if (error) { 5249 goto out; 5250 } 5251 held_length = 0; 5252 goto restart_nosblocks; 5253 } else if (so->so_rcv.sb_cc == 0) { 5254 if (so->so_error) { 5255 error = so->so_error; 5256 if ((in_flags & MSG_PEEK) == 0) 5257 so->so_error = 0; 5258 } else { 5259 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5260 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 5261 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5262 /* 5263 * For active open side clear flags 5264 * for re-use passive open is 5265 * blocked by connect. 
5266 */ 5267 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5268 /* 5269 * You were aborted, passive 5270 * side always hits here 5271 */ 5272 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5273 error = ECONNRESET; 5274 /* 5275 * You get this once if you 5276 * are active open side 5277 */ 5278 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 5279 /* 5280 * Remove flag if on 5281 * the active open 5282 * side 5283 */ 5284 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED; 5285 } 5286 } 5287 so->so_state &= ~(SS_ISCONNECTING | 5288 SS_ISDISCONNECTING | 5289 SS_ISCONFIRMING | 5290 SS_ISCONNECTED); 5291 if (error == 0) { 5292 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5293 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5294 error = ENOTCONN; 5295 } else { 5296 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED; 5297 } 5298 } 5299 goto out; 5300 } 5301 } 5302 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5303 error = EWOULDBLOCK; 5304 } 5305 goto out; 5306 } 5307 if (hold_sblock == 1) { 5308 SOCKBUF_UNLOCK(&so->so_rcv); 5309 hold_sblock = 0; 5310 } 5311 /* we possibly have data we can read */ 5312 /* sa_ignore FREED_MEMORY */ 5313 control = TAILQ_FIRST(&inp->read_queue); 5314 if (control == NULL) { 5315 /* 5316 * This could be happening since the appender did the 5317 * increment but as not yet did the tailq insert onto the 5318 * read_queue 5319 */ 5320 if (hold_rlock == 0) { 5321 SCTP_INP_READ_LOCK(inp); 5322 hold_rlock = 1; 5323 } 5324 control = TAILQ_FIRST(&inp->read_queue); 5325 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5326 #ifdef INVARIANTS 5327 panic("Huh, its non zero and nothing on control?"); 5328 #endif 5329 so->so_rcv.sb_cc = 0; 5330 } 5331 SCTP_INP_READ_UNLOCK(inp); 5332 hold_rlock = 0; 5333 goto restart; 5334 } 5335 if ((control->length == 0) && 5336 (control->do_not_ref_stcb)) { 5337 /* 5338 * Clean up code for freeing assoc that left behind a 5339 * 
pdapi.. maybe a peer in EEOR that just closed after 5340 * sending and never indicated a EOR. 5341 */ 5342 if (hold_rlock == 0) { 5343 hold_rlock = 1; 5344 SCTP_INP_READ_LOCK(inp); 5345 } 5346 control->held_length = 0; 5347 if (control->data) { 5348 /* Hmm there is data here .. fix */ 5349 struct mbuf *m_tmp; 5350 int cnt = 0; 5351 5352 m_tmp = control->data; 5353 while (m_tmp) { 5354 cnt += SCTP_BUF_LEN(m_tmp); 5355 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5356 control->tail_mbuf = m_tmp; 5357 control->end_added = 1; 5358 } 5359 m_tmp = SCTP_BUF_NEXT(m_tmp); 5360 } 5361 control->length = cnt; 5362 } else { 5363 /* remove it */ 5364 TAILQ_REMOVE(&inp->read_queue, control, next); 5365 /* Add back any hiddend data */ 5366 sctp_free_remote_addr(control->whoFrom); 5367 sctp_free_a_readq(stcb, control); 5368 } 5369 if (hold_rlock) { 5370 hold_rlock = 0; 5371 SCTP_INP_READ_UNLOCK(inp); 5372 } 5373 goto restart; 5374 } 5375 if ((control->length == 0) && 5376 (control->end_added == 1)) { 5377 /* 5378 * Do we also need to check for (control->pdapi_aborted == 5379 * 1)? 5380 */ 5381 if (hold_rlock == 0) { 5382 hold_rlock = 1; 5383 SCTP_INP_READ_LOCK(inp); 5384 } 5385 TAILQ_REMOVE(&inp->read_queue, control, next); 5386 if (control->data) { 5387 #ifdef INVARIANTS 5388 panic("control->data not null but control->length == 0"); 5389 #else 5390 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5391 sctp_m_freem(control->data); 5392 control->data = NULL; 5393 #endif 5394 } 5395 if (control->aux_data) { 5396 sctp_m_free(control->aux_data); 5397 control->aux_data = NULL; 5398 } 5399 sctp_free_remote_addr(control->whoFrom); 5400 sctp_free_a_readq(stcb, control); 5401 if (hold_rlock) { 5402 hold_rlock = 0; 5403 SCTP_INP_READ_UNLOCK(inp); 5404 } 5405 goto restart; 5406 } 5407 if (control->length == 0) { 5408 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5409 (filling_sinfo)) { 5410 /* find a more suitable one then this */ 5411 ctl = TAILQ_NEXT(control, next); 5412 while (ctl) { 5413 if ((ctl->stcb != control->stcb) && (ctl->length) && 5414 (ctl->some_taken || 5415 (ctl->spec_flags & M_NOTIFICATION) || 5416 ((ctl->do_not_ref_stcb == 0) && 5417 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5418 ) { 5419 /*- 5420 * If we have a different TCB next, and there is data 5421 * present. If we have already taken some (pdapi), OR we can 5422 * ref the tcb and no delivery as started on this stream, we 5423 * take it. Note we allow a notification on a different 5424 * assoc to be delivered.. 5425 */ 5426 control = ctl; 5427 goto found_one; 5428 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5429 (ctl->length) && 5430 ((ctl->some_taken) || 5431 ((ctl->do_not_ref_stcb == 0) && 5432 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5433 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5434 ) { 5435 /*- 5436 * If we have the same tcb, and there is data present, and we 5437 * have the strm interleave feature present. Then if we have 5438 * taken some (pdapi) or we can refer to tht tcb AND we have 5439 * not started a delivery for this stream, we can take it. 5440 * Note we do NOT allow a notificaiton on the same assoc to 5441 * be delivered. 
5442 */ 5443 control = ctl; 5444 goto found_one; 5445 } 5446 ctl = TAILQ_NEXT(ctl, next); 5447 } 5448 } 5449 /* 5450 * if we reach here, not suitable replacement is available 5451 * <or> fragment interleave is NOT on. So stuff the sb_cc 5452 * into the our held count, and its time to sleep again. 5453 */ 5454 held_length = so->so_rcv.sb_cc; 5455 control->held_length = so->so_rcv.sb_cc; 5456 goto restart; 5457 } 5458 /* Clear the held length since there is something to read */ 5459 control->held_length = 0; 5460 if (hold_rlock) { 5461 SCTP_INP_READ_UNLOCK(inp); 5462 hold_rlock = 0; 5463 } 5464 found_one: 5465 /* 5466 * If we reach here, control has a some data for us to read off. 5467 * Note that stcb COULD be NULL. 5468 */ 5469 control->some_taken++; 5470 if (hold_sblock) { 5471 SOCKBUF_UNLOCK(&so->so_rcv); 5472 hold_sblock = 0; 5473 } 5474 stcb = control->stcb; 5475 if (stcb) { 5476 if ((control->do_not_ref_stcb == 0) && 5477 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5478 if (freecnt_applied == 0) 5479 stcb = NULL; 5480 } else if (control->do_not_ref_stcb == 0) { 5481 /* you can't free it on me please */ 5482 /* 5483 * The lock on the socket buffer protects us so the 5484 * free code will stop. But since we used the 5485 * socketbuf lock and the sender uses the tcb_lock 5486 * to increment, we need to use the atomic add to 5487 * the refcnt 5488 */ 5489 if (freecnt_applied) { 5490 #ifdef INVARIANTS 5491 panic("refcnt already incremented"); 5492 #else 5493 printf("refcnt already incremented?\n"); 5494 #endif 5495 } else { 5496 atomic_add_int(&stcb->asoc.refcnt, 1); 5497 freecnt_applied = 1; 5498 } 5499 /* 5500 * Setup to remember how much we have not yet told 5501 * the peer our rwnd has opened up. Note we grab the 5502 * value from the tcb from last time. Note too that 5503 * sack sending clears this when a sack is sent, 5504 * which is fine. 
Once we hit the rwnd_req, we then 5505 * will go to the sctp_user_rcvd() that will not 5506 * lock until it KNOWs it MUST send a WUP-SACK. 5507 */ 5508 freed_so_far = stcb->freed_by_sorcv_sincelast; 5509 stcb->freed_by_sorcv_sincelast = 0; 5510 } 5511 } 5512 if (stcb && 5513 ((control->spec_flags & M_NOTIFICATION) == 0) && 5514 control->do_not_ref_stcb == 0) { 5515 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5516 } 5517 /* First lets get off the sinfo and sockaddr info */ 5518 if ((sinfo) && filling_sinfo) { 5519 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo)); 5520 nxt = TAILQ_NEXT(control, next); 5521 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) { 5522 struct sctp_extrcvinfo *s_extra; 5523 5524 s_extra = (struct sctp_extrcvinfo *)sinfo; 5525 if ((nxt) && 5526 (nxt->length)) { 5527 s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5528 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5529 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5530 } 5531 if (nxt->spec_flags & M_NOTIFICATION) { 5532 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5533 } 5534 s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id; 5535 s_extra->sreinfo_next_length = nxt->length; 5536 s_extra->sreinfo_next_ppid = nxt->sinfo_ppid; 5537 s_extra->sreinfo_next_stream = nxt->sinfo_stream; 5538 if (nxt->tail_mbuf != NULL) { 5539 if (nxt->end_added) { 5540 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5541 } 5542 } 5543 } else { 5544 /* 5545 * we explicitly 0 this, since the memcpy 5546 * got some other things beyond the older 5547 * sinfo_ that is on the control's structure 5548 * :-D 5549 */ 5550 nxt = NULL; 5551 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG; 5552 s_extra->sreinfo_next_aid = 0; 5553 s_extra->sreinfo_next_length = 0; 5554 s_extra->sreinfo_next_ppid = 0; 5555 s_extra->sreinfo_next_stream = 0; 5556 } 5557 } 5558 /* 5559 * update off the real current cum-ack, if we have an stcb. 
5560 */ 5561 if ((control->do_not_ref_stcb == 0) && stcb) 5562 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5563 /* 5564 * mask off the high bits, we keep the actual chunk bits in 5565 * there. 5566 */ 5567 sinfo->sinfo_flags &= 0x00ff; 5568 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5569 sinfo->sinfo_flags |= SCTP_UNORDERED; 5570 } 5571 } 5572 #ifdef SCTP_ASOCLOG_OF_TSNS 5573 { 5574 int index, newindex; 5575 struct sctp_pcbtsn_rlog *entry; 5576 5577 do { 5578 index = inp->readlog_index; 5579 newindex = index + 1; 5580 if (newindex >= SCTP_READ_LOG_SIZE) { 5581 newindex = 0; 5582 } 5583 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5584 entry = &inp->readlog[index]; 5585 entry->vtag = control->sinfo_assoc_id; 5586 entry->strm = control->sinfo_stream; 5587 entry->seq = control->sinfo_ssn; 5588 entry->sz = control->length; 5589 entry->flgs = control->sinfo_flags; 5590 } 5591 #endif 5592 if (fromlen && from) { 5593 struct sockaddr *to; 5594 5595 #ifdef INET 5596 cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len); 5597 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 5598 ((struct sockaddr_in *)from)->sin_port = control->port_from; 5599 #else 5600 /* No AF_INET use AF_INET6 */ 5601 cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len); 5602 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 5603 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from; 5604 #endif 5605 5606 to = from; 5607 #if defined(INET) && defined(INET6) 5608 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) && 5609 (to->sa_family == AF_INET) && 5610 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) { 5611 struct sockaddr_in *sin; 5612 struct sockaddr_in6 sin6; 5613 5614 sin = (struct sockaddr_in *)to; 5615 bzero(&sin6, sizeof(sin6)); 5616 sin6.sin6_family = AF_INET6; 5617 sin6.sin6_len = sizeof(struct sockaddr_in6); 5618 sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); 5619 
bcopy(&sin->sin_addr, 5620 &sin6.sin6_addr.s6_addr32[3], 5621 sizeof(sin6.sin6_addr.s6_addr32[3])); 5622 sin6.sin6_port = sin->sin_port; 5623 memcpy(from, (caddr_t)&sin6, sizeof(sin6)); 5624 } 5625 #endif 5626 #if defined(INET6) 5627 { 5628 struct sockaddr_in6 lsa6, *to6; 5629 5630 to6 = (struct sockaddr_in6 *)to; 5631 sctp_recover_scope_mac(to6, (&lsa6)); 5632 } 5633 #endif 5634 } 5635 /* now copy out what data we can */ 5636 if (mp == NULL) { 5637 /* copy out each mbuf in the chain up to length */ 5638 get_more_data: 5639 m = control->data; 5640 while (m) { 5641 /* Move out all we can */ 5642 cp_len = (int)uio->uio_resid; 5643 my_len = (int)SCTP_BUF_LEN(m); 5644 if (cp_len > my_len) { 5645 /* not enough in this buf */ 5646 cp_len = my_len; 5647 } 5648 if (hold_rlock) { 5649 SCTP_INP_READ_UNLOCK(inp); 5650 hold_rlock = 0; 5651 } 5652 if (cp_len > 0) 5653 error = uiomove(mtod(m, char *), cp_len, uio); 5654 /* re-read */ 5655 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5656 goto release; 5657 } 5658 if ((control->do_not_ref_stcb == 0) && stcb && 5659 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5660 no_rcv_needed = 1; 5661 } 5662 if (error) { 5663 /* error we are out of here */ 5664 goto release; 5665 } 5666 if ((SCTP_BUF_NEXT(m) == NULL) && 5667 (cp_len >= SCTP_BUF_LEN(m)) && 5668 ((control->end_added == 0) || 5669 (control->end_added && 5670 (TAILQ_NEXT(control, next) == NULL))) 5671 ) { 5672 SCTP_INP_READ_LOCK(inp); 5673 hold_rlock = 1; 5674 } 5675 if (cp_len == SCTP_BUF_LEN(m)) { 5676 if ((SCTP_BUF_NEXT(m) == NULL) && 5677 (control->end_added)) { 5678 out_flags |= MSG_EOR; 5679 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5680 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5681 } 5682 if (control->spec_flags & M_NOTIFICATION) { 5683 out_flags |= MSG_NOTIFICATION; 5684 } 5685 /* we ate up the mbuf */ 5686 if (in_flags & MSG_PEEK) { 5687 /* just looking */ 5688 m = SCTP_BUF_NEXT(m); 
5689 copied_so_far += cp_len; 5690 } else { 5691 /* dispose of the mbuf */ 5692 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5693 sctp_sblog(&so->so_rcv, 5694 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5695 } 5696 sctp_sbfree(control, stcb, &so->so_rcv, m); 5697 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5698 sctp_sblog(&so->so_rcv, 5699 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5700 } 5701 embuf = m; 5702 copied_so_far += cp_len; 5703 freed_so_far += cp_len; 5704 freed_so_far += MSIZE; 5705 atomic_subtract_int(&control->length, cp_len); 5706 control->data = sctp_m_free(m); 5707 m = control->data; 5708 /* 5709 * been through it all, must hold sb 5710 * lock ok to null tail 5711 */ 5712 if (control->data == NULL) { 5713 #ifdef INVARIANTS 5714 if ((control->end_added == 0) || 5715 (TAILQ_NEXT(control, next) == NULL)) { 5716 /* 5717 * If the end is not 5718 * added, OR the 5719 * next is NOT null 5720 * we MUST have the 5721 * lock. 5722 */ 5723 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 5724 panic("Hmm we don't own the lock?"); 5725 } 5726 } 5727 #endif 5728 control->tail_mbuf = NULL; 5729 #ifdef INVARIANTS 5730 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 5731 panic("end_added, nothing left and no MSG_EOR"); 5732 } 5733 #endif 5734 } 5735 } 5736 } else { 5737 /* Do we need to trim the mbuf? */ 5738 if (control->spec_flags & M_NOTIFICATION) { 5739 out_flags |= MSG_NOTIFICATION; 5740 } 5741 if ((in_flags & MSG_PEEK) == 0) { 5742 SCTP_BUF_RESV_UF(m, cp_len); 5743 SCTP_BUF_LEN(m) -= cp_len; 5744 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5745 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5746 } 5747 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5748 if ((control->do_not_ref_stcb == 0) && 5749 stcb) { 5750 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5751 } 5752 copied_so_far += cp_len; 5753 embuf = m; 5754 freed_so_far += cp_len; 5755 freed_so_far += MSIZE; 5756 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5757 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5758 SCTP_LOG_SBRESULT, 0); 5759 } 5760 atomic_subtract_int(&control->length, cp_len); 5761 } else { 5762 copied_so_far += cp_len; 5763 } 5764 } 5765 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 5766 break; 5767 } 5768 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5769 (control->do_not_ref_stcb == 0) && 5770 (freed_so_far >= rwnd_req)) { 5771 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5772 } 5773 } /* end while(m) */ 5774 /* 5775 * At this point we have looked at it all and we either have 5776 * a MSG_EOR/or read all the user wants... <OR> 5777 * control->length == 0. 5778 */ 5779 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 5780 /* we are done with this control */ 5781 if (control->length == 0) { 5782 if (control->data) { 5783 #ifdef INVARIANTS 5784 panic("control->data not null at read eor?"); 5785 #else 5786 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n"); 5787 sctp_m_freem(control->data); 5788 control->data = NULL; 5789 #endif 5790 } 5791 done_with_control: 5792 if (TAILQ_NEXT(control, next) == NULL) { 5793 /* 5794 * If we don't have a next we need a 5795 * lock, if there is a next 5796 * interrupt is filling ahead of us 5797 * and we don't need a lock to 5798 * remove this guy (which is the 5799 * head of the queue). 
5800 */ 5801 if (hold_rlock == 0) { 5802 SCTP_INP_READ_LOCK(inp); 5803 hold_rlock = 1; 5804 } 5805 } 5806 TAILQ_REMOVE(&inp->read_queue, control, next); 5807 /* Add back any hiddend data */ 5808 if (control->held_length) { 5809 held_length = 0; 5810 control->held_length = 0; 5811 wakeup_read_socket = 1; 5812 } 5813 if (control->aux_data) { 5814 sctp_m_free(control->aux_data); 5815 control->aux_data = NULL; 5816 } 5817 no_rcv_needed = control->do_not_ref_stcb; 5818 sctp_free_remote_addr(control->whoFrom); 5819 control->data = NULL; 5820 sctp_free_a_readq(stcb, control); 5821 control = NULL; 5822 if ((freed_so_far >= rwnd_req) && 5823 (no_rcv_needed == 0)) 5824 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5825 5826 } else { 5827 /* 5828 * The user did not read all of this 5829 * message, turn off the returned MSG_EOR 5830 * since we are leaving more behind on the 5831 * control to read. 5832 */ 5833 #ifdef INVARIANTS 5834 if (control->end_added && 5835 (control->data == NULL) && 5836 (control->tail_mbuf == NULL)) { 5837 panic("Gak, control->length is corrupt?"); 5838 } 5839 #endif 5840 no_rcv_needed = control->do_not_ref_stcb; 5841 out_flags &= ~MSG_EOR; 5842 } 5843 } 5844 if (out_flags & MSG_EOR) { 5845 goto release; 5846 } 5847 if ((uio->uio_resid == 0) || 5848 ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1))) 5849 ) { 5850 goto release; 5851 } 5852 /* 5853 * If I hit here the receiver wants more and this message is 5854 * NOT done (pd-api). So two questions. Can we block? if not 5855 * we are done. Did the user NOT set MSG_WAITALL? 5856 */ 5857 if (block_allowed == 0) { 5858 goto release; 5859 } 5860 /* 5861 * We need to wait for more data a few things: - We don't 5862 * sbunlock() so we don't get someone else reading. - We 5863 * must be sure to account for the case where what is added 5864 * is NOT to our control when we wakeup. 
5865 */ 5866 5867 /* 5868 * Do we need to tell the transport a rwnd update might be 5869 * needed before we go to sleep? 5870 */ 5871 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5872 ((freed_so_far >= rwnd_req) && 5873 (control->do_not_ref_stcb == 0) && 5874 (no_rcv_needed == 0))) { 5875 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5876 } 5877 wait_some_more: 5878 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5879 goto release; 5880 } 5881 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5882 goto release; 5883 5884 if (hold_rlock == 1) { 5885 SCTP_INP_READ_UNLOCK(inp); 5886 hold_rlock = 0; 5887 } 5888 if (hold_sblock == 0) { 5889 SOCKBUF_LOCK(&so->so_rcv); 5890 hold_sblock = 1; 5891 } 5892 if ((copied_so_far) && (control->length == 0) && 5893 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) 5894 ) { 5895 goto release; 5896 } 5897 if (so->so_rcv.sb_cc <= control->held_length) { 5898 error = sbwait(&so->so_rcv); 5899 if (error) { 5900 goto release; 5901 } 5902 control->held_length = 0; 5903 } 5904 if (hold_sblock) { 5905 SOCKBUF_UNLOCK(&so->so_rcv); 5906 hold_sblock = 0; 5907 } 5908 if (control->length == 0) { 5909 /* still nothing here */ 5910 if (control->end_added == 1) { 5911 /* he aborted, or is done i.e.did a shutdown */ 5912 out_flags |= MSG_EOR; 5913 if (control->pdapi_aborted) { 5914 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5915 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5916 5917 out_flags |= MSG_TRUNC; 5918 } else { 5919 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5920 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5921 } 5922 goto done_with_control; 5923 } 5924 if (so->so_rcv.sb_cc > held_length) { 5925 control->held_length = so->so_rcv.sb_cc; 5926 held_length = 0; 5927 } 5928 goto wait_some_more; 5929 } else if (control->data == NULL) { 5930 /* 5931 * we must re-sync since data is probably 
being 5932 * added 5933 */ 5934 SCTP_INP_READ_LOCK(inp); 5935 if ((control->length > 0) && (control->data == NULL)) { 5936 /* 5937 * big trouble.. we have the lock and its 5938 * corrupt? 5939 */ 5940 #ifdef INVARIANTS 5941 panic("Impossible data==NULL length !=0"); 5942 #endif 5943 out_flags |= MSG_EOR; 5944 out_flags |= MSG_TRUNC; 5945 control->length = 0; 5946 SCTP_INP_READ_UNLOCK(inp); 5947 goto done_with_control; 5948 } 5949 SCTP_INP_READ_UNLOCK(inp); 5950 /* We will fall around to get more data */ 5951 } 5952 goto get_more_data; 5953 } else { 5954 /*- 5955 * Give caller back the mbuf chain, 5956 * store in uio_resid the length 5957 */ 5958 wakeup_read_socket = 0; 5959 if ((control->end_added == 0) || 5960 (TAILQ_NEXT(control, next) == NULL)) { 5961 /* Need to get rlock */ 5962 if (hold_rlock == 0) { 5963 SCTP_INP_READ_LOCK(inp); 5964 hold_rlock = 1; 5965 } 5966 } 5967 if (control->end_added) { 5968 out_flags |= MSG_EOR; 5969 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5970 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5971 } 5972 if (control->spec_flags & M_NOTIFICATION) { 5973 out_flags |= MSG_NOTIFICATION; 5974 } 5975 uio->uio_resid = control->length; 5976 *mp = control->data; 5977 m = control->data; 5978 while (m) { 5979 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5980 sctp_sblog(&so->so_rcv, 5981 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5982 } 5983 sctp_sbfree(control, stcb, &so->so_rcv, m); 5984 freed_so_far += SCTP_BUF_LEN(m); 5985 freed_so_far += MSIZE; 5986 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5987 sctp_sblog(&so->so_rcv, 5988 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 5989 } 5990 m = SCTP_BUF_NEXT(m); 5991 } 5992 control->data = control->tail_mbuf = NULL; 5993 control->length = 0; 5994 if (out_flags & MSG_EOR) { 5995 /* Done with this control */ 5996 goto done_with_control; 5997 } 5998 } 5999 release: 6000 if (hold_rlock == 1) { 6001 SCTP_INP_READ_UNLOCK(inp); 6002 hold_rlock = 0; 6003 } 6004 if (hold_sblock == 1) { 6005 SOCKBUF_UNLOCK(&so->so_rcv); 6006 hold_sblock = 0; 6007 } 6008 sbunlock(&so->so_rcv); 6009 sockbuf_lock = 0; 6010 6011 release_unlocked: 6012 if (hold_sblock) { 6013 SOCKBUF_UNLOCK(&so->so_rcv); 6014 hold_sblock = 0; 6015 } 6016 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6017 if ((freed_so_far >= rwnd_req) && 6018 (control && (control->do_not_ref_stcb == 0)) && 6019 (no_rcv_needed == 0)) 6020 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6021 } 6022 out: 6023 if (msg_flags) { 6024 *msg_flags = out_flags; 6025 } 6026 if (((out_flags & MSG_EOR) == 0) && 6027 ((in_flags & MSG_PEEK) == 0) && 6028 (sinfo) && 6029 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) { 6030 struct sctp_extrcvinfo *s_extra; 6031 6032 s_extra = (struct sctp_extrcvinfo *)sinfo; 6033 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG; 6034 } 6035 if (hold_rlock == 1) { 6036 SCTP_INP_READ_UNLOCK(inp); 6037 hold_rlock = 0; 6038 } 6039 if (hold_sblock) { 6040 SOCKBUF_UNLOCK(&so->so_rcv); 6041 hold_sblock = 0; 6042 } 6043 if (sockbuf_lock) { 6044 sbunlock(&so->so_rcv); 6045 } 6046 if (freecnt_applied) { 6047 /* 6048 * The lock on the socket buffer protects us so the free 6049 * code will stop. But since we used the socketbuf lock and 6050 * the sender uses the tcb_lock to increment, we need to use 6051 * the atomic add to the refcnt. 
6052 */ 6053 if (stcb == NULL) { 6054 #ifdef INVARIANTS 6055 panic("stcb for refcnt has gone NULL?"); 6056 goto stage_left; 6057 #else 6058 goto stage_left; 6059 #endif 6060 } 6061 atomic_add_int(&stcb->asoc.refcnt, -1); 6062 freecnt_applied = 0; 6063 /* Save the value back for next time */ 6064 stcb->freed_by_sorcv_sincelast = freed_so_far; 6065 } 6066 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6067 if (stcb) { 6068 sctp_misc_ints(SCTP_SORECV_DONE, 6069 freed_so_far, 6070 ((uio) ? (slen - uio->uio_resid) : slen), 6071 stcb->asoc.my_rwnd, 6072 so->so_rcv.sb_cc); 6073 } else { 6074 sctp_misc_ints(SCTP_SORECV_DONE, 6075 freed_so_far, 6076 ((uio) ? (slen - uio->uio_resid) : slen), 6077 0, 6078 so->so_rcv.sb_cc); 6079 } 6080 } 6081 stage_left: 6082 if (wakeup_read_socket) { 6083 sctp_sorwakeup(inp, so); 6084 } 6085 return (error); 6086 } 6087 6088 6089 #ifdef SCTP_MBUF_LOGGING 6090 struct mbuf * 6091 sctp_m_free(struct mbuf *m) 6092 { 6093 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6094 if (SCTP_BUF_IS_EXTENDED(m)) { 6095 sctp_log_mb(m, SCTP_MBUF_IFREE); 6096 } 6097 } 6098 return (m_free(m)); 6099 } 6100 6101 void 6102 sctp_m_freem(struct mbuf *mb) 6103 { 6104 while (mb != NULL) 6105 mb = sctp_m_free(mb); 6106 } 6107 6108 #endif 6109 6110 int 6111 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6112 { 6113 /* 6114 * Given a local address. For all associations that holds the 6115 * address, request a peer-set-primary. 6116 */ 6117 struct sctp_ifa *ifa; 6118 struct sctp_laddr *wi; 6119 6120 ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0); 6121 if (ifa == NULL) { 6122 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6123 return (EADDRNOTAVAIL); 6124 } 6125 /* 6126 * Now that we have the ifa we must awaken the iterator with this 6127 * message. 
6128 */ 6129 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6130 if (wi == NULL) { 6131 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6132 return (ENOMEM); 6133 } 6134 /* Now incr the count and int wi structure */ 6135 SCTP_INCR_LADDR_COUNT(); 6136 bzero(wi, sizeof(*wi)); 6137 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6138 wi->ifa = ifa; 6139 wi->action = SCTP_SET_PRIM_ADDR; 6140 atomic_add_int(&ifa->refcount, 1); 6141 6142 /* Now add it to the work queue */ 6143 SCTP_IPI_ITERATOR_WQ_LOCK(); 6144 /* 6145 * Should this really be a tailq? As it is we will process the 6146 * newest first :-0 6147 */ 6148 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6149 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6150 (struct sctp_inpcb *)NULL, 6151 (struct sctp_tcb *)NULL, 6152 (struct sctp_nets *)NULL); 6153 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 6154 return (0); 6155 } 6156 6157 6158 int 6159 sctp_soreceive(struct socket *so, 6160 struct sockaddr **psa, 6161 struct uio *uio, 6162 struct mbuf **mp0, 6163 struct mbuf **controlp, 6164 int *flagsp) 6165 { 6166 int error, fromlen; 6167 uint8_t sockbuf[256]; 6168 struct sockaddr *from; 6169 struct sctp_extrcvinfo sinfo; 6170 int filling_sinfo = 1; 6171 struct sctp_inpcb *inp; 6172 6173 inp = (struct sctp_inpcb *)so->so_pcb; 6174 /* pickup the assoc we are reading from */ 6175 if (inp == NULL) { 6176 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6177 return (EINVAL); 6178 } 6179 if ((sctp_is_feature_off(inp, 6180 SCTP_PCB_FLAGS_RECVDATAIOEVNT)) || 6181 (controlp == NULL)) { 6182 /* user does not want the sndrcv ctl */ 6183 filling_sinfo = 0; 6184 } 6185 if (psa) { 6186 from = (struct sockaddr *)sockbuf; 6187 fromlen = sizeof(sockbuf); 6188 from->sa_len = 0; 6189 } else { 6190 from = NULL; 6191 fromlen = 0; 6192 } 6193 6194 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp, 6195 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6196 if ((controlp) && 
(filling_sinfo)) { 6197 /* copy back the sinfo in a CMSG format */ 6198 if (filling_sinfo) 6199 *controlp = sctp_build_ctl_nchunk(inp, 6200 (struct sctp_sndrcvinfo *)&sinfo); 6201 else 6202 *controlp = NULL; 6203 } 6204 if (psa) { 6205 /* copy back the address info */ 6206 if (from && from->sa_len) { 6207 *psa = sodupsockaddr(from, M_NOWAIT); 6208 } else { 6209 *psa = NULL; 6210 } 6211 } 6212 return (error); 6213 } 6214 6215 6216 int 6217 sctp_l_soreceive(struct socket *so, 6218 struct sockaddr **name, 6219 struct uio *uio, 6220 char **controlp, 6221 int *controllen, 6222 int *flag) 6223 { 6224 int error, fromlen; 6225 uint8_t sockbuf[256]; 6226 struct sockaddr *from; 6227 struct sctp_extrcvinfo sinfo; 6228 int filling_sinfo = 1; 6229 struct sctp_inpcb *inp; 6230 6231 inp = (struct sctp_inpcb *)so->so_pcb; 6232 /* pickup the assoc we are reading from */ 6233 if (inp == NULL) { 6234 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6235 return (EINVAL); 6236 } 6237 if ((sctp_is_feature_off(inp, 6238 SCTP_PCB_FLAGS_RECVDATAIOEVNT)) || 6239 (controlp == NULL)) { 6240 /* user does not want the sndrcv ctl */ 6241 filling_sinfo = 0; 6242 } 6243 if (name) { 6244 from = (struct sockaddr *)sockbuf; 6245 fromlen = sizeof(sockbuf); 6246 from->sa_len = 0; 6247 } else { 6248 from = NULL; 6249 fromlen = 0; 6250 } 6251 6252 error = sctp_sorecvmsg(so, uio, 6253 (struct mbuf **)NULL, 6254 from, fromlen, flag, 6255 (struct sctp_sndrcvinfo *)&sinfo, 6256 filling_sinfo); 6257 if ((controlp) && (filling_sinfo)) { 6258 /* 6259 * copy back the sinfo in a CMSG format note that the caller 6260 * has reponsibility for freeing the memory. 
6261 */ 6262 if (filling_sinfo) 6263 *controlp = sctp_build_ctl_cchunk(inp, 6264 controllen, 6265 (struct sctp_sndrcvinfo *)&sinfo); 6266 } 6267 if (name) { 6268 /* copy back the address info */ 6269 if (from && from->sa_len) { 6270 *name = sodupsockaddr(from, M_WAIT); 6271 } else { 6272 *name = NULL; 6273 } 6274 } 6275 return (error); 6276 } 6277 6278 6279 6280 6281 6282 6283 6284 int 6285 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6286 int totaddr, int *error) 6287 { 6288 int added = 0; 6289 int i; 6290 struct sctp_inpcb *inp; 6291 struct sockaddr *sa; 6292 size_t incr = 0; 6293 6294 sa = addr; 6295 inp = stcb->sctp_ep; 6296 *error = 0; 6297 for (i = 0; i < totaddr; i++) { 6298 if (sa->sa_family == AF_INET) { 6299 incr = sizeof(struct sockaddr_in); 6300 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 6301 /* assoc gone no un-lock */ 6302 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6303 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7); 6304 *error = ENOBUFS; 6305 goto out_now; 6306 } 6307 added++; 6308 } else if (sa->sa_family == AF_INET6) { 6309 incr = sizeof(struct sockaddr_in6); 6310 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 6311 /* assoc gone no un-lock */ 6312 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6313 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8); 6314 *error = ENOBUFS; 6315 goto out_now; 6316 } 6317 added++; 6318 } 6319 sa = (struct sockaddr *)((caddr_t)sa + incr); 6320 } 6321 out_now: 6322 return (added); 6323 } 6324 6325 struct sctp_tcb * 6326 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6327 int *totaddr, int *num_v4, int *num_v6, int *error, 6328 int limit, int *bad_addr) 6329 { 6330 struct sockaddr *sa; 6331 struct sctp_tcb *stcb = NULL; 6332 size_t incr, at, i; 6333 6334 at = incr = 0; 6335 
	sa = addr;
	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < (size_t)*totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			(*num_v4) += 1;
			incr = sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				/* sa_len must match the address family's size */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
		} else if (sa->sa_family == AF_INET6) {
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
				/* Must be non-mapped for connectx */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			(*num_v6) += 1;
			incr = sizeof(struct sockaddr_in6);
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
		} else {
			/* unknown family terminates the walk; report how many
			 * addresses were actually consumed */
			*totaddr = i;
			/* we are done */
			break;
		}
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/*
			 * Already have, or are bringing up, an association
			 * to this address.  The found stcb is returned with
			 * the inp reference still held.
			 */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		if ((at + incr) > (size_t)limit) {
			/* next address would run past the caller's buffer */
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}

/*
 * sctp_bindx(ADD) for one address.
 * assumes all arguments are valid/checked by caller.
 *
 * Validates the address family/length against the endpoint's binding mode,
 * converts a v4-mapped IPv6 address to plain IPv4 where permitted, and either
 * performs the initial bind (unbound endpoint) or adds the address to an
 * already-bound endpoint via sctp_addr_mgmt_ep_sa().  Errors are reported
 * through *error; nothing is returned.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* rewrite the v4-mapped address as a real IPv4
			 * sockaddr (stack copy) for the rest of the path */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* endpoint not yet bound at all: do a normal bind instead */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* no other ep owns the address; add it to ours.
			 * Port zeroed because mgmt_ep_sa keys on address only */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* some other endpoint is already bound to it */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}

/*
 * sctp_bindx(DELETE) for one address.
 * assumes all arguments are valid/checked by caller.
 *
 * Mirrors the validation in sctp_bindx_add_address(), then removes the
 * address from the endpoint via sctp_addr_mgmt_ep_sa().  Errors are
 * reported through *error.
 */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert to a plain IPv4 sockaddr for the delete */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}

/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 *
 * For a bound-all endpoint, every interface address in the association's
 * VRF is examined against the association's negotiated scopes (loopback,
 * IPv4 private, link-local, site-local) and address-family legality.
 * For a subset-bound endpoint, the endpoint's explicit address list is
 * counted instead.  Addresses restricted for this stcb are skipped in
 * both cases.  Runs under SCTP_IPI_ADDR_RLOCK.
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		/* a non-V6ONLY v6 socket may also use IPv4 addresses */
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/* skip unspecified addrs */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}

#if defined(SCTP_LOCAL_TRACE_BUF)

/*
 * Append one entry to the in-memory SCTP trace ring buffer.
 * The slot is claimed lock-free with a compare-and-set loop on the
 * shared index; the entry itself is then filled without further
 * synchronization (a racing reader may see a partially written slot).
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* claim a slot: advance index, wrapping to 1 at the end so the
	 * wrapped writer lands on slot 0 below */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}

#endif
/* We will need to add support
 * to bind the ports and such here
 * so we can do UDP tunneling. In
 * the mean-time, we return error
 */
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif

/*
 * Kernel-UDP-tunneling receive hook: strip the UDP header from an
 * encapsulated SCTP packet and hand the result to sctp_input_with_port().
 * On any failure the packet is dropped (freed).  IPv6 is not supported
 * on this path and is dropped as well.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
{
	struct ip *iph;
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port = 0, len;
	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);

	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);

	port = uhdr->uh_sport;
	sp = m_split(m, off, M_DONTWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < header_size) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* ok now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, header_size);
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	/* 1) find last one */
	last = m;
	while (last->m_next != NULL) {
		last = last->m_next;
	}
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * NOTE(review): this second walk always terminates with
	 * last == NULL and has no observable effect; it looks like
	 * leftover debug code - candidate for removal.
	 */
	last = m;
	while (last != NULL) {
		last = last->m_next;
	}
	/* Now its ready for sctp_input or sctp6_input */
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
	case IPVERSION:
		{
			/* its IPv4: shrink the IP length field by the
			 * removed UDP header before handing it on */
			len = SCTP_GET_IPV4_LENGTH(iph);
			len -= sizeof(struct udphdr);
			SCTP_GET_IPV4_LENGTH(iph) = len;
			sctp_input_with_port(m, off, port);
			break;
		}
#ifdef INET6
	case IPV6_VERSION >> 4:
		{
			/* its IPv6 - NOT supported */
			goto out;
			break;

		}
#endif
	default:
		{
			m_freem(m);
			break;
		}
	}
	return;
out:
	m_freem(m);
}

/*
 * Tear down the SCTP-over-UDP tunneling socket, if one is open.
 * Safe to call when tunneling was never started.
 */
void
sctp_over_udp_stop(void)
{
	struct socket *sop;

	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
		/* Nothing to do */
		return;
	}
	sop = SCTP_BASE_INFO(udp_tun_socket);
	soclose(sop);
	SCTP_BASE_INFO(udp_tun_socket) = NULL;
}

/*
 * Create and bind the kernel UDP socket used for SCTP-over-UDP
 * tunneling, registering sctp_recv_udp_tunneled_packet() as the
 * kernel tunneling hook.  Returns 0 on success or an errno value
 * (EINVAL if no tunneling port is configured, EALREADY if already
 * running, or the failure from socreate/sobind).
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up, we can't get the port */
exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_udp_tunneled_packet().
	 */
	return (0);
}