1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * a) Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 14 * b) Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the distribution. 17 * 18 * c) Neither the name of Cisco Systems, Inc. nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include <netinet/sctp_os.h> 39 #include <netinet/sctp_pcb.h> 40 #include <netinet/sctputil.h> 41 #include <netinet/sctp_var.h> 42 #include <netinet/sctp_sysctl.h> 43 #ifdef INET6 44 #include <netinet6/sctp6_var.h> 45 #endif 46 #include <netinet/sctp_header.h> 47 #include <netinet/sctp_output.h> 48 #include <netinet/sctp_uio.h> 49 #include <netinet/sctp_timer.h> 50 #include <netinet/sctp_indata.h> 51 #include <netinet/sctp_auth.h> 52 #include <netinet/sctp_asconf.h> 53 #include <netinet/sctp_bsd_addr.h> 54 #include <netinet/sctp_kdtrace.h> 55 #if defined(INET6) || defined(INET) 56 #include <netinet/tcp_var.h> 57 #endif 58 #include <netinet/udp.h> 59 #include <netinet/udp_var.h> 60 #include <sys/proc.h> 61 #ifdef INET6 62 #include <netinet/icmp6.h> 63 #endif 64 65 #ifndef KTR_SCTP 66 #define KTR_SCTP KTR_SUBSYS 67 #endif 68 69 extern const struct sctp_cc_functions sctp_cc_functions[]; 70 extern const struct sctp_ss_functions sctp_ss_functions[]; 71 72 void 73 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr) 74 { 75 #if defined(SCTP_LOCAL_TRACE_BUF) 76 struct sctp_cwnd_log sctp_clog; 77 78 sctp_clog.x.sb.stcb = stcb; 79 sctp_clog.x.sb.so_sbcc = sb->sb_cc; 80 if (stcb) 81 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc; 82 else 83 sctp_clog.x.sb.stcb_sbcc = 0; 84 sctp_clog.x.sb.incr = incr; 85 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 86 SCTP_LOG_EVENT_SB, 87 from, 88 sctp_clog.x.misc.log1, 89 sctp_clog.x.misc.log2, 90 sctp_clog.x.misc.log3, 91 sctp_clog.x.misc.log4); 92 #endif 93 } 94 95 void 96 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc) 97 { 98 #if defined(SCTP_LOCAL_TRACE_BUF) 99 struct sctp_cwnd_log sctp_clog; 100 101 sctp_clog.x.close.inp = (void *)inp; 102 sctp_clog.x.close.sctp_flags = inp->sctp_flags; 103 if (stcb) { 104 sctp_clog.x.close.stcb = (void *)stcb; 105 sctp_clog.x.close.state = (uint16_t)stcb->asoc.state; 106 } else { 107 sctp_clog.x.close.stcb = 0; 108 sctp_clog.x.close.state = 0; 109 } 110 sctp_clog.x.close.loc = loc; 111 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 112 SCTP_LOG_EVENT_CLOSE, 113 0, 114 sctp_clog.x.misc.log1, 115 sctp_clog.x.misc.log2, 116 sctp_clog.x.misc.log3, 117 sctp_clog.x.misc.log4); 118 #endif 119 } 120 121 void 122 rto_logging(struct sctp_nets *net, int from) 123 { 124 #if defined(SCTP_LOCAL_TRACE_BUF) 125 struct sctp_cwnd_log sctp_clog; 126 127 memset(&sctp_clog, 0, sizeof(sctp_clog)); 128 sctp_clog.x.rto.net = (void *)net; 129 sctp_clog.x.rto.rtt = net->rtt / 1000; 130 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 131 SCTP_LOG_EVENT_RTT, 132 from, 133 sctp_clog.x.misc.log1, 134 sctp_clog.x.misc.log2, 135 sctp_clog.x.misc.log3, 136 sctp_clog.x.misc.log4); 137 #endif 138 } 139 140 void 141 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from) 142 { 143 #if defined(SCTP_LOCAL_TRACE_BUF) 144 struct sctp_cwnd_log sctp_clog; 145 146 sctp_clog.x.strlog.stcb = stcb; 147 sctp_clog.x.strlog.n_tsn = tsn; 148 sctp_clog.x.strlog.n_sseq = sseq; 149 sctp_clog.x.strlog.e_tsn = 0; 150 sctp_clog.x.strlog.e_sseq = 0; 151 sctp_clog.x.strlog.strm = stream; 152 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 153 SCTP_LOG_EVENT_STRM, 154 from, 155 sctp_clog.x.misc.log1, 156 sctp_clog.x.misc.log2, 157 sctp_clog.x.misc.log3, 158 sctp_clog.x.misc.log4); 159 #endif 160 } 161 162 void 163 sctp_log_nagle_event(struct sctp_tcb *stcb, int action) 164 { 165 #if defined(SCTP_LOCAL_TRACE_BUF) 166 struct sctp_cwnd_log sctp_clog; 167 168 sctp_clog.x.nagle.stcb = (void *)stcb; 169 sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight; 170 sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size; 171 sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue; 172 sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count; 173 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 174 SCTP_LOG_EVENT_NAGLE, 175 action, 176 sctp_clog.x.misc.log1, 177 sctp_clog.x.misc.log2, 178 sctp_clog.x.misc.log3, 179 sctp_clog.x.misc.log4); 180 #endif 181 } 182 183 void 184 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from) 185 { 186 #if defined(SCTP_LOCAL_TRACE_BUF) 187 struct sctp_cwnd_log sctp_clog; 188 189 sctp_clog.x.sack.cumack = cumack; 190 sctp_clog.x.sack.oldcumack = old_cumack; 191 sctp_clog.x.sack.tsn = tsn; 192 sctp_clog.x.sack.numGaps = gaps; 193 sctp_clog.x.sack.numDups = dups; 194 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 195 SCTP_LOG_EVENT_SACK, 196 from, 197 sctp_clog.x.misc.log1, 198 sctp_clog.x.misc.log2, 199 sctp_clog.x.misc.log3, 200 sctp_clog.x.misc.log4); 201 #endif 202 } 203 204 void 205 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from) 206 { 207 #if defined(SCTP_LOCAL_TRACE_BUF) 208 struct sctp_cwnd_log sctp_clog; 209 210 memset(&sctp_clog, 0, sizeof(sctp_clog)); 211 sctp_clog.x.map.base = map; 212 sctp_clog.x.map.cum = cum; 213 sctp_clog.x.map.high = high; 214 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 215 SCTP_LOG_EVENT_MAP, 216 from, 217 sctp_clog.x.misc.log1, 218 sctp_clog.x.misc.log2, 219 sctp_clog.x.misc.log3, 220 sctp_clog.x.misc.log4); 221 #endif 222 } 223 224 void 225 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from) 226 { 227 #if defined(SCTP_LOCAL_TRACE_BUF) 228 struct sctp_cwnd_log sctp_clog; 229 230 memset(&sctp_clog, 0, sizeof(sctp_clog)); 231 sctp_clog.x.fr.largest_tsn = biggest_tsn; 232 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn; 233 sctp_clog.x.fr.tsn = tsn; 234 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 235 SCTP_LOG_EVENT_FR, 236 from, 237 sctp_clog.x.misc.log1, 238 sctp_clog.x.misc.log2, 239 sctp_clog.x.misc.log3, 240 sctp_clog.x.misc.log4); 241 #endif 242 } 243 244 #ifdef SCTP_MBUF_LOGGING 245 void 246 sctp_log_mb(struct mbuf *m, int from) 247 { 248 #if defined(SCTP_LOCAL_TRACE_BUF) 249 struct sctp_cwnd_log sctp_clog; 250 251 sctp_clog.x.mb.mp = m; 252 sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m)); 253 sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m)); 254 sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0); 255 if (SCTP_BUF_IS_EXTENDED(m)) { 256 sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m); 257 sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m)); 258 } else { 259 sctp_clog.x.mb.ext = 0; 260 sctp_clog.x.mb.refcnt = 0; 261 } 262 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 263 SCTP_LOG_EVENT_MBUF, 264 from, 265 sctp_clog.x.misc.log1, 266 sctp_clog.x.misc.log2, 267 sctp_clog.x.misc.log3, 268 sctp_clog.x.misc.log4); 269 #endif 270 } 271 272 void 273 sctp_log_mbc(struct mbuf *m, int from) 274 { 275 struct mbuf *mat; 276 277 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) { 278 sctp_log_mb(mat, from); 279 } 280 } 281 #endif 282 283 void 284 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from) 285 { 286 #if defined(SCTP_LOCAL_TRACE_BUF) 287 struct sctp_cwnd_log sctp_clog; 288 289 if (control == NULL) { 290 SCTP_PRINTF("Gak log of NULL?\n"); 291 return; 292 } 293 sctp_clog.x.strlog.stcb = control->stcb; 294 sctp_clog.x.strlog.n_tsn = control->sinfo_tsn; 295 sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid; 296 sctp_clog.x.strlog.strm = control->sinfo_stream; 297 if (poschk != NULL) { 298 sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn; 299 sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid; 300 } else { 301 sctp_clog.x.strlog.e_tsn = 0; 302 sctp_clog.x.strlog.e_sseq = 0; 303 } 304 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 305 SCTP_LOG_EVENT_STRM, 306 from, 307 sctp_clog.x.misc.log1, 308 sctp_clog.x.misc.log2, 309 sctp_clog.x.misc.log3, 310 sctp_clog.x.misc.log4); 311 #endif 312 } 313 314 void 315 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from) 316 { 317 #if defined(SCTP_LOCAL_TRACE_BUF) 318 struct sctp_cwnd_log sctp_clog; 319 320 sctp_clog.x.cwnd.net = net; 321 if (stcb->asoc.send_queue_cnt > 255) 322 sctp_clog.x.cwnd.cnt_in_send = 255; 323 else 324 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 325 if (stcb->asoc.stream_queue_cnt > 255) 326 sctp_clog.x.cwnd.cnt_in_str = 255; 327 else 328 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 329 330 if (net) { 331 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd; 332 sctp_clog.x.cwnd.inflight = net->flight_size; 333 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack; 334 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack; 335 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack; 336 } 337 if (SCTP_CWNDLOG_PRESEND == from) { 338 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd; 339 } 340 sctp_clog.x.cwnd.cwnd_augment = augment; 341 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 342 SCTP_LOG_EVENT_CWND, 343 from, 344 sctp_clog.x.misc.log1, 345 sctp_clog.x.misc.log2, 346 sctp_clog.x.misc.log3, 347 sctp_clog.x.misc.log4); 348 #endif 349 } 350 351 void 352 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from) 353 { 354 #if defined(SCTP_LOCAL_TRACE_BUF) 355 struct sctp_cwnd_log sctp_clog; 356 357 memset(&sctp_clog, 0, sizeof(sctp_clog)); 358 if (inp) { 359 sctp_clog.x.lock.sock = (void *)inp->sctp_socket; 360 361 } else { 362 sctp_clog.x.lock.sock = (void *)NULL; 363 } 364 sctp_clog.x.lock.inp = (void *)inp; 365 if (stcb) { 366 sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx); 367 } else { 368 sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN; 369 } 370 if (inp) { 371 sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx); 372 sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx); 373 } else { 374 sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN; 375 sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN; 376 } 377 sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx)); 378 if (inp && (inp->sctp_socket)) { 379 sctp_clog.x.lock.sock_lock = mtx_owned(SOCK_MTX(inp->sctp_socket)); 380 sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_rcv)); 381 sctp_clog.x.lock.socksndbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_snd)); 382 } else { 383 sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN; 384 sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN; 385 sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN; 386 } 387 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 388 SCTP_LOG_LOCK_EVENT, 389 from, 390 sctp_clog.x.misc.log1, 391 sctp_clog.x.misc.log2, 392 sctp_clog.x.misc.log3, 393 sctp_clog.x.misc.log4); 394 #endif 395 } 396 397 void 398 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from) 399 { 400 #if defined(SCTP_LOCAL_TRACE_BUF) 401 struct sctp_cwnd_log sctp_clog; 402 403 memset(&sctp_clog, 0, sizeof(sctp_clog)); 404 sctp_clog.x.cwnd.net = net; 405 sctp_clog.x.cwnd.cwnd_new_value = error; 406 sctp_clog.x.cwnd.inflight = net->flight_size; 407 sctp_clog.x.cwnd.cwnd_augment = burst; 408 if (stcb->asoc.send_queue_cnt > 255) 409 sctp_clog.x.cwnd.cnt_in_send = 255; 410 else 411 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 412 if (stcb->asoc.stream_queue_cnt > 255) 413 sctp_clog.x.cwnd.cnt_in_str = 255; 414 else 415 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 416 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 417 SCTP_LOG_EVENT_MAXBURST, 418 from, 419 sctp_clog.x.misc.log1, 420 sctp_clog.x.misc.log2, 421 sctp_clog.x.misc.log3, 422 sctp_clog.x.misc.log4); 423 #endif 424 } 425 426 void 427 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead) 428 { 429 #if defined(SCTP_LOCAL_TRACE_BUF) 430 struct sctp_cwnd_log sctp_clog; 431 432 sctp_clog.x.rwnd.rwnd = peers_rwnd; 433 sctp_clog.x.rwnd.send_size = snd_size; 434 sctp_clog.x.rwnd.overhead = overhead; 435 sctp_clog.x.rwnd.new_rwnd = 0; 436 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 437 SCTP_LOG_EVENT_RWND, 438 from, 439 sctp_clog.x.misc.log1, 440 sctp_clog.x.misc.log2, 441 sctp_clog.x.misc.log3, 442 sctp_clog.x.misc.log4); 443 #endif 444 } 445 446 void 447 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval) 448 { 449 #if defined(SCTP_LOCAL_TRACE_BUF) 450 struct sctp_cwnd_log sctp_clog; 451 452 sctp_clog.x.rwnd.rwnd = peers_rwnd; 453 sctp_clog.x.rwnd.send_size = flight_size; 454 sctp_clog.x.rwnd.overhead = overhead; 455 sctp_clog.x.rwnd.new_rwnd = a_rwndval; 456 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 457 SCTP_LOG_EVENT_RWND, 458 from, 459 sctp_clog.x.misc.log1, 460 sctp_clog.x.misc.log2, 461 sctp_clog.x.misc.log3, 462 sctp_clog.x.misc.log4); 463 #endif 464 } 465 466 #ifdef SCTP_MBCNT_LOGGING 467 static void 468 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt) 469 { 470 #if defined(SCTP_LOCAL_TRACE_BUF) 471 struct sctp_cwnd_log sctp_clog; 472 473 sctp_clog.x.mbcnt.total_queue_size = total_oq; 474 sctp_clog.x.mbcnt.size_change = book; 475 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q; 476 sctp_clog.x.mbcnt.mbcnt_change = mbcnt; 477 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 478 SCTP_LOG_EVENT_MBCNT, 479 from, 480 sctp_clog.x.misc.log1, 481 sctp_clog.x.misc.log2, 482 sctp_clog.x.misc.log3, 483 sctp_clog.x.misc.log4); 484 #endif 485 } 486 #endif 487 488 void 489 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d) 490 { 491 #if defined(SCTP_LOCAL_TRACE_BUF) 492 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 493 SCTP_LOG_MISC_EVENT, 494 from, 495 a, b, c, d); 496 #endif 497 } 498 499 void 500 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from) 501 { 502 #if defined(SCTP_LOCAL_TRACE_BUF) 503 struct sctp_cwnd_log sctp_clog; 504 505 sctp_clog.x.wake.stcb = (void *)stcb; 506 sctp_clog.x.wake.wake_cnt = wake_cnt; 507 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count; 508 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt; 509 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt; 510 511 if (stcb->asoc.stream_queue_cnt < 0xff) 512 sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt; 513 else 514 sctp_clog.x.wake.stream_qcnt = 0xff; 515 516 if (stcb->asoc.chunks_on_out_queue < 0xff) 517 sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue; 518 else 519 sctp_clog.x.wake.chunks_on_oque = 0xff; 520 521 sctp_clog.x.wake.sctpflags = 0; 522 /* set in the defered mode stuff */ 523 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) 524 sctp_clog.x.wake.sctpflags |= 1; 525 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) 526 sctp_clog.x.wake.sctpflags |= 2; 527 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) 528 sctp_clog.x.wake.sctpflags |= 4; 529 /* what about the sb */ 530 if (stcb->sctp_socket) { 531 struct socket *so = stcb->sctp_socket; 532 533 sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff)); 534 } else { 535 sctp_clog.x.wake.sbflags = 0xff; 536 } 537 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 538 SCTP_LOG_EVENT_WAKE, 539 from, 540 sctp_clog.x.misc.log1, 541 sctp_clog.x.misc.log2, 542 sctp_clog.x.misc.log3, 543 sctp_clog.x.misc.log4); 544 #endif 545 } 546 547 void 548 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen) 549 { 550 #if defined(SCTP_LOCAL_TRACE_BUF) 551 struct sctp_cwnd_log sctp_clog; 552 553 sctp_clog.x.blk.onsb = asoc->total_output_queue_size; 554 sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt); 555 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd; 556 sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt; 557 sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue; 558 sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024); 559 sctp_clog.x.blk.sndlen = (uint32_t)sendlen; 560 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 561 SCTP_LOG_EVENT_BLOCK, 562 from, 563 sctp_clog.x.misc.log1, 564 sctp_clog.x.misc.log2, 565 sctp_clog.x.misc.log3, 566 sctp_clog.x.misc.log4); 567 #endif 568 } 569 570 int 571 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED) 572 { 573 /* May need to fix this if ktrdump does not work */ 574 return (0); 575 } 576 577 #ifdef SCTP_AUDITING_ENABLED 578 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2]; 579 static int sctp_audit_indx = 0; 580 581 static 582 void 583 sctp_print_audit_report(void) 584 { 585 int i; 586 int cnt; 587 588 cnt = 0; 589 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) { 590 if ((sctp_audit_data[i][0] == 0xe0) && 591 (sctp_audit_data[i][1] == 0x01)) { 592 cnt = 0; 593 SCTP_PRINTF("\n"); 594 } else if (sctp_audit_data[i][0] == 0xf0) { 595 cnt = 0; 596 SCTP_PRINTF("\n"); 597 } else if ((sctp_audit_data[i][0] == 0xc0) && 598 (sctp_audit_data[i][1] == 0x01)) { 599 SCTP_PRINTF("\n"); 600 cnt = 0; 601 } 602 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0], 603 (uint32_t)sctp_audit_data[i][1]); 604 cnt++; 605 if ((cnt % 14) == 0) 606 SCTP_PRINTF("\n"); 607 } 608 for (i = 0; i < sctp_audit_indx; i++) { 609 if ((sctp_audit_data[i][0] == 0xe0) && 610 (sctp_audit_data[i][1] == 0x01)) { 611 cnt = 0; 612 SCTP_PRINTF("\n"); 613 } else if (sctp_audit_data[i][0] == 0xf0) { 614 cnt = 0; 615 SCTP_PRINTF("\n"); 616 } else if ((sctp_audit_data[i][0] == 0xc0) && 617 (sctp_audit_data[i][1] == 0x01)) { 618 SCTP_PRINTF("\n"); 619 cnt = 0; 620 } 621 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0], 622 (uint32_t)sctp_audit_data[i][1]); 623 cnt++; 624 if ((cnt % 14) == 0) 625 SCTP_PRINTF("\n"); 626 } 627 SCTP_PRINTF("\n"); 628 } 629 630 void 631 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 632 struct sctp_nets *net) 633 { 634 int resend_cnt, tot_out, rep, tot_book_cnt; 635 struct sctp_nets *lnet; 636 struct sctp_tmit_chunk *chk; 637 638 sctp_audit_data[sctp_audit_indx][0] = 0xAA; 639 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from; 640 sctp_audit_indx++; 641 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 642 sctp_audit_indx = 0; 643 } 644 if (inp == NULL) { 645 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 646 sctp_audit_data[sctp_audit_indx][1] = 0x01; 647 sctp_audit_indx++; 648 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 649 sctp_audit_indx = 0; 650 } 651 return; 652 } 653 if (stcb == NULL) { 654 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 655 sctp_audit_data[sctp_audit_indx][1] = 0x02; 656 sctp_audit_indx++; 657 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 658 sctp_audit_indx = 0; 659 } 660 return; 661 } 662 sctp_audit_data[sctp_audit_indx][0] = 0xA1; 663 sctp_audit_data[sctp_audit_indx][1] = 664 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 665 sctp_audit_indx++; 666 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 667 sctp_audit_indx = 0; 668 } 669 rep = 0; 670 tot_book_cnt = 0; 671 resend_cnt = tot_out = 0; 672 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 673 if (chk->sent == SCTP_DATAGRAM_RESEND) { 674 resend_cnt++; 675 } else if (chk->sent < SCTP_DATAGRAM_RESEND) { 676 tot_out += chk->book_size; 677 tot_book_cnt++; 678 } 679 } 680 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) { 681 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 682 sctp_audit_data[sctp_audit_indx][1] = 0xA1; 683 sctp_audit_indx++; 684 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 685 sctp_audit_indx = 0; 686 } 687 SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n", 688 resend_cnt, stcb->asoc.sent_queue_retran_cnt); 689 rep = 1; 690 stcb->asoc.sent_queue_retran_cnt = resend_cnt; 691 sctp_audit_data[sctp_audit_indx][0] = 0xA2; 692 sctp_audit_data[sctp_audit_indx][1] = 693 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 694 sctp_audit_indx++; 695 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 696 sctp_audit_indx = 0; 697 } 698 } 699 if (tot_out != stcb->asoc.total_flight) { 700 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 701 sctp_audit_data[sctp_audit_indx][1] = 0xA2; 702 sctp_audit_indx++; 703 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 704 sctp_audit_indx = 0; 705 } 706 rep = 1; 707 SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out, 708 (int)stcb->asoc.total_flight); 709 stcb->asoc.total_flight = tot_out; 710 } 711 if (tot_book_cnt != stcb->asoc.total_flight_count) { 712 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 713 sctp_audit_data[sctp_audit_indx][1] = 0xA5; 714 sctp_audit_indx++; 715 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 716 sctp_audit_indx = 0; 717 } 718 rep = 1; 719 SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt); 720 721 stcb->asoc.total_flight_count = tot_book_cnt; 722 } 723 tot_out = 0; 724 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 725 tot_out += lnet->flight_size; 726 } 727 if (tot_out != stcb->asoc.total_flight) { 728 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 729 sctp_audit_data[sctp_audit_indx][1] = 0xA3; 730 sctp_audit_indx++; 731 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 732 sctp_audit_indx = 0; 733 } 734 rep = 1; 735 SCTP_PRINTF("real flight:%d net total was %d\n", 736 stcb->asoc.total_flight, tot_out); 737 /* now corrective action */ 738 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 739 tot_out = 0; 740 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 741 if ((chk->whoTo == lnet) && 742 (chk->sent < SCTP_DATAGRAM_RESEND)) { 743 tot_out += chk->book_size; 744 } 745 } 746 if (lnet->flight_size != tot_out) { 747 SCTP_PRINTF("net:%p flight was %d corrected to %d\n", 748 (void *)lnet, lnet->flight_size, 749 tot_out); 750 lnet->flight_size = tot_out; 751 } 752 } 753 } 754 if (rep) { 755 sctp_print_audit_report(); 756 } 757 } 758 759 void 760 sctp_audit_log(uint8_t ev, uint8_t fd) 761 { 762 763 sctp_audit_data[sctp_audit_indx][0] = ev; 764 sctp_audit_data[sctp_audit_indx][1] = fd; 765 sctp_audit_indx++; 766 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 767 sctp_audit_indx = 0; 768 } 769 } 770 771 #endif 772 773 /* 774 * The conversion from time to ticks and vice versa is done by rounding 775 * upwards. This way we can test in the code the time to be positive and 776 * know that this corresponds to a positive number of ticks. 777 */ 778 779 uint32_t 780 sctp_msecs_to_ticks(uint32_t msecs) 781 { 782 uint64_t temp; 783 uint32_t ticks; 784 785 if (hz == 1000) { 786 ticks = msecs; 787 } else { 788 temp = (((uint64_t)msecs * hz) + 999) / 1000; 789 if (temp > UINT32_MAX) { 790 ticks = UINT32_MAX; 791 } else { 792 ticks = (uint32_t)temp; 793 } 794 } 795 return (ticks); 796 } 797 798 uint32_t 799 sctp_ticks_to_msecs(uint32_t ticks) 800 { 801 uint64_t temp; 802 uint32_t msecs; 803 804 if (hz == 1000) { 805 msecs = ticks; 806 } else { 807 temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz; 808 if (temp > UINT32_MAX) { 809 msecs = UINT32_MAX; 810 } else { 811 msecs = (uint32_t)temp; 812 } 813 } 814 return (msecs); 815 } 816 817 uint32_t 818 sctp_secs_to_ticks(uint32_t secs) 819 { 820 uint64_t temp; 821 uint32_t ticks; 822 823 temp = (uint64_t)secs * hz; 824 if (temp > UINT32_MAX) { 825 ticks = UINT32_MAX; 826 } else { 827 ticks = (uint32_t)temp; 828 } 829 return (ticks); 830 } 831 832 uint32_t 833 sctp_ticks_to_secs(uint32_t ticks) 834 { 835 uint64_t temp; 836 uint32_t secs; 837 838 temp = ((uint64_t)ticks + (hz - 1)) / hz; 839 if (temp > UINT32_MAX) { 840 secs = UINT32_MAX; 841 } else { 842 secs = (uint32_t)temp; 843 } 844 return (secs); 845 } 846 847 /* 848 * sctp_stop_timers_for_shutdown() should be called 849 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT 850 * state to make sure that all timers are stopped. 851 */ 852 void 853 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb) 854 { 855 struct sctp_inpcb *inp; 856 struct sctp_nets *net; 857 858 inp = stcb->sctp_ep; 859 860 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL, 861 SCTP_FROM_SCTPUTIL + SCTP_LOC_12); 862 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL, 863 SCTP_FROM_SCTPUTIL + SCTP_LOC_13); 864 sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL, 865 SCTP_FROM_SCTPUTIL + SCTP_LOC_14); 866 sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL, 867 SCTP_FROM_SCTPUTIL + SCTP_LOC_15); 868 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 869 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 870 SCTP_FROM_SCTPUTIL + SCTP_LOC_16); 871 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 872 SCTP_FROM_SCTPUTIL + SCTP_LOC_17); 873 } 874 } 875 876 void 877 sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer) 878 { 879 struct sctp_inpcb *inp; 880 struct sctp_nets *net; 881 882 inp = stcb->sctp_ep; 883 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL, 884 SCTP_FROM_SCTPUTIL + SCTP_LOC_18); 885 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL, 886 SCTP_FROM_SCTPUTIL + SCTP_LOC_19); 887 if (stop_assoc_kill_timer) { 888 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 889 SCTP_FROM_SCTPUTIL + SCTP_LOC_20); 890 } 891 sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL, 892 SCTP_FROM_SCTPUTIL + SCTP_LOC_21); 893 sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL, 894 SCTP_FROM_SCTPUTIL + SCTP_LOC_22); 895 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL, 896 SCTP_FROM_SCTPUTIL + SCTP_LOC_23); 897 /* Mobility adaptation */ 898 sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL, 899 SCTP_FROM_SCTPUTIL + SCTP_LOC_24); 900 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 901 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net, 902 SCTP_FROM_SCTPUTIL + SCTP_LOC_25); 903 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, 904 SCTP_FROM_SCTPUTIL + SCTP_LOC_26); 905 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net, 906 SCTP_FROM_SCTPUTIL + SCTP_LOC_27); 907 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net, 908 SCTP_FROM_SCTPUTIL + SCTP_LOC_28); 909 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net, 910 SCTP_FROM_SCTPUTIL + SCTP_LOC_29); 911 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 912 SCTP_FROM_SCTPUTIL + SCTP_LOC_30); 913 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 914 SCTP_FROM_SCTPUTIL + SCTP_LOC_31); 915 } 916 } 917 918 /* 919 * A list of sizes based on typical mtu's, used only if next hop size not 920 * returned. These values MUST be multiples of 4 and MUST be ordered. 921 */ 922 static uint32_t sctp_mtu_sizes[] = { 923 68, 924 296, 925 508, 926 512, 927 544, 928 576, 929 1004, 930 1492, 931 1500, 932 1536, 933 2000, 934 2048, 935 4352, 936 4464, 937 8168, 938 17912, 939 32000, 940 65532 941 }; 942 943 /* 944 * Return the largest MTU in sctp_mtu_sizes smaller than val. 945 * If val is smaller than the minimum, just return the largest 946 * multiple of 4 smaller or equal to val. 947 * Ensure that the result is a multiple of 4. 948 */ 949 uint32_t 950 sctp_get_prev_mtu(uint32_t val) 951 { 952 uint32_t i; 953 954 val &= 0xfffffffc; 955 if (val <= sctp_mtu_sizes[0]) { 956 return (val); 957 } 958 for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) { 959 if (val <= sctp_mtu_sizes[i]) { 960 break; 961 } 962 } 963 KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0, 964 ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1)); 965 return (sctp_mtu_sizes[i - 1]); 966 } 967 968 /* 969 * Return the smallest MTU in sctp_mtu_sizes larger than val. 970 * If val is larger than the maximum, just return the largest multiple of 4 smaller 971 * or equal to val. 972 * Ensure that the result is a multiple of 4. 973 */ 974 uint32_t 975 sctp_get_next_mtu(uint32_t val) 976 { 977 /* select another MTU that is just bigger than this one */ 978 uint32_t i; 979 980 val &= 0xfffffffc; 981 for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) { 982 if (val < sctp_mtu_sizes[i]) { 983 KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0, 984 ("sctp_mtu_sizes[%u] not a multiple of 4", i)); 985 return (sctp_mtu_sizes[i]); 986 } 987 } 988 return (val); 989 } 990 991 void 992 sctp_fill_random_store(struct sctp_pcb *m) 993 { 994 /* 995 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and 996 * our counter. The result becomes our good random numbers and we 997 * then setup to give these out. Note that we do no locking to 998 * protect this. This is ok, since if competing folks call this we 999 * will get more gobbled gook in the random store which is what we 1000 * want. There is a danger that two guys will use the same random 1001 * numbers, but thats ok too since that is random as well :-> 1002 */ 1003 m->store_at = 0; 1004 (void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers, 1005 sizeof(m->random_numbers), (uint8_t *)&m->random_counter, 1006 sizeof(m->random_counter), (uint8_t *)m->random_store); 1007 m->random_counter++; 1008 } 1009 1010 uint32_t 1011 sctp_select_initial_TSN(struct sctp_pcb *inp) 1012 { 1013 /* 1014 * A true implementation should use random selection process to get 1015 * the initial stream sequence number, using RFC1750 as a good 1016 * guideline 1017 */ 1018 uint32_t x, *xp; 1019 uint8_t *p; 1020 int store_at, new_store; 1021 1022 if (inp->initial_sequence_debug != 0) { 1023 uint32_t ret; 1024 1025 ret = inp->initial_sequence_debug; 1026 inp->initial_sequence_debug++; 1027 return (ret); 1028 } 1029 retry: 1030 store_at = inp->store_at; 1031 new_store = store_at + sizeof(uint32_t); 1032 if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) { 1033 new_store = 0; 1034 } 1035 if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) { 1036 goto retry; 1037 } 1038 if (new_store == 0) { 1039 /* Refill the random store */ 1040 sctp_fill_random_store(inp); 1041 } 1042 p = &inp->random_store[store_at]; 1043 xp = (uint32_t *)p; 1044 x = *xp; 1045 return (x); 1046 } 1047 1048 uint32_t 1049 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check) 1050 { 1051 uint32_t x; 1052 struct timeval now; 1053 1054 if (check) { 1055 (void)SCTP_GETTIME_TIMEVAL(&now); 1056 } 1057 for (;;) { 1058 x = sctp_select_initial_TSN(&inp->sctp_ep); 1059 if (x == 0) { 1060 /* we never use 0 */ 1061 continue; 1062 } 1063 if (!check || sctp_is_vtag_good(x, lport, rport, &now)) { 1064 break; 1065 } 1066 } 1067 return (x); 1068 } 1069 1070 int32_t 1071 sctp_map_assoc_state(int kernel_state) 1072 { 1073 int32_t user_state; 1074 1075 if (kernel_state & SCTP_STATE_WAS_ABORTED) { 1076 user_state = SCTP_CLOSED; 1077 } else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) { 1078 user_state = SCTP_SHUTDOWN_PENDING; 1079 } else { 1080 switch (kernel_state & SCTP_STATE_MASK) { 1081 case SCTP_STATE_EMPTY: 1082 user_state = SCTP_CLOSED; 1083 break; 1084 case SCTP_STATE_INUSE: 1085 user_state = SCTP_CLOSED; 1086 break; 1087 case SCTP_STATE_COOKIE_WAIT: 1088 user_state = SCTP_COOKIE_WAIT; 1089 break; 1090 case SCTP_STATE_COOKIE_ECHOED: 1091 user_state = SCTP_COOKIE_ECHOED; 1092 break; 1093 case SCTP_STATE_OPEN: 1094 user_state = SCTP_ESTABLISHED; 1095 break; 1096 case SCTP_STATE_SHUTDOWN_SENT: 1097 user_state = SCTP_SHUTDOWN_SENT; 1098 break; 1099 case SCTP_STATE_SHUTDOWN_RECEIVED: 1100 user_state = SCTP_SHUTDOWN_RECEIVED; 1101 break; 1102 case SCTP_STATE_SHUTDOWN_ACK_SENT: 1103 user_state = SCTP_SHUTDOWN_ACK_SENT; 1104 break; 1105 default: 1106 user_state = SCTP_CLOSED; 1107 break; 1108 } 1109 } 1110 return (user_state); 1111 } 1112 1113 int 1114 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1115 uint32_t override_tag, uint32_t initial_tsn, uint32_t vrf_id, 1116 uint16_t o_strms) 1117 { 1118 struct sctp_association *asoc; 1119 1120 /* 1121 * Anything set to zero is taken care of by the allocation routine's 1122 * bzero 1123 */ 1124 1125 /* 1126 * Up front select what scoping to apply on addresses I tell my peer 1127 * Not sure what to do with these right now, we will need to come up 1128 * with a way to set them. We may need to pass them through from the 1129 * caller in the sctp_aloc_assoc() function. 1130 */ 1131 int i; 1132 #if defined(SCTP_DETAILED_STR_STATS) 1133 int j; 1134 #endif 1135 1136 asoc = &stcb->asoc; 1137 /* init all variables to a known value. */ 1138 SCTP_SET_STATE(stcb, SCTP_STATE_INUSE); 1139 asoc->max_burst = inp->sctp_ep.max_burst; 1140 asoc->fr_max_burst = inp->sctp_ep.fr_max_burst; 1141 asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 1142 asoc->cookie_life = inp->sctp_ep.def_cookie_life; 1143 asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off; 1144 asoc->ecn_supported = inp->ecn_supported; 1145 asoc->prsctp_supported = inp->prsctp_supported; 1146 asoc->auth_supported = inp->auth_supported; 1147 asoc->asconf_supported = inp->asconf_supported; 1148 asoc->reconfig_supported = inp->reconfig_supported; 1149 asoc->nrsack_supported = inp->nrsack_supported; 1150 asoc->pktdrop_supported = inp->pktdrop_supported; 1151 asoc->idata_supported = inp->idata_supported; 1152 asoc->sctp_cmt_pf = (uint8_t)0; 1153 asoc->sctp_frag_point = inp->sctp_frag_point; 1154 asoc->sctp_features = inp->sctp_features; 1155 asoc->default_dscp = inp->sctp_ep.default_dscp; 1156 asoc->max_cwnd = inp->max_cwnd; 1157 #ifdef INET6 1158 if (inp->sctp_ep.default_flowlabel) { 1159 asoc->default_flowlabel = inp->sctp_ep.default_flowlabel; 1160 } else { 1161 if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) { 1162 asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep); 1163 asoc->default_flowlabel &= 0x000fffff; 1164 asoc->default_flowlabel |= 0x80000000; 1165 } else { 1166 asoc->default_flowlabel = 0; 1167 } 1168 } 1169 #endif 1170 asoc->sb_send_resv = 0; 1171 if (override_tag) { 1172 asoc->my_vtag = override_tag; 1173 } else { 1174 asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1); 1175 } 1176 /* Get the nonce tags */ 1177 asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 1178 asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 1179 asoc->vrf_id = vrf_id; 1180 1181 #ifdef SCTP_ASOCLOG_OF_TSNS 1182 asoc->tsn_in_at = 0; 1183 asoc->tsn_out_at = 0; 1184 asoc->tsn_in_wrapped = 0; 1185 asoc->tsn_out_wrapped = 0; 1186 asoc->cumack_log_at = 0; 1187 asoc->cumack_log_atsnt = 0; 1188 #endif 1189 #ifdef SCTP_FS_SPEC_LOG 1190 asoc->fs_index = 0; 1191 #endif 1192 asoc->refcnt = 0; 1193 asoc->assoc_up_sent = 0; 1194 if (override_tag) { 1195 asoc->init_seq_number = initial_tsn; 1196 } else { 1197 asoc->init_seq_number = sctp_select_initial_TSN(&inp->sctp_ep); 1198 } 1199 asoc->asconf_seq_out = asoc->init_seq_number; 1200 asoc->str_reset_seq_out = asoc->init_seq_number; 1201 asoc->sending_seq = asoc->init_seq_number; 1202 asoc->asconf_seq_out_acked = asoc->init_seq_number - 1; 1203 /* we are optimistic here */ 1204 asoc->peer_supports_nat = 0; 1205 asoc->sent_queue_retran_cnt = 0; 1206 1207 /* for CMT */ 1208 asoc->last_net_cmt_send_started = NULL; 1209 1210 asoc->last_acked_seq = asoc->init_seq_number - 1; 1211 asoc->advanced_peer_ack_point = asoc->init_seq_number - 1; 1212 asoc->asconf_seq_in = asoc->init_seq_number - 1; 1213 1214 /* here we are different, we hold the next one we expect */ 1215 asoc->str_reset_seq_in = asoc->init_seq_number; 1216 1217 asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max; 1218 asoc->initial_rto = inp->sctp_ep.initial_rto; 1219 1220 asoc->default_mtu = inp->sctp_ep.default_mtu; 1221 asoc->max_init_times = inp->sctp_ep.max_init_times; 1222 asoc->max_send_times = inp->sctp_ep.max_send_times; 1223 asoc->def_net_failure = inp->sctp_ep.def_net_failure; 1224 asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold; 1225 asoc->free_chunk_cnt = 0; 1226 1227 asoc->iam_blocking = 0; 1228 asoc->context = inp->sctp_context; 1229 asoc->local_strreset_support = inp->local_strreset_support; 1230 asoc->def_send = inp->def_send; 1231 asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 1232 asoc->sack_freq = inp->sctp_ep.sctp_sack_freq; 1233 asoc->pr_sctp_cnt = 0; 1234 asoc->total_output_queue_size = 0; 1235 1236 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1237 asoc->scope.ipv6_addr_legal = 1; 1238 if (SCTP_IPV6_V6ONLY(inp) == 0) { 1239 asoc->scope.ipv4_addr_legal = 1; 1240 } else { 1241 asoc->scope.ipv4_addr_legal = 0; 1242 } 1243 } else { 1244 asoc->scope.ipv6_addr_legal = 0; 1245 asoc->scope.ipv4_addr_legal = 1; 1246 } 1247 1248 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND); 1249 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket); 1250 1251 asoc->smallest_mtu = 0; 1252 asoc->minrto = inp->sctp_ep.sctp_minrto; 1253 asoc->maxrto = inp->sctp_ep.sctp_maxrto; 1254 1255 asoc->stream_locked_on = 0; 1256 asoc->ecn_echo_cnt_onq = 0; 1257 asoc->stream_locked = 0; 1258 1259 asoc->send_sack = 1; 1260 1261 LIST_INIT(&asoc->sctp_restricted_addrs); 1262 1263 TAILQ_INIT(&asoc->nets); 1264 TAILQ_INIT(&asoc->pending_reply_queue); 1265 TAILQ_INIT(&asoc->asconf_ack_sent); 1266 /* Setup to fill the hb random cache at first HB */ 1267 asoc->hb_random_idx = 4; 1268 1269 asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time; 1270 1271 stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module; 1272 stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module]; 1273 1274 stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module; 1275 stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module]; 1276 1277 /* 1278 * Now the stream parameters, here we allocate space for all streams 1279 * that we request by default. 1280 */ 1281 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams = 1282 o_strms; 1283 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *, 1284 asoc->streamoutcnt * sizeof(struct sctp_stream_out), 1285 SCTP_M_STRMO); 1286 if (asoc->strmout == NULL) { 1287 /* big trouble no memory */ 1288 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1289 return (ENOMEM); 1290 } 1291 SCTP_TCB_LOCK(stcb); 1292 for (i = 0; i < asoc->streamoutcnt; i++) { 1293 /* 1294 * inbound side must be set to 0xffff, also NOTE when we get 1295 * the INIT-ACK back (for INIT sender) we MUST reduce the 1296 * count (streamoutcnt) but first check if we sent to any of 1297 * the upper streams that were dropped (if some were). Those 1298 * that were dropped must be notified to the upper layer as 1299 * failed to send. 1300 */ 1301 TAILQ_INIT(&asoc->strmout[i].outqueue); 1302 asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL); 1303 asoc->strmout[i].chunks_on_queues = 0; 1304 #if defined(SCTP_DETAILED_STR_STATS) 1305 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) { 1306 asoc->strmout[i].abandoned_sent[j] = 0; 1307 asoc->strmout[i].abandoned_unsent[j] = 0; 1308 } 1309 #else 1310 asoc->strmout[i].abandoned_sent[0] = 0; 1311 asoc->strmout[i].abandoned_unsent[0] = 0; 1312 #endif 1313 asoc->strmout[i].next_mid_ordered = 0; 1314 asoc->strmout[i].next_mid_unordered = 0; 1315 asoc->strmout[i].sid = i; 1316 asoc->strmout[i].last_msg_incomplete = 0; 1317 asoc->strmout[i].state = SCTP_STREAM_OPENING; 1318 } 1319 asoc->ss_functions.sctp_ss_init(stcb, asoc); 1320 SCTP_TCB_UNLOCK(stcb); 1321 1322 /* Now the mapping array */ 1323 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY; 1324 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size, 1325 SCTP_M_MAP); 1326 if (asoc->mapping_array == NULL) { 1327 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1328 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1329 return (ENOMEM); 1330 } 1331 memset(asoc->mapping_array, 0, asoc->mapping_array_size); 1332 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size, 1333 SCTP_M_MAP); 1334 if (asoc->nr_mapping_array == NULL) { 1335 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1336 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1337 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1338 return (ENOMEM); 1339 } 1340 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size); 1341 1342 /* Now the init of the other outqueues */ 1343 TAILQ_INIT(&asoc->free_chunks); 1344 TAILQ_INIT(&asoc->control_send_queue); 1345 TAILQ_INIT(&asoc->asconf_send_queue); 1346 TAILQ_INIT(&asoc->send_queue); 1347 TAILQ_INIT(&asoc->sent_queue); 1348 TAILQ_INIT(&asoc->resetHead); 1349 asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome; 1350 TAILQ_INIT(&asoc->asconf_queue); 1351 /* authentication fields */ 1352 asoc->authinfo.random = NULL; 1353 asoc->authinfo.active_keyid = 0; 1354 asoc->authinfo.assoc_key = NULL; 1355 asoc->authinfo.assoc_keyid = 0; 1356 asoc->authinfo.recv_key = NULL; 1357 asoc->authinfo.recv_keyid = 0; 1358 LIST_INIT(&asoc->shared_keys); 1359 asoc->marked_retrans = 0; 1360 asoc->port = inp->sctp_ep.port; 1361 asoc->timoinit = 0; 1362 asoc->timodata = 0; 1363 asoc->timosack = 0; 1364 asoc->timoshutdown = 0; 1365 asoc->timoheartbeat = 0; 1366 asoc->timocookie = 0; 1367 asoc->timoshutdownack = 0; 1368 (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time); 1369 asoc->discontinuity_time = asoc->start_time; 1370 for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) { 1371 asoc->abandoned_unsent[i] = 0; 1372 asoc->abandoned_sent[i] = 0; 1373 } 1374 /* 1375 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and 1376 * freed later when the association is freed. 1377 */ 1378 return (0); 1379 } 1380 1381 void 1382 sctp_print_mapping_array(struct sctp_association *asoc) 1383 { 1384 unsigned int i, limit; 1385 1386 SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n", 1387 asoc->mapping_array_size, 1388 asoc->mapping_array_base_tsn, 1389 asoc->cumulative_tsn, 1390 asoc->highest_tsn_inside_map, 1391 asoc->highest_tsn_inside_nr_map); 1392 for (limit = asoc->mapping_array_size; limit > 1; limit--) { 1393 if (asoc->mapping_array[limit - 1] != 0) { 1394 break; 1395 } 1396 } 1397 SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit); 1398 for (i = 0; i < limit; i++) { 1399 SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n'); 1400 } 1401 if (limit % 16) 1402 SCTP_PRINTF("\n"); 1403 for (limit = asoc->mapping_array_size; limit > 1; limit--) { 1404 if (asoc->nr_mapping_array[limit - 1]) { 1405 break; 1406 } 1407 } 1408 SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit); 1409 for (i = 0; i < limit; i++) { 1410 SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n'); 1411 } 1412 if (limit % 16) 1413 SCTP_PRINTF("\n"); 1414 } 1415 1416 int 1417 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed) 1418 { 1419 /* mapping array needs to grow */ 1420 uint8_t *new_array1, *new_array2; 1421 uint32_t new_size; 1422 1423 new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR); 1424 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP); 1425 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP); 1426 if ((new_array1 == NULL) || (new_array2 == NULL)) { 1427 /* can't get more, forget it */ 1428 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size); 1429 if (new_array1) { 1430 SCTP_FREE(new_array1, SCTP_M_MAP); 1431 } 1432 if (new_array2) { 1433 SCTP_FREE(new_array2, SCTP_M_MAP); 1434 } 1435 return (-1); 1436 } 1437 memset(new_array1, 0, new_size); 1438 memset(new_array2, 0, new_size); 1439 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size); 1440 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size); 1441 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1442 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP); 1443 asoc->mapping_array = new_array1; 1444 asoc->nr_mapping_array = new_array2; 1445 asoc->mapping_array_size = new_size; 1446 return (0); 1447 } 1448 1449 static void 1450 sctp_iterator_work(struct sctp_iterator *it) 1451 { 1452 struct epoch_tracker et; 1453 struct sctp_inpcb *tinp; 1454 int iteration_count = 0; 1455 int inp_skip = 0; 1456 int first_in = 1; 1457 1458 NET_EPOCH_ENTER(et); 1459 SCTP_INP_INFO_RLOCK(); 1460 SCTP_ITERATOR_LOCK(); 1461 sctp_it_ctl.cur_it = it; 1462 if (it->inp) { 1463 SCTP_INP_RLOCK(it->inp); 1464 SCTP_INP_DECR_REF(it->inp); 1465 } 1466 if (it->inp == NULL) { 1467 /* iterator is complete */ 1468 done_with_iterator: 1469 sctp_it_ctl.cur_it = NULL; 1470 SCTP_ITERATOR_UNLOCK(); 1471 SCTP_INP_INFO_RUNLOCK(); 1472 if (it->function_atend != NULL) { 1473 (*it->function_atend) (it->pointer, it->val); 1474 } 1475 SCTP_FREE(it, SCTP_M_ITER); 1476 NET_EPOCH_EXIT(et); 1477 return; 1478 } 1479 select_a_new_ep: 1480 if (first_in) { 1481 first_in = 0; 1482 } else { 1483 SCTP_INP_RLOCK(it->inp); 1484 } 1485 while (((it->pcb_flags) && 1486 ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) || 1487 ((it->pcb_features) && 1488 ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) { 1489 /* endpoint flags or features don't match, so keep looking */ 1490 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1491 SCTP_INP_RUNLOCK(it->inp); 1492 goto done_with_iterator; 1493 } 1494 tinp = it->inp; 1495 it->inp = LIST_NEXT(it->inp, sctp_list); 1496 it->stcb = NULL; 1497 SCTP_INP_RUNLOCK(tinp); 1498 if (it->inp == NULL) { 1499 goto done_with_iterator; 1500 } 1501 SCTP_INP_RLOCK(it->inp); 1502 } 1503 /* now go through each assoc which is in the desired state */ 1504 if (it->done_current_ep == 0) { 1505 if (it->function_inp != NULL) 1506 inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val); 1507 it->done_current_ep = 1; 1508 } 1509 if (it->stcb == NULL) { 1510 /* run the per instance function */ 1511 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list); 1512 } 1513 if ((inp_skip) || it->stcb == NULL) { 1514 if (it->function_inp_end != NULL) { 1515 inp_skip = (*it->function_inp_end) (it->inp, 1516 it->pointer, 1517 it->val); 1518 } 1519 SCTP_INP_RUNLOCK(it->inp); 1520 goto no_stcb; 1521 } 1522 while (it->stcb != NULL) { 1523 SCTP_TCB_LOCK(it->stcb); 1524 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) { 1525 /* not in the right state... keep looking */ 1526 SCTP_TCB_UNLOCK(it->stcb); 1527 goto next_assoc; 1528 } 1529 /* see if we have limited out the iterator loop */ 1530 iteration_count++; 1531 if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) { 1532 /* Pause to let others grab the lock */ 1533 atomic_add_int(&it->stcb->asoc.refcnt, 1); 1534 SCTP_TCB_UNLOCK(it->stcb); 1535 SCTP_INP_INCR_REF(it->inp); 1536 SCTP_INP_RUNLOCK(it->inp); 1537 SCTP_ITERATOR_UNLOCK(); 1538 SCTP_INP_INFO_RUNLOCK(); 1539 SCTP_INP_INFO_RLOCK(); 1540 SCTP_ITERATOR_LOCK(); 1541 if (sctp_it_ctl.iterator_flags) { 1542 /* We won't be staying here */ 1543 SCTP_INP_DECR_REF(it->inp); 1544 atomic_subtract_int(&it->stcb->asoc.refcnt, 1); 1545 if (sctp_it_ctl.iterator_flags & 1546 SCTP_ITERATOR_STOP_CUR_IT) { 1547 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT; 1548 goto done_with_iterator; 1549 } 1550 if (sctp_it_ctl.iterator_flags & 1551 SCTP_ITERATOR_STOP_CUR_INP) { 1552 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP; 1553 goto no_stcb; 1554 } 1555 /* If we reach here huh? */ 1556 SCTP_PRINTF("Unknown it ctl flag %x\n", 1557 sctp_it_ctl.iterator_flags); 1558 sctp_it_ctl.iterator_flags = 0; 1559 } 1560 SCTP_INP_RLOCK(it->inp); 1561 SCTP_INP_DECR_REF(it->inp); 1562 SCTP_TCB_LOCK(it->stcb); 1563 atomic_subtract_int(&it->stcb->asoc.refcnt, 1); 1564 iteration_count = 0; 1565 } 1566 KASSERT(it->inp == it->stcb->sctp_ep, 1567 ("%s: stcb %p does not belong to inp %p, but inp %p", 1568 __func__, it->stcb, it->inp, it->stcb->sctp_ep)); 1569 SCTP_INP_RLOCK_ASSERT(it->inp); 1570 SCTP_TCB_LOCK_ASSERT(it->stcb); 1571 1572 /* run function on this one */ 1573 (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val); 1574 SCTP_INP_RLOCK_ASSERT(it->inp); 1575 SCTP_TCB_LOCK_ASSERT(it->stcb); 1576 1577 /* 1578 * we lie here, it really needs to have its own type but 1579 * first I must verify that this won't effect things :-0 1580 */ 1581 if (it->no_chunk_output == 0) { 1582 sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1583 SCTP_INP_RLOCK_ASSERT(it->inp); 1584 SCTP_TCB_LOCK_ASSERT(it->stcb); 1585 } 1586 1587 SCTP_TCB_UNLOCK(it->stcb); 1588 next_assoc: 1589 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist); 1590 if (it->stcb == NULL) { 1591 /* Run last function */ 1592 if (it->function_inp_end != NULL) { 1593 inp_skip = (*it->function_inp_end) (it->inp, 1594 it->pointer, 1595 it->val); 1596 } 1597 } 1598 } 1599 SCTP_INP_RUNLOCK(it->inp); 1600 no_stcb: 1601 /* done with all assocs on this endpoint, move on to next endpoint */ 1602 it->done_current_ep = 0; 1603 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1604 it->inp = NULL; 1605 } else { 1606 it->inp = LIST_NEXT(it->inp, sctp_list); 1607 } 1608 it->stcb = NULL; 1609 if (it->inp == NULL) { 1610 goto done_with_iterator; 1611 } 1612 goto select_a_new_ep; 1613 } 1614 1615 void 1616 sctp_iterator_worker(void) 1617 { 1618 struct sctp_iterator *it; 1619 1620 /* This function is called with the WQ lock in place */ 1621 sctp_it_ctl.iterator_running = 1; 1622 while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) { 1623 /* now lets work on this one */ 1624 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr); 1625 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1626 CURVNET_SET(it->vn); 1627 sctp_iterator_work(it); 1628 CURVNET_RESTORE(); 1629 SCTP_IPI_ITERATOR_WQ_LOCK(); 1630 /* sa_ignore FREED_MEMORY */ 1631 } 1632 sctp_it_ctl.iterator_running = 0; 1633 return; 1634 } 1635 1636 static void 1637 sctp_handle_addr_wq(void) 1638 { 1639 /* deal with the ADDR wq from the rtsock calls */ 1640 struct sctp_laddr *wi, *nwi; 1641 struct sctp_asconf_iterator *asc; 1642 1643 SCTP_MALLOC(asc, struct sctp_asconf_iterator *, 1644 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT); 1645 if (asc == NULL) { 1646 /* Try later, no memory */ 1647 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 1648 (struct sctp_inpcb *)NULL, 1649 (struct sctp_tcb *)NULL, 1650 (struct sctp_nets *)NULL); 1651 return; 1652 } 1653 LIST_INIT(&asc->list_of_work); 1654 asc->cnt = 0; 1655 1656 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) { 1657 LIST_REMOVE(wi, sctp_nxt_addr); 1658 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr); 1659 asc->cnt++; 1660 } 1661 1662 if (asc->cnt == 0) { 1663 SCTP_FREE(asc, SCTP_M_ASC_IT); 1664 } else { 1665 int ret; 1666 1667 ret = sctp_initiate_iterator(sctp_asconf_iterator_ep, 1668 sctp_asconf_iterator_stcb, 1669 NULL, /* No ep end for boundall */ 1670 SCTP_PCB_FLAGS_BOUNDALL, 1671 SCTP_PCB_ANY_FEATURES, 1672 SCTP_ASOC_ANY_STATE, 1673 (void *)asc, 0, 1674 sctp_asconf_iterator_end, NULL, 0); 1675 if (ret) { 1676 SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n"); 1677 /* 1678 * Freeing if we are stopping or put back on the 1679 * addr_wq. 1680 */ 1681 if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) { 1682 sctp_asconf_iterator_end(asc, 0); 1683 } else { 1684 LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) { 1685 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 1686 } 1687 SCTP_FREE(asc, SCTP_M_ASC_IT); 1688 } 1689 } 1690 } 1691 } 1692 1693 /*- 1694 * The following table shows which pointers for the inp, stcb, or net are 1695 * stored for each timer after it was started. 1696 * 1697 *|Name |Timer |inp |stcb|net | 1698 *|-----------------------------|-----------------------------|----|----|----| 1699 *|SCTP_TIMER_TYPE_SEND |net->rxt_timer |Yes |Yes |Yes | 1700 *|SCTP_TIMER_TYPE_INIT |net->rxt_timer |Yes |Yes |Yes | 1701 *|SCTP_TIMER_TYPE_RECV |stcb->asoc.dack_timer |Yes |Yes |No | 1702 *|SCTP_TIMER_TYPE_SHUTDOWN |net->rxt_timer |Yes |Yes |Yes | 1703 *|SCTP_TIMER_TYPE_HEARTBEAT |net->hb_timer |Yes |Yes |Yes | 1704 *|SCTP_TIMER_TYPE_COOKIE |net->rxt_timer |Yes |Yes |Yes | 1705 *|SCTP_TIMER_TYPE_NEWCOOKIE |inp->sctp_ep.signature_change|Yes |No |No | 1706 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer |Yes |Yes |Yes | 1707 *|SCTP_TIMER_TYPE_SHUTDOWNACK |net->rxt_timer |Yes |Yes |Yes | 1708 *|SCTP_TIMER_TYPE_ASCONF |stcb->asoc.asconf_timer |Yes |Yes |Yes | 1709 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer |Yes |Yes |No | 1710 *|SCTP_TIMER_TYPE_AUTOCLOSE |stcb->asoc.autoclose_timer |Yes |Yes |No | 1711 *|SCTP_TIMER_TYPE_STRRESET |stcb->asoc.strreset_timer |Yes |Yes |No | 1712 *|SCTP_TIMER_TYPE_INPKILL |inp->sctp_ep.signature_change|Yes |No |No | 1713 *|SCTP_TIMER_TYPE_ASOCKILL |stcb->asoc.strreset_timer |Yes |Yes |No | 1714 *|SCTP_TIMER_TYPE_ADDR_WQ |SCTP_BASE_INFO(addr_wq_timer)|No |No |No | 1715 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No | 1716 */ 1717 1718 void 1719 sctp_timeout_handler(void *t) 1720 { 1721 struct epoch_tracker et; 1722 struct timeval tv; 1723 struct sctp_inpcb *inp; 1724 struct sctp_tcb *stcb; 1725 struct sctp_nets *net; 1726 struct sctp_timer *tmr; 1727 struct mbuf *op_err; 1728 int type; 1729 int i, secret; 1730 bool did_output, released_asoc_reference; 1731 1732 /* 1733 * If inp, stcb or net are not NULL, then references to these were 1734 * added when the timer was started, and must be released before 1735 * this function returns. 1736 */ 1737 tmr = (struct sctp_timer *)t; 1738 inp = (struct sctp_inpcb *)tmr->ep; 1739 stcb = (struct sctp_tcb *)tmr->tcb; 1740 net = (struct sctp_nets *)tmr->net; 1741 CURVNET_SET((struct vnet *)tmr->vnet); 1742 NET_EPOCH_ENTER(et); 1743 released_asoc_reference = false; 1744 1745 #ifdef SCTP_AUDITING_ENABLED 1746 sctp_audit_log(0xF0, (uint8_t)tmr->type); 1747 sctp_auditing(3, inp, stcb, net); 1748 #endif 1749 1750 /* sanity checks... */ 1751 KASSERT(tmr->self == NULL || tmr->self == tmr, 1752 ("sctp_timeout_handler: tmr->self corrupted")); 1753 KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type), 1754 ("sctp_timeout_handler: invalid timer type %d", tmr->type)); 1755 type = tmr->type; 1756 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 1757 ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p", 1758 type, stcb, stcb->sctp_ep)); 1759 tmr->stopped_from = 0xa001; 1760 if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) { 1761 SCTPDBG(SCTP_DEBUG_TIMER2, 1762 "Timer type %d handler exiting due to CLOSED association.\n", 1763 type); 1764 goto out_decr; 1765 } 1766 tmr->stopped_from = 0xa002; 1767 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type); 1768 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1769 SCTPDBG(SCTP_DEBUG_TIMER2, 1770 "Timer type %d handler exiting due to not being active.\n", 1771 type); 1772 goto out_decr; 1773 } 1774 1775 tmr->stopped_from = 0xa003; 1776 if (stcb) { 1777 SCTP_TCB_LOCK(stcb); 1778 /* 1779 * Release reference so that association can be freed if 1780 * necessary below. This is safe now that we have acquired 1781 * the lock. 1782 */ 1783 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1784 released_asoc_reference = true; 1785 if ((type != SCTP_TIMER_TYPE_ASOCKILL) && 1786 ((stcb->asoc.state == SCTP_STATE_EMPTY) || 1787 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) { 1788 SCTPDBG(SCTP_DEBUG_TIMER2, 1789 "Timer type %d handler exiting due to CLOSED association.\n", 1790 type); 1791 goto out; 1792 } 1793 } else if (inp != NULL) { 1794 SCTP_INP_WLOCK(inp); 1795 } else { 1796 SCTP_WQ_ADDR_LOCK(); 1797 } 1798 1799 /* Record in stopped_from which timeout occurred. */ 1800 tmr->stopped_from = type; 1801 /* mark as being serviced now */ 1802 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 1803 /* 1804 * Callout has been rescheduled. 1805 */ 1806 goto out; 1807 } 1808 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1809 /* 1810 * Not active, so no action. 1811 */ 1812 goto out; 1813 } 1814 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer); 1815 1816 /* call the handler for the appropriate timer type */ 1817 switch (type) { 1818 case SCTP_TIMER_TYPE_SEND: 1819 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1820 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1821 type, inp, stcb, net)); 1822 SCTP_STAT_INCR(sctps_timodata); 1823 stcb->asoc.timodata++; 1824 stcb->asoc.num_send_timers_up--; 1825 if (stcb->asoc.num_send_timers_up < 0) { 1826 stcb->asoc.num_send_timers_up = 0; 1827 } 1828 SCTP_TCB_LOCK_ASSERT(stcb); 1829 if (sctp_t3rxt_timer(inp, stcb, net)) { 1830 /* no need to unlock on tcb its gone */ 1831 1832 goto out_decr; 1833 } 1834 SCTP_TCB_LOCK_ASSERT(stcb); 1835 #ifdef SCTP_AUDITING_ENABLED 1836 sctp_auditing(4, inp, stcb, net); 1837 #endif 1838 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1839 did_output = true; 1840 if ((stcb->asoc.num_send_timers_up == 0) && 1841 (stcb->asoc.sent_queue_cnt > 0)) { 1842 struct sctp_tmit_chunk *chk; 1843 1844 /* 1845 * Safeguard. If there on some on the sent queue 1846 * somewhere but no timers running something is 1847 * wrong... so we start a timer on the first chunk 1848 * on the send queue on whatever net it is sent to. 1849 */ 1850 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1851 if (chk->whoTo != NULL) { 1852 break; 1853 } 1854 } 1855 if (chk != NULL) { 1856 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1857 } 1858 } 1859 break; 1860 case SCTP_TIMER_TYPE_INIT: 1861 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1862 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1863 type, inp, stcb, net)); 1864 SCTP_STAT_INCR(sctps_timoinit); 1865 stcb->asoc.timoinit++; 1866 if (sctp_t1init_timer(inp, stcb, net)) { 1867 /* no need to unlock on tcb its gone */ 1868 goto out_decr; 1869 } 1870 did_output = false; 1871 break; 1872 case SCTP_TIMER_TYPE_RECV: 1873 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1874 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1875 type, inp, stcb, net)); 1876 SCTP_STAT_INCR(sctps_timosack); 1877 stcb->asoc.timosack++; 1878 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1879 #ifdef SCTP_AUDITING_ENABLED 1880 sctp_auditing(4, inp, stcb, NULL); 1881 #endif 1882 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1883 did_output = true; 1884 break; 1885 case SCTP_TIMER_TYPE_SHUTDOWN: 1886 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1887 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1888 type, inp, stcb, net)); 1889 SCTP_STAT_INCR(sctps_timoshutdown); 1890 stcb->asoc.timoshutdown++; 1891 if (sctp_shutdown_timer(inp, stcb, net)) { 1892 /* no need to unlock on tcb its gone */ 1893 goto out_decr; 1894 } 1895 #ifdef SCTP_AUDITING_ENABLED 1896 sctp_auditing(4, inp, stcb, net); 1897 #endif 1898 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1899 did_output = true; 1900 break; 1901 case SCTP_TIMER_TYPE_HEARTBEAT: 1902 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1903 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1904 type, inp, stcb, net)); 1905 SCTP_STAT_INCR(sctps_timoheartbeat); 1906 stcb->asoc.timoheartbeat++; 1907 if (sctp_heartbeat_timer(inp, stcb, net)) { 1908 /* no need to unlock on tcb its gone */ 1909 goto out_decr; 1910 } 1911 #ifdef SCTP_AUDITING_ENABLED 1912 sctp_auditing(4, inp, stcb, net); 1913 #endif 1914 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1915 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1916 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1917 did_output = true; 1918 } else { 1919 did_output = false; 1920 } 1921 break; 1922 case SCTP_TIMER_TYPE_COOKIE: 1923 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1924 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1925 type, inp, stcb, net)); 1926 SCTP_STAT_INCR(sctps_timocookie); 1927 stcb->asoc.timocookie++; 1928 if (sctp_cookie_timer(inp, stcb, net)) { 1929 /* no need to unlock on tcb its gone */ 1930 goto out_decr; 1931 } 1932 #ifdef SCTP_AUDITING_ENABLED 1933 sctp_auditing(4, inp, stcb, net); 1934 #endif 1935 /* 1936 * We consider T3 and Cookie timer pretty much the same with 1937 * respect to where from in chunk_output. 1938 */ 1939 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1940 did_output = true; 1941 break; 1942 case SCTP_TIMER_TYPE_NEWCOOKIE: 1943 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1944 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1945 type, inp, stcb, net)); 1946 SCTP_STAT_INCR(sctps_timosecret); 1947 (void)SCTP_GETTIME_TIMEVAL(&tv); 1948 inp->sctp_ep.time_of_secret_change = (unsigned int)tv.tv_sec; 1949 inp->sctp_ep.last_secret_number = 1950 inp->sctp_ep.current_secret_number; 1951 inp->sctp_ep.current_secret_number++; 1952 if (inp->sctp_ep.current_secret_number >= 1953 SCTP_HOW_MANY_SECRETS) { 1954 inp->sctp_ep.current_secret_number = 0; 1955 } 1956 secret = (int)inp->sctp_ep.current_secret_number; 1957 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1958 inp->sctp_ep.secret_key[secret][i] = 1959 sctp_select_initial_TSN(&inp->sctp_ep); 1960 } 1961 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1962 did_output = false; 1963 break; 1964 case SCTP_TIMER_TYPE_PATHMTURAISE: 1965 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1966 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1967 type, inp, stcb, net)); 1968 SCTP_STAT_INCR(sctps_timopathmtu); 1969 sctp_pathmtu_timer(inp, stcb, net); 1970 did_output = false; 1971 break; 1972 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1973 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1974 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1975 type, inp, stcb, net)); 1976 if (sctp_shutdownack_timer(inp, stcb, net)) { 1977 /* no need to unlock on tcb its gone */ 1978 goto out_decr; 1979 } 1980 SCTP_STAT_INCR(sctps_timoshutdownack); 1981 stcb->asoc.timoshutdownack++; 1982 #ifdef SCTP_AUDITING_ENABLED 1983 sctp_auditing(4, inp, stcb, net); 1984 #endif 1985 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1986 did_output = true; 1987 break; 1988 case SCTP_TIMER_TYPE_ASCONF: 1989 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1990 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1991 type, inp, stcb, net)); 1992 SCTP_STAT_INCR(sctps_timoasconf); 1993 if (sctp_asconf_timer(inp, stcb, net)) { 1994 /* no need to unlock on tcb its gone */ 1995 goto out_decr; 1996 } 1997 #ifdef SCTP_AUDITING_ENABLED 1998 sctp_auditing(4, inp, stcb, net); 1999 #endif 2000 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 2001 did_output = true; 2002 break; 2003 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2004 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2005 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2006 type, inp, stcb, net)); 2007 SCTP_STAT_INCR(sctps_timoshutdownguard); 2008 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 2009 "Shutdown guard timer expired"); 2010 sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED); 2011 /* no need to unlock on tcb its gone */ 2012 goto out_decr; 2013 case SCTP_TIMER_TYPE_AUTOCLOSE: 2014 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2015 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2016 type, inp, stcb, net)); 2017 SCTP_STAT_INCR(sctps_timoautoclose); 2018 sctp_autoclose_timer(inp, stcb); 2019 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2020 did_output = true; 2021 break; 2022 case SCTP_TIMER_TYPE_STRRESET: 2023 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2024 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2025 type, inp, stcb, net)); 2026 SCTP_STAT_INCR(sctps_timostrmrst); 2027 if (sctp_strreset_timer(inp, stcb)) { 2028 /* no need to unlock on tcb its gone */ 2029 goto out_decr; 2030 } 2031 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2032 did_output = true; 2033 break; 2034 case SCTP_TIMER_TYPE_INPKILL: 2035 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2036 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2037 type, inp, stcb, net)); 2038 SCTP_STAT_INCR(sctps_timoinpkill); 2039 /* 2040 * special case, take away our increment since WE are the 2041 * killer 2042 */ 2043 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2044 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2045 SCTP_INP_DECR_REF(inp); 2046 SCTP_INP_WUNLOCK(inp); 2047 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2048 SCTP_CALLED_FROM_INPKILL_TIMER); 2049 inp = NULL; 2050 goto out_decr; 2051 case SCTP_TIMER_TYPE_ASOCKILL: 2052 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2053 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2054 type, inp, stcb, net)); 2055 SCTP_STAT_INCR(sctps_timoassockill); 2056 /* Can we free it yet? */ 2057 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2058 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2059 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2060 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2061 /* 2062 * free asoc, always unlocks (or destroy's) so prevent 2063 * duplicate unlock or unlock of a free mtx :-0 2064 */ 2065 stcb = NULL; 2066 goto out_decr; 2067 case SCTP_TIMER_TYPE_ADDR_WQ: 2068 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2069 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2070 type, inp, stcb, net)); 2071 sctp_handle_addr_wq(); 2072 did_output = true; 2073 break; 2074 case SCTP_TIMER_TYPE_PRIM_DELETED: 2075 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2076 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2077 type, inp, stcb, net)); 2078 SCTP_STAT_INCR(sctps_timodelprim); 2079 sctp_delete_prim_timer(inp, stcb); 2080 did_output = false; 2081 break; 2082 default: 2083 #ifdef INVARIANTS 2084 panic("Unknown timer type %d", type); 2085 #else 2086 goto out; 2087 #endif 2088 } 2089 #ifdef SCTP_AUDITING_ENABLED 2090 sctp_audit_log(0xF1, (uint8_t)type); 2091 if (inp != NULL) 2092 sctp_auditing(5, inp, stcb, net); 2093 #endif 2094 if (did_output && (stcb != NULL)) { 2095 /* 2096 * Now we need to clean up the control chunk chain if an 2097 * ECNE is on it. It must be marked as UNSENT again so next 2098 * call will continue to send it until such time that we get 2099 * a CWR, to remove it. It is, however, less likely that we 2100 * will find a ecn echo on the chain though. 2101 */ 2102 sctp_fix_ecn_echo(&stcb->asoc); 2103 } 2104 out: 2105 if (stcb != NULL) { 2106 SCTP_TCB_UNLOCK(stcb); 2107 } else if (inp != NULL) { 2108 SCTP_INP_WUNLOCK(inp); 2109 } else { 2110 SCTP_WQ_ADDR_UNLOCK(); 2111 } 2112 2113 out_decr: 2114 /* These reference counts were incremented in sctp_timer_start(). */ 2115 if (inp != NULL) { 2116 SCTP_INP_DECR_REF(inp); 2117 } 2118 if ((stcb != NULL) && !released_asoc_reference) { 2119 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2120 } 2121 if (net != NULL) { 2122 sctp_free_remote_addr(net); 2123 } 2124 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2125 CURVNET_RESTORE(); 2126 NET_EPOCH_EXIT(et); 2127 } 2128 2129 /*- 2130 * The following table shows which parameters must be provided 2131 * when calling sctp_timer_start(). For parameters not being 2132 * provided, NULL must be used. 2133 * 2134 * |Name |inp |stcb|net | 2135 * |-----------------------------|----|----|----| 2136 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2137 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2138 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2139 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2140 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2141 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2142 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2143 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2144 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2145 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2146 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2147 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2148 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2149 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2150 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2151 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2152 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2153 * 2154 */ 2155 2156 void 2157 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2158 struct sctp_nets *net) 2159 { 2160 struct sctp_timer *tmr; 2161 uint32_t to_ticks; 2162 uint32_t rndval, jitter; 2163 2164 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2165 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2166 t_type, stcb, stcb->sctp_ep)); 2167 tmr = NULL; 2168 if (stcb != NULL) { 2169 SCTP_TCB_LOCK_ASSERT(stcb); 2170 } else if (inp != NULL) { 2171 SCTP_INP_WLOCK_ASSERT(inp); 2172 } else { 2173 SCTP_WQ_ADDR_LOCK_ASSERT(); 2174 } 2175 if (stcb != NULL) { 2176 /* 2177 * Don't restart timer on association that's about to be 2178 * killed. 2179 */ 2180 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2181 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2182 SCTPDBG(SCTP_DEBUG_TIMER2, 2183 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2184 t_type, inp, stcb, net); 2185 return; 2186 } 2187 /* Don't restart timer on net that's been removed. */ 2188 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2189 SCTPDBG(SCTP_DEBUG_TIMER2, 2190 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2191 t_type, inp, stcb, net); 2192 return; 2193 } 2194 } 2195 switch (t_type) { 2196 case SCTP_TIMER_TYPE_SEND: 2197 /* Here we use the RTO timer. */ 2198 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2199 #ifdef INVARIANTS 2200 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2201 t_type, inp, stcb, net); 2202 #else 2203 return; 2204 #endif 2205 } 2206 tmr = &net->rxt_timer; 2207 if (net->RTO == 0) { 2208 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2209 } else { 2210 to_ticks = sctp_msecs_to_ticks(net->RTO); 2211 } 2212 break; 2213 case SCTP_TIMER_TYPE_INIT: 2214 /* 2215 * Here we use the INIT timer default usually about 1 2216 * second. 2217 */ 2218 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2219 #ifdef INVARIANTS 2220 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2221 t_type, inp, stcb, net); 2222 #else 2223 return; 2224 #endif 2225 } 2226 tmr = &net->rxt_timer; 2227 if (net->RTO == 0) { 2228 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2229 } else { 2230 to_ticks = sctp_msecs_to_ticks(net->RTO); 2231 } 2232 break; 2233 case SCTP_TIMER_TYPE_RECV: 2234 /* 2235 * Here we use the Delayed-Ack timer value from the inp, 2236 * usually about 200ms. 2237 */ 2238 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2239 #ifdef INVARIANTS 2240 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2241 t_type, inp, stcb, net); 2242 #else 2243 return; 2244 #endif 2245 } 2246 tmr = &stcb->asoc.dack_timer; 2247 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2248 break; 2249 case SCTP_TIMER_TYPE_SHUTDOWN: 2250 /* Here we use the RTO of the destination. */ 2251 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2252 #ifdef INVARIANTS 2253 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2254 t_type, inp, stcb, net); 2255 #else 2256 return; 2257 #endif 2258 } 2259 tmr = &net->rxt_timer; 2260 if (net->RTO == 0) { 2261 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2262 } else { 2263 to_ticks = sctp_msecs_to_ticks(net->RTO); 2264 } 2265 break; 2266 case SCTP_TIMER_TYPE_HEARTBEAT: 2267 /* 2268 * The net is used here so that we can add in the RTO. Even 2269 * though we use a different timer. We also add the HB timer 2270 * PLUS a random jitter. 2271 */ 2272 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2273 #ifdef INVARIANTS 2274 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2275 t_type, inp, stcb, net); 2276 #else 2277 return; 2278 #endif 2279 } 2280 if ((net->dest_state & SCTP_ADDR_NOHB) && 2281 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 2282 SCTPDBG(SCTP_DEBUG_TIMER2, 2283 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2284 t_type, inp, stcb, net); 2285 return; 2286 } 2287 tmr = &net->hb_timer; 2288 if (net->RTO == 0) { 2289 to_ticks = stcb->asoc.initial_rto; 2290 } else { 2291 to_ticks = net->RTO; 2292 } 2293 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2294 jitter = rndval % to_ticks; 2295 if (to_ticks > 1) { 2296 to_ticks >>= 1; 2297 } 2298 if (jitter < (UINT32_MAX - to_ticks)) { 2299 to_ticks += jitter; 2300 } else { 2301 to_ticks = UINT32_MAX; 2302 } 2303 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2304 !(net->dest_state & SCTP_ADDR_PF)) { 2305 if (net->heart_beat_delay < (UINT32_MAX - to_ticks)) { 2306 to_ticks += net->heart_beat_delay; 2307 } else { 2308 to_ticks = UINT32_MAX; 2309 } 2310 } 2311 /* 2312 * Now we must convert the to_ticks that are now in ms to 2313 * ticks. 2314 */ 2315 to_ticks = sctp_msecs_to_ticks(to_ticks); 2316 break; 2317 case SCTP_TIMER_TYPE_COOKIE: 2318 /* 2319 * Here we can use the RTO timer from the network since one 2320 * RTT was complete. If a retransmission happened then we 2321 * will be using the RTO initial value. 2322 */ 2323 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2324 #ifdef INVARIANTS 2325 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2326 t_type, inp, stcb, net); 2327 #else 2328 return; 2329 #endif 2330 } 2331 tmr = &net->rxt_timer; 2332 if (net->RTO == 0) { 2333 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2334 } else { 2335 to_ticks = sctp_msecs_to_ticks(net->RTO); 2336 } 2337 break; 2338 case SCTP_TIMER_TYPE_NEWCOOKIE: 2339 /* 2340 * Nothing needed but the endpoint here usually about 60 2341 * minutes. 2342 */ 2343 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2344 #ifdef INVARIANTS 2345 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2346 t_type, inp, stcb, net); 2347 #else 2348 return; 2349 #endif 2350 } 2351 tmr = &inp->sctp_ep.signature_change; 2352 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2353 break; 2354 case SCTP_TIMER_TYPE_PATHMTURAISE: 2355 /* 2356 * Here we use the value found in the EP for PMTUD, usually 2357 * about 10 minutes. 2358 */ 2359 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2360 #ifdef INVARIANTS 2361 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2362 t_type, inp, stcb, net); 2363 #else 2364 return; 2365 #endif 2366 } 2367 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2368 SCTPDBG(SCTP_DEBUG_TIMER2, 2369 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2370 t_type, inp, stcb, net); 2371 return; 2372 } 2373 tmr = &net->pmtu_timer; 2374 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2375 break; 2376 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2377 /* Here we use the RTO of the destination. */ 2378 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2379 #ifdef INVARIANTS 2380 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2381 t_type, inp, stcb, net); 2382 #else 2383 return; 2384 #endif 2385 } 2386 tmr = &net->rxt_timer; 2387 if (net->RTO == 0) { 2388 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2389 } else { 2390 to_ticks = sctp_msecs_to_ticks(net->RTO); 2391 } 2392 break; 2393 case SCTP_TIMER_TYPE_ASCONF: 2394 /* 2395 * Here the timer comes from the stcb but its value is from 2396 * the net's RTO. 2397 */ 2398 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2399 #ifdef INVARIANTS 2400 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2401 t_type, inp, stcb, net); 2402 #else 2403 return; 2404 #endif 2405 } 2406 tmr = &stcb->asoc.asconf_timer; 2407 if (net->RTO == 0) { 2408 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2409 } else { 2410 to_ticks = sctp_msecs_to_ticks(net->RTO); 2411 } 2412 break; 2413 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2414 /* 2415 * Here we use the endpoints shutdown guard timer usually 2416 * about 3 minutes. 2417 */ 2418 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2419 #ifdef INVARIANTS 2420 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2421 t_type, inp, stcb, net); 2422 #else 2423 return; 2424 #endif 2425 } 2426 tmr = &stcb->asoc.shut_guard_timer; 2427 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2428 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2429 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2430 } else { 2431 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2432 } 2433 } else { 2434 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2435 } 2436 break; 2437 case SCTP_TIMER_TYPE_AUTOCLOSE: 2438 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2439 #ifdef INVARIANTS 2440 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2441 t_type, inp, stcb, net); 2442 #else 2443 return; 2444 #endif 2445 } 2446 tmr = &stcb->asoc.autoclose_timer; 2447 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2448 break; 2449 case SCTP_TIMER_TYPE_STRRESET: 2450 /* 2451 * Here the timer comes from the stcb but its value is from 2452 * the net's RTO. 2453 */ 2454 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2455 #ifdef INVARIANTS 2456 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2457 t_type, inp, stcb, net); 2458 #else 2459 return; 2460 #endif 2461 } 2462 tmr = &stcb->asoc.strreset_timer; 2463 if (net->RTO == 0) { 2464 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2465 } else { 2466 to_ticks = sctp_msecs_to_ticks(net->RTO); 2467 } 2468 break; 2469 case SCTP_TIMER_TYPE_INPKILL: 2470 /* 2471 * The inp is setup to die. We re-use the signature_change 2472 * timer since that has stopped and we are in the GONE 2473 * state. 2474 */ 2475 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2476 #ifdef INVARIANTS 2477 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2478 t_type, inp, stcb, net); 2479 #else 2480 return; 2481 #endif 2482 } 2483 tmr = &inp->sctp_ep.signature_change; 2484 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2485 break; 2486 case SCTP_TIMER_TYPE_ASOCKILL: 2487 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2488 #ifdef INVARIANTS 2489 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2490 t_type, inp, stcb, net); 2491 #else 2492 return; 2493 #endif 2494 } 2495 tmr = &stcb->asoc.strreset_timer; 2496 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2497 break; 2498 case SCTP_TIMER_TYPE_ADDR_WQ: 2499 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2500 #ifdef INVARIANTS 2501 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2502 t_type, inp, stcb, net); 2503 #else 2504 return; 2505 #endif 2506 } 2507 /* Only 1 tick away :-) */ 2508 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2509 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2510 break; 2511 case SCTP_TIMER_TYPE_PRIM_DELETED: 2512 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2513 #ifdef INVARIANTS 2514 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2515 t_type, inp, stcb, net); 2516 #else 2517 return; 2518 #endif 2519 } 2520 tmr = &stcb->asoc.delete_prim_timer; 2521 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2522 break; 2523 default: 2524 #ifdef INVARIANTS 2525 panic("Unknown timer type %d", t_type); 2526 #else 2527 return; 2528 #endif 2529 } 2530 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2531 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2532 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2533 /* 2534 * We do NOT allow you to have it already running. If it is, 2535 * we leave the current one up unchanged. 2536 */ 2537 SCTPDBG(SCTP_DEBUG_TIMER2, 2538 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2539 t_type, inp, stcb, net); 2540 return; 2541 } 2542 /* At this point we can proceed. */ 2543 if (t_type == SCTP_TIMER_TYPE_SEND) { 2544 stcb->asoc.num_send_timers_up++; 2545 } 2546 tmr->stopped_from = 0; 2547 tmr->type = t_type; 2548 tmr->ep = (void *)inp; 2549 tmr->tcb = (void *)stcb; 2550 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2551 tmr->net = NULL; 2552 } else { 2553 tmr->net = (void *)net; 2554 } 2555 tmr->self = (void *)tmr; 2556 tmr->vnet = (void *)curvnet; 2557 tmr->ticks = sctp_get_tick_count(); 2558 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2559 SCTPDBG(SCTP_DEBUG_TIMER2, 2560 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2561 t_type, to_ticks, inp, stcb, net); 2562 /* 2563 * If this is a newly scheduled callout, as opposed to a 2564 * rescheduled one, increment relevant reference counts. 2565 */ 2566 if (tmr->ep != NULL) { 2567 SCTP_INP_INCR_REF(inp); 2568 } 2569 if (tmr->tcb != NULL) { 2570 atomic_add_int(&stcb->asoc.refcnt, 1); 2571 } 2572 if (tmr->net != NULL) { 2573 atomic_add_int(&net->ref_count, 1); 2574 } 2575 } else { 2576 /* 2577 * This should not happen, since we checked for pending 2578 * above. 2579 */ 2580 SCTPDBG(SCTP_DEBUG_TIMER2, 2581 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2582 t_type, to_ticks, inp, stcb, net); 2583 } 2584 return; 2585 } 2586 2587 /*- 2588 * The following table shows which parameters must be provided 2589 * when calling sctp_timer_stop(). For parameters not being 2590 * provided, NULL must be used. 2591 * 2592 * |Name |inp |stcb|net | 2593 * |-----------------------------|----|----|----| 2594 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2595 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2596 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2597 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2598 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2599 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2600 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2601 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2602 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2603 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2604 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2605 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2606 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2607 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2608 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2609 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2610 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2611 * 2612 */ 2613 2614 void 2615 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2616 struct sctp_nets *net, uint32_t from) 2617 { 2618 struct sctp_timer *tmr; 2619 2620 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2621 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2622 t_type, stcb, stcb->sctp_ep)); 2623 if (stcb != NULL) { 2624 SCTP_TCB_LOCK_ASSERT(stcb); 2625 } else if (inp != NULL) { 2626 SCTP_INP_WLOCK_ASSERT(inp); 2627 } else { 2628 SCTP_WQ_ADDR_LOCK_ASSERT(); 2629 } 2630 tmr = NULL; 2631 switch (t_type) { 2632 case SCTP_TIMER_TYPE_SEND: 2633 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2634 #ifdef INVARIANTS 2635 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2636 t_type, inp, stcb, net); 2637 #else 2638 return; 2639 #endif 2640 } 2641 tmr = &net->rxt_timer; 2642 break; 2643 case SCTP_TIMER_TYPE_INIT: 2644 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2645 #ifdef INVARIANTS 2646 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2647 t_type, inp, stcb, net); 2648 #else 2649 return; 2650 #endif 2651 } 2652 tmr = &net->rxt_timer; 2653 break; 2654 case SCTP_TIMER_TYPE_RECV: 2655 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2656 #ifdef INVARIANTS 2657 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2658 t_type, inp, stcb, net); 2659 #else 2660 return; 2661 #endif 2662 } 2663 tmr = &stcb->asoc.dack_timer; 2664 break; 2665 case SCTP_TIMER_TYPE_SHUTDOWN: 2666 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2667 #ifdef INVARIANTS 2668 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2669 t_type, inp, stcb, net); 2670 #else 2671 return; 2672 #endif 2673 } 2674 tmr = &net->rxt_timer; 2675 break; 2676 case SCTP_TIMER_TYPE_HEARTBEAT: 2677 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2678 #ifdef INVARIANTS 2679 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2680 t_type, inp, stcb, net); 2681 #else 2682 return; 2683 #endif 2684 } 2685 tmr = &net->hb_timer; 2686 break; 2687 case SCTP_TIMER_TYPE_COOKIE: 2688 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2689 #ifdef INVARIANTS 2690 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2691 t_type, inp, stcb, net); 2692 #else 2693 return; 2694 #endif 2695 } 2696 tmr = &net->rxt_timer; 2697 break; 2698 case SCTP_TIMER_TYPE_NEWCOOKIE: 2699 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2700 #ifdef INVARIANTS 2701 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2702 t_type, inp, stcb, net); 2703 #else 2704 return; 2705 #endif 2706 } 2707 tmr = &inp->sctp_ep.signature_change; 2708 break; 2709 case SCTP_TIMER_TYPE_PATHMTURAISE: 2710 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2711 #ifdef INVARIANTS 2712 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2713 t_type, inp, stcb, net); 2714 #else 2715 return; 2716 #endif 2717 } 2718 tmr = &net->pmtu_timer; 2719 break; 2720 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2721 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2722 #ifdef INVARIANTS 2723 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2724 t_type, inp, stcb, net); 2725 #else 2726 return; 2727 #endif 2728 } 2729 tmr = &net->rxt_timer; 2730 break; 2731 case SCTP_TIMER_TYPE_ASCONF: 2732 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2733 #ifdef INVARIANTS 2734 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2735 t_type, inp, stcb, net); 2736 #else 2737 return; 2738 #endif 2739 } 2740 tmr = &stcb->asoc.asconf_timer; 2741 break; 2742 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2743 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2744 #ifdef INVARIANTS 2745 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2746 t_type, inp, stcb, net); 2747 #else 2748 return; 2749 #endif 2750 } 2751 tmr = &stcb->asoc.shut_guard_timer; 2752 break; 2753 case SCTP_TIMER_TYPE_AUTOCLOSE: 2754 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2755 #ifdef INVARIANTS 2756 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2757 t_type, inp, stcb, net); 2758 #else 2759 return; 2760 #endif 2761 } 2762 tmr = &stcb->asoc.autoclose_timer; 2763 break; 2764 case SCTP_TIMER_TYPE_STRRESET: 2765 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2766 #ifdef INVARIANTS 2767 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2768 t_type, inp, stcb, net); 2769 #else 2770 return; 2771 #endif 2772 } 2773 tmr = &stcb->asoc.strreset_timer; 2774 break; 2775 case SCTP_TIMER_TYPE_INPKILL: 2776 /* 2777 * The inp is setup to die. We re-use the signature_change 2778 * timer since that has stopped and we are in the GONE 2779 * state. 2780 */ 2781 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2782 #ifdef INVARIANTS 2783 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2784 t_type, inp, stcb, net); 2785 #else 2786 return; 2787 #endif 2788 } 2789 tmr = &inp->sctp_ep.signature_change; 2790 break; 2791 case SCTP_TIMER_TYPE_ASOCKILL: 2792 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2793 #ifdef INVARIANTS 2794 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2795 t_type, inp, stcb, net); 2796 #else 2797 return; 2798 #endif 2799 } 2800 tmr = &stcb->asoc.strreset_timer; 2801 break; 2802 case SCTP_TIMER_TYPE_ADDR_WQ: 2803 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2804 #ifdef INVARIANTS 2805 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2806 t_type, inp, stcb, net); 2807 #else 2808 return; 2809 #endif 2810 } 2811 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2812 break; 2813 case SCTP_TIMER_TYPE_PRIM_DELETED: 2814 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2815 #ifdef INVARIANTS 2816 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2817 t_type, inp, stcb, net); 2818 #else 2819 return; 2820 #endif 2821 } 2822 tmr = &stcb->asoc.delete_prim_timer; 2823 break; 2824 default: 2825 #ifdef INVARIANTS 2826 panic("Unknown timer type %d", t_type); 2827 #else 2828 return; 2829 #endif 2830 } 2831 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2832 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2833 (tmr->type != t_type)) { 2834 /* 2835 * Ok we have a timer that is under joint use. Cookie timer 2836 * per chance with the SEND timer. We therefore are NOT 2837 * running the timer that the caller wants stopped. So just 2838 * return. 2839 */ 2840 SCTPDBG(SCTP_DEBUG_TIMER2, 2841 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2842 t_type, inp, stcb, net); 2843 return; 2844 } 2845 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2846 stcb->asoc.num_send_timers_up--; 2847 if (stcb->asoc.num_send_timers_up < 0) { 2848 stcb->asoc.num_send_timers_up = 0; 2849 } 2850 } 2851 tmr->self = NULL; 2852 tmr->stopped_from = from; 2853 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2854 KASSERT(tmr->ep == inp, 2855 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2856 t_type, inp, tmr->ep)); 2857 KASSERT(tmr->tcb == stcb, 2858 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2859 t_type, stcb, tmr->tcb)); 2860 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2861 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2862 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2863 t_type, net, tmr->net)); 2864 SCTPDBG(SCTP_DEBUG_TIMER2, 2865 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2866 t_type, inp, stcb, net); 2867 /* 2868 * If the timer was actually stopped, decrement reference 2869 * counts that were incremented in sctp_timer_start(). 2870 */ 2871 if (tmr->ep != NULL) { 2872 SCTP_INP_DECR_REF(inp); 2873 tmr->ep = NULL; 2874 } 2875 if (tmr->tcb != NULL) { 2876 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2877 tmr->tcb = NULL; 2878 } 2879 if (tmr->net != NULL) { 2880 /* 2881 * Can't use net, since it doesn't work for 2882 * SCTP_TIMER_TYPE_ASCONF. 2883 */ 2884 sctp_free_remote_addr((struct sctp_nets *)tmr->net); 2885 tmr->net = NULL; 2886 } 2887 } else { 2888 SCTPDBG(SCTP_DEBUG_TIMER2, 2889 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2890 t_type, inp, stcb, net); 2891 } 2892 return; 2893 } 2894 2895 uint32_t 2896 sctp_calculate_len(struct mbuf *m) 2897 { 2898 struct mbuf *at; 2899 uint32_t tlen; 2900 2901 tlen = 0; 2902 for (at = m; at != NULL; at = SCTP_BUF_NEXT(at)) { 2903 tlen += SCTP_BUF_LEN(at); 2904 } 2905 return (tlen); 2906 } 2907 2908 /* 2909 * Given an association and starting time of the current RTT period, update 2910 * RTO in number of msecs. net should point to the current network. 2911 * Return 1, if an RTO update was performed, return 0 if no update was 2912 * performed due to invalid starting point. 2913 */ 2914 2915 int 2916 sctp_calculate_rto(struct sctp_tcb *stcb, 2917 struct sctp_association *asoc, 2918 struct sctp_nets *net, 2919 struct timeval *old, 2920 int rtt_from_sack) 2921 { 2922 struct timeval now; 2923 uint64_t rtt_us; /* RTT in us */ 2924 int32_t rtt; /* RTT in ms */ 2925 uint32_t new_rto; 2926 int first_measure = 0; 2927 2928 /************************/ 2929 /* 1. calculate new RTT */ 2930 /************************/ 2931 /* get the current time */ 2932 if (stcb->asoc.use_precise_time) { 2933 (void)SCTP_GETPTIME_TIMEVAL(&now); 2934 } else { 2935 (void)SCTP_GETTIME_TIMEVAL(&now); 2936 } 2937 if ((old->tv_sec > now.tv_sec) || 2938 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2939 /* The starting point is in the future. */ 2940 return (0); 2941 } 2942 timevalsub(&now, old); 2943 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2944 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2945 /* The RTT is larger than a sane value. */ 2946 return (0); 2947 } 2948 /* store the current RTT in us */ 2949 net->rtt = rtt_us; 2950 /* compute rtt in ms */ 2951 rtt = (int32_t)(net->rtt / 1000); 2952 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2953 /* 2954 * Tell the CC module that a new update has just occurred 2955 * from a sack 2956 */ 2957 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2958 } 2959 /* 2960 * Do we need to determine the lan? We do this only on sacks i.e. 2961 * RTT being determined from data not non-data (HB/INIT->INITACK). 2962 */ 2963 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2964 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2965 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2966 net->lan_type = SCTP_LAN_INTERNET; 2967 } else { 2968 net->lan_type = SCTP_LAN_LOCAL; 2969 } 2970 } 2971 2972 /***************************/ 2973 /* 2. update RTTVAR & SRTT */ 2974 /***************************/ 2975 /*- 2976 * Compute the scaled average lastsa and the 2977 * scaled variance lastsv as described in van Jacobson 2978 * Paper "Congestion Avoidance and Control", Annex A. 2979 * 2980 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2981 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2982 */ 2983 if (net->RTO_measured) { 2984 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2985 net->lastsa += rtt; 2986 if (rtt < 0) { 2987 rtt = -rtt; 2988 } 2989 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 2990 net->lastsv += rtt; 2991 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2992 rto_logging(net, SCTP_LOG_RTTVAR); 2993 } 2994 } else { 2995 /* First RTO measurement */ 2996 net->RTO_measured = 1; 2997 first_measure = 1; 2998 net->lastsa = rtt << SCTP_RTT_SHIFT; 2999 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3000 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3001 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3002 } 3003 } 3004 if (net->lastsv == 0) { 3005 net->lastsv = SCTP_CLOCK_GRANULARITY; 3006 } 3007 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3008 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3009 (stcb->asoc.sat_network_lockout == 0)) { 3010 stcb->asoc.sat_network = 1; 3011 } else if ((!first_measure) && stcb->asoc.sat_network) { 3012 stcb->asoc.sat_network = 0; 3013 stcb->asoc.sat_network_lockout = 1; 3014 } 3015 /* bound it, per C6/C7 in Section 5.3.1 */ 3016 if (new_rto < stcb->asoc.minrto) { 3017 new_rto = stcb->asoc.minrto; 3018 } 3019 if (new_rto > stcb->asoc.maxrto) { 3020 new_rto = stcb->asoc.maxrto; 3021 } 3022 net->RTO = new_rto; 3023 return (1); 3024 } 3025 3026 /* 3027 * return a pointer to a contiguous piece of data from the given mbuf chain 3028 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3029 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 3030 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 3031 */ 3032 caddr_t 3033 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3034 { 3035 uint32_t count; 3036 uint8_t *ptr; 3037 3038 ptr = in_ptr; 3039 if ((off < 0) || (len <= 0)) 3040 return (NULL); 3041 3042 /* find the desired start location */ 3043 while ((m != NULL) && (off > 0)) { 3044 if (off < SCTP_BUF_LEN(m)) 3045 break; 3046 off -= SCTP_BUF_LEN(m); 3047 m = SCTP_BUF_NEXT(m); 3048 } 3049 if (m == NULL) 3050 return (NULL); 3051 3052 /* is the current mbuf large enough (eg. contiguous)? */ 3053 if ((SCTP_BUF_LEN(m) - off) >= len) { 3054 return (mtod(m, caddr_t)+off); 3055 } else { 3056 /* else, it spans more than one mbuf, so save a temp copy... */ 3057 while ((m != NULL) && (len > 0)) { 3058 count = min(SCTP_BUF_LEN(m) - off, len); 3059 memcpy(ptr, mtod(m, caddr_t)+off, count); 3060 len -= count; 3061 ptr += count; 3062 off = 0; 3063 m = SCTP_BUF_NEXT(m); 3064 } 3065 if ((m == NULL) && (len > 0)) 3066 return (NULL); 3067 else 3068 return ((caddr_t)in_ptr); 3069 } 3070 } 3071 3072 struct sctp_paramhdr * 3073 sctp_get_next_param(struct mbuf *m, 3074 int offset, 3075 struct sctp_paramhdr *pull, 3076 int pull_limit) 3077 { 3078 /* This just provides a typed signature to Peter's Pull routine */ 3079 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3080 (uint8_t *)pull)); 3081 } 3082 3083 struct mbuf * 3084 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3085 { 3086 struct mbuf *m_last; 3087 caddr_t dp; 3088 3089 if (padlen > 3) { 3090 return (NULL); 3091 } 3092 if (padlen <= M_TRAILINGSPACE(m)) { 3093 /* 3094 * The easy way. We hope the majority of the time we hit 3095 * here :) 3096 */ 3097 m_last = m; 3098 } else { 3099 /* Hard way we must grow the mbuf chain */ 3100 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3101 if (m_last == NULL) { 3102 return (NULL); 3103 } 3104 SCTP_BUF_LEN(m_last) = 0; 3105 SCTP_BUF_NEXT(m_last) = NULL; 3106 SCTP_BUF_NEXT(m) = m_last; 3107 } 3108 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3109 SCTP_BUF_LEN(m_last) += padlen; 3110 memset(dp, 0, padlen); 3111 return (m_last); 3112 } 3113 3114 struct mbuf * 3115 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3116 { 3117 /* find the last mbuf in chain and pad it */ 3118 struct mbuf *m_at; 3119 3120 if (last_mbuf != NULL) { 3121 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3122 } else { 3123 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3124 if (SCTP_BUF_NEXT(m_at) == NULL) { 3125 return (sctp_add_pad_tombuf(m_at, padval)); 3126 } 3127 } 3128 } 3129 return (NULL); 3130 } 3131 3132 static void 3133 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3134 uint16_t error, struct sctp_abort_chunk *abort, 3135 bool from_peer, bool timedout, int so_locked) 3136 { 3137 struct mbuf *m_notify; 3138 struct sctp_assoc_change *sac; 3139 struct sctp_queued_to_read *control; 3140 unsigned int notif_len; 3141 uint16_t abort_len; 3142 unsigned int i; 3143 3144 KASSERT(abort == NULL || from_peer, 3145 ("sctp_notify_assoc_change: ABORT chunk provided for local termination")); 3146 KASSERT(!from_peer || !timedout, 3147 ("sctp_notify_assoc_change: timeouts can only be local")); 3148 if (stcb == NULL) { 3149 return; 3150 } 3151 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3152 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3153 if (abort != NULL) { 3154 abort_len = ntohs(abort->ch.chunk_length); 3155 /* 3156 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3157 * contiguous. 3158 */ 3159 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3160 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3161 } 3162 } else { 3163 abort_len = 0; 3164 } 3165 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3166 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3167 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3168 notif_len += abort_len; 3169 } 3170 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3171 if (m_notify == NULL) { 3172 /* Retry with smaller value. */ 3173 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3174 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3175 if (m_notify == NULL) { 3176 goto set_error; 3177 } 3178 } 3179 SCTP_BUF_NEXT(m_notify) = NULL; 3180 sac = mtod(m_notify, struct sctp_assoc_change *); 3181 memset(sac, 0, notif_len); 3182 sac->sac_type = SCTP_ASSOC_CHANGE; 3183 sac->sac_flags = 0; 3184 sac->sac_length = sizeof(struct sctp_assoc_change); 3185 sac->sac_state = state; 3186 sac->sac_error = error; 3187 if (state == SCTP_CANT_STR_ASSOC) { 3188 sac->sac_outbound_streams = 0; 3189 sac->sac_inbound_streams = 0; 3190 } else { 3191 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3192 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3193 } 3194 sac->sac_assoc_id = sctp_get_associd(stcb); 3195 if (notif_len > sizeof(struct sctp_assoc_change)) { 3196 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3197 i = 0; 3198 if (stcb->asoc.prsctp_supported == 1) { 3199 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3200 } 3201 if (stcb->asoc.auth_supported == 1) { 3202 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3203 } 3204 if (stcb->asoc.asconf_supported == 1) { 3205 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3206 } 3207 if (stcb->asoc.idata_supported == 1) { 3208 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3209 } 3210 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3211 if (stcb->asoc.reconfig_supported == 1) { 3212 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3213 } 3214 sac->sac_length += i; 3215 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3216 memcpy(sac->sac_info, abort, abort_len); 3217 sac->sac_length += abort_len; 3218 } 3219 } 3220 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3221 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3222 0, 0, stcb->asoc.context, 0, 0, 0, 3223 m_notify); 3224 if (control != NULL) { 3225 control->length = SCTP_BUF_LEN(m_notify); 3226 control->spec_flags = M_NOTIFICATION; 3227 /* not that we need this */ 3228 control->tail_mbuf = m_notify; 3229 sctp_add_to_readq(stcb->sctp_ep, stcb, 3230 control, 3231 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3232 so_locked); 3233 } else { 3234 sctp_m_freem(m_notify); 3235 } 3236 } 3237 /* 3238 * For 1-to-1 style sockets, we send up and error when an ABORT 3239 * comes in. 3240 */ 3241 set_error: 3242 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3243 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3244 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3245 SOCK_LOCK(stcb->sctp_socket); 3246 if (from_peer) { 3247 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3248 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3249 stcb->sctp_socket->so_error = ECONNREFUSED; 3250 } else { 3251 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3252 stcb->sctp_socket->so_error = ECONNRESET; 3253 } 3254 } else { 3255 if (timedout) { 3256 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3257 stcb->sctp_socket->so_error = ETIMEDOUT; 3258 } else { 3259 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3260 stcb->sctp_socket->so_error = ECONNABORTED; 3261 } 3262 } 3263 SOCK_UNLOCK(stcb->sctp_socket); 3264 } 3265 /* Wake ANY sleepers */ 3266 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3267 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3268 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3269 socantrcvmore(stcb->sctp_socket); 3270 } 3271 sorwakeup(stcb->sctp_socket); 3272 sowwakeup(stcb->sctp_socket); 3273 } 3274 3275 static void 3276 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3277 struct sockaddr *sa, uint32_t error, int so_locked) 3278 { 3279 struct mbuf *m_notify; 3280 struct sctp_paddr_change *spc; 3281 struct sctp_queued_to_read *control; 3282 3283 if ((stcb == NULL) || 3284 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3285 /* event not enabled */ 3286 return; 3287 } 3288 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3289 if (m_notify == NULL) 3290 return; 3291 SCTP_BUF_LEN(m_notify) = 0; 3292 spc = mtod(m_notify, struct sctp_paddr_change *); 3293 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3294 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3295 spc->spc_flags = 0; 3296 spc->spc_length = sizeof(struct sctp_paddr_change); 3297 switch (sa->sa_family) { 3298 #ifdef INET 3299 case AF_INET: 3300 #ifdef INET6 3301 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3302 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3303 (struct sockaddr_in6 *)&spc->spc_aaddr); 3304 } else { 3305 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3306 } 3307 #else 3308 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3309 #endif 3310 break; 3311 #endif 3312 #ifdef INET6 3313 case AF_INET6: 3314 { 3315 struct sockaddr_in6 *sin6; 3316 3317 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3318 3319 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3320 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3321 if (sin6->sin6_scope_id == 0) { 3322 /* recover scope_id for user */ 3323 (void)sa6_recoverscope(sin6); 3324 } else { 3325 /* clear embedded scope_id for user */ 3326 in6_clearscope(&sin6->sin6_addr); 3327 } 3328 } 3329 break; 3330 } 3331 #endif 3332 default: 3333 /* TSNH */ 3334 break; 3335 } 3336 spc->spc_state = state; 3337 spc->spc_error = error; 3338 spc->spc_assoc_id = sctp_get_associd(stcb); 3339 3340 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3341 SCTP_BUF_NEXT(m_notify) = NULL; 3342 3343 /* append to socket */ 3344 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3345 0, 0, stcb->asoc.context, 0, 0, 0, 3346 m_notify); 3347 if (control == NULL) { 3348 /* no memory */ 3349 sctp_m_freem(m_notify); 3350 return; 3351 } 3352 control->length = SCTP_BUF_LEN(m_notify); 3353 control->spec_flags = M_NOTIFICATION; 3354 /* not that we need this */ 3355 control->tail_mbuf = m_notify; 3356 sctp_add_to_readq(stcb->sctp_ep, stcb, 3357 control, 3358 &stcb->sctp_socket->so_rcv, 1, 3359 SCTP_READ_LOCK_NOT_HELD, 3360 so_locked); 3361 } 3362 3363 static void 3364 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3365 struct sctp_tmit_chunk *chk, int so_locked) 3366 { 3367 struct mbuf *m_notify; 3368 struct sctp_send_failed *ssf; 3369 struct sctp_send_failed_event *ssfe; 3370 struct sctp_queued_to_read *control; 3371 struct sctp_chunkhdr *chkhdr; 3372 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3373 3374 if ((stcb == NULL) || 3375 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3376 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3377 /* event not enabled */ 3378 return; 3379 } 3380 3381 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3382 notifhdr_len = sizeof(struct sctp_send_failed_event); 3383 } else { 3384 notifhdr_len = sizeof(struct sctp_send_failed); 3385 } 3386 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3387 if (m_notify == NULL) 3388 /* no space left */ 3389 return; 3390 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3391 if (stcb->asoc.idata_supported) { 3392 chkhdr_len = sizeof(struct sctp_idata_chunk); 3393 } else { 3394 chkhdr_len = sizeof(struct sctp_data_chunk); 3395 } 3396 /* Use some defaults in case we can't access the chunk header */ 3397 if (chk->send_size >= chkhdr_len) { 3398 payload_len = chk->send_size - chkhdr_len; 3399 } else { 3400 payload_len = 0; 3401 } 3402 padding_len = 0; 3403 if (chk->data != NULL) { 3404 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3405 if (chkhdr != NULL) { 3406 chk_len = ntohs(chkhdr->chunk_length); 3407 if ((chk_len >= chkhdr_len) && 3408 (chk->send_size >= chk_len) && 3409 (chk->send_size - chk_len < 4)) { 3410 padding_len = chk->send_size - chk_len; 3411 payload_len = chk->send_size - chkhdr_len - padding_len; 3412 } 3413 } 3414 } 3415 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3416 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3417 memset(ssfe, 0, notifhdr_len); 3418 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3419 if (sent) { 3420 ssfe->ssfe_flags = SCTP_DATA_SENT; 3421 } else { 3422 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3423 } 3424 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3425 ssfe->ssfe_error = error; 3426 /* not exactly what the user sent in, but should be close :) */ 3427 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3428 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3429 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3430 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3431 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3432 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3433 } else { 3434 ssf = mtod(m_notify, struct sctp_send_failed *); 3435 memset(ssf, 0, notifhdr_len); 3436 ssf->ssf_type = SCTP_SEND_FAILED; 3437 if (sent) { 3438 ssf->ssf_flags = SCTP_DATA_SENT; 3439 } else { 3440 ssf->ssf_flags = SCTP_DATA_UNSENT; 3441 } 3442 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3443 ssf->ssf_error = error; 3444 /* not exactly what the user sent in, but should be close :) */ 3445 ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3446 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3447 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3448 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3449 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3450 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3451 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3452 } 3453 if (chk->data != NULL) { 3454 /* Trim off the sctp chunk header (it should be there) */ 3455 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3456 m_adj(chk->data, chkhdr_len); 3457 m_adj(chk->data, -padding_len); 3458 sctp_mbuf_crush(chk->data); 3459 chk->send_size -= (chkhdr_len + padding_len); 3460 } 3461 } 3462 SCTP_BUF_NEXT(m_notify) = chk->data; 3463 /* Steal off the mbuf */ 3464 chk->data = NULL; 3465 /* 3466 * For this case, we check the actual socket buffer, since the assoc 3467 * is going away we don't want to overfill the socket buffer for a 3468 * non-reader 3469 */ 3470 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3471 sctp_m_freem(m_notify); 3472 return; 3473 } 3474 /* append to socket */ 3475 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3476 0, 0, stcb->asoc.context, 0, 0, 0, 3477 m_notify); 3478 if (control == NULL) { 3479 /* no memory */ 3480 sctp_m_freem(m_notify); 3481 return; 3482 } 3483 control->length = SCTP_BUF_LEN(m_notify); 3484 control->spec_flags = M_NOTIFICATION; 3485 /* not that we need this */ 3486 control->tail_mbuf = m_notify; 3487 sctp_add_to_readq(stcb->sctp_ep, stcb, 3488 control, 3489 &stcb->sctp_socket->so_rcv, 1, 3490 SCTP_READ_LOCK_NOT_HELD, 3491 so_locked); 3492 } 3493 3494 static void 3495 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3496 struct sctp_stream_queue_pending *sp, int so_locked) 3497 { 3498 struct mbuf *m_notify; 3499 struct sctp_send_failed *ssf; 3500 struct sctp_send_failed_event *ssfe; 3501 struct sctp_queued_to_read *control; 3502 int notifhdr_len; 3503 3504 if ((stcb == NULL) || 3505 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3506 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3507 /* event not enabled */ 3508 return; 3509 } 3510 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3511 notifhdr_len = sizeof(struct sctp_send_failed_event); 3512 } else { 3513 notifhdr_len = sizeof(struct sctp_send_failed); 3514 } 3515 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3516 if (m_notify == NULL) { 3517 /* no space left */ 3518 return; 3519 } 3520 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3521 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3522 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3523 memset(ssfe, 0, notifhdr_len); 3524 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3525 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3526 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3527 ssfe->ssfe_error = error; 3528 /* not exactly what the user sent in, but should be close :) */ 3529 ssfe->ssfe_info.snd_sid = sp->sid; 3530 if (sp->some_taken) { 3531 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3532 } else { 3533 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3534 } 3535 ssfe->ssfe_info.snd_ppid = sp->ppid; 3536 ssfe->ssfe_info.snd_context = sp->context; 3537 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3538 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3539 } else { 3540 ssf = mtod(m_notify, struct sctp_send_failed *); 3541 memset(ssf, 0, notifhdr_len); 3542 ssf->ssf_type = SCTP_SEND_FAILED; 3543 ssf->ssf_flags = SCTP_DATA_UNSENT; 3544 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3545 ssf->ssf_error = error; 3546 /* not exactly what the user sent in, but should be close :) */ 3547 ssf->ssf_info.sinfo_stream = sp->sid; 3548 ssf->ssf_info.sinfo_ssn = 0; 3549 if (sp->some_taken) { 3550 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3551 } else { 3552 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3553 } 3554 ssf->ssf_info.sinfo_ppid = sp->ppid; 3555 ssf->ssf_info.sinfo_context = sp->context; 3556 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3557 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3558 } 3559 SCTP_BUF_NEXT(m_notify) = sp->data; 3560 3561 /* Steal off the mbuf */ 3562 sp->data = NULL; 3563 /* 3564 * For this case, we check the actual socket buffer, since the assoc 3565 * is going away we don't want to overfill the socket buffer for a 3566 * non-reader 3567 */ 3568 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3569 sctp_m_freem(m_notify); 3570 return; 3571 } 3572 /* append to socket */ 3573 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3574 0, 0, stcb->asoc.context, 0, 0, 0, 3575 m_notify); 3576 if (control == NULL) { 3577 /* no memory */ 3578 sctp_m_freem(m_notify); 3579 return; 3580 } 3581 control->length = SCTP_BUF_LEN(m_notify); 3582 control->spec_flags = M_NOTIFICATION; 3583 /* not that we need this */ 3584 control->tail_mbuf = m_notify; 3585 sctp_add_to_readq(stcb->sctp_ep, stcb, 3586 control, 3587 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3588 } 3589 3590 static void 3591 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3592 { 3593 struct mbuf *m_notify; 3594 struct sctp_adaptation_event *sai; 3595 struct sctp_queued_to_read *control; 3596 3597 if ((stcb == NULL) || 3598 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3599 /* event not enabled */ 3600 return; 3601 } 3602 3603 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3604 if (m_notify == NULL) 3605 /* no space left */ 3606 return; 3607 SCTP_BUF_LEN(m_notify) = 0; 3608 sai = mtod(m_notify, struct sctp_adaptation_event *); 3609 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3610 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3611 sai->sai_flags = 0; 3612 sai->sai_length = sizeof(struct sctp_adaptation_event); 3613 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3614 sai->sai_assoc_id = sctp_get_associd(stcb); 3615 3616 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3617 SCTP_BUF_NEXT(m_notify) = NULL; 3618 3619 /* append to socket */ 3620 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3621 0, 0, stcb->asoc.context, 0, 0, 0, 3622 m_notify); 3623 if (control == NULL) { 3624 /* no memory */ 3625 sctp_m_freem(m_notify); 3626 return; 3627 } 3628 control->length = SCTP_BUF_LEN(m_notify); 3629 control->spec_flags = M_NOTIFICATION; 3630 /* not that we need this */ 3631 control->tail_mbuf = m_notify; 3632 sctp_add_to_readq(stcb->sctp_ep, stcb, 3633 control, 3634 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3635 } 3636 3637 /* This always must be called with the read-queue LOCKED in the INP */ 3638 static void 3639 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3640 uint32_t val, int so_locked) 3641 { 3642 struct mbuf *m_notify; 3643 struct sctp_pdapi_event *pdapi; 3644 struct sctp_queued_to_read *control; 3645 struct sockbuf *sb; 3646 3647 if ((stcb == NULL) || 3648 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3649 /* event not enabled */ 3650 return; 3651 } 3652 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3653 return; 3654 } 3655 3656 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3657 if (m_notify == NULL) 3658 /* no space left */ 3659 return; 3660 SCTP_BUF_LEN(m_notify) = 0; 3661 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3662 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3663 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3664 pdapi->pdapi_flags = 0; 3665 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3666 pdapi->pdapi_indication = error; 3667 pdapi->pdapi_stream = (val >> 16); 3668 pdapi->pdapi_seq = (val & 0x0000ffff); 3669 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3670 3671 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3672 SCTP_BUF_NEXT(m_notify) = NULL; 3673 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3674 0, 0, stcb->asoc.context, 0, 0, 0, 3675 m_notify); 3676 if (control == NULL) { 3677 /* no memory */ 3678 sctp_m_freem(m_notify); 3679 return; 3680 } 3681 control->length = SCTP_BUF_LEN(m_notify); 3682 control->spec_flags = M_NOTIFICATION; 3683 /* not that we need this */ 3684 control->tail_mbuf = m_notify; 3685 sb = &stcb->sctp_socket->so_rcv; 3686 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3687 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3688 } 3689 sctp_sballoc(stcb, sb, m_notify); 3690 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3691 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3692 } 3693 control->end_added = 1; 3694 if (stcb->asoc.control_pdapi) 3695 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3696 else { 3697 /* we really should not see this case */ 3698 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3699 } 3700 if (stcb->sctp_ep && stcb->sctp_socket) { 3701 /* This should always be the case */ 3702 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3703 } 3704 } 3705 3706 static void 3707 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3708 { 3709 struct mbuf *m_notify; 3710 struct sctp_shutdown_event *sse; 3711 struct sctp_queued_to_read *control; 3712 3713 /* 3714 * For TCP model AND UDP connected sockets we will send an error up 3715 * when an SHUTDOWN completes 3716 */ 3717 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3718 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3719 /* mark socket closed for read/write and wakeup! */ 3720 socantsendmore(stcb->sctp_socket); 3721 } 3722 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3723 /* event not enabled */ 3724 return; 3725 } 3726 3727 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3728 if (m_notify == NULL) 3729 /* no space left */ 3730 return; 3731 sse = mtod(m_notify, struct sctp_shutdown_event *); 3732 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3733 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3734 sse->sse_flags = 0; 3735 sse->sse_length = sizeof(struct sctp_shutdown_event); 3736 sse->sse_assoc_id = sctp_get_associd(stcb); 3737 3738 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3739 SCTP_BUF_NEXT(m_notify) = NULL; 3740 3741 /* append to socket */ 3742 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3743 0, 0, stcb->asoc.context, 0, 0, 0, 3744 m_notify); 3745 if (control == NULL) { 3746 /* no memory */ 3747 sctp_m_freem(m_notify); 3748 return; 3749 } 3750 control->length = SCTP_BUF_LEN(m_notify); 3751 control->spec_flags = M_NOTIFICATION; 3752 /* not that we need this */ 3753 control->tail_mbuf = m_notify; 3754 sctp_add_to_readq(stcb->sctp_ep, stcb, 3755 control, 3756 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3757 } 3758 3759 static void 3760 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3761 int so_locked) 3762 { 3763 struct mbuf *m_notify; 3764 struct sctp_sender_dry_event *event; 3765 struct sctp_queued_to_read *control; 3766 3767 if ((stcb == NULL) || 3768 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3769 /* event not enabled */ 3770 return; 3771 } 3772 3773 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3774 if (m_notify == NULL) { 3775 /* no space left */ 3776 return; 3777 } 3778 SCTP_BUF_LEN(m_notify) = 0; 3779 event = mtod(m_notify, struct sctp_sender_dry_event *); 3780 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3781 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3782 event->sender_dry_flags = 0; 3783 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3784 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3785 3786 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3787 SCTP_BUF_NEXT(m_notify) = NULL; 3788 3789 /* append to socket */ 3790 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3791 0, 0, stcb->asoc.context, 0, 0, 0, 3792 m_notify); 3793 if (control == NULL) { 3794 /* no memory */ 3795 sctp_m_freem(m_notify); 3796 return; 3797 } 3798 control->length = SCTP_BUF_LEN(m_notify); 3799 control->spec_flags = M_NOTIFICATION; 3800 /* not that we need this */ 3801 control->tail_mbuf = m_notify; 3802 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3803 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3804 } 3805 3806 void 3807 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3808 { 3809 struct mbuf *m_notify; 3810 struct sctp_queued_to_read *control; 3811 struct sctp_stream_change_event *stradd; 3812 3813 if ((stcb == NULL) || 3814 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3815 /* event not enabled */ 3816 return; 3817 } 3818 if ((stcb->asoc.peer_req_out) && flag) { 3819 /* Peer made the request, don't tell the local user */ 3820 stcb->asoc.peer_req_out = 0; 3821 return; 3822 } 3823 stcb->asoc.peer_req_out = 0; 3824 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3825 if (m_notify == NULL) 3826 /* no space left */ 3827 return; 3828 SCTP_BUF_LEN(m_notify) = 0; 3829 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3830 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3831 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3832 stradd->strchange_flags = flag; 3833 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3834 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3835 stradd->strchange_instrms = numberin; 3836 stradd->strchange_outstrms = numberout; 3837 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3838 SCTP_BUF_NEXT(m_notify) = NULL; 3839 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3840 /* no space */ 3841 sctp_m_freem(m_notify); 3842 return; 3843 } 3844 /* append to socket */ 3845 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3846 0, 0, stcb->asoc.context, 0, 0, 0, 3847 m_notify); 3848 if (control == NULL) { 3849 /* no memory */ 3850 sctp_m_freem(m_notify); 3851 return; 3852 } 3853 control->length = SCTP_BUF_LEN(m_notify); 3854 control->spec_flags = M_NOTIFICATION; 3855 /* not that we need this */ 3856 control->tail_mbuf = m_notify; 3857 sctp_add_to_readq(stcb->sctp_ep, stcb, 3858 control, 3859 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3860 } 3861 3862 void 3863 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3864 { 3865 struct mbuf *m_notify; 3866 struct sctp_queued_to_read *control; 3867 struct sctp_assoc_reset_event *strasoc; 3868 3869 if ((stcb == NULL) || 3870 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3871 /* event not enabled */ 3872 return; 3873 } 3874 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3875 if (m_notify == NULL) 3876 /* no space left */ 3877 return; 3878 SCTP_BUF_LEN(m_notify) = 0; 3879 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3880 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3881 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3882 strasoc->assocreset_flags = flag; 3883 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3884 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3885 strasoc->assocreset_local_tsn = sending_tsn; 3886 strasoc->assocreset_remote_tsn = recv_tsn; 3887 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3888 SCTP_BUF_NEXT(m_notify) = NULL; 3889 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3890 /* no space */ 3891 sctp_m_freem(m_notify); 3892 return; 3893 } 3894 /* append to socket */ 3895 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3896 0, 0, stcb->asoc.context, 0, 0, 0, 3897 m_notify); 3898 if (control == NULL) { 3899 /* no memory */ 3900 sctp_m_freem(m_notify); 3901 return; 3902 } 3903 control->length = SCTP_BUF_LEN(m_notify); 3904 control->spec_flags = M_NOTIFICATION; 3905 /* not that we need this */ 3906 control->tail_mbuf = m_notify; 3907 sctp_add_to_readq(stcb->sctp_ep, stcb, 3908 control, 3909 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3910 } 3911 3912 static void 3913 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3914 int number_entries, uint16_t *list, int flag) 3915 { 3916 struct mbuf *m_notify; 3917 struct sctp_queued_to_read *control; 3918 struct sctp_stream_reset_event *strreset; 3919 int len; 3920 3921 if ((stcb == NULL) || 3922 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3923 /* event not enabled */ 3924 return; 3925 } 3926 3927 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3928 if (m_notify == NULL) 3929 /* no space left */ 3930 return; 3931 SCTP_BUF_LEN(m_notify) = 0; 3932 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3933 if (len > M_TRAILINGSPACE(m_notify)) { 3934 /* never enough room */ 3935 sctp_m_freem(m_notify); 3936 return; 3937 } 3938 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3939 memset(strreset, 0, len); 3940 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3941 strreset->strreset_flags = flag; 3942 strreset->strreset_length = len; 3943 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3944 if (number_entries) { 3945 int i; 3946 3947 for (i = 0; i < number_entries; i++) { 3948 strreset->strreset_stream_list[i] = ntohs(list[i]); 3949 } 3950 } 3951 SCTP_BUF_LEN(m_notify) = len; 3952 SCTP_BUF_NEXT(m_notify) = NULL; 3953 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3954 /* no space */ 3955 sctp_m_freem(m_notify); 3956 return; 3957 } 3958 /* append to socket */ 3959 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3960 0, 0, stcb->asoc.context, 0, 0, 0, 3961 m_notify); 3962 if (control == NULL) { 3963 /* no memory */ 3964 sctp_m_freem(m_notify); 3965 return; 3966 } 3967 control->length = SCTP_BUF_LEN(m_notify); 3968 control->spec_flags = M_NOTIFICATION; 3969 /* not that we need this */ 3970 control->tail_mbuf = m_notify; 3971 sctp_add_to_readq(stcb->sctp_ep, stcb, 3972 control, 3973 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3974 } 3975 3976 static void 3977 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3978 { 3979 struct mbuf *m_notify; 3980 struct sctp_remote_error *sre; 3981 struct sctp_queued_to_read *control; 3982 unsigned int notif_len; 3983 uint16_t chunk_len; 3984 3985 if ((stcb == NULL) || 3986 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3987 return; 3988 } 3989 if (chunk != NULL) { 3990 chunk_len = ntohs(chunk->ch.chunk_length); 3991 /* 3992 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3993 * contiguous. 3994 */ 3995 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 3996 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 3997 } 3998 } else { 3999 chunk_len = 0; 4000 } 4001 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4002 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4003 if (m_notify == NULL) { 4004 /* Retry with smaller value. */ 4005 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4006 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4007 if (m_notify == NULL) { 4008 return; 4009 } 4010 } 4011 SCTP_BUF_NEXT(m_notify) = NULL; 4012 sre = mtod(m_notify, struct sctp_remote_error *); 4013 memset(sre, 0, notif_len); 4014 sre->sre_type = SCTP_REMOTE_ERROR; 4015 sre->sre_flags = 0; 4016 sre->sre_length = sizeof(struct sctp_remote_error); 4017 sre->sre_error = error; 4018 sre->sre_assoc_id = sctp_get_associd(stcb); 4019 if (notif_len > sizeof(struct sctp_remote_error)) { 4020 memcpy(sre->sre_data, chunk, chunk_len); 4021 sre->sre_length += chunk_len; 4022 } 4023 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4024 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4025 0, 0, stcb->asoc.context, 0, 0, 0, 4026 m_notify); 4027 if (control != NULL) { 4028 control->length = SCTP_BUF_LEN(m_notify); 4029 control->spec_flags = M_NOTIFICATION; 4030 /* not that we need this */ 4031 control->tail_mbuf = m_notify; 4032 sctp_add_to_readq(stcb->sctp_ep, stcb, 4033 control, 4034 &stcb->sctp_socket->so_rcv, 1, 4035 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4036 } else { 4037 sctp_m_freem(m_notify); 4038 } 4039 } 4040 4041 void 4042 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4043 uint32_t error, void *data, int so_locked) 4044 { 4045 if ((stcb == NULL) || 4046 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4047 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4048 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4049 /* If the socket is gone we are out of here */ 4050 return; 4051 } 4052 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4053 return; 4054 } 4055 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4056 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4057 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4058 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4059 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4060 /* Don't report these in front states */ 4061 return; 4062 } 4063 } 4064 switch (notification) { 4065 case SCTP_NOTIFY_ASSOC_UP: 4066 if (stcb->asoc.assoc_up_sent == 0) { 4067 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, false, false, so_locked); 4068 stcb->asoc.assoc_up_sent = 1; 4069 } 4070 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4071 sctp_notify_adaptation_layer(stcb); 4072 } 4073 if (stcb->asoc.auth_supported == 0) { 4074 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4075 NULL, so_locked); 4076 } 4077 break; 4078 case SCTP_NOTIFY_ASSOC_DOWN: 4079 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, false, false, so_locked); 4080 break; 4081 case SCTP_NOTIFY_INTERFACE_DOWN: 4082 { 4083 struct sctp_nets *net; 4084 4085 net = (struct sctp_nets *)data; 4086 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4087 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4088 break; 4089 } 4090 case SCTP_NOTIFY_INTERFACE_UP: 4091 { 4092 struct sctp_nets *net; 4093 4094 net = (struct sctp_nets *)data; 4095 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4096 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4097 break; 4098 } 4099 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4100 { 4101 struct sctp_nets *net; 4102 4103 net = (struct sctp_nets *)data; 4104 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4105 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4106 break; 4107 } 4108 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4109 sctp_notify_send_failed2(stcb, error, 4110 (struct sctp_stream_queue_pending *)data, so_locked); 4111 break; 4112 case SCTP_NOTIFY_SENT_DG_FAIL: 4113 sctp_notify_send_failed(stcb, 1, error, 4114 (struct sctp_tmit_chunk *)data, so_locked); 4115 break; 4116 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4117 sctp_notify_send_failed(stcb, 0, error, 4118 (struct sctp_tmit_chunk *)data, so_locked); 4119 break; 4120 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4121 { 4122 uint32_t val; 4123 4124 val = *((uint32_t *)data); 4125 4126 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4127 break; 4128 } 4129 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4130 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4131 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4132 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, false, so_locked); 4133 } else { 4134 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, false, so_locked); 4135 } 4136 break; 4137 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4138 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4139 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4140 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, true, false, so_locked); 4141 } else { 4142 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, true, false, so_locked); 4143 } 4144 break; 4145 case SCTP_NOTIFY_ASSOC_TIMEDOUT: 4146 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4147 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4148 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, true, so_locked); 4149 } else { 4150 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, true, so_locked); 4151 } 4152 break; 4153 case SCTP_NOTIFY_ASSOC_RESTART: 4154 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, false, false, so_locked); 4155 if (stcb->asoc.auth_supported == 0) { 4156 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4157 NULL, so_locked); 4158 } 4159 break; 4160 case SCTP_NOTIFY_STR_RESET_SEND: 4161 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4162 break; 4163 case SCTP_NOTIFY_STR_RESET_RECV: 4164 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4165 break; 4166 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4167 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4168 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4169 break; 4170 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4171 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4172 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4173 break; 4174 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4175 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4176 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4177 break; 4178 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4179 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4180 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4181 break; 4182 case SCTP_NOTIFY_ASCONF_ADD_IP: 4183 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4184 error, so_locked); 4185 break; 4186 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4187 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4188 error, so_locked); 4189 break; 4190 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4191 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4192 error, so_locked); 4193 break; 4194 case SCTP_NOTIFY_PEER_SHUTDOWN: 4195 sctp_notify_shutdown_event(stcb); 4196 break; 4197 case SCTP_NOTIFY_AUTH_NEW_KEY: 4198 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4199 (uint16_t)(uintptr_t)data, 4200 so_locked); 4201 break; 4202 case SCTP_NOTIFY_AUTH_FREE_KEY: 4203 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4204 (uint16_t)(uintptr_t)data, 4205 so_locked); 4206 break; 4207 case SCTP_NOTIFY_NO_PEER_AUTH: 4208 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4209 (uint16_t)(uintptr_t)data, 4210 so_locked); 4211 break; 4212 case SCTP_NOTIFY_SENDER_DRY: 4213 sctp_notify_sender_dry_event(stcb, so_locked); 4214 break; 4215 case SCTP_NOTIFY_REMOTE_ERROR: 4216 sctp_notify_remote_error(stcb, error, data); 4217 break; 4218 default: 4219 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4220 __func__, notification, notification); 4221 break; 4222 } /* end switch */ 4223 } 4224 4225 void 4226 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4227 { 4228 struct sctp_association *asoc; 4229 struct sctp_stream_out *outs; 4230 struct sctp_tmit_chunk *chk, *nchk; 4231 struct sctp_stream_queue_pending *sp, *nsp; 4232 int i; 4233 4234 if (stcb == NULL) { 4235 return; 4236 } 4237 asoc = &stcb->asoc; 4238 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4239 /* already being freed */ 4240 return; 4241 } 4242 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4243 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4244 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4245 return; 4246 } 4247 /* now through all the gunk freeing chunks */ 4248 /* sent queue SHOULD be empty */ 4249 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4250 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4251 asoc->sent_queue_cnt--; 4252 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4253 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4254 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4255 #ifdef INVARIANTS 4256 } else { 4257 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4258 #endif 4259 } 4260 } 4261 if (chk->data != NULL) { 4262 sctp_free_bufspace(stcb, asoc, chk, 1); 4263 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4264 error, chk, so_locked); 4265 if (chk->data) { 4266 sctp_m_freem(chk->data); 4267 chk->data = NULL; 4268 } 4269 } 4270 sctp_free_a_chunk(stcb, chk, so_locked); 4271 /* sa_ignore FREED_MEMORY */ 4272 } 4273 /* pending send queue SHOULD be empty */ 4274 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4275 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4276 asoc->send_queue_cnt--; 4277 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4278 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4279 #ifdef INVARIANTS 4280 } else { 4281 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4282 #endif 4283 } 4284 if (chk->data != NULL) { 4285 sctp_free_bufspace(stcb, asoc, chk, 1); 4286 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4287 error, chk, so_locked); 4288 if (chk->data) { 4289 sctp_m_freem(chk->data); 4290 chk->data = NULL; 4291 } 4292 } 4293 sctp_free_a_chunk(stcb, chk, so_locked); 4294 /* sa_ignore FREED_MEMORY */ 4295 } 4296 for (i = 0; i < asoc->streamoutcnt; i++) { 4297 /* For each stream */ 4298 outs = &asoc->strmout[i]; 4299 /* clean up any sends there */ 4300 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4301 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4302 TAILQ_REMOVE(&outs->outqueue, sp, next); 4303 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp); 4304 sctp_free_spbufspace(stcb, asoc, sp); 4305 if (sp->data) { 4306 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4307 error, (void *)sp, so_locked); 4308 if (sp->data) { 4309 sctp_m_freem(sp->data); 4310 sp->data = NULL; 4311 sp->tail_mbuf = NULL; 4312 sp->length = 0; 4313 } 4314 } 4315 if (sp->net) { 4316 sctp_free_remote_addr(sp->net); 4317 sp->net = NULL; 4318 } 4319 /* Free the chunk */ 4320 sctp_free_a_strmoq(stcb, sp, so_locked); 4321 /* sa_ignore FREED_MEMORY */ 4322 } 4323 } 4324 } 4325 4326 void 4327 sctp_abort_notification(struct sctp_tcb *stcb, bool from_peer, bool timeout, 4328 uint16_t error, struct sctp_abort_chunk *abort, 4329 int so_locked) 4330 { 4331 if (stcb == NULL) { 4332 return; 4333 } 4334 SCTP_TCB_LOCK_ASSERT(stcb); 4335 4336 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4337 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4338 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4339 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 4340 } 4341 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4342 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4343 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4344 return; 4345 } 4346 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4347 /* Tell them we lost the asoc */ 4348 sctp_report_all_outbound(stcb, error, so_locked); 4349 if (from_peer) { 4350 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4351 } else { 4352 if (timeout) { 4353 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_TIMEDOUT, stcb, error, abort, so_locked); 4354 } else { 4355 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4356 } 4357 } 4358 } 4359 4360 void 4361 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4362 struct mbuf *m, int iphlen, 4363 struct sockaddr *src, struct sockaddr *dst, 4364 struct sctphdr *sh, struct mbuf *op_err, 4365 uint8_t mflowtype, uint32_t mflowid, 4366 uint32_t vrf_id, uint16_t port) 4367 { 4368 struct sctp_gen_error_cause *cause; 4369 uint32_t vtag; 4370 uint16_t cause_code; 4371 4372 if (stcb != NULL) { 4373 vtag = stcb->asoc.peer_vtag; 4374 vrf_id = stcb->asoc.vrf_id; 4375 if (op_err != NULL) { 4376 /* Read the cause code from the error cause. */ 4377 cause = mtod(op_err, struct sctp_gen_error_cause *); 4378 cause_code = ntohs(cause->code); 4379 } else { 4380 cause_code = 0; 4381 } 4382 } else { 4383 vtag = 0; 4384 } 4385 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4386 mflowtype, mflowid, inp->fibnum, 4387 vrf_id, port); 4388 if (stcb != NULL) { 4389 /* We have a TCB to abort, send notification too */ 4390 sctp_abort_notification(stcb, false, false, cause_code, NULL, SCTP_SO_NOT_LOCKED); 4391 /* Ok, now lets free it */ 4392 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4393 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4394 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4395 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4396 } 4397 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4398 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4399 } 4400 } 4401 #ifdef SCTP_ASOCLOG_OF_TSNS 4402 void 4403 sctp_print_out_track_log(struct sctp_tcb *stcb) 4404 { 4405 #ifdef NOSIY_PRINTS 4406 int i; 4407 4408 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4409 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4410 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4411 SCTP_PRINTF("None rcvd\n"); 4412 goto none_in; 4413 } 4414 if (stcb->asoc.tsn_in_wrapped) { 4415 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4416 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4417 stcb->asoc.in_tsnlog[i].tsn, 4418 stcb->asoc.in_tsnlog[i].strm, 4419 stcb->asoc.in_tsnlog[i].seq, 4420 stcb->asoc.in_tsnlog[i].flgs, 4421 stcb->asoc.in_tsnlog[i].sz); 4422 } 4423 } 4424 if (stcb->asoc.tsn_in_at) { 4425 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4426 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4427 stcb->asoc.in_tsnlog[i].tsn, 4428 stcb->asoc.in_tsnlog[i].strm, 4429 stcb->asoc.in_tsnlog[i].seq, 4430 stcb->asoc.in_tsnlog[i].flgs, 4431 stcb->asoc.in_tsnlog[i].sz); 4432 } 4433 } 4434 none_in: 4435 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4436 if ((stcb->asoc.tsn_out_at == 0) && 4437 (stcb->asoc.tsn_out_wrapped == 0)) { 4438 SCTP_PRINTF("None sent\n"); 4439 } 4440 if (stcb->asoc.tsn_out_wrapped) { 4441 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4442 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4443 stcb->asoc.out_tsnlog[i].tsn, 4444 stcb->asoc.out_tsnlog[i].strm, 4445 stcb->asoc.out_tsnlog[i].seq, 4446 stcb->asoc.out_tsnlog[i].flgs, 4447 stcb->asoc.out_tsnlog[i].sz); 4448 } 4449 } 4450 if (stcb->asoc.tsn_out_at) { 4451 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4452 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4453 stcb->asoc.out_tsnlog[i].tsn, 4454 stcb->asoc.out_tsnlog[i].strm, 4455 stcb->asoc.out_tsnlog[i].seq, 4456 stcb->asoc.out_tsnlog[i].flgs, 4457 stcb->asoc.out_tsnlog[i].sz); 4458 } 4459 } 4460 #endif 4461 } 4462 #endif 4463 4464 void 4465 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4466 struct mbuf *op_err, bool timedout, int so_locked) 4467 { 4468 struct sctp_gen_error_cause *cause; 4469 uint16_t cause_code; 4470 4471 if (stcb == NULL) { 4472 /* Got to have a TCB */ 4473 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4474 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4475 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4476 SCTP_CALLED_DIRECTLY_NOCMPSET); 4477 } 4478 } 4479 return; 4480 } 4481 if (op_err != NULL) { 4482 /* Read the cause code from the error cause. */ 4483 cause = mtod(op_err, struct sctp_gen_error_cause *); 4484 cause_code = ntohs(cause->code); 4485 } else { 4486 cause_code = 0; 4487 } 4488 /* notify the peer */ 4489 sctp_send_abort_tcb(stcb, op_err, so_locked); 4490 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4491 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4492 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4493 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4494 } 4495 /* notify the ulp */ 4496 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4497 sctp_abort_notification(stcb, false, timedout, cause_code, NULL, so_locked); 4498 } 4499 /* now free the asoc */ 4500 #ifdef SCTP_ASOCLOG_OF_TSNS 4501 sctp_print_out_track_log(stcb); 4502 #endif 4503 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4504 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4505 } 4506 4507 void 4508 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4509 struct sockaddr *src, struct sockaddr *dst, 4510 struct sctphdr *sh, struct sctp_inpcb *inp, 4511 struct mbuf *cause, 4512 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4513 uint32_t vrf_id, uint16_t port) 4514 { 4515 struct sctp_chunkhdr *ch, chunk_buf; 4516 unsigned int chk_length; 4517 int contains_init_chunk; 4518 4519 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4520 /* Generate a TO address for future reference */ 4521 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4522 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4523 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4524 SCTP_CALLED_DIRECTLY_NOCMPSET); 4525 } 4526 } 4527 contains_init_chunk = 0; 4528 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4529 sizeof(*ch), (uint8_t *)&chunk_buf); 4530 while (ch != NULL) { 4531 chk_length = ntohs(ch->chunk_length); 4532 if (chk_length < sizeof(*ch)) { 4533 /* break to abort land */ 4534 break; 4535 } 4536 switch (ch->chunk_type) { 4537 case SCTP_INIT: 4538 contains_init_chunk = 1; 4539 break; 4540 case SCTP_PACKET_DROPPED: 4541 /* we don't respond to pkt-dropped */ 4542 return; 4543 case SCTP_ABORT_ASSOCIATION: 4544 /* we don't respond with an ABORT to an ABORT */ 4545 return; 4546 case SCTP_SHUTDOWN_COMPLETE: 4547 /* 4548 * we ignore it since we are not waiting for it and 4549 * peer is gone 4550 */ 4551 return; 4552 case SCTP_SHUTDOWN_ACK: 4553 sctp_send_shutdown_complete2(src, dst, sh, 4554 mflowtype, mflowid, fibnum, 4555 vrf_id, port); 4556 return; 4557 default: 4558 break; 4559 } 4560 offset += SCTP_SIZE32(chk_length); 4561 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4562 sizeof(*ch), (uint8_t *)&chunk_buf); 4563 } 4564 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4565 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4566 (contains_init_chunk == 0))) { 4567 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4568 mflowtype, mflowid, fibnum, 4569 vrf_id, port); 4570 } 4571 } 4572 4573 /* 4574 * check the inbound datagram to make sure there is not an abort inside it, 4575 * if there is return 1, else return 0. 4576 */ 4577 int 4578 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtag) 4579 { 4580 struct sctp_chunkhdr *ch; 4581 struct sctp_init_chunk *init_chk, chunk_buf; 4582 int offset; 4583 unsigned int chk_length; 4584 4585 offset = iphlen + sizeof(struct sctphdr); 4586 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4587 (uint8_t *)&chunk_buf); 4588 while (ch != NULL) { 4589 chk_length = ntohs(ch->chunk_length); 4590 if (chk_length < sizeof(*ch)) { 4591 /* packet is probably corrupt */ 4592 break; 4593 } 4594 /* we seem to be ok, is it an abort? */ 4595 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4596 /* yep, tell them */ 4597 return (1); 4598 } 4599 if ((ch->chunk_type == SCTP_INITIATION) || 4600 (ch->chunk_type == SCTP_INITIATION_ACK)) { 4601 /* need to update the Vtag */ 4602 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4603 offset, sizeof(struct sctp_init_chunk), (uint8_t *)&chunk_buf); 4604 if (init_chk != NULL) { 4605 *vtag = ntohl(init_chk->init.initiate_tag); 4606 } 4607 } 4608 /* Nope, move to the next chunk */ 4609 offset += SCTP_SIZE32(chk_length); 4610 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4611 sizeof(*ch), (uint8_t *)&chunk_buf); 4612 } 4613 return (0); 4614 } 4615 4616 /* 4617 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4618 * set (i.e. it's 0) so, create this function to compare link local scopes 4619 */ 4620 #ifdef INET6 4621 uint32_t 4622 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4623 { 4624 struct sockaddr_in6 a, b; 4625 4626 /* save copies */ 4627 a = *addr1; 4628 b = *addr2; 4629 4630 if (a.sin6_scope_id == 0) 4631 if (sa6_recoverscope(&a)) { 4632 /* can't get scope, so can't match */ 4633 return (0); 4634 } 4635 if (b.sin6_scope_id == 0) 4636 if (sa6_recoverscope(&b)) { 4637 /* can't get scope, so can't match */ 4638 return (0); 4639 } 4640 if (a.sin6_scope_id != b.sin6_scope_id) 4641 return (0); 4642 4643 return (1); 4644 } 4645 4646 /* 4647 * returns a sockaddr_in6 with embedded scope recovered and removed 4648 */ 4649 struct sockaddr_in6 * 4650 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4651 { 4652 /* check and strip embedded scope junk */ 4653 if (addr->sin6_family == AF_INET6) { 4654 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4655 if (addr->sin6_scope_id == 0) { 4656 *store = *addr; 4657 if (!sa6_recoverscope(store)) { 4658 /* use the recovered scope */ 4659 addr = store; 4660 } 4661 } else { 4662 /* else, return the original "to" addr */ 4663 in6_clearscope(&addr->sin6_addr); 4664 } 4665 } 4666 } 4667 return (addr); 4668 } 4669 #endif 4670 4671 /* 4672 * are the two addresses the same? currently a "scopeless" check returns: 1 4673 * if same, 0 if not 4674 */ 4675 int 4676 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4677 { 4678 4679 /* must be valid */ 4680 if (sa1 == NULL || sa2 == NULL) 4681 return (0); 4682 4683 /* must be the same family */ 4684 if (sa1->sa_family != sa2->sa_family) 4685 return (0); 4686 4687 switch (sa1->sa_family) { 4688 #ifdef INET6 4689 case AF_INET6: 4690 { 4691 /* IPv6 addresses */ 4692 struct sockaddr_in6 *sin6_1, *sin6_2; 4693 4694 sin6_1 = (struct sockaddr_in6 *)sa1; 4695 sin6_2 = (struct sockaddr_in6 *)sa2; 4696 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4697 sin6_2)); 4698 } 4699 #endif 4700 #ifdef INET 4701 case AF_INET: 4702 { 4703 /* IPv4 addresses */ 4704 struct sockaddr_in *sin_1, *sin_2; 4705 4706 sin_1 = (struct sockaddr_in *)sa1; 4707 sin_2 = (struct sockaddr_in *)sa2; 4708 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4709 } 4710 #endif 4711 default: 4712 /* we don't do these... */ 4713 return (0); 4714 } 4715 } 4716 4717 void 4718 sctp_print_address(struct sockaddr *sa) 4719 { 4720 #ifdef INET6 4721 char ip6buf[INET6_ADDRSTRLEN]; 4722 #endif 4723 4724 switch (sa->sa_family) { 4725 #ifdef INET6 4726 case AF_INET6: 4727 { 4728 struct sockaddr_in6 *sin6; 4729 4730 sin6 = (struct sockaddr_in6 *)sa; 4731 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4732 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4733 ntohs(sin6->sin6_port), 4734 sin6->sin6_scope_id); 4735 break; 4736 } 4737 #endif 4738 #ifdef INET 4739 case AF_INET: 4740 { 4741 struct sockaddr_in *sin; 4742 unsigned char *p; 4743 4744 sin = (struct sockaddr_in *)sa; 4745 p = (unsigned char *)&sin->sin_addr; 4746 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4747 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4748 break; 4749 } 4750 #endif 4751 default: 4752 SCTP_PRINTF("?\n"); 4753 break; 4754 } 4755 } 4756 4757 void 4758 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4759 struct sctp_inpcb *new_inp, 4760 struct sctp_tcb *stcb, 4761 int waitflags) 4762 { 4763 /* 4764 * go through our old INP and pull off any control structures that 4765 * belong to stcb and move then to the new inp. 4766 */ 4767 struct socket *old_so, *new_so; 4768 struct sctp_queued_to_read *control, *nctl; 4769 struct sctp_readhead tmp_queue; 4770 struct mbuf *m; 4771 int error = 0; 4772 4773 old_so = old_inp->sctp_socket; 4774 new_so = new_inp->sctp_socket; 4775 TAILQ_INIT(&tmp_queue); 4776 error = SOCK_IO_RECV_LOCK(old_so, waitflags); 4777 if (error) { 4778 /* 4779 * Gak, can't get I/O lock, we have a problem. data will be 4780 * left stranded.. and we don't dare look at it since the 4781 * other thread may be reading something. Oh well, its a 4782 * screwed up app that does a peeloff OR a accept while 4783 * reading from the main socket... actually its only the 4784 * peeloff() case, since I think read will fail on a 4785 * listening socket.. 4786 */ 4787 return; 4788 } 4789 /* lock the socket buffers */ 4790 SCTP_INP_READ_LOCK(old_inp); 4791 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4792 /* Pull off all for out target stcb */ 4793 if (control->stcb == stcb) { 4794 /* remove it we want it */ 4795 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4796 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4797 m = control->data; 4798 while (m) { 4799 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4800 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4801 } 4802 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4803 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4804 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4805 } 4806 m = SCTP_BUF_NEXT(m); 4807 } 4808 } 4809 } 4810 SCTP_INP_READ_UNLOCK(old_inp); 4811 /* Remove the recv-lock on the old socket */ 4812 SOCK_IO_RECV_UNLOCK(old_so); 4813 /* Now we move them over to the new socket buffer */ 4814 SCTP_INP_READ_LOCK(new_inp); 4815 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4816 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4817 m = control->data; 4818 while (m) { 4819 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4820 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4821 } 4822 sctp_sballoc(stcb, &new_so->so_rcv, m); 4823 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4824 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4825 } 4826 m = SCTP_BUF_NEXT(m); 4827 } 4828 } 4829 SCTP_INP_READ_UNLOCK(new_inp); 4830 } 4831 4832 void 4833 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4834 struct sctp_tcb *stcb, 4835 int so_locked 4836 SCTP_UNUSED 4837 ) 4838 { 4839 if ((inp != NULL) && 4840 (inp->sctp_socket != NULL) && 4841 (((inp->sctp_flags & (SCTP_PCB_FLAGS_TCPTYPE | SCTP_PCB_FLAGS_IN_TCPPOOL)) == 0) || 4842 !SCTP_IS_LISTENING(inp))) { 4843 sctp_sorwakeup(inp, inp->sctp_socket); 4844 } 4845 } 4846 4847 void 4848 sctp_add_to_readq(struct sctp_inpcb *inp, 4849 struct sctp_tcb *stcb, 4850 struct sctp_queued_to_read *control, 4851 struct sockbuf *sb, 4852 int end, 4853 int inp_read_lock_held, 4854 int so_locked) 4855 { 4856 /* 4857 * Here we must place the control on the end of the socket read 4858 * queue AND increment sb_cc so that select will work properly on 4859 * read. 4860 */ 4861 struct mbuf *m, *prev = NULL; 4862 4863 if (inp == NULL) { 4864 /* Gak, TSNH!! */ 4865 #ifdef INVARIANTS 4866 panic("Gak, inp NULL on add_to_readq"); 4867 #endif 4868 return; 4869 } 4870 if (inp_read_lock_held == 0) 4871 SCTP_INP_READ_LOCK(inp); 4872 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4873 if (!control->on_strm_q) { 4874 sctp_free_remote_addr(control->whoFrom); 4875 if (control->data) { 4876 sctp_m_freem(control->data); 4877 control->data = NULL; 4878 } 4879 sctp_free_a_readq(stcb, control); 4880 } 4881 if (inp_read_lock_held == 0) 4882 SCTP_INP_READ_UNLOCK(inp); 4883 return; 4884 } 4885 if (!(control->spec_flags & M_NOTIFICATION)) { 4886 atomic_add_int(&inp->total_recvs, 1); 4887 if (!control->do_not_ref_stcb) { 4888 atomic_add_int(&stcb->total_recvs, 1); 4889 } 4890 } 4891 m = control->data; 4892 control->held_length = 0; 4893 control->length = 0; 4894 while (m) { 4895 if (SCTP_BUF_LEN(m) == 0) { 4896 /* Skip mbufs with NO length */ 4897 if (prev == NULL) { 4898 /* First one */ 4899 control->data = sctp_m_free(m); 4900 m = control->data; 4901 } else { 4902 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4903 m = SCTP_BUF_NEXT(prev); 4904 } 4905 if (m == NULL) { 4906 control->tail_mbuf = prev; 4907 } 4908 continue; 4909 } 4910 prev = m; 4911 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4912 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4913 } 4914 sctp_sballoc(stcb, sb, m); 4915 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4916 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4917 } 4918 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4919 m = SCTP_BUF_NEXT(m); 4920 } 4921 if (prev != NULL) { 4922 control->tail_mbuf = prev; 4923 } else { 4924 /* Everything got collapsed out?? */ 4925 if (!control->on_strm_q) { 4926 sctp_free_remote_addr(control->whoFrom); 4927 sctp_free_a_readq(stcb, control); 4928 } 4929 if (inp_read_lock_held == 0) 4930 SCTP_INP_READ_UNLOCK(inp); 4931 return; 4932 } 4933 if (end) { 4934 control->end_added = 1; 4935 } 4936 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4937 control->on_read_q = 1; 4938 if (inp_read_lock_held == 0) 4939 SCTP_INP_READ_UNLOCK(inp); 4940 if (inp && inp->sctp_socket) { 4941 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4942 } 4943 } 4944 4945 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4946 *************ALTERNATE ROUTING CODE 4947 */ 4948 4949 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4950 *************ALTERNATE ROUTING CODE 4951 */ 4952 4953 struct mbuf * 4954 sctp_generate_cause(uint16_t code, char *info) 4955 { 4956 struct mbuf *m; 4957 struct sctp_gen_error_cause *cause; 4958 size_t info_len; 4959 uint16_t len; 4960 4961 if ((code == 0) || (info == NULL)) { 4962 return (NULL); 4963 } 4964 info_len = strlen(info); 4965 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4966 return (NULL); 4967 } 4968 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4969 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4970 if (m != NULL) { 4971 SCTP_BUF_LEN(m) = len; 4972 cause = mtod(m, struct sctp_gen_error_cause *); 4973 cause->code = htons(code); 4974 cause->length = htons(len); 4975 memcpy(cause->info, info, info_len); 4976 } 4977 return (m); 4978 } 4979 4980 struct mbuf * 4981 sctp_generate_no_user_data_cause(uint32_t tsn) 4982 { 4983 struct mbuf *m; 4984 struct sctp_error_no_user_data *no_user_data_cause; 4985 uint16_t len; 4986 4987 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 4988 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4989 if (m != NULL) { 4990 SCTP_BUF_LEN(m) = len; 4991 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 4992 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 4993 no_user_data_cause->cause.length = htons(len); 4994 no_user_data_cause->tsn = htonl(tsn); 4995 } 4996 return (m); 4997 } 4998 4999 #ifdef SCTP_MBCNT_LOGGING 5000 void 5001 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 5002 struct sctp_tmit_chunk *tp1, int chk_cnt) 5003 { 5004 if (tp1->data == NULL) { 5005 return; 5006 } 5007 asoc->chunks_on_out_queue -= chk_cnt; 5008 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 5009 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 5010 asoc->total_output_queue_size, 5011 tp1->book_size, 5012 0, 5013 tp1->mbcnt); 5014 } 5015 if (asoc->total_output_queue_size >= tp1->book_size) { 5016 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 5017 } else { 5018 asoc->total_output_queue_size = 0; 5019 } 5020 5021 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 5022 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 5023 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 5024 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 5025 } else { 5026 stcb->sctp_socket->so_snd.sb_cc = 0; 5027 } 5028 } 5029 } 5030 5031 #endif 5032 5033 int 5034 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5035 uint8_t sent, int so_locked) 5036 { 5037 struct sctp_stream_out *strq; 5038 struct sctp_tmit_chunk *chk = NULL, *tp2; 5039 struct sctp_stream_queue_pending *sp; 5040 uint32_t mid; 5041 uint16_t sid; 5042 uint8_t foundeom = 0; 5043 int ret_sz = 0; 5044 int notdone; 5045 int do_wakeup_routine = 0; 5046 5047 SCTP_TCB_LOCK_ASSERT(stcb); 5048 5049 sid = tp1->rec.data.sid; 5050 mid = tp1->rec.data.mid; 5051 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5052 stcb->asoc.abandoned_sent[0]++; 5053 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5054 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5055 #if defined(SCTP_DETAILED_STR_STATS) 5056 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5057 #endif 5058 } else { 5059 stcb->asoc.abandoned_unsent[0]++; 5060 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5061 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5062 #if defined(SCTP_DETAILED_STR_STATS) 5063 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5064 #endif 5065 } 5066 do { 5067 ret_sz += tp1->book_size; 5068 if (tp1->data != NULL) { 5069 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5070 sctp_flight_size_decrease(tp1); 5071 sctp_total_flight_decrease(stcb, tp1); 5072 } 5073 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5074 stcb->asoc.peers_rwnd += tp1->send_size; 5075 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5076 if (sent) { 5077 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5078 } else { 5079 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5080 } 5081 if (tp1->data) { 5082 sctp_m_freem(tp1->data); 5083 tp1->data = NULL; 5084 } 5085 do_wakeup_routine = 1; 5086 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5087 stcb->asoc.sent_queue_cnt_removeable--; 5088 } 5089 } 5090 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5091 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5092 SCTP_DATA_NOT_FRAG) { 5093 /* not frag'ed we ae done */ 5094 notdone = 0; 5095 foundeom = 1; 5096 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5097 /* end of frag, we are done */ 5098 notdone = 0; 5099 foundeom = 1; 5100 } else { 5101 /* 5102 * Its a begin or middle piece, we must mark all of 5103 * it 5104 */ 5105 notdone = 1; 5106 tp1 = TAILQ_NEXT(tp1, sctp_next); 5107 } 5108 } while (tp1 && notdone); 5109 if (foundeom == 0) { 5110 /* 5111 * The multi-part message was scattered across the send and 5112 * sent queue. 5113 */ 5114 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5115 if ((tp1->rec.data.sid != sid) || 5116 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5117 break; 5118 } 5119 /* 5120 * save to chk in case we have some on stream out 5121 * queue. If so and we have an un-transmitted one we 5122 * don't have to fudge the TSN. 5123 */ 5124 chk = tp1; 5125 ret_sz += tp1->book_size; 5126 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5127 if (sent) { 5128 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5129 } else { 5130 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5131 } 5132 if (tp1->data) { 5133 sctp_m_freem(tp1->data); 5134 tp1->data = NULL; 5135 } 5136 /* No flight involved here book the size to 0 */ 5137 tp1->book_size = 0; 5138 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5139 foundeom = 1; 5140 } 5141 do_wakeup_routine = 1; 5142 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5143 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5144 /* 5145 * on to the sent queue so we can wait for it to be 5146 * passed by. 5147 */ 5148 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5149 sctp_next); 5150 stcb->asoc.send_queue_cnt--; 5151 stcb->asoc.sent_queue_cnt++; 5152 } 5153 } 5154 if (foundeom == 0) { 5155 /* 5156 * Still no eom found. That means there is stuff left on the 5157 * stream out queue.. yuck. 5158 */ 5159 strq = &stcb->asoc.strmout[sid]; 5160 sp = TAILQ_FIRST(&strq->outqueue); 5161 if (sp != NULL) { 5162 sp->discard_rest = 1; 5163 /* 5164 * We may need to put a chunk on the queue that 5165 * holds the TSN that would have been sent with the 5166 * LAST bit. 5167 */ 5168 if (chk == NULL) { 5169 /* Yep, we have to */ 5170 sctp_alloc_a_chunk(stcb, chk); 5171 if (chk == NULL) { 5172 /* 5173 * we are hosed. All we can do is 5174 * nothing.. which will cause an 5175 * abort if the peer is paying 5176 * attention. 5177 */ 5178 goto oh_well; 5179 } 5180 memset(chk, 0, sizeof(*chk)); 5181 chk->rec.data.rcv_flags = 0; 5182 chk->sent = SCTP_FORWARD_TSN_SKIP; 5183 chk->asoc = &stcb->asoc; 5184 if (stcb->asoc.idata_supported == 0) { 5185 if (sp->sinfo_flags & SCTP_UNORDERED) { 5186 chk->rec.data.mid = 0; 5187 } else { 5188 chk->rec.data.mid = strq->next_mid_ordered; 5189 } 5190 } else { 5191 if (sp->sinfo_flags & SCTP_UNORDERED) { 5192 chk->rec.data.mid = strq->next_mid_unordered; 5193 } else { 5194 chk->rec.data.mid = strq->next_mid_ordered; 5195 } 5196 } 5197 chk->rec.data.sid = sp->sid; 5198 chk->rec.data.ppid = sp->ppid; 5199 chk->rec.data.context = sp->context; 5200 chk->flags = sp->act_flags; 5201 chk->whoTo = NULL; 5202 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5203 strq->chunks_on_queues++; 5204 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5205 stcb->asoc.sent_queue_cnt++; 5206 stcb->asoc.pr_sctp_cnt++; 5207 } 5208 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5209 if (sp->sinfo_flags & SCTP_UNORDERED) { 5210 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5211 } 5212 if (stcb->asoc.idata_supported == 0) { 5213 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5214 strq->next_mid_ordered++; 5215 } 5216 } else { 5217 if (sp->sinfo_flags & SCTP_UNORDERED) { 5218 strq->next_mid_unordered++; 5219 } else { 5220 strq->next_mid_ordered++; 5221 } 5222 } 5223 oh_well: 5224 if (sp->data) { 5225 /* 5226 * Pull any data to free up the SB and allow 5227 * sender to "add more" while we will throw 5228 * away :-) 5229 */ 5230 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5231 ret_sz += sp->length; 5232 do_wakeup_routine = 1; 5233 sp->some_taken = 1; 5234 sctp_m_freem(sp->data); 5235 sp->data = NULL; 5236 sp->tail_mbuf = NULL; 5237 sp->length = 0; 5238 } 5239 } 5240 } 5241 if (do_wakeup_routine) { 5242 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5243 } 5244 return (ret_sz); 5245 } 5246 5247 /* 5248 * checks to see if the given address, sa, is one that is currently known by 5249 * the kernel note: can't distinguish the same address on multiple interfaces 5250 * and doesn't handle multiple addresses with different zone/scope id's note: 5251 * ifa_ifwithaddr() compares the entire sockaddr struct 5252 */ 5253 struct sctp_ifa * 5254 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5255 int holds_lock) 5256 { 5257 struct sctp_laddr *laddr; 5258 5259 if (holds_lock == 0) { 5260 SCTP_INP_RLOCK(inp); 5261 } 5262 5263 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5264 if (laddr->ifa == NULL) 5265 continue; 5266 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5267 continue; 5268 #ifdef INET 5269 if (addr->sa_family == AF_INET) { 5270 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5271 laddr->ifa->address.sin.sin_addr.s_addr) { 5272 /* found him. */ 5273 break; 5274 } 5275 } 5276 #endif 5277 #ifdef INET6 5278 if (addr->sa_family == AF_INET6) { 5279 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5280 &laddr->ifa->address.sin6)) { 5281 /* found him. */ 5282 break; 5283 } 5284 } 5285 #endif 5286 } 5287 if (holds_lock == 0) { 5288 SCTP_INP_RUNLOCK(inp); 5289 } 5290 if (laddr != NULL) { 5291 return (laddr->ifa); 5292 } else { 5293 return (NULL); 5294 } 5295 } 5296 5297 uint32_t 5298 sctp_get_ifa_hash_val(struct sockaddr *addr) 5299 { 5300 switch (addr->sa_family) { 5301 #ifdef INET 5302 case AF_INET: 5303 { 5304 struct sockaddr_in *sin; 5305 5306 sin = (struct sockaddr_in *)addr; 5307 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5308 } 5309 #endif 5310 #ifdef INET6 5311 case AF_INET6: 5312 { 5313 struct sockaddr_in6 *sin6; 5314 uint32_t hash_of_addr; 5315 5316 sin6 = (struct sockaddr_in6 *)addr; 5317 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5318 sin6->sin6_addr.s6_addr32[1] + 5319 sin6->sin6_addr.s6_addr32[2] + 5320 sin6->sin6_addr.s6_addr32[3]); 5321 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5322 return (hash_of_addr); 5323 } 5324 #endif 5325 default: 5326 break; 5327 } 5328 return (0); 5329 } 5330 5331 struct sctp_ifa * 5332 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5333 { 5334 struct sctp_ifa *sctp_ifap; 5335 struct sctp_vrf *vrf; 5336 struct sctp_ifalist *hash_head; 5337 uint32_t hash_of_addr; 5338 5339 if (holds_lock == 0) { 5340 SCTP_IPI_ADDR_RLOCK(); 5341 } else { 5342 SCTP_IPI_ADDR_LOCK_ASSERT(); 5343 } 5344 5345 vrf = sctp_find_vrf(vrf_id); 5346 if (vrf == NULL) { 5347 if (holds_lock == 0) 5348 SCTP_IPI_ADDR_RUNLOCK(); 5349 return (NULL); 5350 } 5351 5352 hash_of_addr = sctp_get_ifa_hash_val(addr); 5353 5354 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5355 if (hash_head == NULL) { 5356 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5357 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5358 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5359 sctp_print_address(addr); 5360 SCTP_PRINTF("No such bucket for address\n"); 5361 if (holds_lock == 0) 5362 SCTP_IPI_ADDR_RUNLOCK(); 5363 5364 return (NULL); 5365 } 5366 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5367 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5368 continue; 5369 #ifdef INET 5370 if (addr->sa_family == AF_INET) { 5371 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5372 sctp_ifap->address.sin.sin_addr.s_addr) { 5373 /* found him. */ 5374 break; 5375 } 5376 } 5377 #endif 5378 #ifdef INET6 5379 if (addr->sa_family == AF_INET6) { 5380 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5381 &sctp_ifap->address.sin6)) { 5382 /* found him. */ 5383 break; 5384 } 5385 } 5386 #endif 5387 } 5388 if (holds_lock == 0) 5389 SCTP_IPI_ADDR_RUNLOCK(); 5390 return (sctp_ifap); 5391 } 5392 5393 static void 5394 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5395 uint32_t rwnd_req) 5396 { 5397 /* User pulled some data, do we need a rwnd update? */ 5398 struct epoch_tracker et; 5399 int r_unlocked = 0; 5400 uint32_t dif, rwnd; 5401 struct socket *so = NULL; 5402 5403 if (stcb == NULL) 5404 return; 5405 5406 atomic_add_int(&stcb->asoc.refcnt, 1); 5407 5408 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5409 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5410 /* Pre-check If we are freeing no update */ 5411 goto no_lock; 5412 } 5413 SCTP_INP_INCR_REF(stcb->sctp_ep); 5414 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5415 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5416 goto out; 5417 } 5418 so = stcb->sctp_socket; 5419 if (so == NULL) { 5420 goto out; 5421 } 5422 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5423 /* Have you have freed enough to look */ 5424 *freed_so_far = 0; 5425 /* Yep, its worth a look and the lock overhead */ 5426 5427 /* Figure out what the rwnd would be */ 5428 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5429 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5430 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5431 } else { 5432 dif = 0; 5433 } 5434 if (dif >= rwnd_req) { 5435 if (hold_rlock) { 5436 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5437 r_unlocked = 1; 5438 } 5439 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5440 /* 5441 * One last check before we allow the guy possibly 5442 * to get in. There is a race, where the guy has not 5443 * reached the gate. In that case 5444 */ 5445 goto out; 5446 } 5447 SCTP_TCB_LOCK(stcb); 5448 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5449 /* No reports here */ 5450 SCTP_TCB_UNLOCK(stcb); 5451 goto out; 5452 } 5453 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5454 NET_EPOCH_ENTER(et); 5455 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5456 5457 sctp_chunk_output(stcb->sctp_ep, stcb, 5458 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5459 /* make sure no timer is running */ 5460 NET_EPOCH_EXIT(et); 5461 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5462 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5463 SCTP_TCB_UNLOCK(stcb); 5464 } else { 5465 /* Update how much we have pending */ 5466 stcb->freed_by_sorcv_sincelast = dif; 5467 } 5468 out: 5469 if (so && r_unlocked && hold_rlock) { 5470 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5471 } 5472 5473 SCTP_INP_DECR_REF(stcb->sctp_ep); 5474 no_lock: 5475 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5476 return; 5477 } 5478 5479 int 5480 sctp_sorecvmsg(struct socket *so, 5481 struct uio *uio, 5482 struct mbuf **mp, 5483 struct sockaddr *from, 5484 int fromlen, 5485 int *msg_flags, 5486 struct sctp_sndrcvinfo *sinfo, 5487 int filling_sinfo) 5488 { 5489 /* 5490 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5491 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5492 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 5493 * On the way out we may send out any combination of: 5494 * MSG_NOTIFICATION MSG_EOR 5495 * 5496 */ 5497 struct sctp_inpcb *inp = NULL; 5498 ssize_t my_len = 0; 5499 ssize_t cp_len = 0; 5500 int error = 0; 5501 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5502 struct mbuf *m = NULL; 5503 struct sctp_tcb *stcb = NULL; 5504 int wakeup_read_socket = 0; 5505 int freecnt_applied = 0; 5506 int out_flags = 0, in_flags = 0; 5507 int block_allowed = 1; 5508 uint32_t freed_so_far = 0; 5509 ssize_t copied_so_far = 0; 5510 int in_eeor_mode = 0; 5511 int no_rcv_needed = 0; 5512 uint32_t rwnd_req = 0; 5513 int hold_sblock = 0; 5514 int hold_rlock = 0; 5515 ssize_t slen = 0; 5516 uint32_t held_length = 0; 5517 int sockbuf_lock = 0; 5518 5519 if (uio == NULL) { 5520 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5521 return (EINVAL); 5522 } 5523 5524 if (msg_flags) { 5525 in_flags = *msg_flags; 5526 if (in_flags & MSG_PEEK) 5527 SCTP_STAT_INCR(sctps_read_peeks); 5528 } else { 5529 in_flags = 0; 5530 } 5531 slen = uio->uio_resid; 5532 5533 /* Pull in and set up our int flags */ 5534 if (in_flags & MSG_OOB) { 5535 /* Out of band's NOT supported */ 5536 return (EOPNOTSUPP); 5537 } 5538 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5539 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5540 return (EINVAL); 5541 } 5542 if ((in_flags & (MSG_DONTWAIT 5543 | MSG_NBIO 5544 )) || 5545 SCTP_SO_IS_NBIO(so)) { 5546 block_allowed = 0; 5547 } 5548 /* setup the endpoint */ 5549 inp = (struct sctp_inpcb *)so->so_pcb; 5550 if (inp == NULL) { 5551 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5552 return (EFAULT); 5553 } 5554 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5555 /* Must be at least a MTU's worth */ 5556 if (rwnd_req < SCTP_MIN_RWND) 5557 rwnd_req = SCTP_MIN_RWND; 5558 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5559 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5560 sctp_misc_ints(SCTP_SORECV_ENTER, 5561 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5562 } 5563 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5564 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5565 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5566 } 5567 5568 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(in_flags)); 5569 if (error) { 5570 goto release_unlocked; 5571 } 5572 sockbuf_lock = 1; 5573 restart: 5574 5575 restart_nosblocks: 5576 if (hold_sblock == 0) { 5577 SOCKBUF_LOCK(&so->so_rcv); 5578 hold_sblock = 1; 5579 } 5580 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5581 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5582 goto out; 5583 } 5584 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5585 if (so->so_error) { 5586 error = so->so_error; 5587 if ((in_flags & MSG_PEEK) == 0) 5588 so->so_error = 0; 5589 goto out; 5590 } else { 5591 if (so->so_rcv.sb_cc == 0) { 5592 /* indicate EOF */ 5593 error = 0; 5594 goto out; 5595 } 5596 } 5597 } 5598 if (so->so_rcv.sb_cc <= held_length) { 5599 if (so->so_error) { 5600 error = so->so_error; 5601 if ((in_flags & MSG_PEEK) == 0) { 5602 so->so_error = 0; 5603 } 5604 goto out; 5605 } 5606 if ((so->so_rcv.sb_cc == 0) && 5607 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5608 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5609 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5610 /* 5611 * For active open side clear flags for 5612 * re-use passive open is blocked by 5613 * connect. 5614 */ 5615 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5616 /* 5617 * You were aborted, passive side 5618 * always hits here 5619 */ 5620 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5621 error = ECONNRESET; 5622 } 5623 so->so_state &= ~(SS_ISCONNECTING | 5624 SS_ISDISCONNECTING | 5625 SS_ISCONFIRMING | 5626 SS_ISCONNECTED); 5627 if (error == 0) { 5628 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5629 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5630 error = ENOTCONN; 5631 } 5632 } 5633 goto out; 5634 } 5635 } 5636 if (block_allowed) { 5637 error = sbwait(&so->so_rcv); 5638 if (error) { 5639 goto out; 5640 } 5641 held_length = 0; 5642 goto restart_nosblocks; 5643 } else { 5644 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5645 error = EWOULDBLOCK; 5646 goto out; 5647 } 5648 } 5649 if (hold_sblock == 1) { 5650 SOCKBUF_UNLOCK(&so->so_rcv); 5651 hold_sblock = 0; 5652 } 5653 /* we possibly have data we can read */ 5654 /* sa_ignore FREED_MEMORY */ 5655 control = TAILQ_FIRST(&inp->read_queue); 5656 if (control == NULL) { 5657 /* 5658 * This could be happening since the appender did the 5659 * increment but as not yet did the tailq insert onto the 5660 * read_queue 5661 */ 5662 if (hold_rlock == 0) { 5663 SCTP_INP_READ_LOCK(inp); 5664 } 5665 control = TAILQ_FIRST(&inp->read_queue); 5666 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5667 #ifdef INVARIANTS 5668 panic("Huh, its non zero and nothing on control?"); 5669 #endif 5670 so->so_rcv.sb_cc = 0; 5671 } 5672 SCTP_INP_READ_UNLOCK(inp); 5673 hold_rlock = 0; 5674 goto restart; 5675 } 5676 5677 if ((control->length == 0) && 5678 (control->do_not_ref_stcb)) { 5679 /* 5680 * Clean up code for freeing assoc that left behind a 5681 * pdapi.. maybe a peer in EEOR that just closed after 5682 * sending and never indicated a EOR. 5683 */ 5684 if (hold_rlock == 0) { 5685 hold_rlock = 1; 5686 SCTP_INP_READ_LOCK(inp); 5687 } 5688 control->held_length = 0; 5689 if (control->data) { 5690 /* Hmm there is data here .. fix */ 5691 struct mbuf *m_tmp; 5692 int cnt = 0; 5693 5694 m_tmp = control->data; 5695 while (m_tmp) { 5696 cnt += SCTP_BUF_LEN(m_tmp); 5697 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5698 control->tail_mbuf = m_tmp; 5699 control->end_added = 1; 5700 } 5701 m_tmp = SCTP_BUF_NEXT(m_tmp); 5702 } 5703 control->length = cnt; 5704 } else { 5705 /* remove it */ 5706 TAILQ_REMOVE(&inp->read_queue, control, next); 5707 /* Add back any hidden data */ 5708 sctp_free_remote_addr(control->whoFrom); 5709 sctp_free_a_readq(stcb, control); 5710 } 5711 if (hold_rlock) { 5712 hold_rlock = 0; 5713 SCTP_INP_READ_UNLOCK(inp); 5714 } 5715 goto restart; 5716 } 5717 if ((control->length == 0) && 5718 (control->end_added == 1)) { 5719 /* 5720 * Do we also need to check for (control->pdapi_aborted == 5721 * 1)? 5722 */ 5723 if (hold_rlock == 0) { 5724 hold_rlock = 1; 5725 SCTP_INP_READ_LOCK(inp); 5726 } 5727 TAILQ_REMOVE(&inp->read_queue, control, next); 5728 if (control->data) { 5729 #ifdef INVARIANTS 5730 panic("control->data not null but control->length == 0"); 5731 #else 5732 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n"); 5733 sctp_m_freem(control->data); 5734 control->data = NULL; 5735 #endif 5736 } 5737 if (control->aux_data) { 5738 sctp_m_free(control->aux_data); 5739 control->aux_data = NULL; 5740 } 5741 #ifdef INVARIANTS 5742 if (control->on_strm_q) { 5743 panic("About to free ctl:%p so:%p and its in %d", 5744 control, so, control->on_strm_q); 5745 } 5746 #endif 5747 sctp_free_remote_addr(control->whoFrom); 5748 sctp_free_a_readq(stcb, control); 5749 if (hold_rlock) { 5750 hold_rlock = 0; 5751 SCTP_INP_READ_UNLOCK(inp); 5752 } 5753 goto restart; 5754 } 5755 if (control->length == 0) { 5756 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5757 (filling_sinfo)) { 5758 /* find a more suitable one then this */ 5759 ctl = TAILQ_NEXT(control, next); 5760 while (ctl) { 5761 if ((ctl->stcb != control->stcb) && (ctl->length) && 5762 (ctl->some_taken || 5763 (ctl->spec_flags & M_NOTIFICATION) || 5764 ((ctl->do_not_ref_stcb == 0) && 5765 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5766 ) { 5767 /*- 5768 * If we have a different TCB next, and there is data 5769 * present. If we have already taken some (pdapi), OR we can 5770 * ref the tcb and no delivery as started on this stream, we 5771 * take it. Note we allow a notification on a different 5772 * assoc to be delivered.. 5773 */ 5774 control = ctl; 5775 goto found_one; 5776 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5777 (ctl->length) && 5778 ((ctl->some_taken) || 5779 ((ctl->do_not_ref_stcb == 0) && 5780 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5781 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5782 /*- 5783 * If we have the same tcb, and there is data present, and we 5784 * have the strm interleave feature present. Then if we have 5785 * taken some (pdapi) or we can refer to tht tcb AND we have 5786 * not started a delivery for this stream, we can take it. 5787 * Note we do NOT allow a notification on the same assoc to 5788 * be delivered. 5789 */ 5790 control = ctl; 5791 goto found_one; 5792 } 5793 ctl = TAILQ_NEXT(ctl, next); 5794 } 5795 } 5796 /* 5797 * if we reach here, not suitable replacement is available 5798 * <or> fragment interleave is NOT on. So stuff the sb_cc 5799 * into the our held count, and its time to sleep again. 5800 */ 5801 held_length = so->so_rcv.sb_cc; 5802 control->held_length = so->so_rcv.sb_cc; 5803 goto restart; 5804 } 5805 /* Clear the held length since there is something to read */ 5806 control->held_length = 0; 5807 found_one: 5808 /* 5809 * If we reach here, control has a some data for us to read off. 5810 * Note that stcb COULD be NULL. 5811 */ 5812 if (hold_rlock == 0) { 5813 hold_rlock = 1; 5814 SCTP_INP_READ_LOCK(inp); 5815 } 5816 control->some_taken++; 5817 stcb = control->stcb; 5818 if (stcb) { 5819 if ((control->do_not_ref_stcb == 0) && 5820 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5821 if (freecnt_applied == 0) 5822 stcb = NULL; 5823 } else if (control->do_not_ref_stcb == 0) { 5824 /* you can't free it on me please */ 5825 /* 5826 * The lock on the socket buffer protects us so the 5827 * free code will stop. But since we used the 5828 * socketbuf lock and the sender uses the tcb_lock 5829 * to increment, we need to use the atomic add to 5830 * the refcnt 5831 */ 5832 if (freecnt_applied) { 5833 #ifdef INVARIANTS 5834 panic("refcnt already incremented"); 5835 #else 5836 SCTP_PRINTF("refcnt already incremented?\n"); 5837 #endif 5838 } else { 5839 atomic_add_int(&stcb->asoc.refcnt, 1); 5840 freecnt_applied = 1; 5841 } 5842 /* 5843 * Setup to remember how much we have not yet told 5844 * the peer our rwnd has opened up. Note we grab the 5845 * value from the tcb from last time. Note too that 5846 * sack sending clears this when a sack is sent, 5847 * which is fine. Once we hit the rwnd_req, we then 5848 * will go to the sctp_user_rcvd() that will not 5849 * lock until it KNOWs it MUST send a WUP-SACK. 5850 */ 5851 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5852 stcb->freed_by_sorcv_sincelast = 0; 5853 } 5854 } 5855 if (stcb && 5856 ((control->spec_flags & M_NOTIFICATION) == 0) && 5857 control->do_not_ref_stcb == 0) { 5858 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5859 } 5860 5861 /* First lets get off the sinfo and sockaddr info */ 5862 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5863 sinfo->sinfo_stream = control->sinfo_stream; 5864 sinfo->sinfo_ssn = (uint16_t)control->mid; 5865 sinfo->sinfo_flags = control->sinfo_flags; 5866 sinfo->sinfo_ppid = control->sinfo_ppid; 5867 sinfo->sinfo_context = control->sinfo_context; 5868 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5869 sinfo->sinfo_tsn = control->sinfo_tsn; 5870 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5871 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5872 nxt = TAILQ_NEXT(control, next); 5873 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5874 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5875 struct sctp_extrcvinfo *s_extra; 5876 5877 s_extra = (struct sctp_extrcvinfo *)sinfo; 5878 if ((nxt) && 5879 (nxt->length)) { 5880 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5881 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5882 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5883 } 5884 if (nxt->spec_flags & M_NOTIFICATION) { 5885 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5886 } 5887 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5888 s_extra->serinfo_next_length = nxt->length; 5889 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5890 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5891 if (nxt->tail_mbuf != NULL) { 5892 if (nxt->end_added) { 5893 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5894 } 5895 } 5896 } else { 5897 /* 5898 * we explicitly 0 this, since the memcpy 5899 * got some other things beyond the older 5900 * sinfo_ that is on the control's structure 5901 * :-D 5902 */ 5903 nxt = NULL; 5904 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5905 s_extra->serinfo_next_aid = 0; 5906 s_extra->serinfo_next_length = 0; 5907 s_extra->serinfo_next_ppid = 0; 5908 s_extra->serinfo_next_stream = 0; 5909 } 5910 } 5911 /* 5912 * update off the real current cum-ack, if we have an stcb. 5913 */ 5914 if ((control->do_not_ref_stcb == 0) && stcb) 5915 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5916 /* 5917 * mask off the high bits, we keep the actual chunk bits in 5918 * there. 5919 */ 5920 sinfo->sinfo_flags &= 0x00ff; 5921 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5922 sinfo->sinfo_flags |= SCTP_UNORDERED; 5923 } 5924 } 5925 #ifdef SCTP_ASOCLOG_OF_TSNS 5926 { 5927 int index, newindex; 5928 struct sctp_pcbtsn_rlog *entry; 5929 5930 do { 5931 index = inp->readlog_index; 5932 newindex = index + 1; 5933 if (newindex >= SCTP_READ_LOG_SIZE) { 5934 newindex = 0; 5935 } 5936 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5937 entry = &inp->readlog[index]; 5938 entry->vtag = control->sinfo_assoc_id; 5939 entry->strm = control->sinfo_stream; 5940 entry->seq = (uint16_t)control->mid; 5941 entry->sz = control->length; 5942 entry->flgs = control->sinfo_flags; 5943 } 5944 #endif 5945 if ((fromlen > 0) && (from != NULL)) { 5946 union sctp_sockstore store; 5947 size_t len; 5948 5949 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5950 #ifdef INET6 5951 case AF_INET6: 5952 len = sizeof(struct sockaddr_in6); 5953 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5954 store.sin6.sin6_port = control->port_from; 5955 break; 5956 #endif 5957 #ifdef INET 5958 case AF_INET: 5959 #ifdef INET6 5960 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5961 len = sizeof(struct sockaddr_in6); 5962 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5963 &store.sin6); 5964 store.sin6.sin6_port = control->port_from; 5965 } else { 5966 len = sizeof(struct sockaddr_in); 5967 store.sin = control->whoFrom->ro._l_addr.sin; 5968 store.sin.sin_port = control->port_from; 5969 } 5970 #else 5971 len = sizeof(struct sockaddr_in); 5972 store.sin = control->whoFrom->ro._l_addr.sin; 5973 store.sin.sin_port = control->port_from; 5974 #endif 5975 break; 5976 #endif 5977 default: 5978 len = 0; 5979 break; 5980 } 5981 memcpy(from, &store, min((size_t)fromlen, len)); 5982 #ifdef INET6 5983 { 5984 struct sockaddr_in6 lsa6, *from6; 5985 5986 from6 = (struct sockaddr_in6 *)from; 5987 sctp_recover_scope_mac(from6, (&lsa6)); 5988 } 5989 #endif 5990 } 5991 if (hold_rlock) { 5992 SCTP_INP_READ_UNLOCK(inp); 5993 hold_rlock = 0; 5994 } 5995 if (hold_sblock) { 5996 SOCKBUF_UNLOCK(&so->so_rcv); 5997 hold_sblock = 0; 5998 } 5999 /* now copy out what data we can */ 6000 if (mp == NULL) { 6001 /* copy out each mbuf in the chain up to length */ 6002 get_more_data: 6003 m = control->data; 6004 while (m) { 6005 /* Move out all we can */ 6006 cp_len = uio->uio_resid; 6007 my_len = SCTP_BUF_LEN(m); 6008 if (cp_len > my_len) { 6009 /* not enough in this buf */ 6010 cp_len = my_len; 6011 } 6012 if (hold_rlock) { 6013 SCTP_INP_READ_UNLOCK(inp); 6014 hold_rlock = 0; 6015 } 6016 if (cp_len > 0) 6017 error = uiomove(mtod(m, char *), (int)cp_len, uio); 6018 /* re-read */ 6019 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 6020 goto release; 6021 } 6022 6023 if ((control->do_not_ref_stcb == 0) && stcb && 6024 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 6025 no_rcv_needed = 1; 6026 } 6027 if (error) { 6028 /* error we are out of here */ 6029 goto release; 6030 } 6031 SCTP_INP_READ_LOCK(inp); 6032 hold_rlock = 1; 6033 if (cp_len == SCTP_BUF_LEN(m)) { 6034 if ((SCTP_BUF_NEXT(m) == NULL) && 6035 (control->end_added)) { 6036 out_flags |= MSG_EOR; 6037 if ((control->do_not_ref_stcb == 0) && 6038 (control->stcb != NULL) && 6039 ((control->spec_flags & M_NOTIFICATION) == 0)) 6040 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6041 } 6042 if (control->spec_flags & M_NOTIFICATION) { 6043 out_flags |= MSG_NOTIFICATION; 6044 } 6045 /* we ate up the mbuf */ 6046 if (in_flags & MSG_PEEK) { 6047 /* just looking */ 6048 m = SCTP_BUF_NEXT(m); 6049 copied_so_far += cp_len; 6050 } else { 6051 /* dispose of the mbuf */ 6052 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6053 sctp_sblog(&so->so_rcv, 6054 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6055 } 6056 sctp_sbfree(control, stcb, &so->so_rcv, m); 6057 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6058 sctp_sblog(&so->so_rcv, 6059 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6060 } 6061 copied_so_far += cp_len; 6062 freed_so_far += (uint32_t)cp_len; 6063 freed_so_far += MSIZE; 6064 atomic_subtract_int(&control->length, (int)cp_len); 6065 control->data = sctp_m_free(m); 6066 m = control->data; 6067 /* 6068 * been through it all, must hold sb 6069 * lock ok to null tail 6070 */ 6071 if (control->data == NULL) { 6072 #ifdef INVARIANTS 6073 if ((control->end_added == 0) || 6074 (TAILQ_NEXT(control, next) == NULL)) { 6075 /* 6076 * If the end is not 6077 * added, OR the 6078 * next is NOT null 6079 * we MUST have the 6080 * lock. 6081 */ 6082 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6083 panic("Hmm we don't own the lock?"); 6084 } 6085 } 6086 #endif 6087 control->tail_mbuf = NULL; 6088 #ifdef INVARIANTS 6089 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6090 panic("end_added, nothing left and no MSG_EOR"); 6091 } 6092 #endif 6093 } 6094 } 6095 } else { 6096 /* Do we need to trim the mbuf? */ 6097 if (control->spec_flags & M_NOTIFICATION) { 6098 out_flags |= MSG_NOTIFICATION; 6099 } 6100 if ((in_flags & MSG_PEEK) == 0) { 6101 SCTP_BUF_RESV_UF(m, cp_len); 6102 SCTP_BUF_LEN(m) -= (int)cp_len; 6103 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6104 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6105 } 6106 atomic_subtract_int(&so->so_rcv.sb_cc, (int)cp_len); 6107 if ((control->do_not_ref_stcb == 0) && 6108 stcb) { 6109 atomic_subtract_int(&stcb->asoc.sb_cc, (int)cp_len); 6110 } 6111 copied_so_far += cp_len; 6112 freed_so_far += (uint32_t)cp_len; 6113 freed_so_far += MSIZE; 6114 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6115 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6116 SCTP_LOG_SBRESULT, 0); 6117 } 6118 atomic_subtract_int(&control->length, (int)cp_len); 6119 } else { 6120 copied_so_far += cp_len; 6121 } 6122 } 6123 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6124 break; 6125 } 6126 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6127 (control->do_not_ref_stcb == 0) && 6128 (freed_so_far >= rwnd_req)) { 6129 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6130 } 6131 } /* end while(m) */ 6132 /* 6133 * At this point we have looked at it all and we either have 6134 * a MSG_EOR/or read all the user wants... <OR> 6135 * control->length == 0. 6136 */ 6137 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6138 /* we are done with this control */ 6139 if (control->length == 0) { 6140 if (control->data) { 6141 #ifdef INVARIANTS 6142 panic("control->data not null at read eor?"); 6143 #else 6144 SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n"); 6145 sctp_m_freem(control->data); 6146 control->data = NULL; 6147 #endif 6148 } 6149 done_with_control: 6150 if (hold_rlock == 0) { 6151 SCTP_INP_READ_LOCK(inp); 6152 hold_rlock = 1; 6153 } 6154 TAILQ_REMOVE(&inp->read_queue, control, next); 6155 /* Add back any hidden data */ 6156 if (control->held_length) { 6157 held_length = 0; 6158 control->held_length = 0; 6159 wakeup_read_socket = 1; 6160 } 6161 if (control->aux_data) { 6162 sctp_m_free(control->aux_data); 6163 control->aux_data = NULL; 6164 } 6165 no_rcv_needed = control->do_not_ref_stcb; 6166 sctp_free_remote_addr(control->whoFrom); 6167 control->data = NULL; 6168 #ifdef INVARIANTS 6169 if (control->on_strm_q) { 6170 panic("About to free ctl:%p so:%p and its in %d", 6171 control, so, control->on_strm_q); 6172 } 6173 #endif 6174 sctp_free_a_readq(stcb, control); 6175 control = NULL; 6176 if ((freed_so_far >= rwnd_req) && 6177 (no_rcv_needed == 0)) 6178 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6179 6180 } else { 6181 /* 6182 * The user did not read all of this 6183 * message, turn off the returned MSG_EOR 6184 * since we are leaving more behind on the 6185 * control to read. 6186 */ 6187 #ifdef INVARIANTS 6188 if (control->end_added && 6189 (control->data == NULL) && 6190 (control->tail_mbuf == NULL)) { 6191 panic("Gak, control->length is corrupt?"); 6192 } 6193 #endif 6194 no_rcv_needed = control->do_not_ref_stcb; 6195 out_flags &= ~MSG_EOR; 6196 } 6197 } 6198 if (out_flags & MSG_EOR) { 6199 goto release; 6200 } 6201 if ((uio->uio_resid == 0) || 6202 ((in_eeor_mode) && 6203 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6204 goto release; 6205 } 6206 /* 6207 * If I hit here the receiver wants more and this message is 6208 * NOT done (pd-api). So two questions. Can we block? if not 6209 * we are done. Did the user NOT set MSG_WAITALL? 6210 */ 6211 if (block_allowed == 0) { 6212 goto release; 6213 } 6214 /* 6215 * We need to wait for more data a few things: - We don't 6216 * release the I/O lock so we don't get someone else 6217 * reading. - We must be sure to account for the case where 6218 * what is added is NOT to our control when we wakeup. 6219 */ 6220 6221 /* 6222 * Do we need to tell the transport a rwnd update might be 6223 * needed before we go to sleep? 6224 */ 6225 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6226 ((freed_so_far >= rwnd_req) && 6227 (control->do_not_ref_stcb == 0) && 6228 (no_rcv_needed == 0))) { 6229 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6230 } 6231 wait_some_more: 6232 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6233 goto release; 6234 } 6235 6236 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6237 goto release; 6238 6239 if (hold_rlock == 1) { 6240 SCTP_INP_READ_UNLOCK(inp); 6241 hold_rlock = 0; 6242 } 6243 if (hold_sblock == 0) { 6244 SOCKBUF_LOCK(&so->so_rcv); 6245 hold_sblock = 1; 6246 } 6247 if ((copied_so_far) && (control->length == 0) && 6248 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6249 goto release; 6250 } 6251 if (so->so_rcv.sb_cc <= control->held_length) { 6252 error = sbwait(&so->so_rcv); 6253 if (error) { 6254 goto release; 6255 } 6256 control->held_length = 0; 6257 } 6258 if (hold_sblock) { 6259 SOCKBUF_UNLOCK(&so->so_rcv); 6260 hold_sblock = 0; 6261 } 6262 if (control->length == 0) { 6263 /* still nothing here */ 6264 if (control->end_added == 1) { 6265 /* he aborted, or is done i.e.did a shutdown */ 6266 out_flags |= MSG_EOR; 6267 if (control->pdapi_aborted) { 6268 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6269 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6270 6271 out_flags |= MSG_TRUNC; 6272 } else { 6273 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6274 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6275 } 6276 goto done_with_control; 6277 } 6278 if (so->so_rcv.sb_cc > held_length) { 6279 control->held_length = so->so_rcv.sb_cc; 6280 held_length = 0; 6281 } 6282 goto wait_some_more; 6283 } else if (control->data == NULL) { 6284 /* 6285 * we must re-sync since data is probably being 6286 * added 6287 */ 6288 SCTP_INP_READ_LOCK(inp); 6289 if ((control->length > 0) && (control->data == NULL)) { 6290 /* 6291 * big trouble.. we have the lock and its 6292 * corrupt? 6293 */ 6294 #ifdef INVARIANTS 6295 panic("Impossible data==NULL length !=0"); 6296 #endif 6297 out_flags |= MSG_EOR; 6298 out_flags |= MSG_TRUNC; 6299 control->length = 0; 6300 SCTP_INP_READ_UNLOCK(inp); 6301 goto done_with_control; 6302 } 6303 SCTP_INP_READ_UNLOCK(inp); 6304 /* We will fall around to get more data */ 6305 } 6306 goto get_more_data; 6307 } else { 6308 /*- 6309 * Give caller back the mbuf chain, 6310 * store in uio_resid the length 6311 */ 6312 wakeup_read_socket = 0; 6313 if ((control->end_added == 0) || 6314 (TAILQ_NEXT(control, next) == NULL)) { 6315 /* Need to get rlock */ 6316 if (hold_rlock == 0) { 6317 SCTP_INP_READ_LOCK(inp); 6318 hold_rlock = 1; 6319 } 6320 } 6321 if (control->end_added) { 6322 out_flags |= MSG_EOR; 6323 if ((control->do_not_ref_stcb == 0) && 6324 (control->stcb != NULL) && 6325 ((control->spec_flags & M_NOTIFICATION) == 0)) 6326 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6327 } 6328 if (control->spec_flags & M_NOTIFICATION) { 6329 out_flags |= MSG_NOTIFICATION; 6330 } 6331 uio->uio_resid = control->length; 6332 *mp = control->data; 6333 m = control->data; 6334 while (m) { 6335 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6336 sctp_sblog(&so->so_rcv, 6337 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6338 } 6339 sctp_sbfree(control, stcb, &so->so_rcv, m); 6340 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6341 freed_so_far += MSIZE; 6342 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6343 sctp_sblog(&so->so_rcv, 6344 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6345 } 6346 m = SCTP_BUF_NEXT(m); 6347 } 6348 control->data = control->tail_mbuf = NULL; 6349 control->length = 0; 6350 if (out_flags & MSG_EOR) { 6351 /* Done with this control */ 6352 goto done_with_control; 6353 } 6354 } 6355 release: 6356 if (hold_rlock == 1) { 6357 SCTP_INP_READ_UNLOCK(inp); 6358 hold_rlock = 0; 6359 } 6360 if (hold_sblock == 1) { 6361 SOCKBUF_UNLOCK(&so->so_rcv); 6362 hold_sblock = 0; 6363 } 6364 6365 SOCK_IO_RECV_UNLOCK(so); 6366 sockbuf_lock = 0; 6367 6368 release_unlocked: 6369 if (hold_sblock) { 6370 SOCKBUF_UNLOCK(&so->so_rcv); 6371 hold_sblock = 0; 6372 } 6373 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6374 if ((freed_so_far >= rwnd_req) && 6375 (control && (control->do_not_ref_stcb == 0)) && 6376 (no_rcv_needed == 0)) 6377 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6378 } 6379 out: 6380 if (msg_flags) { 6381 *msg_flags = out_flags; 6382 } 6383 if (((out_flags & MSG_EOR) == 0) && 6384 ((in_flags & MSG_PEEK) == 0) && 6385 (sinfo) && 6386 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6387 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6388 struct sctp_extrcvinfo *s_extra; 6389 6390 s_extra = (struct sctp_extrcvinfo *)sinfo; 6391 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6392 } 6393 if (hold_rlock == 1) { 6394 SCTP_INP_READ_UNLOCK(inp); 6395 } 6396 if (hold_sblock) { 6397 SOCKBUF_UNLOCK(&so->so_rcv); 6398 } 6399 if (sockbuf_lock) { 6400 SOCK_IO_RECV_UNLOCK(so); 6401 } 6402 6403 if (freecnt_applied) { 6404 /* 6405 * The lock on the socket buffer protects us so the free 6406 * code will stop. But since we used the socketbuf lock and 6407 * the sender uses the tcb_lock to increment, we need to use 6408 * the atomic add to the refcnt. 6409 */ 6410 if (stcb == NULL) { 6411 #ifdef INVARIANTS 6412 panic("stcb for refcnt has gone NULL?"); 6413 goto stage_left; 6414 #else 6415 goto stage_left; 6416 #endif 6417 } 6418 /* Save the value back for next time */ 6419 stcb->freed_by_sorcv_sincelast = freed_so_far; 6420 atomic_subtract_int(&stcb->asoc.refcnt, 1); 6421 } 6422 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6423 if (stcb) { 6424 sctp_misc_ints(SCTP_SORECV_DONE, 6425 freed_so_far, 6426 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6427 stcb->asoc.my_rwnd, 6428 so->so_rcv.sb_cc); 6429 } else { 6430 sctp_misc_ints(SCTP_SORECV_DONE, 6431 freed_so_far, 6432 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6433 0, 6434 so->so_rcv.sb_cc); 6435 } 6436 } 6437 stage_left: 6438 if (wakeup_read_socket) { 6439 sctp_sorwakeup(inp, so); 6440 } 6441 return (error); 6442 } 6443 6444 #ifdef SCTP_MBUF_LOGGING 6445 struct mbuf * 6446 sctp_m_free(struct mbuf *m) 6447 { 6448 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6449 sctp_log_mb(m, SCTP_MBUF_IFREE); 6450 } 6451 return (m_free(m)); 6452 } 6453 6454 void 6455 sctp_m_freem(struct mbuf *mb) 6456 { 6457 while (mb != NULL) 6458 mb = sctp_m_free(mb); 6459 } 6460 6461 #endif 6462 6463 int 6464 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6465 { 6466 /* 6467 * Given a local address. For all associations that holds the 6468 * address, request a peer-set-primary. 6469 */ 6470 struct sctp_ifa *ifa; 6471 struct sctp_laddr *wi; 6472 6473 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6474 if (ifa == NULL) { 6475 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6476 return (EADDRNOTAVAIL); 6477 } 6478 /* 6479 * Now that we have the ifa we must awaken the iterator with this 6480 * message. 6481 */ 6482 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6483 if (wi == NULL) { 6484 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6485 return (ENOMEM); 6486 } 6487 /* Now incr the count and int wi structure */ 6488 SCTP_INCR_LADDR_COUNT(); 6489 memset(wi, 0, sizeof(*wi)); 6490 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6491 wi->ifa = ifa; 6492 wi->action = SCTP_SET_PRIM_ADDR; 6493 atomic_add_int(&ifa->refcount, 1); 6494 6495 /* Now add it to the work queue */ 6496 SCTP_WQ_ADDR_LOCK(); 6497 /* 6498 * Should this really be a tailq? As it is we will process the 6499 * newest first :-0 6500 */ 6501 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6502 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6503 (struct sctp_inpcb *)NULL, 6504 (struct sctp_tcb *)NULL, 6505 (struct sctp_nets *)NULL); 6506 SCTP_WQ_ADDR_UNLOCK(); 6507 return (0); 6508 } 6509 6510 int 6511 sctp_soreceive(struct socket *so, 6512 struct sockaddr **psa, 6513 struct uio *uio, 6514 struct mbuf **mp0, 6515 struct mbuf **controlp, 6516 int *flagsp) 6517 { 6518 int error, fromlen; 6519 uint8_t sockbuf[256]; 6520 struct sockaddr *from; 6521 struct sctp_extrcvinfo sinfo; 6522 int filling_sinfo = 1; 6523 int flags; 6524 struct sctp_inpcb *inp; 6525 6526 inp = (struct sctp_inpcb *)so->so_pcb; 6527 /* pickup the assoc we are reading from */ 6528 if (inp == NULL) { 6529 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6530 return (EINVAL); 6531 } 6532 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6533 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6534 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6535 (controlp == NULL)) { 6536 /* user does not want the sndrcv ctl */ 6537 filling_sinfo = 0; 6538 } 6539 if (psa) { 6540 from = (struct sockaddr *)sockbuf; 6541 fromlen = sizeof(sockbuf); 6542 from->sa_len = 0; 6543 } else { 6544 from = NULL; 6545 fromlen = 0; 6546 } 6547 6548 if (filling_sinfo) { 6549 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6550 } 6551 if (flagsp != NULL) { 6552 flags = *flagsp; 6553 } else { 6554 flags = 0; 6555 } 6556 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6557 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6558 if (flagsp != NULL) { 6559 *flagsp = flags; 6560 } 6561 if (controlp != NULL) { 6562 /* copy back the sinfo in a CMSG format */ 6563 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6564 *controlp = sctp_build_ctl_nchunk(inp, 6565 (struct sctp_sndrcvinfo *)&sinfo); 6566 } else { 6567 *controlp = NULL; 6568 } 6569 } 6570 if (psa) { 6571 /* copy back the address info */ 6572 if (from && from->sa_len) { 6573 *psa = sodupsockaddr(from, M_NOWAIT); 6574 } else { 6575 *psa = NULL; 6576 } 6577 } 6578 return (error); 6579 } 6580 6581 int 6582 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6583 int totaddr, int *error) 6584 { 6585 int added = 0; 6586 int i; 6587 struct sctp_inpcb *inp; 6588 struct sockaddr *sa; 6589 size_t incr = 0; 6590 #ifdef INET 6591 struct sockaddr_in *sin; 6592 #endif 6593 #ifdef INET6 6594 struct sockaddr_in6 *sin6; 6595 #endif 6596 6597 sa = addr; 6598 inp = stcb->sctp_ep; 6599 *error = 0; 6600 for (i = 0; i < totaddr; i++) { 6601 switch (sa->sa_family) { 6602 #ifdef INET 6603 case AF_INET: 6604 incr = sizeof(struct sockaddr_in); 6605 sin = (struct sockaddr_in *)sa; 6606 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6607 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6608 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6609 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6610 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6611 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6612 *error = EINVAL; 6613 goto out_now; 6614 } 6615 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6616 SCTP_DONOT_SETSCOPE, 6617 SCTP_ADDR_IS_CONFIRMED)) { 6618 /* assoc gone no un-lock */ 6619 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6620 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6621 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6622 *error = ENOBUFS; 6623 goto out_now; 6624 } 6625 added++; 6626 break; 6627 #endif 6628 #ifdef INET6 6629 case AF_INET6: 6630 incr = sizeof(struct sockaddr_in6); 6631 sin6 = (struct sockaddr_in6 *)sa; 6632 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6633 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6634 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6635 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6636 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6637 *error = EINVAL; 6638 goto out_now; 6639 } 6640 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6641 SCTP_DONOT_SETSCOPE, 6642 SCTP_ADDR_IS_CONFIRMED)) { 6643 /* assoc gone no un-lock */ 6644 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6645 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6646 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6647 *error = ENOBUFS; 6648 goto out_now; 6649 } 6650 added++; 6651 break; 6652 #endif 6653 default: 6654 break; 6655 } 6656 sa = (struct sockaddr *)((caddr_t)sa + incr); 6657 } 6658 out_now: 6659 return (added); 6660 } 6661 6662 int 6663 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6664 unsigned int totaddr, 6665 unsigned int *num_v4, unsigned int *num_v6, 6666 unsigned int limit) 6667 { 6668 struct sockaddr *sa; 6669 struct sctp_tcb *stcb; 6670 unsigned int incr, at, i; 6671 6672 at = 0; 6673 sa = addr; 6674 *num_v6 = *num_v4 = 0; 6675 /* account and validate addresses */ 6676 if (totaddr == 0) { 6677 return (EINVAL); 6678 } 6679 for (i = 0; i < totaddr; i++) { 6680 if (at + sizeof(struct sockaddr) > limit) { 6681 return (EINVAL); 6682 } 6683 switch (sa->sa_family) { 6684 #ifdef INET 6685 case AF_INET: 6686 incr = (unsigned int)sizeof(struct sockaddr_in); 6687 if (sa->sa_len != incr) { 6688 return (EINVAL); 6689 } 6690 (*num_v4) += 1; 6691 break; 6692 #endif 6693 #ifdef INET6 6694 case AF_INET6: 6695 { 6696 struct sockaddr_in6 *sin6; 6697 6698 incr = (unsigned int)sizeof(struct sockaddr_in6); 6699 if (sa->sa_len != incr) { 6700 return (EINVAL); 6701 } 6702 sin6 = (struct sockaddr_in6 *)sa; 6703 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6704 /* Must be non-mapped for connectx */ 6705 return (EINVAL); 6706 } 6707 (*num_v6) += 1; 6708 break; 6709 } 6710 #endif 6711 default: 6712 return (EINVAL); 6713 } 6714 if ((at + incr) > limit) { 6715 return (EINVAL); 6716 } 6717 SCTP_INP_INCR_REF(inp); 6718 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6719 if (stcb != NULL) { 6720 SCTP_TCB_UNLOCK(stcb); 6721 return (EALREADY); 6722 } else { 6723 SCTP_INP_DECR_REF(inp); 6724 } 6725 at += incr; 6726 sa = (struct sockaddr *)((caddr_t)sa + incr); 6727 } 6728 return (0); 6729 } 6730 6731 /* 6732 * sctp_bindx(ADD) for one address. 6733 * assumes all arguments are valid/checked by caller. 6734 */ 6735 void 6736 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6737 struct sockaddr *sa, uint32_t vrf_id, int *error, 6738 void *p) 6739 { 6740 #if defined(INET) && defined(INET6) 6741 struct sockaddr_in sin; 6742 #endif 6743 #ifdef INET6 6744 struct sockaddr_in6 *sin6; 6745 #endif 6746 #ifdef INET 6747 struct sockaddr_in *sinp; 6748 #endif 6749 struct sockaddr *addr_to_use; 6750 struct sctp_inpcb *lep; 6751 uint16_t port; 6752 6753 /* see if we're bound all already! */ 6754 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6755 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6756 *error = EINVAL; 6757 return; 6758 } 6759 switch (sa->sa_family) { 6760 #ifdef INET6 6761 case AF_INET6: 6762 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6763 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6764 *error = EINVAL; 6765 return; 6766 } 6767 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6768 /* can only bind v6 on PF_INET6 sockets */ 6769 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6770 *error = EINVAL; 6771 return; 6772 } 6773 sin6 = (struct sockaddr_in6 *)sa; 6774 port = sin6->sin6_port; 6775 #ifdef INET 6776 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6777 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6778 SCTP_IPV6_V6ONLY(inp)) { 6779 /* can't bind v4-mapped on PF_INET sockets */ 6780 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6781 *error = EINVAL; 6782 return; 6783 } 6784 in6_sin6_2_sin(&sin, sin6); 6785 addr_to_use = (struct sockaddr *)&sin; 6786 } else { 6787 addr_to_use = sa; 6788 } 6789 #else 6790 addr_to_use = sa; 6791 #endif 6792 break; 6793 #endif 6794 #ifdef INET 6795 case AF_INET: 6796 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6797 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6798 *error = EINVAL; 6799 return; 6800 } 6801 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6802 SCTP_IPV6_V6ONLY(inp)) { 6803 /* can't bind v4 on PF_INET sockets */ 6804 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6805 *error = EINVAL; 6806 return; 6807 } 6808 sinp = (struct sockaddr_in *)sa; 6809 port = sinp->sin_port; 6810 addr_to_use = sa; 6811 break; 6812 #endif 6813 default: 6814 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6815 *error = EINVAL; 6816 return; 6817 } 6818 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6819 if (p == NULL) { 6820 /* Can't get proc for Net/Open BSD */ 6821 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6822 *error = EINVAL; 6823 return; 6824 } 6825 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6826 return; 6827 } 6828 /* Validate the incoming port. */ 6829 if ((port != 0) && (port != inp->sctp_lport)) { 6830 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6831 *error = EINVAL; 6832 return; 6833 } 6834 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6835 if (lep == NULL) { 6836 /* add the address */ 6837 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6838 SCTP_ADD_IP_ADDRESS, vrf_id); 6839 } else { 6840 if (lep != inp) { 6841 *error = EADDRINUSE; 6842 } 6843 SCTP_INP_DECR_REF(lep); 6844 } 6845 } 6846 6847 /* 6848 * sctp_bindx(DELETE) for one address. 6849 * assumes all arguments are valid/checked by caller. 6850 */ 6851 void 6852 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6853 struct sockaddr *sa, uint32_t vrf_id, int *error) 6854 { 6855 struct sockaddr *addr_to_use; 6856 #if defined(INET) && defined(INET6) 6857 struct sockaddr_in6 *sin6; 6858 struct sockaddr_in sin; 6859 #endif 6860 6861 /* see if we're bound all already! */ 6862 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6863 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6864 *error = EINVAL; 6865 return; 6866 } 6867 switch (sa->sa_family) { 6868 #ifdef INET6 6869 case AF_INET6: 6870 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6871 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6872 *error = EINVAL; 6873 return; 6874 } 6875 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6876 /* can only bind v6 on PF_INET6 sockets */ 6877 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6878 *error = EINVAL; 6879 return; 6880 } 6881 #ifdef INET 6882 sin6 = (struct sockaddr_in6 *)sa; 6883 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6884 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6885 SCTP_IPV6_V6ONLY(inp)) { 6886 /* can't bind mapped-v4 on PF_INET sockets */ 6887 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6888 *error = EINVAL; 6889 return; 6890 } 6891 in6_sin6_2_sin(&sin, sin6); 6892 addr_to_use = (struct sockaddr *)&sin; 6893 } else { 6894 addr_to_use = sa; 6895 } 6896 #else 6897 addr_to_use = sa; 6898 #endif 6899 break; 6900 #endif 6901 #ifdef INET 6902 case AF_INET: 6903 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6904 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6905 *error = EINVAL; 6906 return; 6907 } 6908 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6909 SCTP_IPV6_V6ONLY(inp)) { 6910 /* can't bind v4 on PF_INET sockets */ 6911 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6912 *error = EINVAL; 6913 return; 6914 } 6915 addr_to_use = sa; 6916 break; 6917 #endif 6918 default: 6919 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6920 *error = EINVAL; 6921 return; 6922 } 6923 /* No lock required mgmt_ep_sa does its own locking. */ 6924 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6925 vrf_id); 6926 } 6927 6928 /* 6929 * returns the valid local address count for an assoc, taking into account 6930 * all scoping rules 6931 */ 6932 int 6933 sctp_local_addr_count(struct sctp_tcb *stcb) 6934 { 6935 int loopback_scope; 6936 #if defined(INET) 6937 int ipv4_local_scope, ipv4_addr_legal; 6938 #endif 6939 #if defined(INET6) 6940 int local_scope, site_scope, ipv6_addr_legal; 6941 #endif 6942 struct sctp_vrf *vrf; 6943 struct sctp_ifn *sctp_ifn; 6944 struct sctp_ifa *sctp_ifa; 6945 int count = 0; 6946 6947 /* Turn on all the appropriate scopes */ 6948 loopback_scope = stcb->asoc.scope.loopback_scope; 6949 #if defined(INET) 6950 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6951 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6952 #endif 6953 #if defined(INET6) 6954 local_scope = stcb->asoc.scope.local_scope; 6955 site_scope = stcb->asoc.scope.site_scope; 6956 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6957 #endif 6958 SCTP_IPI_ADDR_RLOCK(); 6959 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6960 if (vrf == NULL) { 6961 /* no vrf, no addresses */ 6962 SCTP_IPI_ADDR_RUNLOCK(); 6963 return (0); 6964 } 6965 6966 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6967 /* 6968 * bound all case: go through all ifns on the vrf 6969 */ 6970 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6971 if ((loopback_scope == 0) && 6972 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6973 continue; 6974 } 6975 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6976 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6977 continue; 6978 switch (sctp_ifa->address.sa.sa_family) { 6979 #ifdef INET 6980 case AF_INET: 6981 if (ipv4_addr_legal) { 6982 struct sockaddr_in *sin; 6983 6984 sin = &sctp_ifa->address.sin; 6985 if (sin->sin_addr.s_addr == 0) { 6986 /* 6987 * skip unspecified 6988 * addrs 6989 */ 6990 continue; 6991 } 6992 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 6993 &sin->sin_addr) != 0) { 6994 continue; 6995 } 6996 if ((ipv4_local_scope == 0) && 6997 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 6998 continue; 6999 } 7000 /* count this one */ 7001 count++; 7002 } else { 7003 continue; 7004 } 7005 break; 7006 #endif 7007 #ifdef INET6 7008 case AF_INET6: 7009 if (ipv6_addr_legal) { 7010 struct sockaddr_in6 *sin6; 7011 7012 sin6 = &sctp_ifa->address.sin6; 7013 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 7014 continue; 7015 } 7016 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 7017 &sin6->sin6_addr) != 0) { 7018 continue; 7019 } 7020 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 7021 if (local_scope == 0) 7022 continue; 7023 if (sin6->sin6_scope_id == 0) { 7024 if (sa6_recoverscope(sin6) != 0) 7025 /* 7026 * 7027 * bad 7028 * link 7029 * 7030 * local 7031 * 7032 * address 7033 */ 7034 continue; 7035 } 7036 } 7037 if ((site_scope == 0) && 7038 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7039 continue; 7040 } 7041 /* count this one */ 7042 count++; 7043 } 7044 break; 7045 #endif 7046 default: 7047 /* TSNH */ 7048 break; 7049 } 7050 } 7051 } 7052 } else { 7053 /* 7054 * subset bound case 7055 */ 7056 struct sctp_laddr *laddr; 7057 7058 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7059 sctp_nxt_addr) { 7060 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7061 continue; 7062 } 7063 /* count this one */ 7064 count++; 7065 } 7066 } 7067 SCTP_IPI_ADDR_RUNLOCK(); 7068 return (count); 7069 } 7070 7071 #if defined(SCTP_LOCAL_TRACE_BUF) 7072 7073 void 7074 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7075 { 7076 uint32_t saveindex, newindex; 7077 7078 do { 7079 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7080 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7081 newindex = 1; 7082 } else { 7083 newindex = saveindex + 1; 7084 } 7085 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7086 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7087 saveindex = 0; 7088 } 7089 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7090 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7091 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7092 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7093 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7094 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7095 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7096 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7097 } 7098 7099 #endif 7100 static bool 7101 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7102 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7103 { 7104 struct ip *iph; 7105 #ifdef INET6 7106 struct ip6_hdr *ip6; 7107 #endif 7108 struct mbuf *sp, *last; 7109 struct udphdr *uhdr; 7110 uint16_t port; 7111 7112 if ((m->m_flags & M_PKTHDR) == 0) { 7113 /* Can't handle one that is not a pkt hdr */ 7114 goto out; 7115 } 7116 /* Pull the src port */ 7117 iph = mtod(m, struct ip *); 7118 uhdr = (struct udphdr *)((caddr_t)iph + off); 7119 port = uhdr->uh_sport; 7120 /* 7121 * Split out the mbuf chain. Leave the IP header in m, place the 7122 * rest in the sp. 7123 */ 7124 sp = m_split(m, off, M_NOWAIT); 7125 if (sp == NULL) { 7126 /* Gak, drop packet, we can't do a split */ 7127 goto out; 7128 } 7129 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7130 /* Gak, packet can't have an SCTP header in it - too small */ 7131 m_freem(sp); 7132 goto out; 7133 } 7134 /* Now pull up the UDP header and SCTP header together */ 7135 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7136 if (sp == NULL) { 7137 /* Gak pullup failed */ 7138 goto out; 7139 } 7140 /* Trim out the UDP header */ 7141 m_adj(sp, sizeof(struct udphdr)); 7142 7143 /* Now reconstruct the mbuf chain */ 7144 for (last = m; last->m_next; last = last->m_next); 7145 last->m_next = sp; 7146 m->m_pkthdr.len += sp->m_pkthdr.len; 7147 /* 7148 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7149 * checksum and it was valid. Since CSUM_DATA_VALID == 7150 * CSUM_SCTP_VALID this would imply that the HW also verified the 7151 * SCTP checksum. Therefore, clear the bit. 7152 */ 7153 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7154 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7155 m->m_pkthdr.len, 7156 if_name(m->m_pkthdr.rcvif), 7157 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7158 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7159 iph = mtod(m, struct ip *); 7160 switch (iph->ip_v) { 7161 #ifdef INET 7162 case IPVERSION: 7163 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7164 sctp_input_with_port(m, off, port); 7165 break; 7166 #endif 7167 #ifdef INET6 7168 case IPV6_VERSION >> 4: 7169 ip6 = mtod(m, struct ip6_hdr *); 7170 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7171 sctp6_input_with_port(&m, &off, port); 7172 break; 7173 #endif 7174 default: 7175 goto out; 7176 break; 7177 } 7178 return (true); 7179 out: 7180 m_freem(m); 7181 7182 return (true); 7183 } 7184 7185 #ifdef INET 7186 static void 7187 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7188 { 7189 struct ip *outer_ip, *inner_ip; 7190 struct sctphdr *sh; 7191 struct icmp *icmp; 7192 struct udphdr *udp; 7193 struct sctp_inpcb *inp; 7194 struct sctp_tcb *stcb; 7195 struct sctp_nets *net; 7196 struct sctp_init_chunk *ch; 7197 struct sockaddr_in src, dst; 7198 uint8_t type, code; 7199 7200 inner_ip = (struct ip *)vip; 7201 icmp = (struct icmp *)((caddr_t)inner_ip - 7202 (sizeof(struct icmp) - sizeof(struct ip))); 7203 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7204 if (ntohs(outer_ip->ip_len) < 7205 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7206 return; 7207 } 7208 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7209 sh = (struct sctphdr *)(udp + 1); 7210 memset(&src, 0, sizeof(struct sockaddr_in)); 7211 src.sin_family = AF_INET; 7212 src.sin_len = sizeof(struct sockaddr_in); 7213 src.sin_port = sh->src_port; 7214 src.sin_addr = inner_ip->ip_src; 7215 memset(&dst, 0, sizeof(struct sockaddr_in)); 7216 dst.sin_family = AF_INET; 7217 dst.sin_len = sizeof(struct sockaddr_in); 7218 dst.sin_port = sh->dest_port; 7219 dst.sin_addr = inner_ip->ip_dst; 7220 /* 7221 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7222 * holds our local endpoint address. Thus we reverse the dst and the 7223 * src in the lookup. 7224 */ 7225 inp = NULL; 7226 net = NULL; 7227 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7228 (struct sockaddr *)&src, 7229 &inp, &net, 1, 7230 SCTP_DEFAULT_VRFID); 7231 if ((stcb != NULL) && 7232 (net != NULL) && 7233 (inp != NULL)) { 7234 /* Check the UDP port numbers */ 7235 if ((udp->uh_dport != net->port) || 7236 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7237 SCTP_TCB_UNLOCK(stcb); 7238 return; 7239 } 7240 /* Check the verification tag */ 7241 if (ntohl(sh->v_tag) != 0) { 7242 /* 7243 * This must be the verification tag used for 7244 * sending out packets. We don't consider packets 7245 * reflecting the verification tag. 7246 */ 7247 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7248 SCTP_TCB_UNLOCK(stcb); 7249 return; 7250 } 7251 } else { 7252 if (ntohs(outer_ip->ip_len) >= 7253 sizeof(struct ip) + 7254 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7255 /* 7256 * In this case we can check if we got an 7257 * INIT chunk and if the initiate tag 7258 * matches. 7259 */ 7260 ch = (struct sctp_init_chunk *)(sh + 1); 7261 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7262 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7263 SCTP_TCB_UNLOCK(stcb); 7264 return; 7265 } 7266 } else { 7267 SCTP_TCB_UNLOCK(stcb); 7268 return; 7269 } 7270 } 7271 type = icmp->icmp_type; 7272 code = icmp->icmp_code; 7273 if ((type == ICMP_UNREACH) && 7274 (code == ICMP_UNREACH_PORT)) { 7275 code = ICMP_UNREACH_PROTOCOL; 7276 } 7277 sctp_notify(inp, stcb, net, type, code, 7278 ntohs(inner_ip->ip_len), 7279 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7280 } else { 7281 if ((stcb == NULL) && (inp != NULL)) { 7282 /* reduce ref-count */ 7283 SCTP_INP_WLOCK(inp); 7284 SCTP_INP_DECR_REF(inp); 7285 SCTP_INP_WUNLOCK(inp); 7286 } 7287 if (stcb) { 7288 SCTP_TCB_UNLOCK(stcb); 7289 } 7290 } 7291 return; 7292 } 7293 #endif 7294 7295 #ifdef INET6 7296 static void 7297 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7298 { 7299 struct ip6ctlparam *ip6cp; 7300 struct sctp_inpcb *inp; 7301 struct sctp_tcb *stcb; 7302 struct sctp_nets *net; 7303 struct sctphdr sh; 7304 struct udphdr udp; 7305 struct sockaddr_in6 src, dst; 7306 uint8_t type, code; 7307 7308 ip6cp = (struct ip6ctlparam *)d; 7309 /* 7310 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7311 */ 7312 if (ip6cp->ip6c_m == NULL) { 7313 return; 7314 } 7315 /* 7316 * Check if we can safely examine the ports and the verification tag 7317 * of the SCTP common header. 7318 */ 7319 if (ip6cp->ip6c_m->m_pkthdr.len < 7320 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7321 return; 7322 } 7323 /* Copy out the UDP header. */ 7324 memset(&udp, 0, sizeof(struct udphdr)); 7325 m_copydata(ip6cp->ip6c_m, 7326 ip6cp->ip6c_off, 7327 sizeof(struct udphdr), 7328 (caddr_t)&udp); 7329 /* Copy out the port numbers and the verification tag. */ 7330 memset(&sh, 0, sizeof(struct sctphdr)); 7331 m_copydata(ip6cp->ip6c_m, 7332 ip6cp->ip6c_off + sizeof(struct udphdr), 7333 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7334 (caddr_t)&sh); 7335 memset(&src, 0, sizeof(struct sockaddr_in6)); 7336 src.sin6_family = AF_INET6; 7337 src.sin6_len = sizeof(struct sockaddr_in6); 7338 src.sin6_port = sh.src_port; 7339 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7340 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7341 return; 7342 } 7343 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7344 dst.sin6_family = AF_INET6; 7345 dst.sin6_len = sizeof(struct sockaddr_in6); 7346 dst.sin6_port = sh.dest_port; 7347 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7348 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7349 return; 7350 } 7351 inp = NULL; 7352 net = NULL; 7353 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7354 (struct sockaddr *)&src, 7355 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7356 if ((stcb != NULL) && 7357 (net != NULL) && 7358 (inp != NULL)) { 7359 /* Check the UDP port numbers */ 7360 if ((udp.uh_dport != net->port) || 7361 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7362 SCTP_TCB_UNLOCK(stcb); 7363 return; 7364 } 7365 /* Check the verification tag */ 7366 if (ntohl(sh.v_tag) != 0) { 7367 /* 7368 * This must be the verification tag used for 7369 * sending out packets. We don't consider packets 7370 * reflecting the verification tag. 7371 */ 7372 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7373 SCTP_TCB_UNLOCK(stcb); 7374 return; 7375 } 7376 } else { 7377 if (ip6cp->ip6c_m->m_pkthdr.len >= 7378 ip6cp->ip6c_off + sizeof(struct udphdr) + 7379 sizeof(struct sctphdr) + 7380 sizeof(struct sctp_chunkhdr) + 7381 offsetof(struct sctp_init, a_rwnd)) { 7382 /* 7383 * In this case we can check if we got an 7384 * INIT chunk and if the initiate tag 7385 * matches. 7386 */ 7387 uint32_t initiate_tag; 7388 uint8_t chunk_type; 7389 7390 m_copydata(ip6cp->ip6c_m, 7391 ip6cp->ip6c_off + 7392 sizeof(struct udphdr) + 7393 sizeof(struct sctphdr), 7394 sizeof(uint8_t), 7395 (caddr_t)&chunk_type); 7396 m_copydata(ip6cp->ip6c_m, 7397 ip6cp->ip6c_off + 7398 sizeof(struct udphdr) + 7399 sizeof(struct sctphdr) + 7400 sizeof(struct sctp_chunkhdr), 7401 sizeof(uint32_t), 7402 (caddr_t)&initiate_tag); 7403 if ((chunk_type != SCTP_INITIATION) || 7404 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7405 SCTP_TCB_UNLOCK(stcb); 7406 return; 7407 } 7408 } else { 7409 SCTP_TCB_UNLOCK(stcb); 7410 return; 7411 } 7412 } 7413 type = ip6cp->ip6c_icmp6->icmp6_type; 7414 code = ip6cp->ip6c_icmp6->icmp6_code; 7415 if ((type == ICMP6_DST_UNREACH) && 7416 (code == ICMP6_DST_UNREACH_NOPORT)) { 7417 type = ICMP6_PARAM_PROB; 7418 code = ICMP6_PARAMPROB_NEXTHEADER; 7419 } 7420 sctp6_notify(inp, stcb, net, type, code, 7421 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7422 } else { 7423 if ((stcb == NULL) && (inp != NULL)) { 7424 /* reduce inp's ref-count */ 7425 SCTP_INP_WLOCK(inp); 7426 SCTP_INP_DECR_REF(inp); 7427 SCTP_INP_WUNLOCK(inp); 7428 } 7429 if (stcb) { 7430 SCTP_TCB_UNLOCK(stcb); 7431 } 7432 } 7433 } 7434 #endif 7435 7436 void 7437 sctp_over_udp_stop(void) 7438 { 7439 /* 7440 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7441 * for writing! 7442 */ 7443 #ifdef INET 7444 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7445 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7446 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7447 } 7448 #endif 7449 #ifdef INET6 7450 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7451 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7452 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7453 } 7454 #endif 7455 } 7456 7457 int 7458 sctp_over_udp_start(void) 7459 { 7460 uint16_t port; 7461 int ret; 7462 #ifdef INET 7463 struct sockaddr_in sin; 7464 #endif 7465 #ifdef INET6 7466 struct sockaddr_in6 sin6; 7467 #endif 7468 /* 7469 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7470 * for writing! 7471 */ 7472 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7473 if (ntohs(port) == 0) { 7474 /* Must have a port set */ 7475 return (EINVAL); 7476 } 7477 #ifdef INET 7478 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7479 /* Already running -- must stop first */ 7480 return (EALREADY); 7481 } 7482 #endif 7483 #ifdef INET6 7484 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7485 /* Already running -- must stop first */ 7486 return (EALREADY); 7487 } 7488 #endif 7489 #ifdef INET 7490 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7491 SOCK_DGRAM, IPPROTO_UDP, 7492 curthread->td_ucred, curthread))) { 7493 sctp_over_udp_stop(); 7494 return (ret); 7495 } 7496 /* Call the special UDP hook. */ 7497 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7498 sctp_recv_udp_tunneled_packet, 7499 sctp_recv_icmp_tunneled_packet, 7500 NULL))) { 7501 sctp_over_udp_stop(); 7502 return (ret); 7503 } 7504 /* Ok, we have a socket, bind it to the port. */ 7505 memset(&sin, 0, sizeof(struct sockaddr_in)); 7506 sin.sin_len = sizeof(struct sockaddr_in); 7507 sin.sin_family = AF_INET; 7508 sin.sin_port = htons(port); 7509 if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket), 7510 (struct sockaddr *)&sin, curthread))) { 7511 sctp_over_udp_stop(); 7512 return (ret); 7513 } 7514 #endif 7515 #ifdef INET6 7516 if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket), 7517 SOCK_DGRAM, IPPROTO_UDP, 7518 curthread->td_ucred, curthread))) { 7519 sctp_over_udp_stop(); 7520 return (ret); 7521 } 7522 /* Call the special UDP hook. */ 7523 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket), 7524 sctp_recv_udp_tunneled_packet, 7525 sctp_recv_icmp6_tunneled_packet, 7526 NULL))) { 7527 sctp_over_udp_stop(); 7528 return (ret); 7529 } 7530 /* Ok, we have a socket, bind it to the port. */ 7531 memset(&sin6, 0, sizeof(struct sockaddr_in6)); 7532 sin6.sin6_len = sizeof(struct sockaddr_in6); 7533 sin6.sin6_family = AF_INET6; 7534 sin6.sin6_port = htons(port); 7535 if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket), 7536 (struct sockaddr *)&sin6, curthread))) { 7537 sctp_over_udp_stop(); 7538 return (ret); 7539 } 7540 #endif 7541 return (0); 7542 } 7543 7544 /* 7545 * sctp_min_mtu ()returns the minimum of all non-zero arguments. 7546 * If all arguments are zero, zero is returned. 7547 */ 7548 uint32_t 7549 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3) 7550 { 7551 if (mtu1 > 0) { 7552 if (mtu2 > 0) { 7553 if (mtu3 > 0) { 7554 return (min(mtu1, min(mtu2, mtu3))); 7555 } else { 7556 return (min(mtu1, mtu2)); 7557 } 7558 } else { 7559 if (mtu3 > 0) { 7560 return (min(mtu1, mtu3)); 7561 } else { 7562 return (mtu1); 7563 } 7564 } 7565 } else { 7566 if (mtu2 > 0) { 7567 if (mtu3 > 0) { 7568 return (min(mtu2, mtu3)); 7569 } else { 7570 return (mtu2); 7571 } 7572 } else { 7573 return (mtu3); 7574 } 7575 } 7576 } 7577 7578 void 7579 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu) 7580 { 7581 struct in_conninfo inc; 7582 7583 memset(&inc, 0, sizeof(struct in_conninfo)); 7584 inc.inc_fibnum = fibnum; 7585 switch (addr->sa.sa_family) { 7586 #ifdef INET 7587 case AF_INET: 7588 inc.inc_faddr = addr->sin.sin_addr; 7589 break; 7590 #endif 7591 #ifdef INET6 7592 case AF_INET6: 7593 inc.inc_flags |= INC_ISIPV6; 7594 inc.inc6_faddr = addr->sin6.sin6_addr; 7595 break; 7596 #endif 7597 default: 7598 return; 7599 } 7600 tcp_hc_updatemtu(&inc, (u_long)mtu); 7601 } 7602 7603 uint32_t 7604 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum) 7605 { 7606 struct in_conninfo inc; 7607 7608 memset(&inc, 0, sizeof(struct in_conninfo)); 7609 inc.inc_fibnum = fibnum; 7610 switch (addr->sa.sa_family) { 7611 #ifdef INET 7612 case AF_INET: 7613 inc.inc_faddr = addr->sin.sin_addr; 7614 break; 7615 #endif 7616 #ifdef INET6 7617 case AF_INET6: 7618 inc.inc_flags |= INC_ISIPV6; 7619 inc.inc6_faddr = addr->sin6.sin6_addr; 7620 break; 7621 #endif 7622 default: 7623 return (0); 7624 } 7625 return ((uint32_t)tcp_hc_getmtu(&inc)); 7626 } 7627 7628 void 7629 sctp_set_state(struct sctp_tcb *stcb, int new_state) 7630 { 7631 #if defined(KDTRACE_HOOKS) 7632 int old_state = stcb->asoc.state; 7633 #endif 7634 7635 KASSERT((new_state & ~SCTP_STATE_MASK) == 0, 7636 ("sctp_set_state: Can't set substate (new_state = %x)", 7637 new_state)); 7638 stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state; 7639 if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) || 7640 (new_state == SCTP_STATE_SHUTDOWN_SENT) || 7641 (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) { 7642 SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING); 7643 } 7644 #if defined(KDTRACE_HOOKS) 7645 if (((old_state & SCTP_STATE_MASK) != new_state) && 7646 !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) && 7647 (new_state == SCTP_STATE_INUSE))) { 7648 SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state); 7649 } 7650 #endif 7651 } 7652 7653 void 7654 sctp_add_substate(struct sctp_tcb *stcb, int substate) 7655 { 7656 #if defined(KDTRACE_HOOKS) 7657 int old_state = stcb->asoc.state; 7658 #endif 7659 7660 KASSERT((substate & SCTP_STATE_MASK) == 0, 7661 ("sctp_add_substate: Can't set state (substate = %x)", 7662 substate)); 7663 stcb->asoc.state |= substate; 7664 #if defined(KDTRACE_HOOKS) 7665 if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) && 7666 ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) || 7667 ((substate & SCTP_STATE_SHUTDOWN_PENDING) && 7668 ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) { 7669 SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state); 7670 } 7671 #endif 7672 } 7673