1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * a) Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 14 * b) Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the distribution. 17 * 18 * c) Neither the name of Cisco Systems, Inc. nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include <netinet/sctp_os.h> 39 #include <netinet/sctp_pcb.h> 40 #include <netinet/sctputil.h> 41 #include <netinet/sctp_var.h> 42 #include <netinet/sctp_sysctl.h> 43 #ifdef INET6 44 #include <netinet6/sctp6_var.h> 45 #endif 46 #include <netinet/sctp_header.h> 47 #include <netinet/sctp_output.h> 48 #include <netinet/sctp_uio.h> 49 #include <netinet/sctp_timer.h> 50 #include <netinet/sctp_indata.h> 51 #include <netinet/sctp_auth.h> 52 #include <netinet/sctp_asconf.h> 53 #include <netinet/sctp_bsd_addr.h> 54 #include <netinet/sctp_kdtrace.h> 55 #if defined(INET6) || defined(INET) 56 #include <netinet/tcp_var.h> 57 #endif 58 #include <netinet/udp.h> 59 #include <netinet/udp_var.h> 60 #include <sys/proc.h> 61 #ifdef INET6 62 #include <netinet/icmp6.h> 63 #endif 64 65 #ifndef KTR_SCTP 66 #define KTR_SCTP KTR_SUBSYS 67 #endif 68 69 extern const struct sctp_cc_functions sctp_cc_functions[]; 70 extern const struct sctp_ss_functions sctp_ss_functions[]; 71 72 void 73 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr) 74 { 75 #if defined(SCTP_LOCAL_TRACE_BUF) 76 struct sctp_cwnd_log sctp_clog; 77 78 sctp_clog.x.sb.stcb = stcb; 79 sctp_clog.x.sb.so_sbcc = SCTP_SBAVAIL(sb); 80 if (stcb) 81 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc; 82 else 83 sctp_clog.x.sb.stcb_sbcc = 0; 84 sctp_clog.x.sb.incr = incr; 85 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 86 SCTP_LOG_EVENT_SB, 87 from, 88 sctp_clog.x.misc.log1, 89 sctp_clog.x.misc.log2, 90 sctp_clog.x.misc.log3, 91 sctp_clog.x.misc.log4); 92 #endif 93 } 94 95 void 96 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc) 97 { 98 #if defined(SCTP_LOCAL_TRACE_BUF) 99 struct sctp_cwnd_log sctp_clog; 100 101 sctp_clog.x.close.inp = (void *)inp; 102 sctp_clog.x.close.sctp_flags = inp->sctp_flags; 103 if (stcb) { 104 sctp_clog.x.close.stcb = (void *)stcb; 105 sctp_clog.x.close.state = (uint16_t)stcb->asoc.state; 106 } else { 107 sctp_clog.x.close.stcb = 0; 108 sctp_clog.x.close.state = 0; 109 } 110 sctp_clog.x.close.loc = loc; 111 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 112 SCTP_LOG_EVENT_CLOSE, 113 0, 114 sctp_clog.x.misc.log1, 115 sctp_clog.x.misc.log2, 116 sctp_clog.x.misc.log3, 117 sctp_clog.x.misc.log4); 118 #endif 119 } 120 121 void 122 rto_logging(struct sctp_nets *net, int from) 123 { 124 #if defined(SCTP_LOCAL_TRACE_BUF) 125 struct sctp_cwnd_log sctp_clog; 126 127 memset(&sctp_clog, 0, sizeof(sctp_clog)); 128 sctp_clog.x.rto.net = (void *)net; 129 sctp_clog.x.rto.rtt = net->rtt / 1000; 130 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 131 SCTP_LOG_EVENT_RTT, 132 from, 133 sctp_clog.x.misc.log1, 134 sctp_clog.x.misc.log2, 135 sctp_clog.x.misc.log3, 136 sctp_clog.x.misc.log4); 137 #endif 138 } 139 140 void 141 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from) 142 { 143 #if defined(SCTP_LOCAL_TRACE_BUF) 144 struct sctp_cwnd_log sctp_clog; 145 146 sctp_clog.x.strlog.stcb = stcb; 147 sctp_clog.x.strlog.n_tsn = tsn; 148 sctp_clog.x.strlog.n_sseq = sseq; 149 sctp_clog.x.strlog.e_tsn = 0; 150 sctp_clog.x.strlog.e_sseq = 0; 151 sctp_clog.x.strlog.strm = stream; 152 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 153 SCTP_LOG_EVENT_STRM, 154 from, 155 sctp_clog.x.misc.log1, 156 sctp_clog.x.misc.log2, 157 sctp_clog.x.misc.log3, 158 sctp_clog.x.misc.log4); 159 #endif 160 } 161 162 void 163 sctp_log_nagle_event(struct sctp_tcb *stcb, int action) 164 { 165 #if defined(SCTP_LOCAL_TRACE_BUF) 166 
struct sctp_cwnd_log sctp_clog; 167 168 sctp_clog.x.nagle.stcb = (void *)stcb; 169 sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight; 170 sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size; 171 sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue; 172 sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count; 173 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 174 SCTP_LOG_EVENT_NAGLE, 175 action, 176 sctp_clog.x.misc.log1, 177 sctp_clog.x.misc.log2, 178 sctp_clog.x.misc.log3, 179 sctp_clog.x.misc.log4); 180 #endif 181 } 182 183 void 184 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from) 185 { 186 #if defined(SCTP_LOCAL_TRACE_BUF) 187 struct sctp_cwnd_log sctp_clog; 188 189 sctp_clog.x.sack.cumack = cumack; 190 sctp_clog.x.sack.oldcumack = old_cumack; 191 sctp_clog.x.sack.tsn = tsn; 192 sctp_clog.x.sack.numGaps = gaps; 193 sctp_clog.x.sack.numDups = dups; 194 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 195 SCTP_LOG_EVENT_SACK, 196 from, 197 sctp_clog.x.misc.log1, 198 sctp_clog.x.misc.log2, 199 sctp_clog.x.misc.log3, 200 sctp_clog.x.misc.log4); 201 #endif 202 } 203 204 void 205 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from) 206 { 207 #if defined(SCTP_LOCAL_TRACE_BUF) 208 struct sctp_cwnd_log sctp_clog; 209 210 memset(&sctp_clog, 0, sizeof(sctp_clog)); 211 sctp_clog.x.map.base = map; 212 sctp_clog.x.map.cum = cum; 213 sctp_clog.x.map.high = high; 214 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 215 SCTP_LOG_EVENT_MAP, 216 from, 217 sctp_clog.x.misc.log1, 218 sctp_clog.x.misc.log2, 219 sctp_clog.x.misc.log3, 220 sctp_clog.x.misc.log4); 221 #endif 222 } 223 224 void 225 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from) 226 { 227 #if defined(SCTP_LOCAL_TRACE_BUF) 228 struct sctp_cwnd_log sctp_clog; 229 230 memset(&sctp_clog, 0, sizeof(sctp_clog)); 231 sctp_clog.x.fr.largest_tsn = biggest_tsn; 232 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn; 233 sctp_clog.x.fr.tsn = tsn; 234 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 235 SCTP_LOG_EVENT_FR, 236 from, 237 sctp_clog.x.misc.log1, 238 sctp_clog.x.misc.log2, 239 sctp_clog.x.misc.log3, 240 sctp_clog.x.misc.log4); 241 #endif 242 } 243 244 #ifdef SCTP_MBUF_LOGGING 245 void 246 sctp_log_mb(struct mbuf *m, int from) 247 { 248 #if defined(SCTP_LOCAL_TRACE_BUF) 249 struct sctp_cwnd_log sctp_clog; 250 251 sctp_clog.x.mb.mp = m; 252 sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m)); 253 sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m)); 254 sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0); 255 if (SCTP_BUF_IS_EXTENDED(m)) { 256 sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m); 257 sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m)); 258 } else { 259 sctp_clog.x.mb.ext = 0; 260 sctp_clog.x.mb.refcnt = 0; 261 } 262 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 263 SCTP_LOG_EVENT_MBUF, 264 from, 265 sctp_clog.x.misc.log1, 266 sctp_clog.x.misc.log2, 267 sctp_clog.x.misc.log3, 268 sctp_clog.x.misc.log4); 269 #endif 270 } 271 272 void 273 sctp_log_mbc(struct mbuf *m, int from) 274 { 275 struct mbuf *mat; 276 277 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) { 278 sctp_log_mb(mat, from); 279 } 280 } 281 #endif 282 283 void 284 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from) 285 { 286 #if defined(SCTP_LOCAL_TRACE_BUF) 287 struct sctp_cwnd_log sctp_clog; 288 289 if (control == NULL) { 290 SCTP_PRINTF("Gak log of NULL?\n"); 291 return; 292 } 293 
sctp_clog.x.strlog.stcb = control->stcb; 294 sctp_clog.x.strlog.n_tsn = control->sinfo_tsn; 295 sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid; 296 sctp_clog.x.strlog.strm = control->sinfo_stream; 297 if (poschk != NULL) { 298 sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn; 299 sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid; 300 } else { 301 sctp_clog.x.strlog.e_tsn = 0; 302 sctp_clog.x.strlog.e_sseq = 0; 303 } 304 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 305 SCTP_LOG_EVENT_STRM, 306 from, 307 sctp_clog.x.misc.log1, 308 sctp_clog.x.misc.log2, 309 sctp_clog.x.misc.log3, 310 sctp_clog.x.misc.log4); 311 #endif 312 } 313 314 void 315 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from) 316 { 317 #if defined(SCTP_LOCAL_TRACE_BUF) 318 struct sctp_cwnd_log sctp_clog; 319 320 sctp_clog.x.cwnd.net = net; 321 if (stcb->asoc.send_queue_cnt > 255) 322 sctp_clog.x.cwnd.cnt_in_send = 255; 323 else 324 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 325 if (stcb->asoc.stream_queue_cnt > 255) 326 sctp_clog.x.cwnd.cnt_in_str = 255; 327 else 328 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 329 330 if (net) { 331 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd; 332 sctp_clog.x.cwnd.inflight = net->flight_size; 333 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack; 334 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack; 335 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack; 336 } 337 if (SCTP_CWNDLOG_PRESEND == from) { 338 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd; 339 } 340 sctp_clog.x.cwnd.cwnd_augment = augment; 341 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 342 SCTP_LOG_EVENT_CWND, 343 from, 344 sctp_clog.x.misc.log1, 345 sctp_clog.x.misc.log2, 346 sctp_clog.x.misc.log3, 347 sctp_clog.x.misc.log4); 348 #endif 349 } 350 351 void 352 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from) 353 { 354 #if defined(SCTP_LOCAL_TRACE_BUF) 355 struct sctp_cwnd_log sctp_clog; 356 357 memset(&sctp_clog, 0, sizeof(sctp_clog)); 358 if (inp) { 359 sctp_clog.x.lock.sock = (void *)inp->sctp_socket; 360 361 } else { 362 sctp_clog.x.lock.sock = (void *)NULL; 363 } 364 sctp_clog.x.lock.inp = (void *)inp; 365 if (stcb) { 366 sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx); 367 } else { 368 sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN; 369 } 370 if (inp) { 371 sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx); 372 sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx); 373 } else { 374 sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN; 375 sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN; 376 } 377 sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx)); 378 if (inp && (inp->sctp_socket)) { 379 sctp_clog.x.lock.sock_lock = mtx_owned(SOCK_MTX(inp->sctp_socket)); 380 sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_rcv)); 381 sctp_clog.x.lock.socksndbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_snd)); 382 } else { 383 sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN; 384 sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN; 385 sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN; 386 } 387 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 388 SCTP_LOG_LOCK_EVENT, 389 from, 390 sctp_clog.x.misc.log1, 391 sctp_clog.x.misc.log2, 392 sctp_clog.x.misc.log3, 393 sctp_clog.x.misc.log4); 394 #endif 395 } 396 397 void 398 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from) 399 { 400 #if 
defined(SCTP_LOCAL_TRACE_BUF) 401 struct sctp_cwnd_log sctp_clog; 402 403 memset(&sctp_clog, 0, sizeof(sctp_clog)); 404 sctp_clog.x.cwnd.net = net; 405 sctp_clog.x.cwnd.cwnd_new_value = error; 406 sctp_clog.x.cwnd.inflight = net->flight_size; 407 sctp_clog.x.cwnd.cwnd_augment = burst; 408 if (stcb->asoc.send_queue_cnt > 255) 409 sctp_clog.x.cwnd.cnt_in_send = 255; 410 else 411 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 412 if (stcb->asoc.stream_queue_cnt > 255) 413 sctp_clog.x.cwnd.cnt_in_str = 255; 414 else 415 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 416 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 417 SCTP_LOG_EVENT_MAXBURST, 418 from, 419 sctp_clog.x.misc.log1, 420 sctp_clog.x.misc.log2, 421 sctp_clog.x.misc.log3, 422 sctp_clog.x.misc.log4); 423 #endif 424 } 425 426 void 427 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead) 428 { 429 #if defined(SCTP_LOCAL_TRACE_BUF) 430 struct sctp_cwnd_log sctp_clog; 431 432 sctp_clog.x.rwnd.rwnd = peers_rwnd; 433 sctp_clog.x.rwnd.send_size = snd_size; 434 sctp_clog.x.rwnd.overhead = overhead; 435 sctp_clog.x.rwnd.new_rwnd = 0; 436 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 437 SCTP_LOG_EVENT_RWND, 438 from, 439 sctp_clog.x.misc.log1, 440 sctp_clog.x.misc.log2, 441 sctp_clog.x.misc.log3, 442 sctp_clog.x.misc.log4); 443 #endif 444 } 445 446 void 447 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval) 448 { 449 #if defined(SCTP_LOCAL_TRACE_BUF) 450 struct sctp_cwnd_log sctp_clog; 451 452 sctp_clog.x.rwnd.rwnd = peers_rwnd; 453 sctp_clog.x.rwnd.send_size = flight_size; 454 sctp_clog.x.rwnd.overhead = overhead; 455 sctp_clog.x.rwnd.new_rwnd = a_rwndval; 456 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 457 SCTP_LOG_EVENT_RWND, 458 from, 459 sctp_clog.x.misc.log1, 460 sctp_clog.x.misc.log2, 461 sctp_clog.x.misc.log3, 462 sctp_clog.x.misc.log4); 463 #endif 464 } 465 466 #ifdef SCTP_MBCNT_LOGGING 467 static void 468 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt) 469 { 470 #if defined(SCTP_LOCAL_TRACE_BUF) 471 struct sctp_cwnd_log sctp_clog; 472 473 sctp_clog.x.mbcnt.total_queue_size = total_oq; 474 sctp_clog.x.mbcnt.size_change = book; 475 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q; 476 sctp_clog.x.mbcnt.mbcnt_change = mbcnt; 477 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 478 SCTP_LOG_EVENT_MBCNT, 479 from, 480 sctp_clog.x.misc.log1, 481 sctp_clog.x.misc.log2, 482 sctp_clog.x.misc.log3, 483 sctp_clog.x.misc.log4); 484 #endif 485 } 486 #endif 487 488 void 489 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d) 490 { 491 #if defined(SCTP_LOCAL_TRACE_BUF) 492 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 493 SCTP_LOG_MISC_EVENT, 494 from, 495 a, b, c, d); 496 #endif 497 } 498 499 void 500 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from) 501 { 502 #if defined(SCTP_LOCAL_TRACE_BUF) 503 struct sctp_cwnd_log sctp_clog; 504 505 sctp_clog.x.wake.stcb = (void *)stcb; 506 sctp_clog.x.wake.wake_cnt = wake_cnt; 507 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count; 508 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt; 509 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt; 510 511 if (stcb->asoc.stream_queue_cnt < 0xff) 512 sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt; 513 else 514 sctp_clog.x.wake.stream_qcnt = 0xff; 515 516 if (stcb->asoc.chunks_on_out_queue < 0xff) 517 
sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue; 518 else 519 sctp_clog.x.wake.chunks_on_oque = 0xff; 520 521 sctp_clog.x.wake.sctpflags = 0; 522 /* set in the defered mode stuff */ 523 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) 524 sctp_clog.x.wake.sctpflags |= 1; 525 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) 526 sctp_clog.x.wake.sctpflags |= 2; 527 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) 528 sctp_clog.x.wake.sctpflags |= 4; 529 /* what about the sb */ 530 if (stcb->sctp_socket) { 531 struct socket *so = stcb->sctp_socket; 532 533 sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff)); 534 } else { 535 sctp_clog.x.wake.sbflags = 0xff; 536 } 537 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 538 SCTP_LOG_EVENT_WAKE, 539 from, 540 sctp_clog.x.misc.log1, 541 sctp_clog.x.misc.log2, 542 sctp_clog.x.misc.log3, 543 sctp_clog.x.misc.log4); 544 #endif 545 } 546 547 void 548 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen) 549 { 550 #if defined(SCTP_LOCAL_TRACE_BUF) 551 struct sctp_cwnd_log sctp_clog; 552 553 sctp_clog.x.blk.onsb = asoc->total_output_queue_size; 554 sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt); 555 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd; 556 sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt; 557 sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue; 558 sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024); 559 sctp_clog.x.blk.sndlen = (uint32_t)sendlen; 560 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 561 SCTP_LOG_EVENT_BLOCK, 562 from, 563 sctp_clog.x.misc.log1, 564 sctp_clog.x.misc.log2, 565 sctp_clog.x.misc.log3, 566 sctp_clog.x.misc.log4); 567 #endif 568 } 569 570 int 571 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED) 572 { 573 /* May need to fix this if ktrdump does not work */ 574 return (0); 575 } 576 577 #ifdef SCTP_AUDITING_ENABLED 578 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2]; 579 static int sctp_audit_indx = 0; 580 581 static 582 void 583 sctp_print_audit_report(void) 584 { 585 int i; 586 int cnt; 587 588 cnt = 0; 589 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) { 590 if ((sctp_audit_data[i][0] == 0xe0) && 591 (sctp_audit_data[i][1] == 0x01)) { 592 cnt = 0; 593 SCTP_PRINTF("\n"); 594 } else if (sctp_audit_data[i][0] == 0xf0) { 595 cnt = 0; 596 SCTP_PRINTF("\n"); 597 } else if ((sctp_audit_data[i][0] == 0xc0) && 598 (sctp_audit_data[i][1] == 0x01)) { 599 SCTP_PRINTF("\n"); 600 cnt = 0; 601 } 602 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0], 603 (uint32_t)sctp_audit_data[i][1]); 604 cnt++; 605 if ((cnt % 14) == 0) 606 SCTP_PRINTF("\n"); 607 } 608 for (i = 0; i < sctp_audit_indx; i++) { 609 if ((sctp_audit_data[i][0] == 0xe0) && 610 (sctp_audit_data[i][1] == 0x01)) { 611 cnt = 0; 612 SCTP_PRINTF("\n"); 613 } else if (sctp_audit_data[i][0] == 0xf0) { 614 cnt = 0; 615 SCTP_PRINTF("\n"); 616 } else if ((sctp_audit_data[i][0] == 0xc0) && 617 (sctp_audit_data[i][1] == 0x01)) { 618 SCTP_PRINTF("\n"); 619 cnt = 0; 620 } 621 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0], 622 (uint32_t)sctp_audit_data[i][1]); 623 cnt++; 624 if ((cnt % 14) == 0) 625 SCTP_PRINTF("\n"); 626 } 627 SCTP_PRINTF("\n"); 628 } 629 630 void 631 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 632 struct sctp_nets *net) 633 { 634 int resend_cnt, tot_out, rep, tot_book_cnt; 635 struct sctp_nets *lnet; 636 struct 
sctp_tmit_chunk *chk; 637 638 sctp_audit_data[sctp_audit_indx][0] = 0xAA; 639 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from; 640 sctp_audit_indx++; 641 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 642 sctp_audit_indx = 0; 643 } 644 if (inp == NULL) { 645 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 646 sctp_audit_data[sctp_audit_indx][1] = 0x01; 647 sctp_audit_indx++; 648 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 649 sctp_audit_indx = 0; 650 } 651 return; 652 } 653 if (stcb == NULL) { 654 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 655 sctp_audit_data[sctp_audit_indx][1] = 0x02; 656 sctp_audit_indx++; 657 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 658 sctp_audit_indx = 0; 659 } 660 return; 661 } 662 sctp_audit_data[sctp_audit_indx][0] = 0xA1; 663 sctp_audit_data[sctp_audit_indx][1] = 664 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 665 sctp_audit_indx++; 666 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 667 sctp_audit_indx = 0; 668 } 669 rep = 0; 670 tot_book_cnt = 0; 671 resend_cnt = tot_out = 0; 672 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 673 if (chk->sent == SCTP_DATAGRAM_RESEND) { 674 resend_cnt++; 675 } else if (chk->sent < SCTP_DATAGRAM_RESEND) { 676 tot_out += chk->book_size; 677 tot_book_cnt++; 678 } 679 } 680 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) { 681 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 682 sctp_audit_data[sctp_audit_indx][1] = 0xA1; 683 sctp_audit_indx++; 684 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 685 sctp_audit_indx = 0; 686 } 687 SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n", 688 resend_cnt, stcb->asoc.sent_queue_retran_cnt); 689 rep = 1; 690 stcb->asoc.sent_queue_retran_cnt = resend_cnt; 691 sctp_audit_data[sctp_audit_indx][0] = 0xA2; 692 sctp_audit_data[sctp_audit_indx][1] = 693 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 694 sctp_audit_indx++; 695 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 696 sctp_audit_indx = 0; 697 } 698 } 699 if (tot_out != stcb->asoc.total_flight) { 700 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 701 sctp_audit_data[sctp_audit_indx][1] = 0xA2; 702 sctp_audit_indx++; 703 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 704 sctp_audit_indx = 0; 705 } 706 rep = 1; 707 SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out, 708 (int)stcb->asoc.total_flight); 709 stcb->asoc.total_flight = tot_out; 710 } 711 if (tot_book_cnt != stcb->asoc.total_flight_count) { 712 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 713 sctp_audit_data[sctp_audit_indx][1] = 0xA5; 714 sctp_audit_indx++; 715 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 716 sctp_audit_indx = 0; 717 } 718 rep = 1; 719 SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt); 720 721 stcb->asoc.total_flight_count = tot_book_cnt; 722 } 723 tot_out = 0; 724 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 725 tot_out += lnet->flight_size; 726 } 727 if (tot_out != stcb->asoc.total_flight) { 728 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 729 sctp_audit_data[sctp_audit_indx][1] = 0xA3; 730 sctp_audit_indx++; 731 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 732 sctp_audit_indx = 0; 733 } 734 rep = 1; 735 SCTP_PRINTF("real flight:%d net total was %d\n", 736 stcb->asoc.total_flight, tot_out); 737 /* now corrective action */ 738 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 739 tot_out = 0; 740 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 741 if ((chk->whoTo == lnet) && 742 (chk->sent < SCTP_DATAGRAM_RESEND)) { 743 tot_out += chk->book_size; 744 } 745 } 746 if (lnet->flight_size != tot_out) { 747 SCTP_PRINTF("net:%p flight was %d corrected to %d\n", 748 (void *)lnet, 
lnet->flight_size, 749 tot_out); 750 lnet->flight_size = tot_out; 751 } 752 } 753 } 754 if (rep) { 755 sctp_print_audit_report(); 756 } 757 } 758 759 void 760 sctp_audit_log(uint8_t ev, uint8_t fd) 761 { 762 763 sctp_audit_data[sctp_audit_indx][0] = ev; 764 sctp_audit_data[sctp_audit_indx][1] = fd; 765 sctp_audit_indx++; 766 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 767 sctp_audit_indx = 0; 768 } 769 } 770 771 #endif 772 773 /* 774 * The conversion from time to ticks and vice versa is done by rounding 775 * upwards. This way we can test in the code the time to be positive and 776 * know that this corresponds to a positive number of ticks. 777 */ 778 779 uint32_t 780 sctp_msecs_to_ticks(uint32_t msecs) 781 { 782 uint64_t temp; 783 uint32_t ticks; 784 785 if (hz == 1000) { 786 ticks = msecs; 787 } else { 788 temp = (((uint64_t)msecs * hz) + 999) / 1000; 789 if (temp > UINT32_MAX) { 790 ticks = UINT32_MAX; 791 } else { 792 ticks = (uint32_t)temp; 793 } 794 } 795 return (ticks); 796 } 797 798 uint32_t 799 sctp_ticks_to_msecs(uint32_t ticks) 800 { 801 uint64_t temp; 802 uint32_t msecs; 803 804 if (hz == 1000) { 805 msecs = ticks; 806 } else { 807 temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz; 808 if (temp > UINT32_MAX) { 809 msecs = UINT32_MAX; 810 } else { 811 msecs = (uint32_t)temp; 812 } 813 } 814 return (msecs); 815 } 816 817 uint32_t 818 sctp_secs_to_ticks(uint32_t secs) 819 { 820 uint64_t temp; 821 uint32_t ticks; 822 823 temp = (uint64_t)secs * hz; 824 if (temp > UINT32_MAX) { 825 ticks = UINT32_MAX; 826 } else { 827 ticks = (uint32_t)temp; 828 } 829 return (ticks); 830 } 831 832 uint32_t 833 sctp_ticks_to_secs(uint32_t ticks) 834 { 835 uint64_t temp; 836 uint32_t secs; 837 838 temp = ((uint64_t)ticks + (hz - 1)) / hz; 839 if (temp > UINT32_MAX) { 840 secs = UINT32_MAX; 841 } else { 842 secs = (uint32_t)temp; 843 } 844 return (secs); 845 } 846 847 /* 848 * sctp_stop_timers_for_shutdown() should be called 849 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT 850 * state to make sure that all timers are stopped. 
851 */ 852 void 853 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb) 854 { 855 struct sctp_inpcb *inp; 856 struct sctp_nets *net; 857 858 inp = stcb->sctp_ep; 859 860 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL, 861 SCTP_FROM_SCTPUTIL + SCTP_LOC_12); 862 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL, 863 SCTP_FROM_SCTPUTIL + SCTP_LOC_13); 864 sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL, 865 SCTP_FROM_SCTPUTIL + SCTP_LOC_14); 866 sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL, 867 SCTP_FROM_SCTPUTIL + SCTP_LOC_15); 868 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 869 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 870 SCTP_FROM_SCTPUTIL + SCTP_LOC_16); 871 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 872 SCTP_FROM_SCTPUTIL + SCTP_LOC_17); 873 } 874 } 875 876 void 877 sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer) 878 { 879 struct sctp_inpcb *inp; 880 struct sctp_nets *net; 881 882 inp = stcb->sctp_ep; 883 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL, 884 SCTP_FROM_SCTPUTIL + SCTP_LOC_18); 885 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL, 886 SCTP_FROM_SCTPUTIL + SCTP_LOC_19); 887 if (stop_assoc_kill_timer) { 888 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 889 SCTP_FROM_SCTPUTIL + SCTP_LOC_20); 890 } 891 sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL, 892 SCTP_FROM_SCTPUTIL + SCTP_LOC_21); 893 sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL, 894 SCTP_FROM_SCTPUTIL + SCTP_LOC_22); 895 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL, 896 SCTP_FROM_SCTPUTIL + SCTP_LOC_23); 897 /* Mobility adaptation */ 898 sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL, 899 SCTP_FROM_SCTPUTIL + SCTP_LOC_24); 900 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 901 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net, 902 SCTP_FROM_SCTPUTIL + SCTP_LOC_25); 903 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, 904 SCTP_FROM_SCTPUTIL + SCTP_LOC_26); 905 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net, 906 SCTP_FROM_SCTPUTIL + SCTP_LOC_27); 907 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net, 908 SCTP_FROM_SCTPUTIL + SCTP_LOC_28); 909 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net, 910 SCTP_FROM_SCTPUTIL + SCTP_LOC_29); 911 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 912 SCTP_FROM_SCTPUTIL + SCTP_LOC_30); 913 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 914 SCTP_FROM_SCTPUTIL + SCTP_LOC_31); 915 } 916 } 917 918 /* 919 * A list of sizes based on typical mtu's, used only if next hop size not 920 * returned. These values MUST be multiples of 4 and MUST be ordered. 921 */ 922 static uint32_t sctp_mtu_sizes[] = { 923 68, 924 296, 925 508, 926 512, 927 544, 928 576, 929 1004, 930 1492, 931 1500, 932 1536, 933 2000, 934 2048, 935 4352, 936 4464, 937 8168, 938 17912, 939 32000, 940 65532 941 }; 942 943 /* 944 * Return the largest MTU in sctp_mtu_sizes smaller than val. 945 * If val is smaller than the minimum, just return the largest 946 * multiple of 4 smaller or equal to val. 947 * Ensure that the result is a multiple of 4. 
948 */ 949 uint32_t 950 sctp_get_prev_mtu(uint32_t val) 951 { 952 uint32_t i; 953 954 val &= 0xfffffffc; 955 if (val <= sctp_mtu_sizes[0]) { 956 return (val); 957 } 958 for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) { 959 if (val <= sctp_mtu_sizes[i]) { 960 break; 961 } 962 } 963 KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0, 964 ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1)); 965 return (sctp_mtu_sizes[i - 1]); 966 } 967 968 /* 969 * Return the smallest MTU in sctp_mtu_sizes larger than val. 970 * If val is larger than the maximum, just return the largest multiple of 4 smaller 971 * or equal to val. 972 * Ensure that the result is a multiple of 4. 973 */ 974 uint32_t 975 sctp_get_next_mtu(uint32_t val) 976 { 977 /* select another MTU that is just bigger than this one */ 978 uint32_t i; 979 980 val &= 0xfffffffc; 981 for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) { 982 if (val < sctp_mtu_sizes[i]) { 983 KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0, 984 ("sctp_mtu_sizes[%u] not a multiple of 4", i)); 985 return (sctp_mtu_sizes[i]); 986 } 987 } 988 return (val); 989 } 990 991 void 992 sctp_fill_random_store(struct sctp_pcb *m) 993 { 994 /* 995 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and 996 * our counter. The result becomes our good random numbers and we 997 * then setup to give these out. Note that we do no locking to 998 * protect this. This is ok, since if competing folks call this we 999 * will get more gobbled gook in the random store which is what we 1000 * want. There is a danger that two guys will use the same random 1001 * numbers, but thats ok too since that is random as well :-> 1002 */ 1003 m->store_at = 0; 1004 (void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers, 1005 sizeof(m->random_numbers), (uint8_t *)&m->random_counter, 1006 sizeof(m->random_counter), (uint8_t *)m->random_store); 1007 m->random_counter++; 1008 } 1009 1010 uint32_t 1011 sctp_select_initial_TSN(struct sctp_pcb *inp) 1012 { 1013 /* 1014 * A true implementation should use random selection process to get 1015 * the initial stream sequence number, using RFC1750 as a good 1016 * guideline 1017 */ 1018 uint32_t x, *xp; 1019 uint8_t *p; 1020 int store_at, new_store; 1021 1022 if (inp->initial_sequence_debug != 0) { 1023 uint32_t ret; 1024 1025 ret = inp->initial_sequence_debug; 1026 inp->initial_sequence_debug++; 1027 return (ret); 1028 } 1029 retry: 1030 store_at = inp->store_at; 1031 new_store = store_at + sizeof(uint32_t); 1032 if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) { 1033 new_store = 0; 1034 } 1035 if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) { 1036 goto retry; 1037 } 1038 if (new_store == 0) { 1039 /* Refill the random store */ 1040 sctp_fill_random_store(inp); 1041 } 1042 p = &inp->random_store[store_at]; 1043 xp = (uint32_t *)p; 1044 x = *xp; 1045 return (x); 1046 } 1047 1048 uint32_t 1049 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check) 1050 { 1051 uint32_t x; 1052 struct timeval now; 1053 1054 if (check) { 1055 (void)SCTP_GETTIME_TIMEVAL(&now); 1056 } 1057 for (;;) { 1058 x = sctp_select_initial_TSN(&inp->sctp_ep); 1059 if (x == 0) { 1060 /* we never use 0 */ 1061 continue; 1062 } 1063 if (!check || sctp_is_vtag_good(x, lport, rport, &now)) { 1064 break; 1065 } 1066 } 1067 return (x); 1068 } 1069 1070 int32_t 1071 sctp_map_assoc_state(int kernel_state) 1072 { 1073 int32_t user_state; 1074 1075 if (kernel_state & SCTP_STATE_WAS_ABORTED) { 1076 user_state = SCTP_CLOSED; 1077 } 
else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) { 1078 user_state = SCTP_SHUTDOWN_PENDING; 1079 } else { 1080 switch (kernel_state & SCTP_STATE_MASK) { 1081 case SCTP_STATE_EMPTY: 1082 user_state = SCTP_CLOSED; 1083 break; 1084 case SCTP_STATE_INUSE: 1085 user_state = SCTP_CLOSED; 1086 break; 1087 case SCTP_STATE_COOKIE_WAIT: 1088 user_state = SCTP_COOKIE_WAIT; 1089 break; 1090 case SCTP_STATE_COOKIE_ECHOED: 1091 user_state = SCTP_COOKIE_ECHOED; 1092 break; 1093 case SCTP_STATE_OPEN: 1094 user_state = SCTP_ESTABLISHED; 1095 break; 1096 case SCTP_STATE_SHUTDOWN_SENT: 1097 user_state = SCTP_SHUTDOWN_SENT; 1098 break; 1099 case SCTP_STATE_SHUTDOWN_RECEIVED: 1100 user_state = SCTP_SHUTDOWN_RECEIVED; 1101 break; 1102 case SCTP_STATE_SHUTDOWN_ACK_SENT: 1103 user_state = SCTP_SHUTDOWN_ACK_SENT; 1104 break; 1105 default: 1106 user_state = SCTP_CLOSED; 1107 break; 1108 } 1109 } 1110 return (user_state); 1111 } 1112 1113 int 1114 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1115 uint32_t override_tag, uint32_t initial_tsn, uint32_t vrf_id, 1116 uint16_t o_strms) 1117 { 1118 struct sctp_association *asoc; 1119 1120 /* 1121 * Anything set to zero is taken care of by the allocation routine's 1122 * bzero 1123 */ 1124 1125 /* 1126 * Up front select what scoping to apply on addresses I tell my peer 1127 * Not sure what to do with these right now, we will need to come up 1128 * with a way to set them. We may need to pass them through from the 1129 * caller in the sctp_aloc_assoc() function. 1130 */ 1131 int i; 1132 #if defined(SCTP_DETAILED_STR_STATS) 1133 int j; 1134 #endif 1135 1136 asoc = &stcb->asoc; 1137 /* init all variables to a known value. */ 1138 SCTP_SET_STATE(stcb, SCTP_STATE_INUSE); 1139 asoc->max_burst = inp->sctp_ep.max_burst; 1140 asoc->fr_max_burst = inp->sctp_ep.fr_max_burst; 1141 asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 1142 asoc->cookie_life = inp->sctp_ep.def_cookie_life; 1143 asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off; 1144 asoc->ecn_supported = inp->ecn_supported; 1145 asoc->prsctp_supported = inp->prsctp_supported; 1146 asoc->auth_supported = inp->auth_supported; 1147 asoc->asconf_supported = inp->asconf_supported; 1148 asoc->reconfig_supported = inp->reconfig_supported; 1149 asoc->nrsack_supported = inp->nrsack_supported; 1150 asoc->pktdrop_supported = inp->pktdrop_supported; 1151 asoc->idata_supported = inp->idata_supported; 1152 asoc->rcv_edmid = inp->rcv_edmid; 1153 asoc->snd_edmid = SCTP_EDMID_NONE; 1154 asoc->sctp_cmt_pf = (uint8_t)0; 1155 asoc->sctp_frag_point = inp->sctp_frag_point; 1156 asoc->sctp_features = inp->sctp_features; 1157 asoc->default_dscp = inp->sctp_ep.default_dscp; 1158 asoc->max_cwnd = inp->max_cwnd; 1159 #ifdef INET6 1160 if (inp->sctp_ep.default_flowlabel) { 1161 asoc->default_flowlabel = inp->sctp_ep.default_flowlabel; 1162 } else { 1163 if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) { 1164 asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep); 1165 asoc->default_flowlabel &= 0x000fffff; 1166 asoc->default_flowlabel |= 0x80000000; 1167 } else { 1168 asoc->default_flowlabel = 0; 1169 } 1170 } 1171 #endif 1172 asoc->sb_send_resv = 0; 1173 if (override_tag) { 1174 asoc->my_vtag = override_tag; 1175 } else { 1176 asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1); 1177 } 1178 /* Get the nonce tags */ 1179 asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 1180 asoc->peer_vtag_nonce = 
sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 1181 asoc->vrf_id = vrf_id; 1182 1183 #ifdef SCTP_ASOCLOG_OF_TSNS 1184 asoc->tsn_in_at = 0; 1185 asoc->tsn_out_at = 0; 1186 asoc->tsn_in_wrapped = 0; 1187 asoc->tsn_out_wrapped = 0; 1188 asoc->cumack_log_at = 0; 1189 asoc->cumack_log_atsnt = 0; 1190 #endif 1191 #ifdef SCTP_FS_SPEC_LOG 1192 asoc->fs_index = 0; 1193 #endif 1194 asoc->refcnt = 0; 1195 asoc->assoc_up_sent = 0; 1196 if (override_tag) { 1197 asoc->init_seq_number = initial_tsn; 1198 } else { 1199 asoc->init_seq_number = sctp_select_initial_TSN(&inp->sctp_ep); 1200 } 1201 asoc->asconf_seq_out = asoc->init_seq_number; 1202 asoc->str_reset_seq_out = asoc->init_seq_number; 1203 asoc->sending_seq = asoc->init_seq_number; 1204 asoc->asconf_seq_out_acked = asoc->init_seq_number - 1; 1205 /* we are optimistic here */ 1206 asoc->peer_supports_nat = 0; 1207 asoc->sent_queue_retran_cnt = 0; 1208 1209 /* for CMT */ 1210 asoc->last_net_cmt_send_started = NULL; 1211 1212 asoc->last_acked_seq = asoc->init_seq_number - 1; 1213 asoc->advanced_peer_ack_point = asoc->init_seq_number - 1; 1214 asoc->asconf_seq_in = asoc->init_seq_number - 1; 1215 1216 /* here we are different, we hold the next one we expect */ 1217 asoc->str_reset_seq_in = asoc->init_seq_number; 1218 1219 asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max; 1220 asoc->initial_rto = inp->sctp_ep.initial_rto; 1221 1222 asoc->default_mtu = inp->sctp_ep.default_mtu; 1223 asoc->max_init_times = inp->sctp_ep.max_init_times; 1224 asoc->max_send_times = inp->sctp_ep.max_send_times; 1225 asoc->def_net_failure = inp->sctp_ep.def_net_failure; 1226 asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold; 1227 asoc->free_chunk_cnt = 0; 1228 1229 asoc->iam_blocking = 0; 1230 asoc->context = inp->sctp_context; 1231 asoc->local_strreset_support = inp->local_strreset_support; 1232 asoc->def_send = inp->def_send; 1233 asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 1234 asoc->sack_freq = inp->sctp_ep.sctp_sack_freq; 1235 asoc->pr_sctp_cnt = 0; 1236 asoc->total_output_queue_size = 0; 1237 1238 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1239 asoc->scope.ipv6_addr_legal = 1; 1240 if (SCTP_IPV6_V6ONLY(inp) == 0) { 1241 asoc->scope.ipv4_addr_legal = 1; 1242 } else { 1243 asoc->scope.ipv4_addr_legal = 0; 1244 } 1245 } else { 1246 asoc->scope.ipv6_addr_legal = 0; 1247 asoc->scope.ipv4_addr_legal = 1; 1248 } 1249 1250 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND); 1251 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket); 1252 1253 asoc->smallest_mtu = 0; 1254 asoc->minrto = inp->sctp_ep.sctp_minrto; 1255 asoc->maxrto = inp->sctp_ep.sctp_maxrto; 1256 1257 asoc->stream_locked_on = 0; 1258 asoc->ecn_echo_cnt_onq = 0; 1259 asoc->stream_locked = 0; 1260 1261 asoc->send_sack = 1; 1262 1263 LIST_INIT(&asoc->sctp_restricted_addrs); 1264 1265 TAILQ_INIT(&asoc->nets); 1266 TAILQ_INIT(&asoc->pending_reply_queue); 1267 TAILQ_INIT(&asoc->asconf_ack_sent); 1268 /* Setup to fill the hb random cache at first HB */ 1269 asoc->hb_random_idx = 4; 1270 1271 asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time; 1272 1273 stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module; 1274 stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module]; 1275 1276 stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module; 1277 stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module]; 1278 1279 
/* 1280 * Now the stream parameters, here we allocate space for all streams 1281 * that we request by default. 1282 */ 1283 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams = 1284 o_strms; 1285 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *, 1286 asoc->streamoutcnt * sizeof(struct sctp_stream_out), 1287 SCTP_M_STRMO); 1288 if (asoc->strmout == NULL) { 1289 /* big trouble no memory */ 1290 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1291 return (ENOMEM); 1292 } 1293 SCTP_TCB_LOCK(stcb); 1294 for (i = 0; i < asoc->streamoutcnt; i++) { 1295 /* 1296 * inbound side must be set to 0xffff, also NOTE when we get 1297 * the INIT-ACK back (for INIT sender) we MUST reduce the 1298 * count (streamoutcnt) but first check if we sent to any of 1299 * the upper streams that were dropped (if some were). Those 1300 * that were dropped must be notified to the upper layer as 1301 * failed to send. 1302 */ 1303 TAILQ_INIT(&asoc->strmout[i].outqueue); 1304 asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL); 1305 asoc->strmout[i].chunks_on_queues = 0; 1306 #if defined(SCTP_DETAILED_STR_STATS) 1307 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) { 1308 asoc->strmout[i].abandoned_sent[j] = 0; 1309 asoc->strmout[i].abandoned_unsent[j] = 0; 1310 } 1311 #else 1312 asoc->strmout[i].abandoned_sent[0] = 0; 1313 asoc->strmout[i].abandoned_unsent[0] = 0; 1314 #endif 1315 asoc->strmout[i].next_mid_ordered = 0; 1316 asoc->strmout[i].next_mid_unordered = 0; 1317 asoc->strmout[i].sid = i; 1318 asoc->strmout[i].last_msg_incomplete = 0; 1319 asoc->strmout[i].state = SCTP_STREAM_OPENING; 1320 } 1321 asoc->ss_functions.sctp_ss_init(stcb, asoc); 1322 SCTP_TCB_UNLOCK(stcb); 1323 1324 /* Now the mapping array */ 1325 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY; 1326 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size, 1327 SCTP_M_MAP); 1328 if (asoc->mapping_array == NULL) { 1329 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1330 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1331 return (ENOMEM); 1332 } 1333 memset(asoc->mapping_array, 0, asoc->mapping_array_size); 1334 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size, 1335 SCTP_M_MAP); 1336 if (asoc->nr_mapping_array == NULL) { 1337 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1338 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1339 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1340 return (ENOMEM); 1341 } 1342 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size); 1343 1344 /* Now the init of the other outqueues */ 1345 TAILQ_INIT(&asoc->free_chunks); 1346 TAILQ_INIT(&asoc->control_send_queue); 1347 TAILQ_INIT(&asoc->asconf_send_queue); 1348 TAILQ_INIT(&asoc->send_queue); 1349 TAILQ_INIT(&asoc->sent_queue); 1350 TAILQ_INIT(&asoc->resetHead); 1351 asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome; 1352 TAILQ_INIT(&asoc->asconf_queue); 1353 /* authentication fields */ 1354 asoc->authinfo.random = NULL; 1355 asoc->authinfo.active_keyid = 0; 1356 asoc->authinfo.assoc_key = NULL; 1357 asoc->authinfo.assoc_keyid = 0; 1358 asoc->authinfo.recv_key = NULL; 1359 asoc->authinfo.recv_keyid = 0; 1360 LIST_INIT(&asoc->shared_keys); 1361 asoc->marked_retrans = 0; 1362 asoc->port = inp->sctp_ep.port; 1363 asoc->timoinit = 0; 1364 asoc->timodata = 0; 1365 asoc->timosack = 0; 1366 asoc->timoshutdown = 0; 1367 asoc->timoheartbeat = 0; 1368 asoc->timocookie = 0; 1369 asoc->timoshutdownack = 0; 1370 
(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time); 1371 asoc->discontinuity_time = asoc->start_time; 1372 for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) { 1373 asoc->abandoned_unsent[i] = 0; 1374 asoc->abandoned_sent[i] = 0; 1375 } 1376 /* 1377 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and 1378 * freed later when the association is freed. 1379 */ 1380 return (0); 1381 } 1382 1383 void 1384 sctp_print_mapping_array(struct sctp_association *asoc) 1385 { 1386 unsigned int i, limit; 1387 1388 SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n", 1389 asoc->mapping_array_size, 1390 asoc->mapping_array_base_tsn, 1391 asoc->cumulative_tsn, 1392 asoc->highest_tsn_inside_map, 1393 asoc->highest_tsn_inside_nr_map); 1394 for (limit = asoc->mapping_array_size; limit > 1; limit--) { 1395 if (asoc->mapping_array[limit - 1] != 0) { 1396 break; 1397 } 1398 } 1399 SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit); 1400 for (i = 0; i < limit; i++) { 1401 SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n'); 1402 } 1403 if (limit % 16) 1404 SCTP_PRINTF("\n"); 1405 for (limit = asoc->mapping_array_size; limit > 1; limit--) { 1406 if (asoc->nr_mapping_array[limit - 1]) { 1407 break; 1408 } 1409 } 1410 SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit); 1411 for (i = 0; i < limit; i++) { 1412 SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n'); 1413 } 1414 if (limit % 16) 1415 SCTP_PRINTF("\n"); 1416 } 1417 1418 int 1419 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed) 1420 { 1421 /* mapping array needs to grow */ 1422 uint8_t *new_array1, *new_array2; 1423 uint32_t new_size; 1424 1425 new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR); 1426 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP); 1427 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP); 1428 if ((new_array1 == NULL) || (new_array2 == NULL)) { 1429 /* can't get more, forget it */ 1430 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size); 1431 if (new_array1) { 1432 SCTP_FREE(new_array1, SCTP_M_MAP); 1433 } 1434 if (new_array2) { 1435 SCTP_FREE(new_array2, SCTP_M_MAP); 1436 } 1437 return (-1); 1438 } 1439 memset(new_array1, 0, new_size); 1440 memset(new_array2, 0, new_size); 1441 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size); 1442 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size); 1443 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1444 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP); 1445 asoc->mapping_array = new_array1; 1446 asoc->nr_mapping_array = new_array2; 1447 asoc->mapping_array_size = new_size; 1448 return (0); 1449 } 1450 1451 static void 1452 sctp_iterator_work(struct sctp_iterator *it) 1453 { 1454 struct epoch_tracker et; 1455 struct sctp_inpcb *tinp; 1456 int iteration_count = 0; 1457 int inp_skip = 0; 1458 int first_in = 1; 1459 1460 NET_EPOCH_ENTER(et); 1461 SCTP_INP_INFO_RLOCK(); 1462 SCTP_ITERATOR_LOCK(); 1463 sctp_it_ctl.cur_it = it; 1464 if (it->inp) { 1465 SCTP_INP_RLOCK(it->inp); 1466 SCTP_INP_DECR_REF(it->inp); 1467 } 1468 if (it->inp == NULL) { 1469 /* iterator is complete */ 1470 done_with_iterator: 1471 sctp_it_ctl.cur_it = NULL; 1472 SCTP_ITERATOR_UNLOCK(); 1473 SCTP_INP_INFO_RUNLOCK(); 1474 if (it->function_atend != NULL) { 1475 (*it->function_atend) (it->pointer, it->val); 1476 } 1477 
SCTP_FREE(it, SCTP_M_ITER); 1478 NET_EPOCH_EXIT(et); 1479 return; 1480 } 1481 select_a_new_ep: 1482 if (first_in) { 1483 first_in = 0; 1484 } else { 1485 SCTP_INP_RLOCK(it->inp); 1486 } 1487 while (((it->pcb_flags) && 1488 ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) || 1489 ((it->pcb_features) && 1490 ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) { 1491 /* endpoint flags or features don't match, so keep looking */ 1492 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1493 SCTP_INP_RUNLOCK(it->inp); 1494 goto done_with_iterator; 1495 } 1496 tinp = it->inp; 1497 it->inp = LIST_NEXT(it->inp, sctp_list); 1498 it->stcb = NULL; 1499 SCTP_INP_RUNLOCK(tinp); 1500 if (it->inp == NULL) { 1501 goto done_with_iterator; 1502 } 1503 SCTP_INP_RLOCK(it->inp); 1504 } 1505 /* now go through each assoc which is in the desired state */ 1506 if (it->done_current_ep == 0) { 1507 if (it->function_inp != NULL) 1508 inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val); 1509 it->done_current_ep = 1; 1510 } 1511 if (it->stcb == NULL) { 1512 /* run the per instance function */ 1513 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list); 1514 } 1515 if ((inp_skip) || it->stcb == NULL) { 1516 if (it->function_inp_end != NULL) { 1517 inp_skip = (*it->function_inp_end) (it->inp, 1518 it->pointer, 1519 it->val); 1520 } 1521 SCTP_INP_RUNLOCK(it->inp); 1522 goto no_stcb; 1523 } 1524 while (it->stcb != NULL) { 1525 SCTP_TCB_LOCK(it->stcb); 1526 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) { 1527 /* not in the right state... keep looking */ 1528 SCTP_TCB_UNLOCK(it->stcb); 1529 goto next_assoc; 1530 } 1531 /* see if we have limited out the iterator loop */ 1532 iteration_count++; 1533 if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) { 1534 /* Pause to let others grab the lock */ 1535 atomic_add_int(&it->stcb->asoc.refcnt, 1); 1536 SCTP_TCB_UNLOCK(it->stcb); 1537 SCTP_INP_INCR_REF(it->inp); 1538 SCTP_INP_RUNLOCK(it->inp); 1539 SCTP_ITERATOR_UNLOCK(); 1540 SCTP_INP_INFO_RUNLOCK(); 1541 SCTP_INP_INFO_RLOCK(); 1542 SCTP_ITERATOR_LOCK(); 1543 if (sctp_it_ctl.iterator_flags) { 1544 /* We won't be staying here */ 1545 SCTP_INP_DECR_REF(it->inp); 1546 atomic_subtract_int(&it->stcb->asoc.refcnt, 1); 1547 if (sctp_it_ctl.iterator_flags & 1548 SCTP_ITERATOR_STOP_CUR_IT) { 1549 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT; 1550 goto done_with_iterator; 1551 } 1552 if (sctp_it_ctl.iterator_flags & 1553 SCTP_ITERATOR_STOP_CUR_INP) { 1554 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP; 1555 goto no_stcb; 1556 } 1557 /* If we reach here huh? 
*/ 1558 SCTP_PRINTF("Unknown it ctl flag %x\n", 1559 sctp_it_ctl.iterator_flags); 1560 sctp_it_ctl.iterator_flags = 0; 1561 } 1562 SCTP_INP_RLOCK(it->inp); 1563 SCTP_INP_DECR_REF(it->inp); 1564 SCTP_TCB_LOCK(it->stcb); 1565 atomic_subtract_int(&it->stcb->asoc.refcnt, 1); 1566 iteration_count = 0; 1567 } 1568 KASSERT(it->inp == it->stcb->sctp_ep, 1569 ("%s: stcb %p does not belong to inp %p, but inp %p", 1570 __func__, it->stcb, it->inp, it->stcb->sctp_ep)); 1571 SCTP_INP_RLOCK_ASSERT(it->inp); 1572 SCTP_TCB_LOCK_ASSERT(it->stcb); 1573 1574 /* run function on this one */ 1575 (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val); 1576 SCTP_INP_RLOCK_ASSERT(it->inp); 1577 SCTP_TCB_LOCK_ASSERT(it->stcb); 1578 1579 /* 1580 * we lie here, it really needs to have its own type but 1581 * first I must verify that this won't effect things :-0 1582 */ 1583 if (it->no_chunk_output == 0) { 1584 sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1585 SCTP_INP_RLOCK_ASSERT(it->inp); 1586 SCTP_TCB_LOCK_ASSERT(it->stcb); 1587 } 1588 1589 SCTP_TCB_UNLOCK(it->stcb); 1590 next_assoc: 1591 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist); 1592 if (it->stcb == NULL) { 1593 /* Run last function */ 1594 if (it->function_inp_end != NULL) { 1595 inp_skip = (*it->function_inp_end) (it->inp, 1596 it->pointer, 1597 it->val); 1598 } 1599 } 1600 } 1601 SCTP_INP_RUNLOCK(it->inp); 1602 no_stcb: 1603 /* done with all assocs on this endpoint, move on to next endpoint */ 1604 it->done_current_ep = 0; 1605 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1606 it->inp = NULL; 1607 } else { 1608 it->inp = LIST_NEXT(it->inp, sctp_list); 1609 } 1610 it->stcb = NULL; 1611 if (it->inp == NULL) { 1612 goto done_with_iterator; 1613 } 1614 goto select_a_new_ep; 1615 } 1616 1617 void 1618 sctp_iterator_worker(void) 1619 { 1620 struct sctp_iterator *it; 1621 1622 /* This function is called with the WQ lock in place */ 1623 sctp_it_ctl.iterator_running = 1; 1624 while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) { 1625 /* now lets work on this one */ 1626 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr); 1627 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1628 CURVNET_SET(it->vn); 1629 sctp_iterator_work(it); 1630 CURVNET_RESTORE(); 1631 SCTP_IPI_ITERATOR_WQ_LOCK(); 1632 /* sa_ignore FREED_MEMORY */ 1633 } 1634 sctp_it_ctl.iterator_running = 0; 1635 return; 1636 } 1637 1638 static void 1639 sctp_handle_addr_wq(void) 1640 { 1641 /* deal with the ADDR wq from the rtsock calls */ 1642 struct sctp_laddr *wi, *nwi; 1643 struct sctp_asconf_iterator *asc; 1644 1645 SCTP_MALLOC(asc, struct sctp_asconf_iterator *, 1646 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT); 1647 if (asc == NULL) { 1648 /* Try later, no memory */ 1649 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 1650 (struct sctp_inpcb *)NULL, 1651 (struct sctp_tcb *)NULL, 1652 (struct sctp_nets *)NULL); 1653 return; 1654 } 1655 LIST_INIT(&asc->list_of_work); 1656 asc->cnt = 0; 1657 1658 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) { 1659 LIST_REMOVE(wi, sctp_nxt_addr); 1660 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr); 1661 asc->cnt++; 1662 } 1663 1664 if (asc->cnt == 0) { 1665 SCTP_FREE(asc, SCTP_M_ASC_IT); 1666 } else { 1667 int ret; 1668 1669 ret = sctp_initiate_iterator(sctp_asconf_iterator_ep, 1670 sctp_asconf_iterator_stcb, 1671 NULL, /* No ep end for boundall */ 1672 SCTP_PCB_FLAGS_BOUNDALL, 1673 SCTP_PCB_ANY_FEATURES, 1674 SCTP_ASOC_ANY_STATE, 1675 (void *)asc, 0, 1676 sctp_asconf_iterator_end, 
NULL, 0); 1677 if (ret) { 1678 SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n"); 1679 /* 1680 * Freeing if we are stopping or put back on the 1681 * addr_wq. 1682 */ 1683 if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) { 1684 sctp_asconf_iterator_end(asc, 0); 1685 } else { 1686 LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) { 1687 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 1688 } 1689 SCTP_FREE(asc, SCTP_M_ASC_IT); 1690 } 1691 } 1692 } 1693 } 1694 1695 /*- 1696 * The following table shows which pointers for the inp, stcb, or net are 1697 * stored for each timer after it was started. 1698 * 1699 *|Name |Timer |inp |stcb|net | 1700 *|-----------------------------|-----------------------------|----|----|----| 1701 *|SCTP_TIMER_TYPE_SEND |net->rxt_timer |Yes |Yes |Yes | 1702 *|SCTP_TIMER_TYPE_INIT |net->rxt_timer |Yes |Yes |Yes | 1703 *|SCTP_TIMER_TYPE_RECV |stcb->asoc.dack_timer |Yes |Yes |No | 1704 *|SCTP_TIMER_TYPE_SHUTDOWN |net->rxt_timer |Yes |Yes |Yes | 1705 *|SCTP_TIMER_TYPE_HEARTBEAT |net->hb_timer |Yes |Yes |Yes | 1706 *|SCTP_TIMER_TYPE_COOKIE |net->rxt_timer |Yes |Yes |Yes | 1707 *|SCTP_TIMER_TYPE_NEWCOOKIE |inp->sctp_ep.signature_change|Yes |No |No | 1708 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer |Yes |Yes |Yes | 1709 *|SCTP_TIMER_TYPE_SHUTDOWNACK |net->rxt_timer |Yes |Yes |Yes | 1710 *|SCTP_TIMER_TYPE_ASCONF |stcb->asoc.asconf_timer |Yes |Yes |Yes | 1711 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer |Yes |Yes |No | 1712 *|SCTP_TIMER_TYPE_AUTOCLOSE |stcb->asoc.autoclose_timer |Yes |Yes |No | 1713 *|SCTP_TIMER_TYPE_STRRESET |stcb->asoc.strreset_timer |Yes |Yes |No | 1714 *|SCTP_TIMER_TYPE_INPKILL |inp->sctp_ep.signature_change|Yes |No |No | 1715 *|SCTP_TIMER_TYPE_ASOCKILL |stcb->asoc.strreset_timer |Yes |Yes |No | 1716 *|SCTP_TIMER_TYPE_ADDR_WQ |SCTP_BASE_INFO(addr_wq_timer)|No |No |No | 1717 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No | 1718 */ 1719 1720 void 1721 sctp_timeout_handler(void *t) 1722 { 1723 struct epoch_tracker et; 1724 struct timeval tv; 1725 struct sctp_inpcb *inp; 1726 struct sctp_tcb *stcb; 1727 struct sctp_nets *net; 1728 struct sctp_timer *tmr; 1729 struct mbuf *op_err; 1730 int type; 1731 int i, secret; 1732 bool did_output, released_asoc_reference; 1733 1734 /* 1735 * If inp, stcb or net are not NULL, then references to these were 1736 * added when the timer was started, and must be released before 1737 * this function returns. 1738 */ 1739 tmr = (struct sctp_timer *)t; 1740 inp = (struct sctp_inpcb *)tmr->ep; 1741 stcb = (struct sctp_tcb *)tmr->tcb; 1742 net = (struct sctp_nets *)tmr->net; 1743 CURVNET_SET((struct vnet *)tmr->vnet); 1744 NET_EPOCH_ENTER(et); 1745 released_asoc_reference = false; 1746 1747 #ifdef SCTP_AUDITING_ENABLED 1748 sctp_audit_log(0xF0, (uint8_t)tmr->type); 1749 sctp_auditing(3, inp, stcb, net); 1750 #endif 1751 1752 /* sanity checks... 
*/ 1753 KASSERT(tmr->self == NULL || tmr->self == tmr, 1754 ("sctp_timeout_handler: tmr->self corrupted")); 1755 KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type), 1756 ("sctp_timeout_handler: invalid timer type %d", tmr->type)); 1757 type = tmr->type; 1758 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 1759 ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p", 1760 type, stcb, stcb->sctp_ep)); 1761 tmr->stopped_from = 0xa001; 1762 if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) { 1763 SCTPDBG(SCTP_DEBUG_TIMER2, 1764 "Timer type %d handler exiting due to CLOSED association.\n", 1765 type); 1766 goto out_decr; 1767 } 1768 tmr->stopped_from = 0xa002; 1769 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type); 1770 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1771 SCTPDBG(SCTP_DEBUG_TIMER2, 1772 "Timer type %d handler exiting due to not being active.\n", 1773 type); 1774 goto out_decr; 1775 } 1776 1777 tmr->stopped_from = 0xa003; 1778 if (stcb) { 1779 SCTP_TCB_LOCK(stcb); 1780 /* 1781 * Release reference so that association can be freed if 1782 * necessary below. This is safe now that we have acquired 1783 * the lock. 1784 */ 1785 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1786 released_asoc_reference = true; 1787 if ((type != SCTP_TIMER_TYPE_ASOCKILL) && 1788 ((stcb->asoc.state == SCTP_STATE_EMPTY) || 1789 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) { 1790 SCTPDBG(SCTP_DEBUG_TIMER2, 1791 "Timer type %d handler exiting due to CLOSED association.\n", 1792 type); 1793 goto out; 1794 } 1795 } else if (inp != NULL) { 1796 SCTP_INP_WLOCK(inp); 1797 } else { 1798 SCTP_WQ_ADDR_LOCK(); 1799 } 1800 1801 /* Record in stopped_from which timeout occurred. */ 1802 tmr->stopped_from = type; 1803 /* mark as being serviced now */ 1804 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 1805 /* 1806 * Callout has been rescheduled. 1807 */ 1808 goto out; 1809 } 1810 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1811 /* 1812 * Not active, so no action. 1813 */ 1814 goto out; 1815 } 1816 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer); 1817 1818 /* call the handler for the appropriate timer type */ 1819 switch (type) { 1820 case SCTP_TIMER_TYPE_SEND: 1821 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1822 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1823 type, inp, stcb, net)); 1824 SCTP_STAT_INCR(sctps_timodata); 1825 stcb->asoc.timodata++; 1826 stcb->asoc.num_send_timers_up--; 1827 if (stcb->asoc.num_send_timers_up < 0) { 1828 stcb->asoc.num_send_timers_up = 0; 1829 } 1830 SCTP_TCB_LOCK_ASSERT(stcb); 1831 if (sctp_t3rxt_timer(inp, stcb, net)) { 1832 /* no need to unlock on tcb its gone */ 1833 1834 goto out_decr; 1835 } 1836 SCTP_TCB_LOCK_ASSERT(stcb); 1837 #ifdef SCTP_AUDITING_ENABLED 1838 sctp_auditing(4, inp, stcb, net); 1839 #endif 1840 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1841 did_output = true; 1842 if ((stcb->asoc.num_send_timers_up == 0) && 1843 (stcb->asoc.sent_queue_cnt > 0)) { 1844 struct sctp_tmit_chunk *chk; 1845 1846 /* 1847 * Safeguard. If there on some on the sent queue 1848 * somewhere but no timers running something is 1849 * wrong... so we start a timer on the first chunk 1850 * on the send queue on whatever net it is sent to. 
1851 */ 1852 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1853 if (chk->whoTo != NULL) { 1854 break; 1855 } 1856 } 1857 if (chk != NULL) { 1858 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1859 } 1860 } 1861 break; 1862 case SCTP_TIMER_TYPE_INIT: 1863 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1864 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1865 type, inp, stcb, net)); 1866 SCTP_STAT_INCR(sctps_timoinit); 1867 stcb->asoc.timoinit++; 1868 if (sctp_t1init_timer(inp, stcb, net)) { 1869 /* no need to unlock on tcb its gone */ 1870 goto out_decr; 1871 } 1872 did_output = false; 1873 break; 1874 case SCTP_TIMER_TYPE_RECV: 1875 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1876 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1877 type, inp, stcb, net)); 1878 SCTP_STAT_INCR(sctps_timosack); 1879 stcb->asoc.timosack++; 1880 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1881 #ifdef SCTP_AUDITING_ENABLED 1882 sctp_auditing(4, inp, stcb, NULL); 1883 #endif 1884 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1885 did_output = true; 1886 break; 1887 case SCTP_TIMER_TYPE_SHUTDOWN: 1888 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1889 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1890 type, inp, stcb, net)); 1891 SCTP_STAT_INCR(sctps_timoshutdown); 1892 stcb->asoc.timoshutdown++; 1893 if (sctp_shutdown_timer(inp, stcb, net)) { 1894 /* no need to unlock on tcb its gone */ 1895 goto out_decr; 1896 } 1897 #ifdef SCTP_AUDITING_ENABLED 1898 sctp_auditing(4, inp, stcb, net); 1899 #endif 1900 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1901 did_output = true; 1902 break; 1903 case SCTP_TIMER_TYPE_HEARTBEAT: 1904 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1905 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1906 type, inp, stcb, net)); 1907 SCTP_STAT_INCR(sctps_timoheartbeat); 1908 stcb->asoc.timoheartbeat++; 1909 if (sctp_heartbeat_timer(inp, stcb, net)) { 1910 /* no need to unlock on tcb its gone */ 1911 goto out_decr; 1912 } 1913 #ifdef SCTP_AUDITING_ENABLED 1914 sctp_auditing(4, inp, stcb, net); 1915 #endif 1916 if ((net->dest_state & SCTP_ADDR_NOHB) == 0) { 1917 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1918 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1919 did_output = true; 1920 } else { 1921 did_output = false; 1922 } 1923 break; 1924 case SCTP_TIMER_TYPE_COOKIE: 1925 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1926 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1927 type, inp, stcb, net)); 1928 SCTP_STAT_INCR(sctps_timocookie); 1929 stcb->asoc.timocookie++; 1930 if (sctp_cookie_timer(inp, stcb, net)) { 1931 /* no need to unlock on tcb its gone */ 1932 goto out_decr; 1933 } 1934 #ifdef SCTP_AUDITING_ENABLED 1935 sctp_auditing(4, inp, stcb, net); 1936 #endif 1937 /* 1938 * We consider T3 and Cookie timer pretty much the same with 1939 * respect to where from in chunk_output. 
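 * (This is why SCTP_OUTPUT_FROM_T3 is passed below for the cookie case as
 * well.)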
1940 */ 1941 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1942 did_output = true; 1943 break; 1944 case SCTP_TIMER_TYPE_NEWCOOKIE: 1945 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1946 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1947 type, inp, stcb, net)); 1948 SCTP_STAT_INCR(sctps_timosecret); 1949 (void)SCTP_GETTIME_TIMEVAL(&tv); 1950 inp->sctp_ep.time_of_secret_change = (unsigned int)tv.tv_sec; 1951 inp->sctp_ep.last_secret_number = 1952 inp->sctp_ep.current_secret_number; 1953 inp->sctp_ep.current_secret_number++; 1954 if (inp->sctp_ep.current_secret_number >= 1955 SCTP_HOW_MANY_SECRETS) { 1956 inp->sctp_ep.current_secret_number = 0; 1957 } 1958 secret = (int)inp->sctp_ep.current_secret_number; 1959 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1960 inp->sctp_ep.secret_key[secret][i] = 1961 sctp_select_initial_TSN(&inp->sctp_ep); 1962 } 1963 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1964 did_output = false; 1965 break; 1966 case SCTP_TIMER_TYPE_PATHMTURAISE: 1967 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1968 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1969 type, inp, stcb, net)); 1970 SCTP_STAT_INCR(sctps_timopathmtu); 1971 sctp_pathmtu_timer(inp, stcb, net); 1972 did_output = false; 1973 break; 1974 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1975 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1976 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1977 type, inp, stcb, net)); 1978 if (sctp_shutdownack_timer(inp, stcb, net)) { 1979 /* no need to unlock on tcb its gone */ 1980 goto out_decr; 1981 } 1982 SCTP_STAT_INCR(sctps_timoshutdownack); 1983 stcb->asoc.timoshutdownack++; 1984 #ifdef SCTP_AUDITING_ENABLED 1985 sctp_auditing(4, inp, stcb, net); 1986 #endif 1987 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1988 did_output = true; 1989 break; 1990 case SCTP_TIMER_TYPE_ASCONF: 1991 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1992 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1993 type, inp, stcb, net)); 1994 SCTP_STAT_INCR(sctps_timoasconf); 1995 if (sctp_asconf_timer(inp, stcb, net)) { 1996 /* no need to unlock on tcb its gone */ 1997 goto out_decr; 1998 } 1999 #ifdef SCTP_AUDITING_ENABLED 2000 sctp_auditing(4, inp, stcb, net); 2001 #endif 2002 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 2003 did_output = true; 2004 break; 2005 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2006 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2007 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2008 type, inp, stcb, net)); 2009 SCTP_STAT_INCR(sctps_timoshutdownguard); 2010 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 2011 "Shutdown guard timer expired"); 2012 sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED); 2013 /* no need to unlock on tcb its gone */ 2014 goto out_decr; 2015 case SCTP_TIMER_TYPE_AUTOCLOSE: 2016 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2017 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2018 type, inp, stcb, net)); 2019 SCTP_STAT_INCR(sctps_timoautoclose); 2020 sctp_autoclose_timer(inp, stcb); 2021 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2022 did_output = true; 2023 break; 2024 case SCTP_TIMER_TYPE_STRRESET: 2025 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2026 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2027 type, inp, stcb, net)); 2028 SCTP_STAT_INCR(sctps_timostrmrst); 2029 if 
(sctp_strreset_timer(inp, stcb)) { 2030 /* no need to unlock on tcb its gone */ 2031 goto out_decr; 2032 } 2033 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2034 did_output = true; 2035 break; 2036 case SCTP_TIMER_TYPE_INPKILL: 2037 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2038 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2039 type, inp, stcb, net)); 2040 SCTP_STAT_INCR(sctps_timoinpkill); 2041 /* 2042 * special case, take away our increment since WE are the 2043 * killer 2044 */ 2045 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2046 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2047 SCTP_INP_DECR_REF(inp); 2048 SCTP_INP_WUNLOCK(inp); 2049 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2050 SCTP_CALLED_FROM_INPKILL_TIMER); 2051 inp = NULL; 2052 goto out_decr; 2053 case SCTP_TIMER_TYPE_ASOCKILL: 2054 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2055 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2056 type, inp, stcb, net)); 2057 SCTP_STAT_INCR(sctps_timoassockill); 2058 /* Can we free it yet? */ 2059 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2060 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2061 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2062 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2063 /* 2064 * free asoc, always unlocks (or destroy's) so prevent 2065 * duplicate unlock or unlock of a free mtx :-0 2066 */ 2067 stcb = NULL; 2068 goto out_decr; 2069 case SCTP_TIMER_TYPE_ADDR_WQ: 2070 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2071 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2072 type, inp, stcb, net)); 2073 sctp_handle_addr_wq(); 2074 did_output = true; 2075 break; 2076 case SCTP_TIMER_TYPE_PRIM_DELETED: 2077 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2078 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2079 type, inp, stcb, net)); 2080 SCTP_STAT_INCR(sctps_timodelprim); 2081 sctp_delete_prim_timer(inp, stcb); 2082 did_output = false; 2083 break; 2084 default: 2085 #ifdef INVARIANTS 2086 panic("Unknown timer type %d", type); 2087 #else 2088 goto out; 2089 #endif 2090 } 2091 #ifdef SCTP_AUDITING_ENABLED 2092 sctp_audit_log(0xF1, (uint8_t)type); 2093 if (inp != NULL) 2094 sctp_auditing(5, inp, stcb, net); 2095 #endif 2096 if (did_output && (stcb != NULL)) { 2097 /* 2098 * Now we need to clean up the control chunk chain if an 2099 * ECNE is on it. It must be marked as UNSENT again so next 2100 * call will continue to send it until such time that we get 2101 * a CWR, to remove it. It is, however, less likely that we 2102 * will find a ecn echo on the chain though. 2103 */ 2104 sctp_fix_ecn_echo(&stcb->asoc); 2105 } 2106 out: 2107 if (stcb != NULL) { 2108 SCTP_TCB_UNLOCK(stcb); 2109 } else if (inp != NULL) { 2110 SCTP_INP_WUNLOCK(inp); 2111 } else { 2112 SCTP_WQ_ADDR_UNLOCK(); 2113 } 2114 2115 out_decr: 2116 /* These reference counts were incremented in sctp_timer_start(). */ 2117 if (inp != NULL) { 2118 SCTP_INP_DECR_REF(inp); 2119 } 2120 if ((stcb != NULL) && !released_asoc_reference) { 2121 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2122 } 2123 if (net != NULL) { 2124 sctp_free_remote_addr(net); 2125 } 2126 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2127 CURVNET_RESTORE(); 2128 NET_EPOCH_EXIT(et); 2129 } 2130 2131 /*- 2132 * The following table shows which parameters must be provided 2133 * when calling sctp_timer_start(). For parameters not being 2134 * provided, NULL must be used. 
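 * For example (illustrative only; real call sites may differ slightly), the
 * delayed-ACK timer is started without a net,
 *   sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL);
 * while per-destination timers also pass the net,
 *   sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);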
2135 * 2136 * |Name |inp |stcb|net | 2137 * |-----------------------------|----|----|----| 2138 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2139 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2140 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2141 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2142 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2143 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2144 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2145 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2146 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2147 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2148 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2149 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2150 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2151 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2152 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2153 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2154 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2155 * 2156 */ 2157 2158 void 2159 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2160 struct sctp_nets *net) 2161 { 2162 struct sctp_timer *tmr; 2163 uint32_t to_ticks; 2164 uint32_t rndval, jitter; 2165 2166 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2167 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2168 t_type, stcb, stcb->sctp_ep)); 2169 tmr = NULL; 2170 if (stcb != NULL) { 2171 SCTP_TCB_LOCK_ASSERT(stcb); 2172 } else if (inp != NULL) { 2173 SCTP_INP_WLOCK_ASSERT(inp); 2174 } else { 2175 SCTP_WQ_ADDR_LOCK_ASSERT(); 2176 } 2177 if (stcb != NULL) { 2178 /* 2179 * Don't restart timer on association that's about to be 2180 * killed. 2181 */ 2182 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2183 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2184 SCTPDBG(SCTP_DEBUG_TIMER2, 2185 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2186 t_type, inp, stcb, net); 2187 return; 2188 } 2189 /* Don't restart timer on net that's been removed. */ 2190 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2191 SCTPDBG(SCTP_DEBUG_TIMER2, 2192 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2193 t_type, inp, stcb, net); 2194 return; 2195 } 2196 } 2197 switch (t_type) { 2198 case SCTP_TIMER_TYPE_SEND: 2199 /* Here we use the RTO timer. */ 2200 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2201 #ifdef INVARIANTS 2202 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2203 t_type, inp, stcb, net); 2204 #else 2205 return; 2206 #endif 2207 } 2208 tmr = &net->rxt_timer; 2209 if (net->RTO == 0) { 2210 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2211 } else { 2212 to_ticks = sctp_msecs_to_ticks(net->RTO); 2213 } 2214 break; 2215 case SCTP_TIMER_TYPE_INIT: 2216 /* 2217 * Here we use the INIT timer default usually about 1 2218 * second. 2219 */ 2220 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2221 #ifdef INVARIANTS 2222 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2223 t_type, inp, stcb, net); 2224 #else 2225 return; 2226 #endif 2227 } 2228 tmr = &net->rxt_timer; 2229 if (net->RTO == 0) { 2230 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2231 } else { 2232 to_ticks = sctp_msecs_to_ticks(net->RTO); 2233 } 2234 break; 2235 case SCTP_TIMER_TYPE_RECV: 2236 /* 2237 * Here we use the Delayed-Ack timer value from the inp, 2238 * usually about 200ms. 
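 * (Illustrative arithmetic, assuming sctp_msecs_to_ticks() is roughly
 * msecs * hz / 1000: with hz = 1000, a 200 ms delayed_ack maps to about
 * 200 callout ticks.)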
2239 */ 2240 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2241 #ifdef INVARIANTS 2242 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2243 t_type, inp, stcb, net); 2244 #else 2245 return; 2246 #endif 2247 } 2248 tmr = &stcb->asoc.dack_timer; 2249 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2250 break; 2251 case SCTP_TIMER_TYPE_SHUTDOWN: 2252 /* Here we use the RTO of the destination. */ 2253 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2254 #ifdef INVARIANTS 2255 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2256 t_type, inp, stcb, net); 2257 #else 2258 return; 2259 #endif 2260 } 2261 tmr = &net->rxt_timer; 2262 if (net->RTO == 0) { 2263 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2264 } else { 2265 to_ticks = sctp_msecs_to_ticks(net->RTO); 2266 } 2267 break; 2268 case SCTP_TIMER_TYPE_HEARTBEAT: 2269 /* 2270 * The net is used here so that we can add in the RTO. Even 2271 * though we use a different timer. We also add the HB timer 2272 * PLUS a random jitter. 2273 */ 2274 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2275 #ifdef INVARIANTS 2276 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2277 t_type, inp, stcb, net); 2278 #else 2279 return; 2280 #endif 2281 } 2282 if ((net->dest_state & SCTP_ADDR_NOHB) && 2283 ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) { 2284 SCTPDBG(SCTP_DEBUG_TIMER2, 2285 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2286 t_type, inp, stcb, net); 2287 return; 2288 } 2289 tmr = &net->hb_timer; 2290 if (net->RTO == 0) { 2291 to_ticks = stcb->asoc.initial_rto; 2292 } else { 2293 to_ticks = net->RTO; 2294 } 2295 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2296 jitter = rndval % to_ticks; 2297 if (to_ticks > 1) { 2298 to_ticks >>= 1; 2299 } 2300 if (jitter < (UINT32_MAX - to_ticks)) { 2301 to_ticks += jitter; 2302 } else { 2303 to_ticks = UINT32_MAX; 2304 } 2305 if (!((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2306 (net->dest_state & SCTP_ADDR_REACHABLE)) && 2307 ((net->dest_state & SCTP_ADDR_PF) == 0)) { 2308 if (net->heart_beat_delay < (UINT32_MAX - to_ticks)) { 2309 to_ticks += net->heart_beat_delay; 2310 } else { 2311 to_ticks = UINT32_MAX; 2312 } 2313 } 2314 /* 2315 * Now we must convert the to_ticks that are now in ms to 2316 * ticks. 2317 */ 2318 to_ticks = sctp_msecs_to_ticks(to_ticks); 2319 break; 2320 case SCTP_TIMER_TYPE_COOKIE: 2321 /* 2322 * Here we can use the RTO timer from the network since one 2323 * RTT was complete. If a retransmission happened then we 2324 * will be using the RTO initial value. 2325 */ 2326 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2327 #ifdef INVARIANTS 2328 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2329 t_type, inp, stcb, net); 2330 #else 2331 return; 2332 #endif 2333 } 2334 tmr = &net->rxt_timer; 2335 if (net->RTO == 0) { 2336 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2337 } else { 2338 to_ticks = sctp_msecs_to_ticks(net->RTO); 2339 } 2340 break; 2341 case SCTP_TIMER_TYPE_NEWCOOKIE: 2342 /* 2343 * Nothing needed but the endpoint here usually about 60 2344 * minutes. 
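 * (When it fires, the handler above rotates the endpoint's secret keys used
 * to sign cookies and then restarts this timer.)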
2345 */ 2346 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2347 #ifdef INVARIANTS 2348 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2349 t_type, inp, stcb, net); 2350 #else 2351 return; 2352 #endif 2353 } 2354 tmr = &inp->sctp_ep.signature_change; 2355 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2356 break; 2357 case SCTP_TIMER_TYPE_PATHMTURAISE: 2358 /* 2359 * Here we use the value found in the EP for PMTUD, usually 2360 * about 10 minutes. 2361 */ 2362 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2363 #ifdef INVARIANTS 2364 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2365 t_type, inp, stcb, net); 2366 #else 2367 return; 2368 #endif 2369 } 2370 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2371 SCTPDBG(SCTP_DEBUG_TIMER2, 2372 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2373 t_type, inp, stcb, net); 2374 return; 2375 } 2376 tmr = &net->pmtu_timer; 2377 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2378 break; 2379 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2380 /* Here we use the RTO of the destination. */ 2381 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2382 #ifdef INVARIANTS 2383 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2384 t_type, inp, stcb, net); 2385 #else 2386 return; 2387 #endif 2388 } 2389 tmr = &net->rxt_timer; 2390 if (net->RTO == 0) { 2391 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2392 } else { 2393 to_ticks = sctp_msecs_to_ticks(net->RTO); 2394 } 2395 break; 2396 case SCTP_TIMER_TYPE_ASCONF: 2397 /* 2398 * Here the timer comes from the stcb but its value is from 2399 * the net's RTO. 2400 */ 2401 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2402 #ifdef INVARIANTS 2403 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2404 t_type, inp, stcb, net); 2405 #else 2406 return; 2407 #endif 2408 } 2409 tmr = &stcb->asoc.asconf_timer; 2410 if (net->RTO == 0) { 2411 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2412 } else { 2413 to_ticks = sctp_msecs_to_ticks(net->RTO); 2414 } 2415 break; 2416 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2417 /* 2418 * Here we use the endpoints shutdown guard timer usually 2419 * about 3 minutes. 2420 */ 2421 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2422 #ifdef INVARIANTS 2423 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2424 t_type, inp, stcb, net); 2425 #else 2426 return; 2427 #endif 2428 } 2429 tmr = &stcb->asoc.shut_guard_timer; 2430 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2431 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2432 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2433 } else { 2434 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2435 } 2436 } else { 2437 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2438 } 2439 break; 2440 case SCTP_TIMER_TYPE_AUTOCLOSE: 2441 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2442 #ifdef INVARIANTS 2443 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2444 t_type, inp, stcb, net); 2445 #else 2446 return; 2447 #endif 2448 } 2449 tmr = &stcb->asoc.autoclose_timer; 2450 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2451 break; 2452 case SCTP_TIMER_TYPE_STRRESET: 2453 /* 2454 * Here the timer comes from the stcb but its value is from 2455 * the net's RTO. 
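 * (The net argument is only used to pick the timeout; tmr->net is cleared
 * below, so no reference is taken on it. The same strreset_timer structure
 * is also reused by the ASOCKILL case.)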
2456 */ 2457 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2458 #ifdef INVARIANTS 2459 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2460 t_type, inp, stcb, net); 2461 #else 2462 return; 2463 #endif 2464 } 2465 tmr = &stcb->asoc.strreset_timer; 2466 if (net->RTO == 0) { 2467 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2468 } else { 2469 to_ticks = sctp_msecs_to_ticks(net->RTO); 2470 } 2471 break; 2472 case SCTP_TIMER_TYPE_INPKILL: 2473 /* 2474 * The inp is setup to die. We re-use the signature_change 2475 * timer since that has stopped and we are in the GONE 2476 * state. 2477 */ 2478 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2479 #ifdef INVARIANTS 2480 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2481 t_type, inp, stcb, net); 2482 #else 2483 return; 2484 #endif 2485 } 2486 tmr = &inp->sctp_ep.signature_change; 2487 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2488 break; 2489 case SCTP_TIMER_TYPE_ASOCKILL: 2490 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2491 #ifdef INVARIANTS 2492 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2493 t_type, inp, stcb, net); 2494 #else 2495 return; 2496 #endif 2497 } 2498 tmr = &stcb->asoc.strreset_timer; 2499 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2500 break; 2501 case SCTP_TIMER_TYPE_ADDR_WQ: 2502 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2503 #ifdef INVARIANTS 2504 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2505 t_type, inp, stcb, net); 2506 #else 2507 return; 2508 #endif 2509 } 2510 /* Only 1 tick away :-) */ 2511 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2512 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2513 break; 2514 case SCTP_TIMER_TYPE_PRIM_DELETED: 2515 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2516 #ifdef INVARIANTS 2517 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2518 t_type, inp, stcb, net); 2519 #else 2520 return; 2521 #endif 2522 } 2523 tmr = &stcb->asoc.delete_prim_timer; 2524 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2525 break; 2526 default: 2527 #ifdef INVARIANTS 2528 panic("Unknown timer type %d", t_type); 2529 #else 2530 return; 2531 #endif 2532 } 2533 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2534 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2535 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2536 /* 2537 * We do NOT allow you to have it already running. If it is, 2538 * we leave the current one up unchanged. 2539 */ 2540 SCTPDBG(SCTP_DEBUG_TIMER2, 2541 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2542 t_type, inp, stcb, net); 2543 return; 2544 } 2545 /* At this point we can proceed. */ 2546 if (t_type == SCTP_TIMER_TYPE_SEND) { 2547 stcb->asoc.num_send_timers_up++; 2548 } 2549 tmr->stopped_from = 0; 2550 tmr->type = t_type; 2551 tmr->ep = (void *)inp; 2552 tmr->tcb = (void *)stcb; 2553 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2554 tmr->net = NULL; 2555 } else { 2556 tmr->net = (void *)net; 2557 } 2558 tmr->self = (void *)tmr; 2559 tmr->vnet = (void *)curvnet; 2560 tmr->ticks = sctp_get_tick_count(); 2561 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2562 SCTPDBG(SCTP_DEBUG_TIMER2, 2563 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2564 t_type, to_ticks, inp, stcb, net); 2565 /* 2566 * If this is a newly scheduled callout, as opposed to a 2567 * rescheduled one, increment relevant reference counts. 
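 * The references taken here are the ones released later by
 * sctp_timeout_handler() and sctp_timer_stop().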
2568 */ 2569 if (tmr->ep != NULL) { 2570 SCTP_INP_INCR_REF(inp); 2571 } 2572 if (tmr->tcb != NULL) { 2573 atomic_add_int(&stcb->asoc.refcnt, 1); 2574 } 2575 if (tmr->net != NULL) { 2576 atomic_add_int(&net->ref_count, 1); 2577 } 2578 } else { 2579 /* 2580 * This should not happen, since we checked for pending 2581 * above. 2582 */ 2583 SCTPDBG(SCTP_DEBUG_TIMER2, 2584 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2585 t_type, to_ticks, inp, stcb, net); 2586 } 2587 return; 2588 } 2589 2590 /*- 2591 * The following table shows which parameters must be provided 2592 * when calling sctp_timer_stop(). For parameters not being 2593 * provided, NULL must be used. 2594 * 2595 * |Name |inp |stcb|net | 2596 * |-----------------------------|----|----|----| 2597 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2598 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2599 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2600 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2601 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2602 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2603 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2604 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2605 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2606 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2607 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2608 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2609 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2610 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2611 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2612 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2613 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2614 * 2615 */ 2616 2617 void 2618 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2619 struct sctp_nets *net, uint32_t from) 2620 { 2621 struct sctp_timer *tmr; 2622 2623 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2624 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2625 t_type, stcb, stcb->sctp_ep)); 2626 if (stcb != NULL) { 2627 SCTP_TCB_LOCK_ASSERT(stcb); 2628 } else if (inp != NULL) { 2629 SCTP_INP_WLOCK_ASSERT(inp); 2630 } else { 2631 SCTP_WQ_ADDR_LOCK_ASSERT(); 2632 } 2633 tmr = NULL; 2634 switch (t_type) { 2635 case SCTP_TIMER_TYPE_SEND: 2636 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2637 #ifdef INVARIANTS 2638 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2639 t_type, inp, stcb, net); 2640 #else 2641 return; 2642 #endif 2643 } 2644 tmr = &net->rxt_timer; 2645 break; 2646 case SCTP_TIMER_TYPE_INIT: 2647 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2648 #ifdef INVARIANTS 2649 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2650 t_type, inp, stcb, net); 2651 #else 2652 return; 2653 #endif 2654 } 2655 tmr = &net->rxt_timer; 2656 break; 2657 case SCTP_TIMER_TYPE_RECV: 2658 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2659 #ifdef INVARIANTS 2660 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2661 t_type, inp, stcb, net); 2662 #else 2663 return; 2664 #endif 2665 } 2666 tmr = &stcb->asoc.dack_timer; 2667 break; 2668 case SCTP_TIMER_TYPE_SHUTDOWN: 2669 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2670 #ifdef INVARIANTS 2671 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2672 t_type, inp, stcb, net); 2673 #else 2674 return; 2675 #endif 2676 } 2677 tmr = &net->rxt_timer; 2678 break; 2679 case SCTP_TIMER_TYPE_HEARTBEAT: 2680 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2681 #ifdef INVARIANTS 2682 panic("sctp_timer_stop of type %d: inp = 
%p, stcb = %p, net = %p", 2683 t_type, inp, stcb, net); 2684 #else 2685 return; 2686 #endif 2687 } 2688 tmr = &net->hb_timer; 2689 break; 2690 case SCTP_TIMER_TYPE_COOKIE: 2691 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2692 #ifdef INVARIANTS 2693 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2694 t_type, inp, stcb, net); 2695 #else 2696 return; 2697 #endif 2698 } 2699 tmr = &net->rxt_timer; 2700 break; 2701 case SCTP_TIMER_TYPE_NEWCOOKIE: 2702 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2703 #ifdef INVARIANTS 2704 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2705 t_type, inp, stcb, net); 2706 #else 2707 return; 2708 #endif 2709 } 2710 tmr = &inp->sctp_ep.signature_change; 2711 break; 2712 case SCTP_TIMER_TYPE_PATHMTURAISE: 2713 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2714 #ifdef INVARIANTS 2715 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2716 t_type, inp, stcb, net); 2717 #else 2718 return; 2719 #endif 2720 } 2721 tmr = &net->pmtu_timer; 2722 break; 2723 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2724 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2725 #ifdef INVARIANTS 2726 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2727 t_type, inp, stcb, net); 2728 #else 2729 return; 2730 #endif 2731 } 2732 tmr = &net->rxt_timer; 2733 break; 2734 case SCTP_TIMER_TYPE_ASCONF: 2735 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2736 #ifdef INVARIANTS 2737 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2738 t_type, inp, stcb, net); 2739 #else 2740 return; 2741 #endif 2742 } 2743 tmr = &stcb->asoc.asconf_timer; 2744 break; 2745 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2746 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2747 #ifdef INVARIANTS 2748 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2749 t_type, inp, stcb, net); 2750 #else 2751 return; 2752 #endif 2753 } 2754 tmr = &stcb->asoc.shut_guard_timer; 2755 break; 2756 case SCTP_TIMER_TYPE_AUTOCLOSE: 2757 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2758 #ifdef INVARIANTS 2759 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2760 t_type, inp, stcb, net); 2761 #else 2762 return; 2763 #endif 2764 } 2765 tmr = &stcb->asoc.autoclose_timer; 2766 break; 2767 case SCTP_TIMER_TYPE_STRRESET: 2768 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2769 #ifdef INVARIANTS 2770 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2771 t_type, inp, stcb, net); 2772 #else 2773 return; 2774 #endif 2775 } 2776 tmr = &stcb->asoc.strreset_timer; 2777 break; 2778 case SCTP_TIMER_TYPE_INPKILL: 2779 /* 2780 * The inp is setup to die. We re-use the signature_change 2781 * timer since that has stopped and we are in the GONE 2782 * state. 
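 * (sctp_timer_start() arms this timer with SCTP_INP_KILL_TIMEOUT.)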
2783 */ 2784 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2785 #ifdef INVARIANTS 2786 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2787 t_type, inp, stcb, net); 2788 #else 2789 return; 2790 #endif 2791 } 2792 tmr = &inp->sctp_ep.signature_change; 2793 break; 2794 case SCTP_TIMER_TYPE_ASOCKILL: 2795 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2796 #ifdef INVARIANTS 2797 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2798 t_type, inp, stcb, net); 2799 #else 2800 return; 2801 #endif 2802 } 2803 tmr = &stcb->asoc.strreset_timer; 2804 break; 2805 case SCTP_TIMER_TYPE_ADDR_WQ: 2806 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2807 #ifdef INVARIANTS 2808 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2809 t_type, inp, stcb, net); 2810 #else 2811 return; 2812 #endif 2813 } 2814 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2815 break; 2816 case SCTP_TIMER_TYPE_PRIM_DELETED: 2817 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2818 #ifdef INVARIANTS 2819 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2820 t_type, inp, stcb, net); 2821 #else 2822 return; 2823 #endif 2824 } 2825 tmr = &stcb->asoc.delete_prim_timer; 2826 break; 2827 default: 2828 #ifdef INVARIANTS 2829 panic("Unknown timer type %d", t_type); 2830 #else 2831 return; 2832 #endif 2833 } 2834 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2835 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2836 (tmr->type != t_type)) { 2837 /* 2838 * Ok we have a timer that is under joint use. Cookie timer 2839 * per chance with the SEND timer. We therefore are NOT 2840 * running the timer that the caller wants stopped. So just 2841 * return. 2842 */ 2843 SCTPDBG(SCTP_DEBUG_TIMER2, 2844 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2845 t_type, inp, stcb, net); 2846 return; 2847 } 2848 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2849 stcb->asoc.num_send_timers_up--; 2850 if (stcb->asoc.num_send_timers_up < 0) { 2851 stcb->asoc.num_send_timers_up = 0; 2852 } 2853 } 2854 tmr->self = NULL; 2855 tmr->stopped_from = from; 2856 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2857 KASSERT(tmr->ep == inp, 2858 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2859 t_type, inp, tmr->ep)); 2860 KASSERT(tmr->tcb == stcb, 2861 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2862 t_type, stcb, tmr->tcb)); 2863 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2864 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2865 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2866 t_type, net, tmr->net)); 2867 SCTPDBG(SCTP_DEBUG_TIMER2, 2868 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2869 t_type, inp, stcb, net); 2870 /* 2871 * If the timer was actually stopped, decrement reference 2872 * counts that were incremented in sctp_timer_start(). 2873 */ 2874 if (tmr->ep != NULL) { 2875 tmr->ep = NULL; 2876 SCTP_INP_DECR_REF(inp); 2877 } 2878 if (tmr->tcb != NULL) { 2879 tmr->tcb = NULL; 2880 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2881 } 2882 if (tmr->net != NULL) { 2883 struct sctp_nets *tmr_net; 2884 2885 /* 2886 * Can't use net, since it doesn't work for 2887 * SCTP_TIMER_TYPE_ASCONF. 
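 * The caller passes net == NULL for SCTP_TIMER_TYPE_ASCONF (see the table
 * above), while tmr->net still holds the destination referenced when the
 * timer was started, so that is the reference we must drop.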
2888 */ 2889 tmr_net = tmr->net; 2890 tmr->net = NULL; 2891 sctp_free_remote_addr(tmr_net); 2892 } 2893 } else { 2894 SCTPDBG(SCTP_DEBUG_TIMER2, 2895 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2896 t_type, inp, stcb, net); 2897 } 2898 return; 2899 } 2900 2901 uint32_t 2902 sctp_calculate_len(struct mbuf *m) 2903 { 2904 struct mbuf *at; 2905 uint32_t tlen; 2906 2907 tlen = 0; 2908 for (at = m; at != NULL; at = SCTP_BUF_NEXT(at)) { 2909 tlen += SCTP_BUF_LEN(at); 2910 } 2911 return (tlen); 2912 } 2913 2914 /* 2915 * Given an association and the starting time of the current RTT period, update 2916 * the RTO in number of msecs. net should point to the current network. 2917 * Return 1 if an RTO update was performed; return 0 if no update was 2918 * performed due to an invalid starting point. 2919 */ 2920 2921 int 2922 sctp_calculate_rto(struct sctp_tcb *stcb, 2923 struct sctp_association *asoc, 2924 struct sctp_nets *net, 2925 struct timeval *old, 2926 int rtt_from_sack) 2927 { 2928 struct timeval now; 2929 uint64_t rtt_us; /* RTT in us */ 2930 int32_t rtt; /* RTT in ms */ 2931 uint32_t new_rto; 2932 int first_measure = 0; 2933 2934 /************************/ 2935 /* 1. calculate new RTT */ 2936 /************************/ 2937 /* get the current time */ 2938 if (stcb->asoc.use_precise_time) { 2939 (void)SCTP_GETPTIME_TIMEVAL(&now); 2940 } else { 2941 (void)SCTP_GETTIME_TIMEVAL(&now); 2942 } 2943 if ((old->tv_sec > now.tv_sec) || 2944 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2945 /* The starting point is in the future. */ 2946 return (0); 2947 } 2948 timevalsub(&now, old); 2949 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2950 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2951 /* The RTT is larger than a sane value. */ 2952 return (0); 2953 } 2954 /* store the current RTT in us */ 2955 net->rtt = rtt_us; 2956 /* compute rtt in ms */ 2957 rtt = (int32_t)(net->rtt / 1000); 2958 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2959 /* 2960 * Tell the CC module that a new update has just occurred 2961 * from a sack 2962 */ 2963 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2964 } 2965 /* 2966 * Do we need to determine the LAN type? We do this only on SACKs, i.e. 2967 * when the RTT is determined from data and not from non-data (HB/INIT->INITACK). 2968 */ 2969 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2970 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2971 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2972 net->lan_type = SCTP_LAN_INTERNET; 2973 } else { 2974 net->lan_type = SCTP_LAN_LOCAL; 2975 } 2976 } 2977 2978 /***************************/ 2979 /* 2. update RTTVAR & SRTT */ 2980 /***************************/ 2981 /*- 2982 * Compute the scaled average lastsa and the 2983 * scaled variance lastsv as described in Van Jacobson's 2984 * paper "Congestion Avoidance and Control", Annex A.
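 *
 * An illustrative sketch of the fixed-point update performed below, where
 * delta is the new RTT sample (in ms) minus the current SRTT:
 *   delta   = rtt - (lastsa >> SCTP_RTT_SHIFT)
 *   lastsa += delta
 *   lastsv += |delta| - (lastsv >> SCTP_RTT_VAR_SHIFT)
 *   RTO     = (lastsa >> SCTP_RTT_SHIFT) + lastsv
 * The resulting RTO is then clamped to [minrto, maxrto].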
* 2985 * 2986 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2987 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2988 */ 2989 if (net->RTO_measured) { 2990 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2991 net->lastsa += rtt; 2992 if (rtt < 0) { 2993 rtt = -rtt; 2994 } 2995 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 2996 net->lastsv += rtt; 2997 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2998 rto_logging(net, SCTP_LOG_RTTVAR); 2999 } 3000 } else { 3001 /* First RTO measurement */ 3002 net->RTO_measured = 1; 3003 first_measure = 1; 3004 net->lastsa = rtt << SCTP_RTT_SHIFT; 3005 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3006 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3007 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3008 } 3009 } 3010 if (net->lastsv == 0) { 3011 net->lastsv = SCTP_CLOCK_GRANULARITY; 3012 } 3013 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3014 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3015 (stcb->asoc.sat_network_lockout == 0)) { 3016 stcb->asoc.sat_network = 1; 3017 } else if ((!first_measure) && stcb->asoc.sat_network) { 3018 stcb->asoc.sat_network = 0; 3019 stcb->asoc.sat_network_lockout = 1; 3020 } 3021 /* bound it, per C6/C7 in Section 6.3.1 of RFC 4960 */ 3022 if (new_rto < stcb->asoc.minrto) { 3023 new_rto = stcb->asoc.minrto; 3024 } 3025 if (new_rto > stcb->asoc.maxrto) { 3026 new_rto = stcb->asoc.maxrto; 3027 } 3028 net->RTO = new_rto; 3029 return (1); 3030 } 3031 3032 /* 3033 * Return a pointer to a contiguous piece of data from the given mbuf chain 3034 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3035 * one mbuf, a copy is made into the caller-supplied buffer 'in_ptr'. The caller must ensure that the buffer size 3036 * is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain. 3037 */ 3038 caddr_t 3039 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3040 { 3041 uint32_t count; 3042 uint8_t *ptr; 3043 3044 ptr = in_ptr; 3045 if ((off < 0) || (len <= 0)) 3046 return (NULL); 3047 3048 /* find the desired start location */ 3049 while ((m != NULL) && (off > 0)) { 3050 if (off < SCTP_BUF_LEN(m)) 3051 break; 3052 off -= SCTP_BUF_LEN(m); 3053 m = SCTP_BUF_NEXT(m); 3054 } 3055 if (m == NULL) 3056 return (NULL); 3057 3058 /* is the current mbuf large enough (e.g. contiguous)? */ 3059 if ((SCTP_BUF_LEN(m) - off) >= len) { 3060 return (mtod(m, caddr_t)+off); 3061 } else { 3062 /* else, it spans more than one mbuf, so save a temp copy... */ 3063 while ((m != NULL) && (len > 0)) { 3064 count = min(SCTP_BUF_LEN(m) - off, len); 3065 memcpy(ptr, mtod(m, caddr_t)+off, count); 3066 len -= count; 3067 ptr += count; 3068 off = 0; 3069 m = SCTP_BUF_NEXT(m); 3070 } 3071 if ((m == NULL) && (len > 0)) 3072 return (NULL); 3073 else 3074 return ((caddr_t)in_ptr); 3075 } 3076 } 3077 3078 struct sctp_paramhdr * 3079 sctp_get_next_param(struct mbuf *m, 3080 int offset, 3081 struct sctp_paramhdr *pull, 3082 int pull_limit) 3083 { 3084 /* This just provides a typed signature to Peter's Pull routine */ 3085 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3086 (uint8_t *)pull)); 3087 } 3088 3089 struct mbuf * 3090 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3091 { 3092 struct mbuf *m_last; 3093 caddr_t dp; 3094 3095 if (padlen > 3) { 3096 return (NULL); 3097 } 3098 if (padlen <= M_TRAILINGSPACE(m)) { 3099 /* 3100 * The easy way.
We hope the majority of the time we hit 3101 * here :) 3102 */ 3103 m_last = m; 3104 } else { 3105 /* Hard way we must grow the mbuf chain */ 3106 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3107 if (m_last == NULL) { 3108 return (NULL); 3109 } 3110 SCTP_BUF_LEN(m_last) = 0; 3111 SCTP_BUF_NEXT(m_last) = NULL; 3112 SCTP_BUF_NEXT(m) = m_last; 3113 } 3114 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3115 SCTP_BUF_LEN(m_last) += padlen; 3116 memset(dp, 0, padlen); 3117 return (m_last); 3118 } 3119 3120 struct mbuf * 3121 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3122 { 3123 /* find the last mbuf in chain and pad it */ 3124 struct mbuf *m_at; 3125 3126 if (last_mbuf != NULL) { 3127 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3128 } else { 3129 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3130 if (SCTP_BUF_NEXT(m_at) == NULL) { 3131 return (sctp_add_pad_tombuf(m_at, padval)); 3132 } 3133 } 3134 } 3135 return (NULL); 3136 } 3137 3138 static void 3139 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3140 uint16_t error, struct sctp_abort_chunk *abort, 3141 bool from_peer, bool timedout, int so_locked) 3142 { 3143 struct mbuf *m_notify; 3144 struct sctp_assoc_change *sac; 3145 struct sctp_queued_to_read *control; 3146 unsigned int notif_len; 3147 uint16_t abort_len; 3148 unsigned int i; 3149 3150 KASSERT(abort == NULL || from_peer, 3151 ("sctp_notify_assoc_change: ABORT chunk provided for local termination")); 3152 KASSERT(!from_peer || !timedout, 3153 ("sctp_notify_assoc_change: timeouts can only be local")); 3154 if (stcb == NULL) { 3155 return; 3156 } 3157 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3158 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3159 if (abort != NULL) { 3160 abort_len = ntohs(abort->ch.chunk_length); 3161 /* 3162 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3163 * contiguous. 3164 */ 3165 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3166 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3167 } 3168 } else { 3169 abort_len = 0; 3170 } 3171 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3172 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3173 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3174 notif_len += abort_len; 3175 } 3176 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3177 if (m_notify == NULL) { 3178 /* Retry with smaller value. 
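 * (i.e. fall back to the base notification without room for the optional
 * sac_info data)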
*/ 3179 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3180 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3181 if (m_notify == NULL) { 3182 goto set_error; 3183 } 3184 } 3185 SCTP_BUF_NEXT(m_notify) = NULL; 3186 sac = mtod(m_notify, struct sctp_assoc_change *); 3187 memset(sac, 0, notif_len); 3188 sac->sac_type = SCTP_ASSOC_CHANGE; 3189 sac->sac_flags = 0; 3190 sac->sac_length = sizeof(struct sctp_assoc_change); 3191 sac->sac_state = state; 3192 sac->sac_error = error; 3193 if (state == SCTP_CANT_STR_ASSOC) { 3194 sac->sac_outbound_streams = 0; 3195 sac->sac_inbound_streams = 0; 3196 } else { 3197 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3198 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3199 } 3200 sac->sac_assoc_id = sctp_get_associd(stcb); 3201 if (notif_len > sizeof(struct sctp_assoc_change)) { 3202 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3203 i = 0; 3204 if (stcb->asoc.prsctp_supported == 1) { 3205 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3206 } 3207 if (stcb->asoc.auth_supported == 1) { 3208 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3209 } 3210 if (stcb->asoc.asconf_supported == 1) { 3211 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3212 } 3213 if (stcb->asoc.idata_supported == 1) { 3214 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3215 } 3216 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3217 if (stcb->asoc.reconfig_supported == 1) { 3218 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3219 } 3220 sac->sac_length += i; 3221 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3222 memcpy(sac->sac_info, abort, abort_len); 3223 sac->sac_length += abort_len; 3224 } 3225 } 3226 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3227 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3228 0, 0, stcb->asoc.context, 0, 0, 0, 3229 m_notify); 3230 if (control != NULL) { 3231 control->length = SCTP_BUF_LEN(m_notify); 3232 control->spec_flags = M_NOTIFICATION; 3233 /* not that we need this */ 3234 control->tail_mbuf = m_notify; 3235 sctp_add_to_readq(stcb->sctp_ep, stcb, 3236 control, 3237 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3238 so_locked); 3239 } else { 3240 sctp_m_freem(m_notify); 3241 } 3242 } 3243 /* 3244 * For 1-to-1 style sockets, we send up an error when an ABORT 3245 * comes in.
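 * The errno reflects the cause: ECONNREFUSED for a peer ABORT while still
 * in COOKIE_WAIT, ECONNRESET for a later peer ABORT, ETIMEDOUT for a local
 * timeout, and ECONNABORTED for any other local termination.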
3246 */ 3247 set_error: 3248 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3249 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3250 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3251 SOCK_LOCK(stcb->sctp_socket); 3252 if (from_peer) { 3253 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3254 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3255 stcb->sctp_socket->so_error = ECONNREFUSED; 3256 } else { 3257 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3258 stcb->sctp_socket->so_error = ECONNRESET; 3259 } 3260 } else { 3261 if (timedout) { 3262 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3263 stcb->sctp_socket->so_error = ETIMEDOUT; 3264 } else { 3265 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3266 stcb->sctp_socket->so_error = ECONNABORTED; 3267 } 3268 } 3269 SOCK_UNLOCK(stcb->sctp_socket); 3270 } 3271 /* Wake ANY sleepers */ 3272 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3273 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3274 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3275 socantrcvmore(stcb->sctp_socket); 3276 } 3277 sorwakeup(stcb->sctp_socket); 3278 sowwakeup(stcb->sctp_socket); 3279 } 3280 3281 static void 3282 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3283 struct sockaddr *sa, uint32_t error, int so_locked) 3284 { 3285 struct mbuf *m_notify; 3286 struct sctp_paddr_change *spc; 3287 struct sctp_queued_to_read *control; 3288 3289 if ((stcb == NULL) || 3290 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3291 /* event not enabled */ 3292 return; 3293 } 3294 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3295 if (m_notify == NULL) 3296 return; 3297 SCTP_BUF_LEN(m_notify) = 0; 3298 spc = mtod(m_notify, struct sctp_paddr_change *); 3299 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3300 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3301 spc->spc_flags = 0; 3302 spc->spc_length = sizeof(struct sctp_paddr_change); 3303 switch (sa->sa_family) { 3304 #ifdef INET 3305 case AF_INET: 3306 #ifdef INET6 3307 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3308 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3309 (struct sockaddr_in6 *)&spc->spc_aaddr); 3310 } else { 3311 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3312 } 3313 #else 3314 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3315 #endif 3316 break; 3317 #endif 3318 #ifdef INET6 3319 case AF_INET6: 3320 { 3321 struct sockaddr_in6 *sin6; 3322 3323 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3324 3325 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3326 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3327 if (sin6->sin6_scope_id == 0) { 3328 /* recover scope_id for user */ 3329 (void)sa6_recoverscope(sin6); 3330 } else { 3331 /* clear embedded scope_id for user */ 3332 in6_clearscope(&sin6->sin6_addr); 3333 } 3334 } 3335 break; 3336 } 3337 #endif 3338 default: 3339 /* TSNH */ 3340 break; 3341 } 3342 spc->spc_state = state; 3343 spc->spc_error = error; 3344 spc->spc_assoc_id = sctp_get_associd(stcb); 3345 3346 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3347 SCTP_BUF_NEXT(m_notify) = NULL; 3348 3349 /* append to socket */ 3350 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3351 0, 0, stcb->asoc.context, 0, 0, 0, 3352 m_notify); 3353 if 
(control == NULL) { 3354 /* no memory */ 3355 sctp_m_freem(m_notify); 3356 return; 3357 } 3358 control->length = SCTP_BUF_LEN(m_notify); 3359 control->spec_flags = M_NOTIFICATION; 3360 /* not that we need this */ 3361 control->tail_mbuf = m_notify; 3362 sctp_add_to_readq(stcb->sctp_ep, stcb, 3363 control, 3364 &stcb->sctp_socket->so_rcv, 1, 3365 SCTP_READ_LOCK_NOT_HELD, 3366 so_locked); 3367 } 3368 3369 static void 3370 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3371 struct sctp_tmit_chunk *chk, int so_locked) 3372 { 3373 struct mbuf *m_notify; 3374 struct sctp_send_failed *ssf; 3375 struct sctp_send_failed_event *ssfe; 3376 struct sctp_queued_to_read *control; 3377 struct sctp_chunkhdr *chkhdr; 3378 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3379 3380 if ((stcb == NULL) || 3381 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3382 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3383 /* event not enabled */ 3384 return; 3385 } 3386 3387 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3388 notifhdr_len = sizeof(struct sctp_send_failed_event); 3389 } else { 3390 notifhdr_len = sizeof(struct sctp_send_failed); 3391 } 3392 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3393 if (m_notify == NULL) 3394 /* no space left */ 3395 return; 3396 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3397 if (stcb->asoc.idata_supported) { 3398 chkhdr_len = sizeof(struct sctp_idata_chunk); 3399 } else { 3400 chkhdr_len = sizeof(struct sctp_data_chunk); 3401 } 3402 /* Use some defaults in case we can't access the chunk header */ 3403 if (chk->send_size >= chkhdr_len) { 3404 payload_len = chk->send_size - chkhdr_len; 3405 } else { 3406 payload_len = 0; 3407 } 3408 padding_len = 0; 3409 if (chk->data != NULL) { 3410 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3411 if (chkhdr != NULL) { 3412 chk_len = ntohs(chkhdr->chunk_length); 3413 if ((chk_len >= chkhdr_len) && 3414 (chk->send_size >= chk_len) && 3415 (chk->send_size - chk_len < 4)) { 3416 padding_len = chk->send_size - chk_len; 3417 payload_len = chk->send_size - chkhdr_len - padding_len; 3418 } 3419 } 3420 } 3421 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3422 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3423 memset(ssfe, 0, notifhdr_len); 3424 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3425 if (sent) { 3426 ssfe->ssfe_flags = SCTP_DATA_SENT; 3427 } else { 3428 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3429 } 3430 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3431 ssfe->ssfe_error = error; 3432 /* not exactly what the user sent in, but should be close :) */ 3433 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3434 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3435 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3436 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3437 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3438 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3439 } else { 3440 ssf = mtod(m_notify, struct sctp_send_failed *); 3441 memset(ssf, 0, notifhdr_len); 3442 ssf->ssf_type = SCTP_SEND_FAILED; 3443 if (sent) { 3444 ssf->ssf_flags = SCTP_DATA_SENT; 3445 } else { 3446 ssf->ssf_flags = SCTP_DATA_UNSENT; 3447 } 3448 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3449 ssf->ssf_error = error; 3450 /* not exactly what the user sent in, but should be close :) */ 3451 
ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3452 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3453 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3454 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3455 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3456 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3457 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3458 } 3459 if (chk->data != NULL) { 3460 /* Trim off the sctp chunk header (it should be there) */ 3461 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3462 m_adj(chk->data, chkhdr_len); 3463 m_adj(chk->data, -padding_len); 3464 sctp_mbuf_crush(chk->data); 3465 chk->send_size -= (chkhdr_len + padding_len); 3466 } 3467 } 3468 SCTP_BUF_NEXT(m_notify) = chk->data; 3469 /* Steal off the mbuf */ 3470 chk->data = NULL; 3471 /* 3472 * For this case, we check the actual socket buffer, since the assoc 3473 * is going away we don't want to overfill the socket buffer for a 3474 * non-reader 3475 */ 3476 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3477 sctp_m_freem(m_notify); 3478 return; 3479 } 3480 /* append to socket */ 3481 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3482 0, 0, stcb->asoc.context, 0, 0, 0, 3483 m_notify); 3484 if (control == NULL) { 3485 /* no memory */ 3486 sctp_m_freem(m_notify); 3487 return; 3488 } 3489 control->length = SCTP_BUF_LEN(m_notify); 3490 control->spec_flags = M_NOTIFICATION; 3491 /* not that we need this */ 3492 control->tail_mbuf = m_notify; 3493 sctp_add_to_readq(stcb->sctp_ep, stcb, 3494 control, 3495 &stcb->sctp_socket->so_rcv, 1, 3496 SCTP_READ_LOCK_NOT_HELD, 3497 so_locked); 3498 } 3499 3500 static void 3501 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3502 struct sctp_stream_queue_pending *sp, int so_locked) 3503 { 3504 struct mbuf *m_notify; 3505 struct sctp_send_failed *ssf; 3506 struct sctp_send_failed_event *ssfe; 3507 struct sctp_queued_to_read *control; 3508 int notifhdr_len; 3509 3510 if ((stcb == NULL) || 3511 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3512 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3513 /* event not enabled */ 3514 return; 3515 } 3516 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3517 notifhdr_len = sizeof(struct sctp_send_failed_event); 3518 } else { 3519 notifhdr_len = sizeof(struct sctp_send_failed); 3520 } 3521 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3522 if (m_notify == NULL) { 3523 /* no space left */ 3524 return; 3525 } 3526 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3527 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3528 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3529 memset(ssfe, 0, notifhdr_len); 3530 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3531 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3532 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3533 ssfe->ssfe_error = error; 3534 /* not exactly what the user sent in, but should be close :) */ 3535 ssfe->ssfe_info.snd_sid = sp->sid; 3536 if (sp->some_taken) { 3537 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3538 } else { 3539 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3540 } 3541 ssfe->ssfe_info.snd_ppid = sp->ppid; 3542 ssfe->ssfe_info.snd_context = sp->context; 3543 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3544 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3545 } else { 
3546 ssf = mtod(m_notify, struct sctp_send_failed *); 3547 memset(ssf, 0, notifhdr_len); 3548 ssf->ssf_type = SCTP_SEND_FAILED; 3549 ssf->ssf_flags = SCTP_DATA_UNSENT; 3550 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3551 ssf->ssf_error = error; 3552 /* not exactly what the user sent in, but should be close :) */ 3553 ssf->ssf_info.sinfo_stream = sp->sid; 3554 ssf->ssf_info.sinfo_ssn = 0; 3555 if (sp->some_taken) { 3556 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3557 } else { 3558 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3559 } 3560 ssf->ssf_info.sinfo_ppid = sp->ppid; 3561 ssf->ssf_info.sinfo_context = sp->context; 3562 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3563 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3564 } 3565 SCTP_BUF_NEXT(m_notify) = sp->data; 3566 3567 /* Steal off the mbuf */ 3568 sp->data = NULL; 3569 /* 3570 * For this case, we check the actual socket buffer, since the assoc 3571 * is going away we don't want to overfill the socket buffer for a 3572 * non-reader 3573 */ 3574 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3575 sctp_m_freem(m_notify); 3576 return; 3577 } 3578 /* append to socket */ 3579 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3580 0, 0, stcb->asoc.context, 0, 0, 0, 3581 m_notify); 3582 if (control == NULL) { 3583 /* no memory */ 3584 sctp_m_freem(m_notify); 3585 return; 3586 } 3587 control->length = SCTP_BUF_LEN(m_notify); 3588 control->spec_flags = M_NOTIFICATION; 3589 /* not that we need this */ 3590 control->tail_mbuf = m_notify; 3591 sctp_add_to_readq(stcb->sctp_ep, stcb, 3592 control, 3593 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3594 } 3595 3596 static void 3597 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3598 { 3599 struct mbuf *m_notify; 3600 struct sctp_adaptation_event *sai; 3601 struct sctp_queued_to_read *control; 3602 3603 if ((stcb == NULL) || 3604 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3605 /* event not enabled */ 3606 return; 3607 } 3608 3609 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3610 if (m_notify == NULL) 3611 /* no space left */ 3612 return; 3613 SCTP_BUF_LEN(m_notify) = 0; 3614 sai = mtod(m_notify, struct sctp_adaptation_event *); 3615 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3616 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3617 sai->sai_flags = 0; 3618 sai->sai_length = sizeof(struct sctp_adaptation_event); 3619 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3620 sai->sai_assoc_id = sctp_get_associd(stcb); 3621 3622 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3623 SCTP_BUF_NEXT(m_notify) = NULL; 3624 3625 /* append to socket */ 3626 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3627 0, 0, stcb->asoc.context, 0, 0, 0, 3628 m_notify); 3629 if (control == NULL) { 3630 /* no memory */ 3631 sctp_m_freem(m_notify); 3632 return; 3633 } 3634 control->length = SCTP_BUF_LEN(m_notify); 3635 control->spec_flags = M_NOTIFICATION; 3636 /* not that we need this */ 3637 control->tail_mbuf = m_notify; 3638 sctp_add_to_readq(stcb->sctp_ep, stcb, 3639 control, 3640 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3641 } 3642 3643 static void 3644 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3645 struct sctp_queued_to_read *aborted_control, 3646 int so_locked) 3647 { 3648 struct mbuf *m_notify; 
3649 struct sctp_pdapi_event *pdapi; 3650 struct sctp_queued_to_read *control; 3651 struct sockbuf *sb; 3652 3653 if ((stcb == NULL) || 3654 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3655 /* event not enabled */ 3656 return; 3657 } 3658 3659 KASSERT(aborted_control != NULL, ("aborted_control is NULL")); 3660 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 3661 3662 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3663 if (m_notify == NULL) 3664 /* no space left */ 3665 return; 3666 SCTP_BUF_LEN(m_notify) = 0; 3667 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3668 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3669 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3670 pdapi->pdapi_flags = 0; 3671 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3672 pdapi->pdapi_indication = error; 3673 pdapi->pdapi_stream = aborted_control->sinfo_stream; 3674 pdapi->pdapi_seq = (uint16_t)aborted_control->mid; 3675 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3676 3677 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3678 SCTP_BUF_NEXT(m_notify) = NULL; 3679 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3680 0, 0, stcb->asoc.context, 0, 0, 0, 3681 m_notify); 3682 if (control == NULL) { 3683 /* no memory */ 3684 sctp_m_freem(m_notify); 3685 return; 3686 } 3687 control->length = SCTP_BUF_LEN(m_notify); 3688 control->spec_flags = M_NOTIFICATION; 3689 /* not that we need this */ 3690 control->tail_mbuf = m_notify; 3691 sb = &stcb->sctp_socket->so_rcv; 3692 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3693 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3694 } 3695 sctp_sballoc(stcb, sb, m_notify); 3696 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3697 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3698 } 3699 control->end_added = 1; 3700 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, aborted_control, control, next); 3701 if (stcb->sctp_ep && stcb->sctp_socket) { 3702 /* This should always be the case */ 3703 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3704 } 3705 } 3706 3707 static void 3708 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3709 { 3710 struct mbuf *m_notify; 3711 struct sctp_shutdown_event *sse; 3712 struct sctp_queued_to_read *control; 3713 3714 /* 3715 * For TCP model AND UDP connected sockets we will send an error up 3716 * when an SHUTDOWN completes 3717 */ 3718 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3719 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3720 /* mark socket closed for read/write and wakeup! 
*/ 3721 socantsendmore(stcb->sctp_socket); 3722 } 3723 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3724 /* event not enabled */ 3725 return; 3726 } 3727 3728 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3729 if (m_notify == NULL) 3730 /* no space left */ 3731 return; 3732 sse = mtod(m_notify, struct sctp_shutdown_event *); 3733 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3734 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3735 sse->sse_flags = 0; 3736 sse->sse_length = sizeof(struct sctp_shutdown_event); 3737 sse->sse_assoc_id = sctp_get_associd(stcb); 3738 3739 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3740 SCTP_BUF_NEXT(m_notify) = NULL; 3741 3742 /* append to socket */ 3743 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3744 0, 0, stcb->asoc.context, 0, 0, 0, 3745 m_notify); 3746 if (control == NULL) { 3747 /* no memory */ 3748 sctp_m_freem(m_notify); 3749 return; 3750 } 3751 control->length = SCTP_BUF_LEN(m_notify); 3752 control->spec_flags = M_NOTIFICATION; 3753 /* not that we need this */ 3754 control->tail_mbuf = m_notify; 3755 sctp_add_to_readq(stcb->sctp_ep, stcb, 3756 control, 3757 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3758 } 3759 3760 static void 3761 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3762 int so_locked) 3763 { 3764 struct mbuf *m_notify; 3765 struct sctp_sender_dry_event *event; 3766 struct sctp_queued_to_read *control; 3767 3768 if ((stcb == NULL) || 3769 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3770 /* event not enabled */ 3771 return; 3772 } 3773 3774 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3775 if (m_notify == NULL) { 3776 /* no space left */ 3777 return; 3778 } 3779 SCTP_BUF_LEN(m_notify) = 0; 3780 event = mtod(m_notify, struct sctp_sender_dry_event *); 3781 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3782 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3783 event->sender_dry_flags = 0; 3784 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3785 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3786 3787 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3788 SCTP_BUF_NEXT(m_notify) = NULL; 3789 3790 /* append to socket */ 3791 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3792 0, 0, stcb->asoc.context, 0, 0, 0, 3793 m_notify); 3794 if (control == NULL) { 3795 /* no memory */ 3796 sctp_m_freem(m_notify); 3797 return; 3798 } 3799 control->length = SCTP_BUF_LEN(m_notify); 3800 control->spec_flags = M_NOTIFICATION; 3801 /* not that we need this */ 3802 control->tail_mbuf = m_notify; 3803 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3804 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3805 } 3806 3807 void 3808 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3809 { 3810 struct mbuf *m_notify; 3811 struct sctp_queued_to_read *control; 3812 struct sctp_stream_change_event *stradd; 3813 3814 if ((stcb == NULL) || 3815 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3816 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3817 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3818 /* If the socket is gone we are out of here. 
*/ 3819 return; 3820 } 3821 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT)) { 3822 /* event not enabled */ 3823 return; 3824 } 3825 3826 if ((stcb->asoc.peer_req_out) && flag) { 3827 /* Peer made the request, don't tell the local user */ 3828 stcb->asoc.peer_req_out = 0; 3829 return; 3830 } 3831 stcb->asoc.peer_req_out = 0; 3832 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3833 if (m_notify == NULL) 3834 /* no space left */ 3835 return; 3836 SCTP_BUF_LEN(m_notify) = 0; 3837 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3838 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3839 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3840 stradd->strchange_flags = flag; 3841 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3842 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3843 stradd->strchange_instrms = numberin; 3844 stradd->strchange_outstrms = numberout; 3845 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3846 SCTP_BUF_NEXT(m_notify) = NULL; 3847 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3848 /* no space */ 3849 sctp_m_freem(m_notify); 3850 return; 3851 } 3852 /* append to socket */ 3853 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3854 0, 0, stcb->asoc.context, 0, 0, 0, 3855 m_notify); 3856 if (control == NULL) { 3857 /* no memory */ 3858 sctp_m_freem(m_notify); 3859 return; 3860 } 3861 control->length = SCTP_BUF_LEN(m_notify); 3862 control->spec_flags = M_NOTIFICATION; 3863 /* not that we need this */ 3864 control->tail_mbuf = m_notify; 3865 sctp_add_to_readq(stcb->sctp_ep, stcb, 3866 control, 3867 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3868 } 3869 3870 void 3871 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3872 { 3873 struct mbuf *m_notify; 3874 struct sctp_queued_to_read *control; 3875 struct sctp_assoc_reset_event *strasoc; 3876 3877 if ((stcb == NULL) || 3878 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3879 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3880 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3881 /* If the socket is gone we are out of here. 
*/ 3882 return; 3883 } 3884 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT)) { 3885 /* event not enabled */ 3886 return; 3887 } 3888 3889 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3890 if (m_notify == NULL) 3891 /* no space left */ 3892 return; 3893 SCTP_BUF_LEN(m_notify) = 0; 3894 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3895 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3896 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3897 strasoc->assocreset_flags = flag; 3898 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3899 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3900 strasoc->assocreset_local_tsn = sending_tsn; 3901 strasoc->assocreset_remote_tsn = recv_tsn; 3902 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3903 SCTP_BUF_NEXT(m_notify) = NULL; 3904 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3905 /* no space */ 3906 sctp_m_freem(m_notify); 3907 return; 3908 } 3909 /* append to socket */ 3910 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3911 0, 0, stcb->asoc.context, 0, 0, 0, 3912 m_notify); 3913 if (control == NULL) { 3914 /* no memory */ 3915 sctp_m_freem(m_notify); 3916 return; 3917 } 3918 control->length = SCTP_BUF_LEN(m_notify); 3919 control->spec_flags = M_NOTIFICATION; 3920 /* not that we need this */ 3921 control->tail_mbuf = m_notify; 3922 sctp_add_to_readq(stcb->sctp_ep, stcb, 3923 control, 3924 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3925 } 3926 3927 static void 3928 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3929 int number_entries, uint16_t *list, int flag) 3930 { 3931 struct mbuf *m_notify; 3932 struct sctp_queued_to_read *control; 3933 struct sctp_stream_reset_event *strreset; 3934 int len; 3935 3936 if ((stcb == NULL) || 3937 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3938 /* event not enabled */ 3939 return; 3940 } 3941 3942 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3943 if (m_notify == NULL) 3944 /* no space left */ 3945 return; 3946 SCTP_BUF_LEN(m_notify) = 0; 3947 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3948 if (len > M_TRAILINGSPACE(m_notify)) { 3949 /* never enough room */ 3950 sctp_m_freem(m_notify); 3951 return; 3952 } 3953 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3954 memset(strreset, 0, len); 3955 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3956 strreset->strreset_flags = flag; 3957 strreset->strreset_length = len; 3958 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3959 if (number_entries) { 3960 int i; 3961 3962 for (i = 0; i < number_entries; i++) { 3963 strreset->strreset_stream_list[i] = ntohs(list[i]); 3964 } 3965 } 3966 SCTP_BUF_LEN(m_notify) = len; 3967 SCTP_BUF_NEXT(m_notify) = NULL; 3968 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3969 /* no space */ 3970 sctp_m_freem(m_notify); 3971 return; 3972 } 3973 /* append to socket */ 3974 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3975 0, 0, stcb->asoc.context, 0, 0, 0, 3976 m_notify); 3977 if (control == NULL) { 3978 /* no memory */ 3979 sctp_m_freem(m_notify); 3980 return; 3981 } 3982 control->length = SCTP_BUF_LEN(m_notify); 3983 control->spec_flags = M_NOTIFICATION; 3984 /* not that we need this */ 3985 
control->tail_mbuf = m_notify; 3986 sctp_add_to_readq(stcb->sctp_ep, stcb, 3987 control, 3988 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3989 } 3990 3991 static void 3992 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3993 { 3994 struct mbuf *m_notify; 3995 struct sctp_remote_error *sre; 3996 struct sctp_queued_to_read *control; 3997 unsigned int notif_len; 3998 uint16_t chunk_len; 3999 4000 if ((stcb == NULL) || 4001 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 4002 return; 4003 } 4004 if (chunk != NULL) { 4005 chunk_len = ntohs(chunk->ch.chunk_length); 4006 /* 4007 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 4008 * contiguous. 4009 */ 4010 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 4011 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4012 } 4013 } else { 4014 chunk_len = 0; 4015 } 4016 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4017 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4018 if (m_notify == NULL) { 4019 /* Retry with smaller value. */ 4020 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4021 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4022 if (m_notify == NULL) { 4023 return; 4024 } 4025 } 4026 SCTP_BUF_NEXT(m_notify) = NULL; 4027 sre = mtod(m_notify, struct sctp_remote_error *); 4028 memset(sre, 0, notif_len); 4029 sre->sre_type = SCTP_REMOTE_ERROR; 4030 sre->sre_flags = 0; 4031 sre->sre_length = sizeof(struct sctp_remote_error); 4032 sre->sre_error = error; 4033 sre->sre_assoc_id = sctp_get_associd(stcb); 4034 if (notif_len > sizeof(struct sctp_remote_error)) { 4035 memcpy(sre->sre_data, chunk, chunk_len); 4036 sre->sre_length += chunk_len; 4037 } 4038 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4039 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4040 0, 0, stcb->asoc.context, 0, 0, 0, 4041 m_notify); 4042 if (control != NULL) { 4043 control->length = SCTP_BUF_LEN(m_notify); 4044 control->spec_flags = M_NOTIFICATION; 4045 /* not that we need this */ 4046 control->tail_mbuf = m_notify; 4047 sctp_add_to_readq(stcb->sctp_ep, stcb, 4048 control, 4049 &stcb->sctp_socket->so_rcv, 1, 4050 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4051 } else { 4052 sctp_m_freem(m_notify); 4053 } 4054 } 4055 4056 void 4057 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4058 uint32_t error, void *data, int so_locked) 4059 { 4060 if ((stcb == NULL) || 4061 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4062 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4063 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4064 /* If the socket is gone we are out of here */ 4065 return; 4066 } 4067 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4068 return; 4069 } 4070 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4071 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4072 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4073 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4074 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4075 /* Don't report these in front states */ 4076 return; 4077 } 4078 } 4079 switch (notification) { 4080 case SCTP_NOTIFY_ASSOC_UP: 4081 if (stcb->asoc.assoc_up_sent == 0) { 4082 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, false, false, so_locked); 4083 stcb->asoc.assoc_up_sent = 1; 4084 } 4085 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4086 
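/* The peer supplied an adaptation layer indication; pass it up now that the association is up. */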
sctp_notify_adaptation_layer(stcb); 4087 } 4088 if (stcb->asoc.auth_supported == 0) { 4089 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4090 NULL, so_locked); 4091 } 4092 break; 4093 case SCTP_NOTIFY_ASSOC_DOWN: 4094 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, false, false, so_locked); 4095 break; 4096 case SCTP_NOTIFY_INTERFACE_DOWN: 4097 { 4098 struct sctp_nets *net; 4099 4100 net = (struct sctp_nets *)data; 4101 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4102 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4103 break; 4104 } 4105 case SCTP_NOTIFY_INTERFACE_UP: 4106 { 4107 struct sctp_nets *net; 4108 4109 net = (struct sctp_nets *)data; 4110 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4111 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4112 break; 4113 } 4114 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4115 { 4116 struct sctp_nets *net; 4117 4118 net = (struct sctp_nets *)data; 4119 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4120 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4121 break; 4122 } 4123 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4124 sctp_notify_send_failed2(stcb, error, 4125 (struct sctp_stream_queue_pending *)data, so_locked); 4126 break; 4127 case SCTP_NOTIFY_SENT_DG_FAIL: 4128 sctp_notify_send_failed(stcb, 1, error, 4129 (struct sctp_tmit_chunk *)data, so_locked); 4130 break; 4131 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4132 sctp_notify_send_failed(stcb, 0, error, 4133 (struct sctp_tmit_chunk *)data, so_locked); 4134 break; 4135 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4136 sctp_notify_partial_delivery_indication(stcb, error, 4137 (struct sctp_queued_to_read *)data, 4138 so_locked); 4139 break; 4140 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4141 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4142 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4143 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, false, so_locked); 4144 } else { 4145 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, false, so_locked); 4146 } 4147 break; 4148 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4149 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4150 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4151 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, true, false, so_locked); 4152 } else { 4153 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, true, false, so_locked); 4154 } 4155 break; 4156 case SCTP_NOTIFY_ASSOC_TIMEDOUT: 4157 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4158 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4159 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, true, so_locked); 4160 } else { 4161 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, true, so_locked); 4162 } 4163 break; 4164 case SCTP_NOTIFY_ASSOC_RESTART: 4165 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, false, false, so_locked); 4166 if (stcb->asoc.auth_supported == 0) { 4167 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4168 NULL, so_locked); 4169 } 4170 break; 4171 case SCTP_NOTIFY_STR_RESET_SEND: 4172 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4173 break; 4174 case SCTP_NOTIFY_STR_RESET_RECV: 4175 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4176 break; 4177 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4178 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4179 (SCTP_STREAM_RESET_OUTGOING_SSN | 
SCTP_STREAM_RESET_FAILED)); 4180 break; 4181 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4182 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4183 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4184 break; 4185 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4186 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4187 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4188 break; 4189 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4190 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4191 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4192 break; 4193 case SCTP_NOTIFY_ASCONF_ADD_IP: 4194 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4195 error, so_locked); 4196 break; 4197 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4198 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4199 error, so_locked); 4200 break; 4201 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4202 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4203 error, so_locked); 4204 break; 4205 case SCTP_NOTIFY_PEER_SHUTDOWN: 4206 sctp_notify_shutdown_event(stcb); 4207 break; 4208 case SCTP_NOTIFY_AUTH_NEW_KEY: 4209 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4210 (uint16_t)(uintptr_t)data, 4211 so_locked); 4212 break; 4213 case SCTP_NOTIFY_AUTH_FREE_KEY: 4214 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4215 (uint16_t)(uintptr_t)data, 4216 so_locked); 4217 break; 4218 case SCTP_NOTIFY_NO_PEER_AUTH: 4219 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4220 (uint16_t)(uintptr_t)data, 4221 so_locked); 4222 break; 4223 case SCTP_NOTIFY_SENDER_DRY: 4224 sctp_notify_sender_dry_event(stcb, so_locked); 4225 break; 4226 case SCTP_NOTIFY_REMOTE_ERROR: 4227 sctp_notify_remote_error(stcb, error, data); 4228 break; 4229 default: 4230 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4231 __func__, notification, notification); 4232 break; 4233 } /* end switch */ 4234 } 4235 4236 void 4237 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4238 { 4239 struct sctp_association *asoc; 4240 struct sctp_stream_out *outs; 4241 struct sctp_tmit_chunk *chk, *nchk; 4242 struct sctp_stream_queue_pending *sp, *nsp; 4243 int i; 4244 4245 if (stcb == NULL) { 4246 return; 4247 } 4248 asoc = &stcb->asoc; 4249 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4250 /* already being freed */ 4251 return; 4252 } 4253 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4254 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4255 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4256 return; 4257 } 4258 /* now through all the gunk freeing chunks */ 4259 /* sent queue SHOULD be empty */ 4260 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4261 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4262 asoc->sent_queue_cnt--; 4263 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4264 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4265 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4266 #ifdef INVARIANTS 4267 } else { 4268 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4269 #endif 4270 } 4271 } 4272 if (chk->data != NULL) { 4273 sctp_free_bufspace(stcb, asoc, chk, 1); 4274 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4275 error, chk, so_locked); 4276 if (chk->data) { 4277 sctp_m_freem(chk->data); 4278 chk->data = NULL; 4279 } 4280 } 4281 sctp_free_a_chunk(stcb, chk, so_locked); 4282 /* sa_ignore FREED_MEMORY */ 4283 } 4284 /* pending send queue SHOULD be empty */ 4285 
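/* Purge anything still waiting on the send queue and let the ULP know it was never transmitted. */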
TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4286 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4287 asoc->send_queue_cnt--; 4288 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4289 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4290 #ifdef INVARIANTS 4291 } else { 4292 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4293 #endif 4294 } 4295 if (chk->data != NULL) { 4296 sctp_free_bufspace(stcb, asoc, chk, 1); 4297 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4298 error, chk, so_locked); 4299 if (chk->data) { 4300 sctp_m_freem(chk->data); 4301 chk->data = NULL; 4302 } 4303 } 4304 sctp_free_a_chunk(stcb, chk, so_locked); 4305 /* sa_ignore FREED_MEMORY */ 4306 } 4307 for (i = 0; i < asoc->streamoutcnt; i++) { 4308 /* For each stream */ 4309 outs = &asoc->strmout[i]; 4310 /* clean up any sends there */ 4311 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4312 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4313 TAILQ_REMOVE(&outs->outqueue, sp, next); 4314 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp); 4315 sctp_free_spbufspace(stcb, asoc, sp); 4316 if (sp->data) { 4317 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4318 error, (void *)sp, so_locked); 4319 if (sp->data) { 4320 sctp_m_freem(sp->data); 4321 sp->data = NULL; 4322 sp->tail_mbuf = NULL; 4323 sp->length = 0; 4324 } 4325 } 4326 if (sp->net) { 4327 sctp_free_remote_addr(sp->net); 4328 sp->net = NULL; 4329 } 4330 /* Free the chunk */ 4331 sctp_free_a_strmoq(stcb, sp, so_locked); 4332 /* sa_ignore FREED_MEMORY */ 4333 } 4334 } 4335 } 4336 4337 void 4338 sctp_abort_notification(struct sctp_tcb *stcb, bool from_peer, bool timeout, 4339 uint16_t error, struct sctp_abort_chunk *abort, 4340 int so_locked) 4341 { 4342 if (stcb == NULL) { 4343 return; 4344 } 4345 SCTP_TCB_LOCK_ASSERT(stcb); 4346 4347 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4348 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4349 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4350 sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_WAS_ABORTED); 4351 } 4352 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4353 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4354 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4355 return; 4356 } 4357 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4358 /* Tell them we lost the asoc */ 4359 sctp_report_all_outbound(stcb, error, so_locked); 4360 if (from_peer) { 4361 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4362 } else { 4363 if (timeout) { 4364 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_TIMEDOUT, stcb, error, abort, so_locked); 4365 } else { 4366 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4367 } 4368 } 4369 } 4370 4371 void 4372 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4373 struct mbuf *m, int iphlen, 4374 struct sockaddr *src, struct sockaddr *dst, 4375 struct sctphdr *sh, struct mbuf *op_err, 4376 uint8_t mflowtype, uint32_t mflowid, 4377 uint32_t vrf_id, uint16_t port) 4378 { 4379 struct sctp_gen_error_cause *cause; 4380 uint32_t vtag; 4381 uint16_t cause_code; 4382 4383 if (stcb != NULL) { 4384 vtag = stcb->asoc.peer_vtag; 4385 vrf_id = stcb->asoc.vrf_id; 4386 if (op_err != NULL) { 4387 /* Read the cause code from the error cause. 
*/ 4388 cause = mtod(op_err, struct sctp_gen_error_cause *); 4389 cause_code = ntohs(cause->code); 4390 } else { 4391 cause_code = 0; 4392 } 4393 } else { 4394 vtag = 0; 4395 } 4396 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4397 mflowtype, mflowid, inp->fibnum, 4398 vrf_id, port); 4399 if (stcb != NULL) { 4400 /* We have a TCB to abort, send notification too */ 4401 sctp_abort_notification(stcb, false, false, cause_code, NULL, SCTP_SO_NOT_LOCKED); 4402 /* Ok, now lets free it */ 4403 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4404 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4405 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4406 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4407 } 4408 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4409 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4410 } 4411 } 4412 #ifdef SCTP_ASOCLOG_OF_TSNS 4413 void 4414 sctp_print_out_track_log(struct sctp_tcb *stcb) 4415 { 4416 #ifdef NOSIY_PRINTS 4417 int i; 4418 4419 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4420 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4421 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4422 SCTP_PRINTF("None rcvd\n"); 4423 goto none_in; 4424 } 4425 if (stcb->asoc.tsn_in_wrapped) { 4426 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4427 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4428 stcb->asoc.in_tsnlog[i].tsn, 4429 stcb->asoc.in_tsnlog[i].strm, 4430 stcb->asoc.in_tsnlog[i].seq, 4431 stcb->asoc.in_tsnlog[i].flgs, 4432 stcb->asoc.in_tsnlog[i].sz); 4433 } 4434 } 4435 if (stcb->asoc.tsn_in_at) { 4436 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4437 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4438 stcb->asoc.in_tsnlog[i].tsn, 4439 stcb->asoc.in_tsnlog[i].strm, 4440 stcb->asoc.in_tsnlog[i].seq, 4441 stcb->asoc.in_tsnlog[i].flgs, 4442 stcb->asoc.in_tsnlog[i].sz); 4443 } 4444 } 4445 none_in: 4446 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4447 if ((stcb->asoc.tsn_out_at == 0) && 4448 (stcb->asoc.tsn_out_wrapped == 0)) { 4449 SCTP_PRINTF("None sent\n"); 4450 } 4451 if (stcb->asoc.tsn_out_wrapped) { 4452 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4453 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4454 stcb->asoc.out_tsnlog[i].tsn, 4455 stcb->asoc.out_tsnlog[i].strm, 4456 stcb->asoc.out_tsnlog[i].seq, 4457 stcb->asoc.out_tsnlog[i].flgs, 4458 stcb->asoc.out_tsnlog[i].sz); 4459 } 4460 } 4461 if (stcb->asoc.tsn_out_at) { 4462 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4463 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4464 stcb->asoc.out_tsnlog[i].tsn, 4465 stcb->asoc.out_tsnlog[i].strm, 4466 stcb->asoc.out_tsnlog[i].seq, 4467 stcb->asoc.out_tsnlog[i].flgs, 4468 stcb->asoc.out_tsnlog[i].sz); 4469 } 4470 } 4471 #endif 4472 } 4473 #endif 4474 4475 void 4476 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4477 struct mbuf *op_err, bool timedout, int so_locked) 4478 { 4479 struct sctp_gen_error_cause *cause; 4480 uint16_t cause_code; 4481 4482 if (stcb == NULL) { 4483 /* Got to have a TCB */ 4484 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4485 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4486 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4487 SCTP_CALLED_DIRECTLY_NOCMPSET); 4488 } 4489 } 4490 return; 4491 } 4492 if (op_err != NULL) { 4493 /* Read the cause code from the error cause. 
*/ 4494 cause = mtod(op_err, struct sctp_gen_error_cause *); 4495 cause_code = ntohs(cause->code); 4496 } else { 4497 cause_code = 0; 4498 } 4499 /* notify the peer */ 4500 sctp_send_abort_tcb(stcb, op_err, so_locked); 4501 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4502 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4503 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4504 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4505 } 4506 /* notify the ulp */ 4507 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4508 sctp_abort_notification(stcb, false, timedout, cause_code, NULL, so_locked); 4509 } 4510 /* now free the asoc */ 4511 #ifdef SCTP_ASOCLOG_OF_TSNS 4512 sctp_print_out_track_log(stcb); 4513 #endif 4514 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4515 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4516 } 4517 4518 void 4519 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4520 struct sockaddr *src, struct sockaddr *dst, 4521 struct sctphdr *sh, struct sctp_inpcb *inp, 4522 struct mbuf *cause, 4523 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4524 uint32_t vrf_id, uint16_t port) 4525 { 4526 struct sctp_chunkhdr *ch, chunk_buf; 4527 unsigned int chk_length; 4528 int contains_init_chunk; 4529 4530 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4531 /* Generate a TO address for future reference */ 4532 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4533 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4534 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4535 SCTP_CALLED_DIRECTLY_NOCMPSET); 4536 } 4537 } 4538 contains_init_chunk = 0; 4539 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4540 sizeof(*ch), (uint8_t *)&chunk_buf); 4541 while (ch != NULL) { 4542 chk_length = ntohs(ch->chunk_length); 4543 if (chk_length < sizeof(*ch)) { 4544 /* break to abort land */ 4545 break; 4546 } 4547 switch (ch->chunk_type) { 4548 case SCTP_INIT: 4549 contains_init_chunk = 1; 4550 break; 4551 case SCTP_PACKET_DROPPED: 4552 /* we don't respond to pkt-dropped */ 4553 return; 4554 case SCTP_ABORT_ASSOCIATION: 4555 /* we don't respond with an ABORT to an ABORT */ 4556 return; 4557 case SCTP_SHUTDOWN_COMPLETE: 4558 /* 4559 * we ignore it since we are not waiting for it and 4560 * peer is gone 4561 */ 4562 return; 4563 case SCTP_SHUTDOWN_ACK: 4564 sctp_send_shutdown_complete2(src, dst, sh, 4565 mflowtype, mflowid, fibnum, 4566 vrf_id, port); 4567 return; 4568 default: 4569 break; 4570 } 4571 offset += SCTP_SIZE32(chk_length); 4572 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4573 sizeof(*ch), (uint8_t *)&chunk_buf); 4574 } 4575 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4576 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4577 (contains_init_chunk == 0))) { 4578 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4579 mflowtype, mflowid, fibnum, 4580 vrf_id, port); 4581 } 4582 } 4583 4584 /* 4585 * check the inbound datagram to make sure there is not an abort inside it, 4586 * if there is return 1, else return 0. 
4587 */ 4588 int 4589 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtag) 4590 { 4591 struct sctp_chunkhdr *ch; 4592 struct sctp_init_chunk *init_chk, chunk_buf; 4593 int offset; 4594 unsigned int chk_length; 4595 4596 offset = iphlen + sizeof(struct sctphdr); 4597 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4598 (uint8_t *)&chunk_buf); 4599 while (ch != NULL) { 4600 chk_length = ntohs(ch->chunk_length); 4601 if (chk_length < sizeof(*ch)) { 4602 /* packet is probably corrupt */ 4603 break; 4604 } 4605 /* we seem to be ok, is it an abort? */ 4606 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4607 /* yep, tell them */ 4608 return (1); 4609 } 4610 if ((ch->chunk_type == SCTP_INITIATION) || 4611 (ch->chunk_type == SCTP_INITIATION_ACK)) { 4612 /* need to update the Vtag */ 4613 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4614 offset, sizeof(struct sctp_init_chunk), (uint8_t *)&chunk_buf); 4615 if (init_chk != NULL) { 4616 *vtag = ntohl(init_chk->init.initiate_tag); 4617 } 4618 } 4619 /* Nope, move to the next chunk */ 4620 offset += SCTP_SIZE32(chk_length); 4621 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4622 sizeof(*ch), (uint8_t *)&chunk_buf); 4623 } 4624 return (0); 4625 } 4626 4627 /* 4628 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4629 * set (i.e. it's 0) so, create this function to compare link local scopes 4630 */ 4631 #ifdef INET6 4632 uint32_t 4633 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4634 { 4635 struct sockaddr_in6 a, b; 4636 4637 /* save copies */ 4638 a = *addr1; 4639 b = *addr2; 4640 4641 if (a.sin6_scope_id == 0) 4642 if (sa6_recoverscope(&a)) { 4643 /* can't get scope, so can't match */ 4644 return (0); 4645 } 4646 if (b.sin6_scope_id == 0) 4647 if (sa6_recoverscope(&b)) { 4648 /* can't get scope, so can't match */ 4649 return (0); 4650 } 4651 if (a.sin6_scope_id != b.sin6_scope_id) 4652 return (0); 4653 4654 return (1); 4655 } 4656 4657 /* 4658 * returns a sockaddr_in6 with embedded scope recovered and removed 4659 */ 4660 struct sockaddr_in6 * 4661 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4662 { 4663 /* check and strip embedded scope junk */ 4664 if (addr->sin6_family == AF_INET6) { 4665 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4666 if (addr->sin6_scope_id == 0) { 4667 *store = *addr; 4668 if (!sa6_recoverscope(store)) { 4669 /* use the recovered scope */ 4670 addr = store; 4671 } 4672 } else { 4673 /* else, return the original "to" addr */ 4674 in6_clearscope(&addr->sin6_addr); 4675 } 4676 } 4677 } 4678 return (addr); 4679 } 4680 #endif 4681 4682 /* 4683 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4684 * if same, 0 if not 4685 */ 4686 int 4687 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4688 { 4689 4690 /* must be valid */ 4691 if (sa1 == NULL || sa2 == NULL) 4692 return (0); 4693 4694 /* must be the same family */ 4695 if (sa1->sa_family != sa2->sa_family) 4696 return (0); 4697 4698 switch (sa1->sa_family) { 4699 #ifdef INET6 4700 case AF_INET6: 4701 { 4702 /* IPv6 addresses */ 4703 struct sockaddr_in6 *sin6_1, *sin6_2; 4704 4705 sin6_1 = (struct sockaddr_in6 *)sa1; 4706 sin6_2 = (struct sockaddr_in6 *)sa2; 4707 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4708 sin6_2)); 4709 } 4710 #endif 4711 #ifdef INET 4712 case AF_INET: 4713 { 4714 /* IPv4 addresses */ 4715 struct sockaddr_in *sin_1, *sin_2; 4716 4717 sin_1 = (struct sockaddr_in *)sa1; 4718 sin_2 = (struct sockaddr_in *)sa2; 4719 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4720 } 4721 #endif 4722 default: 4723 /* we don't do these... */ 4724 return (0); 4725 } 4726 } 4727 4728 void 4729 sctp_print_address(struct sockaddr *sa) 4730 { 4731 #ifdef INET6 4732 char ip6buf[INET6_ADDRSTRLEN]; 4733 #endif 4734 4735 switch (sa->sa_family) { 4736 #ifdef INET6 4737 case AF_INET6: 4738 { 4739 struct sockaddr_in6 *sin6; 4740 4741 sin6 = (struct sockaddr_in6 *)sa; 4742 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4743 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4744 ntohs(sin6->sin6_port), 4745 sin6->sin6_scope_id); 4746 break; 4747 } 4748 #endif 4749 #ifdef INET 4750 case AF_INET: 4751 { 4752 struct sockaddr_in *sin; 4753 unsigned char *p; 4754 4755 sin = (struct sockaddr_in *)sa; 4756 p = (unsigned char *)&sin->sin_addr; 4757 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4758 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4759 break; 4760 } 4761 #endif 4762 default: 4763 SCTP_PRINTF("?\n"); 4764 break; 4765 } 4766 } 4767 4768 void 4769 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4770 struct sctp_inpcb *new_inp, 4771 struct sctp_tcb *stcb, 4772 int waitflags) 4773 { 4774 /* 4775 * go through our old INP and pull off any control structures that 4776 * belong to stcb and move them to the new inp. 4777 */ 4778 struct socket *old_so, *new_so; 4779 struct sctp_queued_to_read *control, *nctl; 4780 struct sctp_readhead tmp_queue; 4781 struct mbuf *m; 4782 int error = 0; 4783 4784 old_so = old_inp->sctp_socket; 4785 new_so = new_inp->sctp_socket; 4786 TAILQ_INIT(&tmp_queue); 4787 error = SOCK_IO_RECV_LOCK(old_so, waitflags); 4788 if (error) { 4789 /* 4790 * Gak, can't get I/O lock, we have a problem. data will be 4791 * left stranded.. and we don't dare look at it since the 4792 * other thread may be reading something. Oh well, it's a 4793 * screwed up app that does a peeloff OR an accept while 4794 * reading from the main socket... actually it's only the 4795 * peeloff() case, since I think read will fail on a 4796 * listening socket.. 4797 */ 4798 return; 4799 } 4800 /* lock the socket buffers */ 4801 SCTP_INP_READ_LOCK(old_inp); 4802 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4803 /* Pull off all for our target stcb */ 4804 if (control->stcb == stcb) { 4805 /* remove it, we want it */ 4806 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4807 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4808 m = control->data; 4809 while (m) { 4810 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4811 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ?
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4812 } 4813 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4814 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4815 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4816 } 4817 m = SCTP_BUF_NEXT(m); 4818 } 4819 } 4820 } 4821 SCTP_INP_READ_UNLOCK(old_inp); 4822 /* Remove the recv-lock on the old socket */ 4823 SOCK_IO_RECV_UNLOCK(old_so); 4824 /* Now we move them over to the new socket buffer */ 4825 SCTP_INP_READ_LOCK(new_inp); 4826 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4827 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4828 m = control->data; 4829 while (m) { 4830 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4831 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4832 } 4833 sctp_sballoc(stcb, &new_so->so_rcv, m); 4834 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4835 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4836 } 4837 m = SCTP_BUF_NEXT(m); 4838 } 4839 } 4840 SCTP_INP_READ_UNLOCK(new_inp); 4841 } 4842 4843 void 4844 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4845 struct sctp_tcb *stcb, 4846 int so_locked 4847 SCTP_UNUSED 4848 ) 4849 { 4850 if ((inp != NULL) && 4851 (inp->sctp_socket != NULL) && 4852 (((inp->sctp_flags & (SCTP_PCB_FLAGS_TCPTYPE | SCTP_PCB_FLAGS_IN_TCPPOOL)) == 0) || 4853 !SCTP_IS_LISTENING(inp))) { 4854 sctp_sorwakeup(inp, inp->sctp_socket); 4855 } 4856 } 4857 4858 void 4859 sctp_add_to_readq(struct sctp_inpcb *inp, 4860 struct sctp_tcb *stcb, 4861 struct sctp_queued_to_read *control, 4862 struct sockbuf *sb, 4863 int end, 4864 int inp_read_lock_held, 4865 int so_locked) 4866 { 4867 /* 4868 * Here we must place the control on the end of the socket read 4869 * queue AND increment sb_cc so that select will work properly on 4870 * read. 4871 */ 4872 struct mbuf *m, *prev = NULL; 4873 4874 if (inp == NULL) { 4875 /* Gak, TSNH!! */ 4876 #ifdef INVARIANTS 4877 panic("Gak, inp NULL on add_to_readq"); 4878 #endif 4879 return; 4880 } 4881 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4882 SCTP_INP_READ_LOCK(inp); 4883 } 4884 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4885 if (!control->on_strm_q) { 4886 sctp_free_remote_addr(control->whoFrom); 4887 if (control->data) { 4888 sctp_m_freem(control->data); 4889 control->data = NULL; 4890 } 4891 sctp_free_a_readq(stcb, control); 4892 } 4893 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4894 SCTP_INP_READ_UNLOCK(inp); 4895 } 4896 return; 4897 } 4898 if ((control->spec_flags & M_NOTIFICATION) == 0) { 4899 atomic_add_int(&inp->total_recvs, 1); 4900 if (!control->do_not_ref_stcb) { 4901 atomic_add_int(&stcb->total_recvs, 1); 4902 } 4903 } 4904 m = control->data; 4905 control->held_length = 0; 4906 control->length = 0; 4907 while (m != NULL) { 4908 if (SCTP_BUF_LEN(m) == 0) { 4909 /* Skip mbufs with NO length */ 4910 if (prev == NULL) { 4911 /* First one */ 4912 control->data = sctp_m_free(m); 4913 m = control->data; 4914 } else { 4915 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4916 m = SCTP_BUF_NEXT(prev); 4917 } 4918 if (m == NULL) { 4919 control->tail_mbuf = prev; 4920 } 4921 continue; 4922 } 4923 prev = m; 4924 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4925 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4926 } 4927 sctp_sballoc(stcb, sb, m); 4928 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4929 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4930 } 4931 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4932 m = SCTP_BUF_NEXT(m); 4933 } 4934 if (prev != NULL) { 4935 control->tail_mbuf = prev; 4936 } else { 4937 /* Everything got collapsed out?? */ 4938 if (!control->on_strm_q) { 4939 sctp_free_remote_addr(control->whoFrom); 4940 sctp_free_a_readq(stcb, control); 4941 } 4942 if (inp_read_lock_held == 0) 4943 SCTP_INP_READ_UNLOCK(inp); 4944 return; 4945 } 4946 if (end) { 4947 control->end_added = 1; 4948 } 4949 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4950 control->on_read_q = 1; 4951 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4952 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4953 } 4954 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4955 SCTP_INP_READ_UNLOCK(inp); 4956 } 4957 } 4958 4959 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4960 *************ALTERNATE ROUTING CODE 4961 */ 4962 4963 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4964 *************ALTERNATE ROUTING CODE 4965 */ 4966 4967 struct mbuf * 4968 sctp_generate_cause(uint16_t code, char *info) 4969 { 4970 struct mbuf *m; 4971 struct sctp_gen_error_cause *cause; 4972 size_t info_len; 4973 uint16_t len; 4974 4975 if ((code == 0) || (info == NULL)) { 4976 return (NULL); 4977 } 4978 info_len = strlen(info); 4979 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4980 return (NULL); 4981 } 4982 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4983 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4984 if (m != NULL) { 4985 SCTP_BUF_LEN(m) = len; 4986 cause = mtod(m, struct sctp_gen_error_cause *); 4987 cause->code = htons(code); 4988 cause->length = htons(len); 4989 memcpy(cause->info, info, info_len); 4990 } 4991 return (m); 4992 } 4993 4994 struct mbuf * 4995 sctp_generate_no_user_data_cause(uint32_t tsn) 4996 { 4997 struct mbuf *m; 4998 struct sctp_error_no_user_data *no_user_data_cause; 4999 uint16_t len; 5000 5001 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 5002 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 5003 if (m != NULL) { 5004 SCTP_BUF_LEN(m) = len; 5005 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 5006 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 5007 no_user_data_cause->cause.length = htons(len); 5008 no_user_data_cause->tsn = htonl(tsn); 5009 } 5010 return (m); 5011 } 5012 5013 void 5014 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 5015 struct sctp_tmit_chunk *tp1, int chk_cnt) 5016 { 5017 if (tp1->data == NULL) { 5018 return; 5019 } 5020 atomic_subtract_int(&asoc->chunks_on_out_queue, chk_cnt); 5021 #ifdef SCTP_MBCNT_LOGGING 5022 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 5023 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 5024 asoc->total_output_queue_size, 5025 tp1->book_size, 5026 0, 5027 tp1->mbcnt); 5028 } 5029 #endif 5030 if (asoc->total_output_queue_size >= tp1->book_size) { 5031 atomic_subtract_int(&asoc->total_output_queue_size, tp1->book_size); 5032 } else { 5033 asoc->total_output_queue_size = 0; 5034 } 5035 if ((stcb->sctp_socket != NULL) && 5036 (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 5037 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 5038 
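/* 1-to-1 style (TCP model) sockets also account this data in the socket send buffer, so release that space here. */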
SCTP_SB_DECR(&stcb->sctp_socket->so_snd, tp1->book_size); 5039 } 5040 } 5041 5042 int 5043 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5044 uint8_t sent, int so_locked) 5045 { 5046 struct sctp_stream_out *strq; 5047 struct sctp_tmit_chunk *chk = NULL, *tp2; 5048 struct sctp_stream_queue_pending *sp; 5049 uint32_t mid; 5050 uint16_t sid; 5051 uint8_t foundeom = 0; 5052 int ret_sz = 0; 5053 int notdone; 5054 int do_wakeup_routine = 0; 5055 5056 SCTP_TCB_LOCK_ASSERT(stcb); 5057 5058 sid = tp1->rec.data.sid; 5059 mid = tp1->rec.data.mid; 5060 if (sent || ((tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0)) { 5061 stcb->asoc.abandoned_sent[0]++; 5062 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5063 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5064 #if defined(SCTP_DETAILED_STR_STATS) 5065 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5066 #endif 5067 } else { 5068 stcb->asoc.abandoned_unsent[0]++; 5069 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5070 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5071 #if defined(SCTP_DETAILED_STR_STATS) 5072 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5073 #endif 5074 } 5075 do { 5076 ret_sz += tp1->book_size; 5077 if (tp1->data != NULL) { 5078 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5079 sctp_flight_size_decrease(tp1); 5080 sctp_total_flight_decrease(stcb, tp1); 5081 } 5082 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5083 stcb->asoc.peers_rwnd += tp1->send_size; 5084 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5085 if (sent) { 5086 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5087 } else { 5088 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5089 } 5090 if (tp1->data) { 5091 sctp_m_freem(tp1->data); 5092 tp1->data = NULL; 5093 } 5094 do_wakeup_routine = 1; 5095 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5096 stcb->asoc.sent_queue_cnt_removeable--; 5097 } 5098 } 5099 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5100 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5101 SCTP_DATA_NOT_FRAG) { 5102 /* not frag'ed we ae done */ 5103 notdone = 0; 5104 foundeom = 1; 5105 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5106 /* end of frag, we are done */ 5107 notdone = 0; 5108 foundeom = 1; 5109 } else { 5110 /* 5111 * Its a begin or middle piece, we must mark all of 5112 * it 5113 */ 5114 notdone = 1; 5115 tp1 = TAILQ_NEXT(tp1, sctp_next); 5116 } 5117 } while (tp1 && notdone); 5118 if (foundeom == 0) { 5119 /* 5120 * The multi-part message was scattered across the send and 5121 * sent queue. 5122 */ 5123 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5124 if ((tp1->rec.data.sid != sid) || 5125 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5126 break; 5127 } 5128 /* 5129 * save to chk in case we have some on stream out 5130 * queue. If so and we have an un-transmitted one we 5131 * don't have to fudge the TSN. 
5132 */ 5133 chk = tp1; 5134 ret_sz += tp1->book_size; 5135 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5136 if (sent) { 5137 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5138 } else { 5139 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5140 } 5141 if (tp1->data) { 5142 sctp_m_freem(tp1->data); 5143 tp1->data = NULL; 5144 } 5145 /* No flight involved here book the size to 0 */ 5146 tp1->book_size = 0; 5147 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5148 foundeom = 1; 5149 } 5150 do_wakeup_routine = 1; 5151 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5152 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5153 /* 5154 * on to the sent queue so we can wait for it to be 5155 * passed by. 5156 */ 5157 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5158 sctp_next); 5159 stcb->asoc.send_queue_cnt--; 5160 stcb->asoc.sent_queue_cnt++; 5161 } 5162 } 5163 if (foundeom == 0) { 5164 /* 5165 * Still no eom found. That means there is stuff left on the 5166 * stream out queue.. yuck. 5167 */ 5168 strq = &stcb->asoc.strmout[sid]; 5169 sp = TAILQ_FIRST(&strq->outqueue); 5170 if (sp != NULL) { 5171 sp->discard_rest = 1; 5172 /* 5173 * We may need to put a chunk on the queue that 5174 * holds the TSN that would have been sent with the 5175 * LAST bit. 5176 */ 5177 if (chk == NULL) { 5178 /* Yep, we have to */ 5179 sctp_alloc_a_chunk(stcb, chk); 5180 if (chk == NULL) { 5181 /* 5182 * we are hosed. All we can do is 5183 * nothing.. which will cause an 5184 * abort if the peer is paying 5185 * attention. 5186 */ 5187 goto oh_well; 5188 } 5189 memset(chk, 0, sizeof(*chk)); 5190 chk->rec.data.rcv_flags = 0; 5191 chk->sent = SCTP_FORWARD_TSN_SKIP; 5192 chk->asoc = &stcb->asoc; 5193 if (stcb->asoc.idata_supported == 0) { 5194 if (sp->sinfo_flags & SCTP_UNORDERED) { 5195 chk->rec.data.mid = 0; 5196 } else { 5197 chk->rec.data.mid = strq->next_mid_ordered; 5198 } 5199 } else { 5200 if (sp->sinfo_flags & SCTP_UNORDERED) { 5201 chk->rec.data.mid = strq->next_mid_unordered; 5202 } else { 5203 chk->rec.data.mid = strq->next_mid_ordered; 5204 } 5205 } 5206 chk->rec.data.sid = sp->sid; 5207 chk->rec.data.ppid = sp->ppid; 5208 chk->rec.data.context = sp->context; 5209 chk->flags = sp->act_flags; 5210 chk->whoTo = NULL; 5211 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5212 strq->chunks_on_queues++; 5213 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5214 stcb->asoc.sent_queue_cnt++; 5215 stcb->asoc.pr_sctp_cnt++; 5216 } 5217 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5218 if (sp->sinfo_flags & SCTP_UNORDERED) { 5219 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5220 } 5221 if (stcb->asoc.idata_supported == 0) { 5222 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5223 strq->next_mid_ordered++; 5224 } 5225 } else { 5226 if (sp->sinfo_flags & SCTP_UNORDERED) { 5227 strq->next_mid_unordered++; 5228 } else { 5229 strq->next_mid_ordered++; 5230 } 5231 } 5232 oh_well: 5233 if (sp->data) { 5234 /* 5235 * Pull any data to free up the SB and allow 5236 * sender to "add more" while we will throw 5237 * away :-) 5238 */ 5239 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5240 ret_sz += sp->length; 5241 do_wakeup_routine = 1; 5242 sp->some_taken = 1; 5243 sctp_m_freem(sp->data); 5244 sp->data = NULL; 5245 sp->tail_mbuf = NULL; 5246 sp->length = 0; 5247 } 5248 } 5249 } 5250 if (do_wakeup_routine) { 5251 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5252 } 5253 return (ret_sz); 5254 } 5255 5256 /* 5257 * checks to see if the given address, 
sa, is one that is currently known by 5258 * the kernel note: can't distinguish the same address on multiple interfaces 5259 * and doesn't handle multiple addresses with different zone/scope id's note: 5260 * ifa_ifwithaddr() compares the entire sockaddr struct 5261 */ 5262 struct sctp_ifa * 5263 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5264 int holds_lock) 5265 { 5266 struct sctp_laddr *laddr; 5267 5268 if (holds_lock == 0) { 5269 SCTP_INP_RLOCK(inp); 5270 } 5271 5272 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5273 if (laddr->ifa == NULL) 5274 continue; 5275 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5276 continue; 5277 #ifdef INET 5278 if (addr->sa_family == AF_INET) { 5279 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5280 laddr->ifa->address.sin.sin_addr.s_addr) { 5281 /* found him. */ 5282 break; 5283 } 5284 } 5285 #endif 5286 #ifdef INET6 5287 if (addr->sa_family == AF_INET6) { 5288 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5289 &laddr->ifa->address.sin6)) { 5290 /* found him. */ 5291 break; 5292 } 5293 } 5294 #endif 5295 } 5296 if (holds_lock == 0) { 5297 SCTP_INP_RUNLOCK(inp); 5298 } 5299 if (laddr != NULL) { 5300 return (laddr->ifa); 5301 } else { 5302 return (NULL); 5303 } 5304 } 5305 5306 uint32_t 5307 sctp_get_ifa_hash_val(struct sockaddr *addr) 5308 { 5309 switch (addr->sa_family) { 5310 #ifdef INET 5311 case AF_INET: 5312 { 5313 struct sockaddr_in *sin; 5314 5315 sin = (struct sockaddr_in *)addr; 5316 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5317 } 5318 #endif 5319 #ifdef INET6 5320 case AF_INET6: 5321 { 5322 struct sockaddr_in6 *sin6; 5323 uint32_t hash_of_addr; 5324 5325 sin6 = (struct sockaddr_in6 *)addr; 5326 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5327 sin6->sin6_addr.s6_addr32[1] + 5328 sin6->sin6_addr.s6_addr32[2] + 5329 sin6->sin6_addr.s6_addr32[3]); 5330 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5331 return (hash_of_addr); 5332 } 5333 #endif 5334 default: 5335 break; 5336 } 5337 return (0); 5338 } 5339 5340 struct sctp_ifa * 5341 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5342 { 5343 struct sctp_ifa *sctp_ifap; 5344 struct sctp_vrf *vrf; 5345 struct sctp_ifalist *hash_head; 5346 uint32_t hash_of_addr; 5347 5348 if (holds_lock == 0) { 5349 SCTP_IPI_ADDR_RLOCK(); 5350 } else { 5351 SCTP_IPI_ADDR_LOCK_ASSERT(); 5352 } 5353 5354 vrf = sctp_find_vrf(vrf_id); 5355 if (vrf == NULL) { 5356 if (holds_lock == 0) 5357 SCTP_IPI_ADDR_RUNLOCK(); 5358 return (NULL); 5359 } 5360 5361 hash_of_addr = sctp_get_ifa_hash_val(addr); 5362 5363 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5364 if (hash_head == NULL) { 5365 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5366 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5367 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5368 sctp_print_address(addr); 5369 SCTP_PRINTF("No such bucket for address\n"); 5370 if (holds_lock == 0) 5371 SCTP_IPI_ADDR_RUNLOCK(); 5372 5373 return (NULL); 5374 } 5375 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5376 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5377 continue; 5378 #ifdef INET 5379 if (addr->sa_family == AF_INET) { 5380 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5381 sctp_ifap->address.sin.sin_addr.s_addr) { 5382 /* found him. 
*/ 5383 break; 5384 } 5385 } 5386 #endif 5387 #ifdef INET6 5388 if (addr->sa_family == AF_INET6) { 5389 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5390 &sctp_ifap->address.sin6)) { 5391 /* found him. */ 5392 break; 5393 } 5394 } 5395 #endif 5396 } 5397 if (holds_lock == 0) 5398 SCTP_IPI_ADDR_RUNLOCK(); 5399 return (sctp_ifap); 5400 } 5401 5402 static void 5403 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5404 uint32_t rwnd_req) 5405 { 5406 /* User pulled some data, do we need a rwnd update? */ 5407 struct epoch_tracker et; 5408 int r_unlocked = 0; 5409 uint32_t dif, rwnd; 5410 struct socket *so = NULL; 5411 5412 if (stcb == NULL) 5413 return; 5414 5415 atomic_add_int(&stcb->asoc.refcnt, 1); 5416 5417 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5418 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5419 /* Pre-check If we are freeing no update */ 5420 goto no_lock; 5421 } 5422 SCTP_INP_INCR_REF(stcb->sctp_ep); 5423 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5424 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5425 goto out; 5426 } 5427 so = stcb->sctp_socket; 5428 if (so == NULL) { 5429 goto out; 5430 } 5431 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5432 /* Have you have freed enough to look */ 5433 *freed_so_far = 0; 5434 /* Yep, its worth a look and the lock overhead */ 5435 5436 /* Figure out what the rwnd would be */ 5437 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5438 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5439 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5440 } else { 5441 dif = 0; 5442 } 5443 if (dif >= rwnd_req) { 5444 if (hold_rlock) { 5445 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5446 r_unlocked = 1; 5447 } 5448 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5449 /* 5450 * One last check before we allow the guy possibly 5451 * to get in. There is a race, where the guy has not 5452 * reached the gate. In that case 5453 */ 5454 goto out; 5455 } 5456 SCTP_TCB_LOCK(stcb); 5457 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5458 /* No reports here */ 5459 SCTP_TCB_UNLOCK(stcb); 5460 goto out; 5461 } 5462 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5463 NET_EPOCH_ENTER(et); 5464 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5465 5466 sctp_chunk_output(stcb->sctp_ep, stcb, 5467 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5468 /* make sure no timer is running */ 5469 NET_EPOCH_EXIT(et); 5470 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5471 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5472 SCTP_TCB_UNLOCK(stcb); 5473 } else { 5474 /* Update how much we have pending */ 5475 stcb->freed_by_sorcv_sincelast = dif; 5476 } 5477 out: 5478 if (so && r_unlocked && hold_rlock) { 5479 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5480 } 5481 5482 SCTP_INP_DECR_REF(stcb->sctp_ep); 5483 no_lock: 5484 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5485 return; 5486 } 5487 5488 int 5489 sctp_sorecvmsg(struct socket *so, 5490 struct uio *uio, 5491 struct mbuf **mp, 5492 struct sockaddr *from, 5493 int fromlen, 5494 int *msg_flags, 5495 struct sctp_sndrcvinfo *sinfo, 5496 int filling_sinfo) 5497 { 5498 /* 5499 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5500 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5501 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
5502 * On the way out we may send out any combination of: 5503 * MSG_NOTIFICATION MSG_EOR 5504 * 5505 */ 5506 struct sctp_inpcb *inp = NULL; 5507 ssize_t my_len = 0; 5508 ssize_t cp_len = 0; 5509 int error = 0; 5510 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5511 struct mbuf *m = NULL; 5512 struct sctp_tcb *stcb = NULL; 5513 int wakeup_read_socket = 0; 5514 int freecnt_applied = 0; 5515 int out_flags = 0, in_flags = 0; 5516 int block_allowed = 1; 5517 uint32_t freed_so_far = 0; 5518 ssize_t copied_so_far = 0; 5519 int in_eeor_mode = 0; 5520 int no_rcv_needed = 0; 5521 uint32_t rwnd_req = 0; 5522 int hold_sblock = 0; 5523 int hold_rlock = 0; 5524 ssize_t slen = 0; 5525 uint32_t held_length = 0; 5526 int sockbuf_lock = 0; 5527 5528 if (uio == NULL) { 5529 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5530 return (EINVAL); 5531 } 5532 5533 if (msg_flags) { 5534 in_flags = *msg_flags; 5535 if (in_flags & MSG_PEEK) 5536 SCTP_STAT_INCR(sctps_read_peeks); 5537 } else { 5538 in_flags = 0; 5539 } 5540 slen = uio->uio_resid; 5541 5542 /* Pull in and set up our int flags */ 5543 if (in_flags & MSG_OOB) { 5544 /* Out of band's NOT supported */ 5545 return (EOPNOTSUPP); 5546 } 5547 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5548 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5549 return (EINVAL); 5550 } 5551 if ((in_flags & (MSG_DONTWAIT 5552 | MSG_NBIO 5553 )) || 5554 SCTP_SO_IS_NBIO(so)) { 5555 block_allowed = 0; 5556 } 5557 /* setup the endpoint */ 5558 inp = (struct sctp_inpcb *)so->so_pcb; 5559 if (inp == NULL) { 5560 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5561 return (EFAULT); 5562 } 5563 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5564 /* Must be at least a MTU's worth */ 5565 if (rwnd_req < SCTP_MIN_RWND) 5566 rwnd_req = SCTP_MIN_RWND; 5567 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5568 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5569 sctp_misc_ints(SCTP_SORECV_ENTER, 5570 rwnd_req, in_eeor_mode, SCTP_SBAVAIL(&so->so_rcv), (uint32_t)uio->uio_resid); 5571 } 5572 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5573 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5574 rwnd_req, block_allowed, SCTP_SBAVAIL(&so->so_rcv), (uint32_t)uio->uio_resid); 5575 } 5576 5577 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(in_flags)); 5578 if (error) { 5579 goto release_unlocked; 5580 } 5581 sockbuf_lock = 1; 5582 restart: 5583 5584 restart_nosblocks: 5585 if (hold_sblock == 0) { 5586 SOCKBUF_LOCK(&so->so_rcv); 5587 hold_sblock = 1; 5588 } 5589 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5590 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5591 goto out; 5592 } 5593 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && SCTP_SBAVAIL(&so->so_rcv) == 0) { 5594 if (so->so_error) { 5595 error = so->so_error; 5596 if ((in_flags & MSG_PEEK) == 0) 5597 so->so_error = 0; 5598 goto out; 5599 } else { 5600 if (SCTP_SBAVAIL(&so->so_rcv) == 0) { 5601 /* indicate EOF */ 5602 error = 0; 5603 goto out; 5604 } 5605 } 5606 } 5607 if (SCTP_SBAVAIL(&so->so_rcv) <= held_length) { 5608 if (so->so_error) { 5609 error = so->so_error; 5610 if ((in_flags & MSG_PEEK) == 0) { 5611 so->so_error = 0; 5612 } 5613 goto out; 5614 } 5615 if ((SCTP_SBAVAIL(&so->so_rcv) == 0) && 5616 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5617 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5618 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5619 /* 5620 * 
For active open side clear flags for 5621 * re-use passive open is blocked by 5622 * connect. 5623 */ 5624 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5625 /* 5626 * You were aborted, passive side 5627 * always hits here 5628 */ 5629 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5630 error = ECONNRESET; 5631 } 5632 so->so_state &= ~(SS_ISCONNECTING | 5633 SS_ISDISCONNECTING | 5634 SS_ISCONFIRMING | 5635 SS_ISCONNECTED); 5636 if (error == 0) { 5637 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5638 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5639 error = ENOTCONN; 5640 } 5641 } 5642 goto out; 5643 } 5644 } 5645 if (block_allowed) { 5646 error = sbwait(so, SO_RCV); 5647 if (error) { 5648 goto out; 5649 } 5650 held_length = 0; 5651 goto restart_nosblocks; 5652 } else { 5653 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5654 error = EWOULDBLOCK; 5655 goto out; 5656 } 5657 } 5658 if (hold_sblock == 1) { 5659 SOCKBUF_UNLOCK(&so->so_rcv); 5660 hold_sblock = 0; 5661 } 5662 /* we possibly have data we can read */ 5663 /* sa_ignore FREED_MEMORY */ 5664 control = TAILQ_FIRST(&inp->read_queue); 5665 if (control == NULL) { 5666 /* 5667 * This could be happening since the appender did the 5668 * increment but as not yet did the tailq insert onto the 5669 * read_queue 5670 */ 5671 if (hold_rlock == 0) { 5672 SCTP_INP_READ_LOCK(inp); 5673 } 5674 control = TAILQ_FIRST(&inp->read_queue); 5675 if ((control == NULL) && (SCTP_SBAVAIL(&so->so_rcv) > 0)) { 5676 #ifdef INVARIANTS 5677 panic("Huh, its non zero and nothing on control?"); 5678 #endif 5679 SCTP_SB_CLEAR(so->so_rcv); 5680 } 5681 SCTP_INP_READ_UNLOCK(inp); 5682 hold_rlock = 0; 5683 goto restart; 5684 } 5685 5686 if ((control->length == 0) && 5687 (control->do_not_ref_stcb)) { 5688 /* 5689 * Clean up code for freeing assoc that left behind a 5690 * pdapi.. maybe a peer in EEOR that just closed after 5691 * sending and never indicated a EOR. 5692 */ 5693 if (hold_rlock == 0) { 5694 hold_rlock = 1; 5695 SCTP_INP_READ_LOCK(inp); 5696 } 5697 control->held_length = 0; 5698 if (control->data) { 5699 /* Hmm there is data here .. fix */ 5700 struct mbuf *m_tmp; 5701 int cnt = 0; 5702 5703 m_tmp = control->data; 5704 while (m_tmp) { 5705 cnt += SCTP_BUF_LEN(m_tmp); 5706 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5707 control->tail_mbuf = m_tmp; 5708 control->end_added = 1; 5709 } 5710 m_tmp = SCTP_BUF_NEXT(m_tmp); 5711 } 5712 control->length = cnt; 5713 } else { 5714 /* remove it */ 5715 TAILQ_REMOVE(&inp->read_queue, control, next); 5716 /* Add back any hidden data */ 5717 sctp_free_remote_addr(control->whoFrom); 5718 sctp_free_a_readq(stcb, control); 5719 } 5720 if (hold_rlock) { 5721 hold_rlock = 0; 5722 SCTP_INP_READ_UNLOCK(inp); 5723 } 5724 goto restart; 5725 } 5726 if ((control->length == 0) && 5727 (control->end_added == 1)) { 5728 /* 5729 * Do we also need to check for (control->pdapi_aborted == 5730 * 1)? 5731 */ 5732 if (hold_rlock == 0) { 5733 hold_rlock = 1; 5734 SCTP_INP_READ_LOCK(inp); 5735 } 5736 TAILQ_REMOVE(&inp->read_queue, control, next); 5737 if (control->data) { 5738 #ifdef INVARIANTS 5739 panic("control->data not null but control->length == 0"); 5740 #else 5741 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5742 sctp_m_freem(control->data); 5743 control->data = NULL; 5744 #endif 5745 } 5746 if (control->aux_data) { 5747 sctp_m_free(control->aux_data); 5748 control->aux_data = NULL; 5749 } 5750 #ifdef INVARIANTS 5751 if (control->on_strm_q) { 5752 panic("About to free ctl:%p so:%p and its in %d", 5753 control, so, control->on_strm_q); 5754 } 5755 #endif 5756 sctp_free_remote_addr(control->whoFrom); 5757 sctp_free_a_readq(stcb, control); 5758 if (hold_rlock) { 5759 hold_rlock = 0; 5760 SCTP_INP_READ_UNLOCK(inp); 5761 } 5762 goto restart; 5763 } 5764 if (control->length == 0) { 5765 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5766 (filling_sinfo)) { 5767 /* find a more suitable one then this */ 5768 ctl = TAILQ_NEXT(control, next); 5769 while (ctl) { 5770 if ((ctl->stcb != control->stcb) && (ctl->length) && 5771 (ctl->some_taken || 5772 (ctl->spec_flags & M_NOTIFICATION) || 5773 ((ctl->do_not_ref_stcb == 0) && 5774 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5775 ) { 5776 /*- 5777 * If we have a different TCB next, and there is data 5778 * present. If we have already taken some (pdapi), OR we can 5779 * ref the tcb and no delivery as started on this stream, we 5780 * take it. Note we allow a notification on a different 5781 * assoc to be delivered.. 5782 */ 5783 control = ctl; 5784 goto found_one; 5785 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5786 (ctl->length) && 5787 ((ctl->some_taken) || 5788 ((ctl->do_not_ref_stcb == 0) && 5789 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5790 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5791 /*- 5792 * If we have the same tcb, and there is data present, and we 5793 * have the strm interleave feature present. Then if we have 5794 * taken some (pdapi) or we can refer to tht tcb AND we have 5795 * not started a delivery for this stream, we can take it. 5796 * Note we do NOT allow a notification on the same assoc to 5797 * be delivered. 5798 */ 5799 control = ctl; 5800 goto found_one; 5801 } 5802 ctl = TAILQ_NEXT(ctl, next); 5803 } 5804 } 5805 /* 5806 * if we reach here, not suitable replacement is available 5807 * <or> fragment interleave is NOT on. So stuff the sb_cc 5808 * into the our held count, and its time to sleep again. 5809 */ 5810 held_length = SCTP_SBAVAIL(&so->so_rcv); 5811 control->held_length = SCTP_SBAVAIL(&so->so_rcv); 5812 goto restart; 5813 } 5814 /* Clear the held length since there is something to read */ 5815 control->held_length = 0; 5816 found_one: 5817 /* 5818 * If we reach here, control has a some data for us to read off. 5819 * Note that stcb COULD be NULL. 5820 */ 5821 if (hold_rlock == 0) { 5822 hold_rlock = 1; 5823 SCTP_INP_READ_LOCK(inp); 5824 } 5825 control->some_taken++; 5826 stcb = control->stcb; 5827 if (stcb) { 5828 if ((control->do_not_ref_stcb == 0) && 5829 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5830 if (freecnt_applied == 0) 5831 stcb = NULL; 5832 } else if (control->do_not_ref_stcb == 0) { 5833 /* you can't free it on me please */ 5834 /* 5835 * The lock on the socket buffer protects us so the 5836 * free code will stop. 
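 * (Holding the refcnt keeps the association from being destroyed while we are still copying its data out.)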
But since we used the 5837 * socketbuf lock and the sender uses the tcb_lock 5838 * to increment, we need to use the atomic add to 5839 * the refcnt 5840 */ 5841 if (freecnt_applied) { 5842 #ifdef INVARIANTS 5843 panic("refcnt already incremented"); 5844 #else 5845 SCTP_PRINTF("refcnt already incremented?\n"); 5846 #endif 5847 } else { 5848 atomic_add_int(&stcb->asoc.refcnt, 1); 5849 freecnt_applied = 1; 5850 } 5851 /* 5852 * Setup to remember how much we have not yet told 5853 * the peer our rwnd has opened up. Note we grab the 5854 * value from the tcb from last time. Note too that 5855 * sack sending clears this when a sack is sent, 5856 * which is fine. Once we hit the rwnd_req, we then 5857 * will go to the sctp_user_rcvd() that will not 5858 * lock until it KNOWs it MUST send a WUP-SACK. 5859 */ 5860 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5861 stcb->freed_by_sorcv_sincelast = 0; 5862 } 5863 } 5864 if (stcb && 5865 ((control->spec_flags & M_NOTIFICATION) == 0) && 5866 control->do_not_ref_stcb == 0) { 5867 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5868 } 5869 5870 /* First lets get off the sinfo and sockaddr info */ 5871 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5872 sinfo->sinfo_stream = control->sinfo_stream; 5873 sinfo->sinfo_ssn = (uint16_t)control->mid; 5874 sinfo->sinfo_flags = control->sinfo_flags; 5875 sinfo->sinfo_ppid = control->sinfo_ppid; 5876 sinfo->sinfo_context = control->sinfo_context; 5877 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5878 sinfo->sinfo_tsn = control->sinfo_tsn; 5879 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5880 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5881 nxt = TAILQ_NEXT(control, next); 5882 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5883 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5884 struct sctp_extrcvinfo *s_extra; 5885 5886 s_extra = (struct sctp_extrcvinfo *)sinfo; 5887 if ((nxt) && 5888 (nxt->length)) { 5889 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5890 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5891 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5892 } 5893 if (nxt->spec_flags & M_NOTIFICATION) { 5894 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5895 } 5896 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5897 s_extra->serinfo_next_length = nxt->length; 5898 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5899 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5900 if (nxt->tail_mbuf != NULL) { 5901 if (nxt->end_added) { 5902 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5903 } 5904 } 5905 } else { 5906 /* 5907 * we explicitly 0 this, since the memcpy 5908 * got some other things beyond the older 5909 * sinfo_ that is on the control's structure 5910 * :-D 5911 */ 5912 nxt = NULL; 5913 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5914 s_extra->serinfo_next_aid = 0; 5915 s_extra->serinfo_next_length = 0; 5916 s_extra->serinfo_next_ppid = 0; 5917 s_extra->serinfo_next_stream = 0; 5918 } 5919 } 5920 /* 5921 * update off the real current cum-ack, if we have an stcb. 5922 */ 5923 if ((control->do_not_ref_stcb == 0) && stcb) 5924 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5925 /* 5926 * mask off the high bits, we keep the actual chunk bits in 5927 * there. 
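 * (The high byte of control->sinfo_flags carries the DATA chunk flags; SCTP_DATA_UNORDERED there is mapped to SCTP_UNORDERED for the application just below.)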
5928 */ 5929 sinfo->sinfo_flags &= 0x00ff; 5930 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5931 sinfo->sinfo_flags |= SCTP_UNORDERED; 5932 } 5933 } 5934 #ifdef SCTP_ASOCLOG_OF_TSNS 5935 { 5936 int index, newindex; 5937 struct sctp_pcbtsn_rlog *entry; 5938 5939 do { 5940 index = inp->readlog_index; 5941 newindex = index + 1; 5942 if (newindex >= SCTP_READ_LOG_SIZE) { 5943 newindex = 0; 5944 } 5945 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5946 entry = &inp->readlog[index]; 5947 entry->vtag = control->sinfo_assoc_id; 5948 entry->strm = control->sinfo_stream; 5949 entry->seq = (uint16_t)control->mid; 5950 entry->sz = control->length; 5951 entry->flgs = control->sinfo_flags; 5952 } 5953 #endif 5954 if ((fromlen > 0) && (from != NULL)) { 5955 union sctp_sockstore store; 5956 size_t len; 5957 5958 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5959 #ifdef INET6 5960 case AF_INET6: 5961 len = sizeof(struct sockaddr_in6); 5962 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5963 store.sin6.sin6_port = control->port_from; 5964 break; 5965 #endif 5966 #ifdef INET 5967 case AF_INET: 5968 #ifdef INET6 5969 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5970 len = sizeof(struct sockaddr_in6); 5971 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5972 &store.sin6); 5973 store.sin6.sin6_port = control->port_from; 5974 } else { 5975 len = sizeof(struct sockaddr_in); 5976 store.sin = control->whoFrom->ro._l_addr.sin; 5977 store.sin.sin_port = control->port_from; 5978 } 5979 #else 5980 len = sizeof(struct sockaddr_in); 5981 store.sin = control->whoFrom->ro._l_addr.sin; 5982 store.sin.sin_port = control->port_from; 5983 #endif 5984 break; 5985 #endif 5986 default: 5987 len = 0; 5988 break; 5989 } 5990 memcpy(from, &store, min((size_t)fromlen, len)); 5991 #ifdef INET6 5992 { 5993 struct sockaddr_in6 lsa6, *from6; 5994 5995 from6 = (struct sockaddr_in6 *)from; 5996 sctp_recover_scope_mac(from6, (&lsa6)); 5997 } 5998 #endif 5999 } 6000 if (hold_rlock) { 6001 SCTP_INP_READ_UNLOCK(inp); 6002 hold_rlock = 0; 6003 } 6004 if (hold_sblock) { 6005 SOCKBUF_UNLOCK(&so->so_rcv); 6006 hold_sblock = 0; 6007 } 6008 /* now copy out what data we can */ 6009 if (mp == NULL) { 6010 /* copy out each mbuf in the chain up to length */ 6011 get_more_data: 6012 m = control->data; 6013 while (m) { 6014 /* Move out all we can */ 6015 cp_len = uio->uio_resid; 6016 my_len = SCTP_BUF_LEN(m); 6017 if (cp_len > my_len) { 6018 /* not enough in this buf */ 6019 cp_len = my_len; 6020 } 6021 if (hold_rlock) { 6022 SCTP_INP_READ_UNLOCK(inp); 6023 hold_rlock = 0; 6024 } 6025 if (cp_len > 0) 6026 error = uiomove(mtod(m, char *), (int)cp_len, uio); 6027 /* re-read */ 6028 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 6029 goto release; 6030 } 6031 6032 if ((control->do_not_ref_stcb == 0) && stcb && 6033 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 6034 no_rcv_needed = 1; 6035 } 6036 if (error) { 6037 /* error we are out of here */ 6038 goto release; 6039 } 6040 SCTP_INP_READ_LOCK(inp); 6041 hold_rlock = 1; 6042 if (cp_len == SCTP_BUF_LEN(m)) { 6043 if ((SCTP_BUF_NEXT(m) == NULL) && 6044 (control->end_added)) { 6045 out_flags |= MSG_EOR; 6046 if ((control->do_not_ref_stcb == 0) && 6047 (control->stcb != NULL) && 6048 ((control->spec_flags & M_NOTIFICATION) == 0)) 6049 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6050 } 6051 if (control->spec_flags & M_NOTIFICATION) { 6052 out_flags |= MSG_NOTIFICATION; 6053 } 6054 /* we ate up the mbuf */ 
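/* With MSG_PEEK we only step to the next mbuf; otherwise the mbuf is freed and the socket-buffer accounting is updated below. */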
6055 if (in_flags & MSG_PEEK) { 6056 /* just looking */ 6057 m = SCTP_BUF_NEXT(m); 6058 copied_so_far += cp_len; 6059 } else { 6060 /* dispose of the mbuf */ 6061 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6062 sctp_sblog(&so->so_rcv, 6063 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6064 } 6065 sctp_sbfree(control, stcb, &so->so_rcv, m); 6066 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6067 sctp_sblog(&so->so_rcv, 6068 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6069 } 6070 copied_so_far += cp_len; 6071 freed_so_far += (uint32_t)cp_len; 6072 freed_so_far += MSIZE; 6073 atomic_subtract_int(&control->length, (int)cp_len); 6074 control->data = sctp_m_free(m); 6075 m = control->data; 6076 /* 6077 * been through it all, must hold sb 6078 * lock ok to null tail 6079 */ 6080 if (control->data == NULL) { 6081 #ifdef INVARIANTS 6082 if ((control->end_added == 0) || 6083 (TAILQ_NEXT(control, next) == NULL)) { 6084 /* 6085 * If the end is not 6086 * added, OR the 6087 * next is NOT null 6088 * we MUST have the 6089 * lock. 6090 */ 6091 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6092 panic("Hmm we don't own the lock?"); 6093 } 6094 } 6095 #endif 6096 control->tail_mbuf = NULL; 6097 #ifdef INVARIANTS 6098 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6099 panic("end_added, nothing left and no MSG_EOR"); 6100 } 6101 #endif 6102 } 6103 } 6104 } else { 6105 /* Do we need to trim the mbuf? */ 6106 if (control->spec_flags & M_NOTIFICATION) { 6107 out_flags |= MSG_NOTIFICATION; 6108 } 6109 if ((in_flags & MSG_PEEK) == 0) { 6110 SCTP_BUF_RESV_UF(m, cp_len); 6111 SCTP_BUF_LEN(m) -= (int)cp_len; 6112 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6113 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6114 } 6115 SCTP_SB_DECR(&so->so_rcv, cp_len); 6116 if ((control->do_not_ref_stcb == 0) && 6117 stcb) { 6118 atomic_subtract_int(&stcb->asoc.sb_cc, (int)cp_len); 6119 } 6120 copied_so_far += cp_len; 6121 freed_so_far += (uint32_t)cp_len; 6122 freed_so_far += MSIZE; 6123 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6124 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6125 SCTP_LOG_SBRESULT, 0); 6126 } 6127 atomic_subtract_int(&control->length, (int)cp_len); 6128 } else { 6129 copied_so_far += cp_len; 6130 } 6131 } 6132 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6133 break; 6134 } 6135 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6136 (control->do_not_ref_stcb == 0) && 6137 (freed_so_far >= rwnd_req)) { 6138 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6139 } 6140 } /* end while(m) */ 6141 /* 6142 * At this point we have looked at it all and we either have 6143 * a MSG_EOR/or read all the user wants... <OR> 6144 * control->length == 0. 6145 */ 6146 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6147 /* we are done with this control */ 6148 if (control->length == 0) { 6149 if (control->data) { 6150 #ifdef INVARIANTS 6151 panic("control->data not null at read eor?"); 6152 #else 6153 SCTP_PRINTF("Strange, data left in the control buffer .. 
invariants would panic?\n"); 6154 sctp_m_freem(control->data); 6155 control->data = NULL; 6156 #endif 6157 } 6158 done_with_control: 6159 if (hold_rlock == 0) { 6160 SCTP_INP_READ_LOCK(inp); 6161 hold_rlock = 1; 6162 } 6163 TAILQ_REMOVE(&inp->read_queue, control, next); 6164 /* Add back any hidden data */ 6165 if (control->held_length) { 6166 held_length = 0; 6167 control->held_length = 0; 6168 wakeup_read_socket = 1; 6169 } 6170 if (control->aux_data) { 6171 sctp_m_free(control->aux_data); 6172 control->aux_data = NULL; 6173 } 6174 no_rcv_needed = control->do_not_ref_stcb; 6175 sctp_free_remote_addr(control->whoFrom); 6176 control->data = NULL; 6177 #ifdef INVARIANTS 6178 if (control->on_strm_q) { 6179 panic("About to free ctl:%p so:%p and its in %d", 6180 control, so, control->on_strm_q); 6181 } 6182 #endif 6183 sctp_free_a_readq(stcb, control); 6184 control = NULL; 6185 if ((freed_so_far >= rwnd_req) && 6186 (no_rcv_needed == 0)) 6187 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6188 6189 } else { 6190 /* 6191 * The user did not read all of this 6192 * message, turn off the returned MSG_EOR 6193 * since we are leaving more behind on the 6194 * control to read. 6195 */ 6196 #ifdef INVARIANTS 6197 if (control->end_added && 6198 (control->data == NULL) && 6199 (control->tail_mbuf == NULL)) { 6200 panic("Gak, control->length is corrupt?"); 6201 } 6202 #endif 6203 no_rcv_needed = control->do_not_ref_stcb; 6204 out_flags &= ~MSG_EOR; 6205 } 6206 } 6207 if (out_flags & MSG_EOR) { 6208 goto release; 6209 } 6210 if ((uio->uio_resid == 0) || 6211 ((in_eeor_mode) && 6212 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6213 goto release; 6214 } 6215 /* 6216 * If I hit here the receiver wants more and this message is 6217 * NOT done (pd-api). So two questions. Can we block? if not 6218 * we are done. Did the user NOT set MSG_WAITALL? 6219 */ 6220 if (block_allowed == 0) { 6221 goto release; 6222 } 6223 /* 6224 * We need to wait for more data a few things: - We don't 6225 * release the I/O lock so we don't get someone else 6226 * reading. - We must be sure to account for the case where 6227 * what is added is NOT to our control when we wakeup. 6228 */ 6229 6230 /* 6231 * Do we need to tell the transport a rwnd update might be 6232 * needed before we go to sleep? 
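 * (Only when not peeking and at least rwnd_req bytes have been freed; sctp_user_rcvd() then decides whether a window-update SACK goes out.)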
6233 */ 6234 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6235 ((freed_so_far >= rwnd_req) && 6236 (control->do_not_ref_stcb == 0) && 6237 (no_rcv_needed == 0))) { 6238 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6239 } 6240 wait_some_more: 6241 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6242 goto release; 6243 } 6244 6245 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6246 goto release; 6247 6248 if (hold_rlock == 1) { 6249 SCTP_INP_READ_UNLOCK(inp); 6250 hold_rlock = 0; 6251 } 6252 if (hold_sblock == 0) { 6253 SOCKBUF_LOCK(&so->so_rcv); 6254 hold_sblock = 1; 6255 } 6256 if ((copied_so_far) && (control->length == 0) && 6257 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6258 goto release; 6259 } 6260 if (SCTP_SBAVAIL(&so->so_rcv) <= control->held_length) { 6261 error = sbwait(so, SO_RCV); 6262 if (error) { 6263 goto release; 6264 } 6265 control->held_length = 0; 6266 } 6267 if (hold_sblock) { 6268 SOCKBUF_UNLOCK(&so->so_rcv); 6269 hold_sblock = 0; 6270 } 6271 if (control->length == 0) { 6272 /* still nothing here */ 6273 if (control->end_added == 1) { 6274 /* he aborted, or is done i.e.did a shutdown */ 6275 out_flags |= MSG_EOR; 6276 if (control->pdapi_aborted) { 6277 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6278 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6279 6280 out_flags |= MSG_TRUNC; 6281 } else { 6282 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6283 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6284 } 6285 goto done_with_control; 6286 } 6287 if (SCTP_SBAVAIL(&so->so_rcv) > held_length) { 6288 control->held_length = SCTP_SBAVAIL(&so->so_rcv); 6289 held_length = 0; 6290 } 6291 goto wait_some_more; 6292 } else if (control->data == NULL) { 6293 /* 6294 * we must re-sync since data is probably being 6295 * added 6296 */ 6297 SCTP_INP_READ_LOCK(inp); 6298 if ((control->length > 0) && (control->data == NULL)) { 6299 /* 6300 * big trouble.. we have the lock and its 6301 * corrupt? 6302 */ 6303 #ifdef INVARIANTS 6304 panic("Impossible data==NULL length !=0"); 6305 #endif 6306 out_flags |= MSG_EOR; 6307 out_flags |= MSG_TRUNC; 6308 control->length = 0; 6309 SCTP_INP_READ_UNLOCK(inp); 6310 goto done_with_control; 6311 } 6312 SCTP_INP_READ_UNLOCK(inp); 6313 /* We will fall around to get more data */ 6314 } 6315 goto get_more_data; 6316 } else { 6317 /*- 6318 * Give caller back the mbuf chain, 6319 * store in uio_resid the length 6320 */ 6321 wakeup_read_socket = 0; 6322 if ((control->end_added == 0) || 6323 (TAILQ_NEXT(control, next) == NULL)) { 6324 /* Need to get rlock */ 6325 if (hold_rlock == 0) { 6326 SCTP_INP_READ_LOCK(inp); 6327 hold_rlock = 1; 6328 } 6329 } 6330 if (control->end_added) { 6331 out_flags |= MSG_EOR; 6332 if ((control->do_not_ref_stcb == 0) && 6333 (control->stcb != NULL) && 6334 ((control->spec_flags & M_NOTIFICATION) == 0)) 6335 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6336 } 6337 if (control->spec_flags & M_NOTIFICATION) { 6338 out_flags |= MSG_NOTIFICATION; 6339 } 6340 uio->uio_resid = control->length; 6341 *mp = control->data; 6342 m = control->data; 6343 while (m) { 6344 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6345 sctp_sblog(&so->so_rcv, 6346 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6347 } 6348 sctp_sbfree(control, stcb, &so->so_rcv, m); 6349 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6350 freed_so_far += MSIZE; 6351 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6352 sctp_sblog(&so->so_rcv, 6353 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6354 } 6355 m = SCTP_BUF_NEXT(m); 6356 } 6357 control->data = control->tail_mbuf = NULL; 6358 control->length = 0; 6359 if (out_flags & MSG_EOR) { 6360 /* Done with this control */ 6361 goto done_with_control; 6362 } 6363 } 6364 release: 6365 if (hold_rlock == 1) { 6366 SCTP_INP_READ_UNLOCK(inp); 6367 hold_rlock = 0; 6368 } 6369 if (hold_sblock == 1) { 6370 SOCKBUF_UNLOCK(&so->so_rcv); 6371 hold_sblock = 0; 6372 } 6373 6374 SOCK_IO_RECV_UNLOCK(so); 6375 sockbuf_lock = 0; 6376 6377 release_unlocked: 6378 if (hold_sblock) { 6379 SOCKBUF_UNLOCK(&so->so_rcv); 6380 hold_sblock = 0; 6381 } 6382 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6383 if ((freed_so_far >= rwnd_req) && 6384 (control && (control->do_not_ref_stcb == 0)) && 6385 (no_rcv_needed == 0)) 6386 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6387 } 6388 out: 6389 if (msg_flags) { 6390 *msg_flags = out_flags; 6391 } 6392 if (((out_flags & MSG_EOR) == 0) && 6393 ((in_flags & MSG_PEEK) == 0) && 6394 (sinfo) && 6395 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6396 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6397 struct sctp_extrcvinfo *s_extra; 6398 6399 s_extra = (struct sctp_extrcvinfo *)sinfo; 6400 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6401 } 6402 if (hold_rlock == 1) { 6403 SCTP_INP_READ_UNLOCK(inp); 6404 } 6405 if (hold_sblock) { 6406 SOCKBUF_UNLOCK(&so->so_rcv); 6407 } 6408 if (sockbuf_lock) { 6409 SOCK_IO_RECV_UNLOCK(so); 6410 } 6411 6412 if (freecnt_applied) { 6413 /* 6414 * The lock on the socket buffer protects us so the free 6415 * code will stop. But since we used the socketbuf lock and 6416 * the sender uses the tcb_lock to increment, we need to use 6417 * the atomic add to the refcnt. 6418 */ 6419 if (stcb == NULL) { 6420 #ifdef INVARIANTS 6421 panic("stcb for refcnt has gone NULL?"); 6422 goto stage_left; 6423 #else 6424 goto stage_left; 6425 #endif 6426 } 6427 /* Save the value back for next time */ 6428 stcb->freed_by_sorcv_sincelast = freed_so_far; 6429 atomic_subtract_int(&stcb->asoc.refcnt, 1); 6430 } 6431 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6432 if (stcb) { 6433 sctp_misc_ints(SCTP_SORECV_DONE, 6434 freed_so_far, 6435 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6436 stcb->asoc.my_rwnd, 6437 SCTP_SBAVAIL(&so->so_rcv)); 6438 } else { 6439 sctp_misc_ints(SCTP_SORECV_DONE, 6440 freed_so_far, 6441 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6442 0, 6443 SCTP_SBAVAIL(&so->so_rcv)); 6444 } 6445 } 6446 stage_left: 6447 if (wakeup_read_socket) { 6448 sctp_sorwakeup(inp, so); 6449 } 6450 return (error); 6451 } 6452 6453 #ifdef SCTP_MBUF_LOGGING 6454 struct mbuf * 6455 sctp_m_free(struct mbuf *m) 6456 { 6457 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6458 sctp_log_mb(m, SCTP_MBUF_IFREE); 6459 } 6460 return (m_free(m)); 6461 } 6462 6463 void 6464 sctp_m_freem(struct mbuf *mb) 6465 { 6466 while (mb != NULL) 6467 mb = sctp_m_free(mb); 6468 } 6469 6470 #endif 6471 6472 int 6473 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6474 { 6475 /* 6476 * Given a local address. 
For all associations that holds the 6477 * address, request a peer-set-primary. 6478 */ 6479 struct sctp_ifa *ifa; 6480 struct sctp_laddr *wi; 6481 6482 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6483 if (ifa == NULL) { 6484 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6485 return (EADDRNOTAVAIL); 6486 } 6487 /* 6488 * Now that we have the ifa we must awaken the iterator with this 6489 * message. 6490 */ 6491 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6492 if (wi == NULL) { 6493 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6494 return (ENOMEM); 6495 } 6496 /* Now incr the count and int wi structure */ 6497 SCTP_INCR_LADDR_COUNT(); 6498 memset(wi, 0, sizeof(*wi)); 6499 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6500 wi->ifa = ifa; 6501 wi->action = SCTP_SET_PRIM_ADDR; 6502 atomic_add_int(&ifa->refcount, 1); 6503 6504 /* Now add it to the work queue */ 6505 SCTP_WQ_ADDR_LOCK(); 6506 /* 6507 * Should this really be a tailq? As it is we will process the 6508 * newest first :-0 6509 */ 6510 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6511 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6512 (struct sctp_inpcb *)NULL, 6513 (struct sctp_tcb *)NULL, 6514 (struct sctp_nets *)NULL); 6515 SCTP_WQ_ADDR_UNLOCK(); 6516 return (0); 6517 } 6518 6519 int 6520 sctp_soreceive(struct socket *so, 6521 struct sockaddr **psa, 6522 struct uio *uio, 6523 struct mbuf **mp0, 6524 struct mbuf **controlp, 6525 int *flagsp) 6526 { 6527 int error, fromlen; 6528 uint8_t sockbuf[256]; 6529 struct sockaddr *from; 6530 struct sctp_extrcvinfo sinfo; 6531 int filling_sinfo = 1; 6532 int flags; 6533 struct sctp_inpcb *inp; 6534 6535 inp = (struct sctp_inpcb *)so->so_pcb; 6536 /* pickup the assoc we are reading from */ 6537 if (inp == NULL) { 6538 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6539 return (EINVAL); 6540 } 6541 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6542 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6543 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6544 (controlp == NULL)) { 6545 /* user does not want the sndrcv ctl */ 6546 filling_sinfo = 0; 6547 } 6548 if (psa) { 6549 from = (struct sockaddr *)sockbuf; 6550 fromlen = sizeof(sockbuf); 6551 from->sa_len = 0; 6552 } else { 6553 from = NULL; 6554 fromlen = 0; 6555 } 6556 6557 if (filling_sinfo) { 6558 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6559 } 6560 if (flagsp != NULL) { 6561 flags = *flagsp; 6562 } else { 6563 flags = 0; 6564 } 6565 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6566 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6567 if (flagsp != NULL) { 6568 *flagsp = flags; 6569 } 6570 if (controlp != NULL) { 6571 /* copy back the sinfo in a CMSG format */ 6572 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6573 *controlp = sctp_build_ctl_nchunk(inp, 6574 (struct sctp_sndrcvinfo *)&sinfo); 6575 } else { 6576 *controlp = NULL; 6577 } 6578 } 6579 if (psa) { 6580 /* copy back the address info */ 6581 if (from && from->sa_len) { 6582 *psa = sodupsockaddr(from, M_NOWAIT); 6583 } else { 6584 *psa = NULL; 6585 } 6586 } 6587 return (error); 6588 } 6589 6590 int 6591 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6592 int totaddr, int *error) 6593 { 6594 int added = 0; 6595 int i; 6596 struct sctp_inpcb *inp; 6597 struct sockaddr *sa; 6598 size_t incr = 0; 6599 #ifdef INET 6600 struct sockaddr_in *sin; 6601 #endif 6602 #ifdef 
INET6 6603 struct sockaddr_in6 *sin6; 6604 #endif 6605 6606 sa = addr; 6607 inp = stcb->sctp_ep; 6608 *error = 0; 6609 for (i = 0; i < totaddr; i++) { 6610 switch (sa->sa_family) { 6611 #ifdef INET 6612 case AF_INET: 6613 incr = sizeof(struct sockaddr_in); 6614 sin = (struct sockaddr_in *)sa; 6615 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6616 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6617 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6618 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6619 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6620 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6621 *error = EINVAL; 6622 goto out_now; 6623 } 6624 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6625 SCTP_DONOT_SETSCOPE, 6626 SCTP_ADDR_IS_CONFIRMED)) { 6627 /* assoc gone no un-lock */ 6628 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6629 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6630 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6631 *error = ENOBUFS; 6632 goto out_now; 6633 } 6634 added++; 6635 break; 6636 #endif 6637 #ifdef INET6 6638 case AF_INET6: 6639 incr = sizeof(struct sockaddr_in6); 6640 sin6 = (struct sockaddr_in6 *)sa; 6641 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6642 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6643 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6644 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6645 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6646 *error = EINVAL; 6647 goto out_now; 6648 } 6649 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6650 SCTP_DONOT_SETSCOPE, 6651 SCTP_ADDR_IS_CONFIRMED)) { 6652 /* assoc gone no un-lock */ 6653 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6654 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6655 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6656 *error = ENOBUFS; 6657 goto out_now; 6658 } 6659 added++; 6660 break; 6661 #endif 6662 default: 6663 break; 6664 } 6665 sa = (struct sockaddr *)((caddr_t)sa + incr); 6666 } 6667 out_now: 6668 return (added); 6669 } 6670 6671 int 6672 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6673 unsigned int totaddr, 6674 unsigned int *num_v4, unsigned int *num_v6, 6675 unsigned int limit) 6676 { 6677 struct sockaddr *sa; 6678 struct sctp_tcb *stcb; 6679 unsigned int incr, at, i; 6680 6681 at = 0; 6682 sa = addr; 6683 *num_v6 = *num_v4 = 0; 6684 /* account and validate addresses */ 6685 if (totaddr == 0) { 6686 return (EINVAL); 6687 } 6688 for (i = 0; i < totaddr; i++) { 6689 if (at + sizeof(struct sockaddr) > limit) { 6690 return (EINVAL); 6691 } 6692 switch (sa->sa_family) { 6693 #ifdef INET 6694 case AF_INET: 6695 incr = (unsigned int)sizeof(struct sockaddr_in); 6696 if (sa->sa_len != incr) { 6697 return (EINVAL); 6698 } 6699 (*num_v4) += 1; 6700 break; 6701 #endif 6702 #ifdef INET6 6703 case AF_INET6: 6704 { 6705 struct sockaddr_in6 *sin6; 6706 6707 incr = (unsigned int)sizeof(struct sockaddr_in6); 6708 if (sa->sa_len != incr) { 6709 return (EINVAL); 6710 } 6711 sin6 = (struct sockaddr_in6 *)sa; 6712 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6713 /* Must be non-mapped for connectx */ 6714 return (EINVAL); 6715 } 6716 (*num_v6) += 1; 6717 break; 6718 } 6719 #endif 6720 default: 6721 return (EINVAL); 6722 } 6723 if ((at + incr) > limit) { 6724 return (EINVAL); 6725 } 6726 SCTP_INP_INCR_REF(inp); 6727 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6728 if (stcb != NULL) { 6729 SCTP_TCB_UNLOCK(stcb); 6730 return (EALREADY); 6731 } else { 6732 
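/* No association exists for this address; drop the reference taken before the lookup. */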
SCTP_INP_DECR_REF(inp); 6733 } 6734 at += incr; 6735 sa = (struct sockaddr *)((caddr_t)sa + incr); 6736 } 6737 return (0); 6738 } 6739 6740 /* 6741 * sctp_bindx(ADD) for one address. 6742 * assumes all arguments are valid/checked by caller. 6743 */ 6744 void 6745 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6746 struct sockaddr *sa, uint32_t vrf_id, int *error, 6747 void *p) 6748 { 6749 #if defined(INET) && defined(INET6) 6750 struct sockaddr_in sin; 6751 #endif 6752 #ifdef INET6 6753 struct sockaddr_in6 *sin6; 6754 #endif 6755 #ifdef INET 6756 struct sockaddr_in *sinp; 6757 #endif 6758 struct sockaddr *addr_to_use; 6759 struct sctp_inpcb *lep; 6760 uint16_t port; 6761 6762 /* see if we're bound all already! */ 6763 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6764 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6765 *error = EINVAL; 6766 return; 6767 } 6768 switch (sa->sa_family) { 6769 #ifdef INET6 6770 case AF_INET6: 6771 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6772 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6773 *error = EINVAL; 6774 return; 6775 } 6776 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6777 /* can only bind v6 on PF_INET6 sockets */ 6778 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6779 *error = EINVAL; 6780 return; 6781 } 6782 sin6 = (struct sockaddr_in6 *)sa; 6783 port = sin6->sin6_port; 6784 #ifdef INET 6785 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6786 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6787 SCTP_IPV6_V6ONLY(inp)) { 6788 /* can't bind v4-mapped on PF_INET sockets */ 6789 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6790 *error = EINVAL; 6791 return; 6792 } 6793 in6_sin6_2_sin(&sin, sin6); 6794 addr_to_use = (struct sockaddr *)&sin; 6795 } else { 6796 addr_to_use = sa; 6797 } 6798 #else 6799 addr_to_use = sa; 6800 #endif 6801 break; 6802 #endif 6803 #ifdef INET 6804 case AF_INET: 6805 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6806 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6807 *error = EINVAL; 6808 return; 6809 } 6810 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6811 SCTP_IPV6_V6ONLY(inp)) { 6812 /* can't bind v4 on PF_INET sockets */ 6813 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6814 *error = EINVAL; 6815 return; 6816 } 6817 sinp = (struct sockaddr_in *)sa; 6818 port = sinp->sin_port; 6819 addr_to_use = sa; 6820 break; 6821 #endif 6822 default: 6823 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6824 *error = EINVAL; 6825 return; 6826 } 6827 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6828 if (p == NULL) { 6829 /* Can't get proc for Net/Open BSD */ 6830 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6831 *error = EINVAL; 6832 return; 6833 } 6834 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6835 return; 6836 } 6837 /* Validate the incoming port. */ 6838 if ((port != 0) && (port != inp->sctp_lport)) { 6839 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6840 *error = EINVAL; 6841 return; 6842 } 6843 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6844 if (lep == NULL) { 6845 /* add the address */ 6846 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6847 SCTP_ADD_IP_ADDRESS, vrf_id); 6848 } else { 6849 if (lep != inp) { 6850 *error = EADDRINUSE; 6851 } 6852 SCTP_INP_DECR_REF(lep); 6853 } 6854 } 6855 6856 /* 6857 * sctp_bindx(DELETE) for one address. 
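 * Mirrors sctp_bindx_add_address() above.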
6858 * assumes all arguments are valid/checked by caller. 6859 */ 6860 void 6861 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6862 struct sockaddr *sa, uint32_t vrf_id, int *error) 6863 { 6864 struct sockaddr *addr_to_use; 6865 #if defined(INET) && defined(INET6) 6866 struct sockaddr_in6 *sin6; 6867 struct sockaddr_in sin; 6868 #endif 6869 6870 /* see if we're bound all already! */ 6871 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6872 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6873 *error = EINVAL; 6874 return; 6875 } 6876 switch (sa->sa_family) { 6877 #ifdef INET6 6878 case AF_INET6: 6879 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6880 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6881 *error = EINVAL; 6882 return; 6883 } 6884 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6885 /* can only bind v6 on PF_INET6 sockets */ 6886 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6887 *error = EINVAL; 6888 return; 6889 } 6890 #ifdef INET 6891 sin6 = (struct sockaddr_in6 *)sa; 6892 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6893 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6894 SCTP_IPV6_V6ONLY(inp)) { 6895 /* can't bind mapped-v4 on PF_INET sockets */ 6896 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6897 *error = EINVAL; 6898 return; 6899 } 6900 in6_sin6_2_sin(&sin, sin6); 6901 addr_to_use = (struct sockaddr *)&sin; 6902 } else { 6903 addr_to_use = sa; 6904 } 6905 #else 6906 addr_to_use = sa; 6907 #endif 6908 break; 6909 #endif 6910 #ifdef INET 6911 case AF_INET: 6912 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6913 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6914 *error = EINVAL; 6915 return; 6916 } 6917 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6918 SCTP_IPV6_V6ONLY(inp)) { 6919 /* can't bind v4 on PF_INET sockets */ 6920 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6921 *error = EINVAL; 6922 return; 6923 } 6924 addr_to_use = sa; 6925 break; 6926 #endif 6927 default: 6928 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6929 *error = EINVAL; 6930 return; 6931 } 6932 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6933 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6934 vrf_id); 6935 } 6936 6937 /* 6938 * returns the valid local address count for an assoc, taking into account 6939 * all scoping rules 6940 */ 6941 int 6942 sctp_local_addr_count(struct sctp_tcb *stcb) 6943 { 6944 int loopback_scope; 6945 #if defined(INET) 6946 int ipv4_local_scope, ipv4_addr_legal; 6947 #endif 6948 #if defined(INET6) 6949 int local_scope, site_scope, ipv6_addr_legal; 6950 #endif 6951 struct sctp_vrf *vrf; 6952 struct sctp_ifn *sctp_ifn; 6953 struct sctp_ifa *sctp_ifa; 6954 int count = 0; 6955 6956 /* Turn on all the appropriate scopes */ 6957 loopback_scope = stcb->asoc.scope.loopback_scope; 6958 #if defined(INET) 6959 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6960 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6961 #endif 6962 #if defined(INET6) 6963 local_scope = stcb->asoc.scope.local_scope; 6964 site_scope = stcb->asoc.scope.site_scope; 6965 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6966 #endif 6967 SCTP_IPI_ADDR_RLOCK(); 6968 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6969 if (vrf == NULL) { 6970 /* no vrf, no addresses */ 6971 SCTP_IPI_ADDR_RUNLOCK(); 6972 return (0); 6973 } 6974 6975 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6976 /* 6977 * bound all case: go through all ifns on the vrf 6978 */ 6979 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6980 if ((loopback_scope == 0) && 6981 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6982 continue; 6983 } 6984 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6985 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6986 continue; 6987 switch (sctp_ifa->address.sa.sa_family) { 6988 #ifdef INET 6989 case AF_INET: 6990 if (ipv4_addr_legal) { 6991 struct sockaddr_in *sin; 6992 6993 sin = &sctp_ifa->address.sin; 6994 if (sin->sin_addr.s_addr == 0) { 6995 /* 6996 * skip unspecified 6997 * addrs 6998 */ 6999 continue; 7000 } 7001 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 7002 &sin->sin_addr) != 0) { 7003 continue; 7004 } 7005 if ((ipv4_local_scope == 0) && 7006 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 7007 continue; 7008 } 7009 /* count this one */ 7010 count++; 7011 } else { 7012 continue; 7013 } 7014 break; 7015 #endif 7016 #ifdef INET6 7017 case AF_INET6: 7018 if (ipv6_addr_legal) { 7019 struct sockaddr_in6 *sin6; 7020 7021 sin6 = &sctp_ifa->address.sin6; 7022 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 7023 continue; 7024 } 7025 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 7026 &sin6->sin6_addr) != 0) { 7027 continue; 7028 } 7029 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 7030 if (local_scope == 0) 7031 continue; 7032 if (sin6->sin6_scope_id == 0) { 7033 if (sa6_recoverscope(sin6) != 0) 7034 /* 7035 * 7036 * bad 7037 * link 7038 * 7039 * local 7040 * 7041 * address 7042 */ 7043 continue; 7044 } 7045 } 7046 if ((site_scope == 0) && 7047 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7048 continue; 7049 } 7050 /* count this one */ 7051 count++; 7052 } 7053 break; 7054 #endif 7055 default: 7056 /* TSNH */ 7057 break; 7058 } 7059 } 7060 } 7061 } else { 7062 /* 7063 * subset bound case 7064 */ 7065 struct sctp_laddr *laddr; 7066 7067 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7068 sctp_nxt_addr) { 7069 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7070 continue; 7071 } 7072 /* count this one */ 7073 count++; 7074 } 7075 } 7076 SCTP_IPI_ADDR_RUNLOCK(); 7077 return (count); 7078 } 7079 7080 #if defined(SCTP_LOCAL_TRACE_BUF) 7081 7082 void 7083 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7084 { 7085 uint32_t saveindex, newindex; 7086 7087 do { 7088 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7089 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7090 newindex = 1; 7091 } else { 7092 newindex = saveindex + 1; 7093 } 7094 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7095 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7096 saveindex = 0; 7097 } 7098 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7099 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7100 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7101 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7102 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7103 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7104 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7105 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7106 } 7107 7108 #endif 7109 static bool 7110 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7111 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7112 { 7113 struct ip *iph; 7114 #ifdef INET6 7115 struct ip6_hdr *ip6; 7116 #endif 7117 struct mbuf *sp, *last; 7118 struct udphdr *uhdr; 7119 uint16_t port; 7120 7121 if ((m->m_flags & M_PKTHDR) == 0) { 7122 /* Can't handle one that is not a pkt hdr */ 7123 goto out; 7124 } 7125 /* Pull the src port */ 7126 iph = mtod(m, struct ip *); 7127 uhdr = (struct udphdr *)((caddr_t)iph + off); 7128 port = uhdr->uh_sport; 7129 /* 7130 * Split out the mbuf chain. Leave the IP header in m, place the 7131 * rest in the sp. 7132 */ 7133 sp = m_split(m, off, M_NOWAIT); 7134 if (sp == NULL) { 7135 /* Gak, drop packet, we can't do a split */ 7136 goto out; 7137 } 7138 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7139 /* Gak, packet can't have an SCTP header in it - too small */ 7140 m_freem(sp); 7141 goto out; 7142 } 7143 /* Now pull up the UDP header and SCTP header together */ 7144 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7145 if (sp == NULL) { 7146 /* Gak pullup failed */ 7147 goto out; 7148 } 7149 /* Trim out the UDP header */ 7150 m_adj(sp, sizeof(struct udphdr)); 7151 7152 /* Now reconstruct the mbuf chain */ 7153 for (last = m; last->m_next; last = last->m_next); 7154 last->m_next = sp; 7155 m->m_pkthdr.len += sp->m_pkthdr.len; 7156 /* 7157 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7158 * checksum and it was valid. Since CSUM_DATA_VALID == 7159 * CSUM_SCTP_VALID this would imply that the HW also verified the 7160 * SCTP checksum. Therefore, clear the bit. 
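 * (Left set, the flag would make the SCTP input path skip the CRC32c check even though the hardware only validated the UDP checksum.)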
7161 */ 7162 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7163 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7164 m->m_pkthdr.len, 7165 if_name(m->m_pkthdr.rcvif), 7166 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7167 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7168 iph = mtod(m, struct ip *); 7169 switch (iph->ip_v) { 7170 #ifdef INET 7171 case IPVERSION: 7172 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7173 sctp_input_with_port(m, off, port); 7174 break; 7175 #endif 7176 #ifdef INET6 7177 case IPV6_VERSION >> 4: 7178 ip6 = mtod(m, struct ip6_hdr *); 7179 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7180 sctp6_input_with_port(&m, &off, port); 7181 break; 7182 #endif 7183 default: 7184 goto out; 7185 break; 7186 } 7187 return (true); 7188 out: 7189 m_freem(m); 7190 7191 return (true); 7192 } 7193 7194 #ifdef INET 7195 static void 7196 sctp_recv_icmp_tunneled_packet(udp_tun_icmp_param_t param) 7197 { 7198 struct icmp *icmp = param.icmp; 7199 struct ip *outer_ip, *inner_ip; 7200 struct sctphdr *sh; 7201 struct udphdr *udp; 7202 struct sctp_inpcb *inp; 7203 struct sctp_tcb *stcb; 7204 struct sctp_nets *net; 7205 struct sctp_init_chunk *ch; 7206 struct sockaddr_in src, dst; 7207 uint8_t type, code; 7208 7209 inner_ip = &icmp->icmp_ip; 7210 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7211 if (ntohs(outer_ip->ip_len) < 7212 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7213 return; 7214 } 7215 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7216 sh = (struct sctphdr *)(udp + 1); 7217 memset(&src, 0, sizeof(struct sockaddr_in)); 7218 src.sin_family = AF_INET; 7219 src.sin_len = sizeof(struct sockaddr_in); 7220 src.sin_port = sh->src_port; 7221 src.sin_addr = inner_ip->ip_src; 7222 memset(&dst, 0, sizeof(struct sockaddr_in)); 7223 dst.sin_family = AF_INET; 7224 dst.sin_len = sizeof(struct sockaddr_in); 7225 dst.sin_port = sh->dest_port; 7226 dst.sin_addr = inner_ip->ip_dst; 7227 /* 7228 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7229 * holds our local endpoint address. Thus we reverse the dst and the 7230 * src in the lookup. 7231 */ 7232 inp = NULL; 7233 net = NULL; 7234 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7235 (struct sockaddr *)&src, 7236 &inp, &net, 1, 7237 SCTP_DEFAULT_VRFID); 7238 if ((stcb != NULL) && 7239 (net != NULL) && 7240 (inp != NULL)) { 7241 /* Check the UDP port numbers */ 7242 if ((udp->uh_dport != net->port) || 7243 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7244 SCTP_TCB_UNLOCK(stcb); 7245 return; 7246 } 7247 /* Check the verification tag */ 7248 if (ntohl(sh->v_tag) != 0) { 7249 /* 7250 * This must be the verification tag used for 7251 * sending out packets. We don't consider packets 7252 * reflecting the verification tag. 7253 */ 7254 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7255 SCTP_TCB_UNLOCK(stcb); 7256 return; 7257 } 7258 } else { 7259 if (ntohs(outer_ip->ip_len) >= 7260 sizeof(struct ip) + 7261 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7262 /* 7263 * In this case we can check if we got an 7264 * INIT chunk and if the initiate tag 7265 * matches. 
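 * (An INIT we sent carries verification tag 0 in the common header, so the initiate tag inside the chunk is compared against our local vtag instead.)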
7266 */ 7267 ch = (struct sctp_init_chunk *)(sh + 1); 7268 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7269 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7270 SCTP_TCB_UNLOCK(stcb); 7271 return; 7272 } 7273 } else { 7274 SCTP_TCB_UNLOCK(stcb); 7275 return; 7276 } 7277 } 7278 type = icmp->icmp_type; 7279 code = icmp->icmp_code; 7280 if ((type == ICMP_UNREACH) && 7281 (code == ICMP_UNREACH_PORT)) { 7282 code = ICMP_UNREACH_PROTOCOL; 7283 } 7284 sctp_notify(inp, stcb, net, type, code, 7285 ntohs(inner_ip->ip_len), 7286 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7287 } else { 7288 if ((stcb == NULL) && (inp != NULL)) { 7289 /* reduce ref-count */ 7290 SCTP_INP_WLOCK(inp); 7291 SCTP_INP_DECR_REF(inp); 7292 SCTP_INP_WUNLOCK(inp); 7293 } 7294 if (stcb) { 7295 SCTP_TCB_UNLOCK(stcb); 7296 } 7297 } 7298 return; 7299 } 7300 #endif 7301 7302 #ifdef INET6 7303 static void 7304 sctp_recv_icmp6_tunneled_packet(udp_tun_icmp_param_t param) 7305 { 7306 struct ip6ctlparam *ip6cp = param.ip6cp; 7307 struct sctp_inpcb *inp; 7308 struct sctp_tcb *stcb; 7309 struct sctp_nets *net; 7310 struct sctphdr sh; 7311 struct udphdr udp; 7312 struct sockaddr_in6 src, dst; 7313 uint8_t type, code; 7314 7315 /* 7316 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7317 */ 7318 if (ip6cp->ip6c_m == NULL) { 7319 return; 7320 } 7321 /* 7322 * Check if we can safely examine the ports and the verification tag 7323 * of the SCTP common header. 7324 */ 7325 if (ip6cp->ip6c_m->m_pkthdr.len < 7326 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7327 return; 7328 } 7329 /* Copy out the UDP header. */ 7330 memset(&udp, 0, sizeof(struct udphdr)); 7331 m_copydata(ip6cp->ip6c_m, 7332 ip6cp->ip6c_off, 7333 sizeof(struct udphdr), 7334 (caddr_t)&udp); 7335 /* Copy out the port numbers and the verification tag. */ 7336 memset(&sh, 0, sizeof(struct sctphdr)); 7337 m_copydata(ip6cp->ip6c_m, 7338 ip6cp->ip6c_off + sizeof(struct udphdr), 7339 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7340 (caddr_t)&sh); 7341 memset(&src, 0, sizeof(struct sockaddr_in6)); 7342 src.sin6_family = AF_INET6; 7343 src.sin6_len = sizeof(struct sockaddr_in6); 7344 src.sin6_port = sh.src_port; 7345 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7346 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7347 return; 7348 } 7349 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7350 dst.sin6_family = AF_INET6; 7351 dst.sin6_len = sizeof(struct sockaddr_in6); 7352 dst.sin6_port = sh.dest_port; 7353 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7354 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7355 return; 7356 } 7357 inp = NULL; 7358 net = NULL; 7359 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7360 (struct sockaddr *)&src, 7361 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7362 if ((stcb != NULL) && 7363 (net != NULL) && 7364 (inp != NULL)) { 7365 /* Check the UDP port numbers */ 7366 if ((udp.uh_dport != net->port) || 7367 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7368 SCTP_TCB_UNLOCK(stcb); 7369 return; 7370 } 7371 /* Check the verification tag */ 7372 if (ntohl(sh.v_tag) != 0) { 7373 /* 7374 * This must be the verification tag used for 7375 * sending out packets. We don't consider packets 7376 * reflecting the verification tag. 
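 * (Packets we send carry the peer's vtag, so the embedded tag must match stcb->asoc.peer_vtag.)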
7377 */ 7378 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7379 SCTP_TCB_UNLOCK(stcb); 7380 return; 7381 } 7382 } else { 7383 if (ip6cp->ip6c_m->m_pkthdr.len >= 7384 ip6cp->ip6c_off + sizeof(struct udphdr) + 7385 sizeof(struct sctphdr) + 7386 sizeof(struct sctp_chunkhdr) + 7387 offsetof(struct sctp_init, a_rwnd)) { 7388 /* 7389 * In this case we can check if we got an 7390 * INIT chunk and if the initiate tag 7391 * matches. 7392 */ 7393 uint32_t initiate_tag; 7394 uint8_t chunk_type; 7395 7396 m_copydata(ip6cp->ip6c_m, 7397 ip6cp->ip6c_off + 7398 sizeof(struct udphdr) + 7399 sizeof(struct sctphdr), 7400 sizeof(uint8_t), 7401 (caddr_t)&chunk_type); 7402 m_copydata(ip6cp->ip6c_m, 7403 ip6cp->ip6c_off + 7404 sizeof(struct udphdr) + 7405 sizeof(struct sctphdr) + 7406 sizeof(struct sctp_chunkhdr), 7407 sizeof(uint32_t), 7408 (caddr_t)&initiate_tag); 7409 if ((chunk_type != SCTP_INITIATION) || 7410 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7411 SCTP_TCB_UNLOCK(stcb); 7412 return; 7413 } 7414 } else { 7415 SCTP_TCB_UNLOCK(stcb); 7416 return; 7417 } 7418 } 7419 type = ip6cp->ip6c_icmp6->icmp6_type; 7420 code = ip6cp->ip6c_icmp6->icmp6_code; 7421 if ((type == ICMP6_DST_UNREACH) && 7422 (code == ICMP6_DST_UNREACH_NOPORT)) { 7423 type = ICMP6_PARAM_PROB; 7424 code = ICMP6_PARAMPROB_NEXTHEADER; 7425 } 7426 sctp6_notify(inp, stcb, net, type, code, 7427 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7428 } else { 7429 if ((stcb == NULL) && (inp != NULL)) { 7430 /* reduce inp's ref-count */ 7431 SCTP_INP_WLOCK(inp); 7432 SCTP_INP_DECR_REF(inp); 7433 SCTP_INP_WUNLOCK(inp); 7434 } 7435 if (stcb) { 7436 SCTP_TCB_UNLOCK(stcb); 7437 } 7438 } 7439 } 7440 #endif 7441 7442 void 7443 sctp_over_udp_stop(void) 7444 { 7445 /* 7446 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7447 * for writing! 7448 */ 7449 #ifdef INET 7450 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7451 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7452 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7453 } 7454 #endif 7455 #ifdef INET6 7456 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7457 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7458 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7459 } 7460 #endif 7461 } 7462 7463 int 7464 sctp_over_udp_start(void) 7465 { 7466 uint16_t port; 7467 int ret; 7468 #ifdef INET 7469 struct sockaddr_in sin; 7470 #endif 7471 #ifdef INET6 7472 struct sockaddr_in6 sin6; 7473 #endif 7474 /* 7475 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7476 * for writing! 7477 */ 7478 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7479 if (ntohs(port) == 0) { 7480 /* Must have a port set */ 7481 return (EINVAL); 7482 } 7483 #ifdef INET 7484 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7485 /* Already running -- must stop first */ 7486 return (EALREADY); 7487 } 7488 #endif 7489 #ifdef INET6 7490 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7491 /* Already running -- must stop first */ 7492 return (EALREADY); 7493 } 7494 #endif 7495 #ifdef INET 7496 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7497 SOCK_DGRAM, IPPROTO_UDP, 7498 curthread->td_ucred, curthread))) { 7499 sctp_over_udp_stop(); 7500 return (ret); 7501 } 7502 /* Call the special UDP hook. */ 7503 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7504 sctp_recv_udp_tunneled_packet, 7505 sctp_recv_icmp_tunneled_packet, 7506 NULL))) { 7507 sctp_over_udp_stop(); 7508 return (ret); 7509 } 7510 /* Ok, we have a socket, bind it to the port. 
*/
void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}

int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}

void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}

void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
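	/*
	 * Entering any of the SHUTDOWN states makes a pending shutdown
	 * redundant, so clear that substate flag.
	 */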
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	      (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	     ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	     ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}