/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = SCTP_SBAVAIL(sb);
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

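/*
 * Note: the sctp_log_*() helpers in this file all follow the same pattern:
 * each one fills one arm of the union in struct sctp_cwnd_log and then emits
 * the four 32-bit words of the misc overlay via SCTP_CTR6(), so they compile
 * to nothing unless SCTP_LOCAL_TRACE_BUF is defined.
 */
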
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(SOCK_MTX(inp->sctp_socket));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_rcv));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_snd));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}

void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;
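	/*
	 * Record a two-byte audit entry (0xAA, caller id) in the circular
	 * sctp_audit_data[] buffer, then cross-check the association's
	 * retransmit count and flight-size bookkeeping against what is
	 * actually on the sent queue, correcting and reporting any mismatch.
	 */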

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code that the time is positive and
 * know that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
	uint64_t temp;
	uint32_t ticks;

	if (hz == 1000) {
		ticks = msecs;
	} else {
		temp = (((uint64_t)msecs * hz) + 999) / 1000;
		if (temp > UINT32_MAX) {
			ticks = UINT32_MAX;
		} else {
			ticks = (uint32_t)temp;
		}
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t msecs;

	if (hz == 1000) {
		msecs = ticks;
	} else {
		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
		if (temp > UINT32_MAX) {
			msecs = UINT32_MAX;
		} else {
			msecs = (uint32_t)temp;
		}
	}
	return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
	uint64_t temp;
	uint32_t ticks;

	temp = (uint64_t)secs * hz;
	if (temp > UINT32_MAX) {
		ticks = UINT32_MAX;
	} else {
		ticks = (uint32_t)temp;
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t secs;

	temp = ((uint64_t)ticks + (hz - 1)) / hz;
	if (temp > UINT32_MAX) {
		secs = UINT32_MAX;
	} else {
		secs = (uint32_t)temp;
	}
	return (secs);
}

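/*
 * Worked example of the rounding above (hz value assumed for illustration):
 * with hz = 100, sctp_msecs_to_ticks(1) = (1 * 100 + 999) / 1000 = 1, so a
 * 1 ms timeout still arms a one-tick callout instead of collapsing to zero;
 * going the other way, sctp_ticks_to_msecs(1) = (1 * 1000 + 99) / 100 = 10 ms.
 */
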
/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};

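/*
 * Illustration of how the table above is consumed by the two lookup routines
 * that follow (the value 1500 is chosen only as an example):
 * sctp_get_prev_mtu(1500) scans to the first entry >= 1500 and returns the
 * one before it, 1492, while sctp_get_next_mtu(1500) returns the first entry
 * strictly larger than 1500, which is 1536.
 */
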
/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4 smaller
 * or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	val &= 0xfffffffc;
	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use a random selection process to
	 * get the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}

int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t initial_tsn, uint32_t vrf_id,
    uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->rcv_edmid = inp->rcv_edmid;
	asoc->snd_edmid = SCTP_EDMID_NONE;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	if (override_tag) {
		asoc->init_seq_number = initial_tsn;
	} else {
		asoc->init_seq_number = sctp_select_initial_TSN(&inp->sctp_ep);
	}
	asoc->asconf_seq_out = asoc->init_seq_number;
	asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->sending_seq = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->init_seq_number - 1;
	/* we are optimistic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->init_seq_number - 1;
	asoc->asconf_seq_in = asoc->init_seq_number - 1;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = 0;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

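	/*
	 * The two assignments above pick the congestion control and stream
	 * scheduling implementations by indexing the endpoint's default
	 * module numbers into the const sctp_cc_functions[] and
	 * sctp_ss_functions[] tables declared at the top of this file.
	 */
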
	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	SCTP_TCB_LOCK(stcb);
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc);
	SCTP_TCB_UNLOCK(stcb);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
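	/*
	 * The timo* fields zeroed above are per-association counters of
	 * timer expirations; sctp_timeout_handler() further below increments
	 * the matching counter each time the corresponding timer fires.
	 */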
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}

static void
sctp_iterator_work(struct sctp_iterator *it)
{
	struct epoch_tracker et;
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

	NET_EPOCH_ENTER(et);
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		NET_EPOCH_EXIT(et);
		return;
	}
select_a_new_ep:
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb != NULL) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_subtract_int(&it->stcb->asoc.refcnt, 1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_subtract_int(&it->stcb->asoc.refcnt, 1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		    ("%s: stcb %p does not belong to inp %p, but inp %p",
		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));
		SCTP_INP_RLOCK_ASSERT(it->inp);
		SCTP_TCB_LOCK_ASSERT(it->stcb);

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
		SCTP_INP_RLOCK_ASSERT(it->inp);
		SCTP_TCB_LOCK_ASSERT(it->stcb);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0) {
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
			SCTP_INP_RLOCK_ASSERT(it->inp);
			SCTP_TCB_LOCK_ASSERT(it->stcb);
		}

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
		/* now let's work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}

static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL, /* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Free the work entries if we are stopping;
			 * otherwise put them back on the addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				sctp_asconf_iterator_end(asc, 0);
			} else {
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */

void
sctp_timeout_handler(void *t)
{
	struct epoch_tracker et;
	struct timeval tv;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
	int type;
	int i, secret;
	bool did_output, released_asoc_reference;

	/*
	 * If inp, stcb or net are not NULL, then references to these were
	 * added when the timer was started, and must be released before
	 * this function returns.
	 */
	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	NET_EPOCH_ENTER(et);
	released_asoc_reference = false;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
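	/*
	 * The checks that follow guard against a stale or corrupted callout
	 * argument: tmr->self must point back at the timer itself and the
	 * type must be one the handler knows about; the reference taken on
	 * the association when the timer was started is dropped once the
	 * TCB lock is held further below.
	 */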
	KASSERT(tmr->self == NULL || tmr->self == tmr,
	    ("sctp_timeout_handler: tmr->self corrupted"));
	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
	    ("sctp_timeout_handler: invalid timer type %d", tmr->type));
	type = tmr->type;
	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
	    type, stcb, stcb->sctp_ep));
	tmr->stopped_from = 0xa001;
	if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to CLOSED association.\n",
		    type);
		goto out_decr;
	}
	tmr->stopped_from = 0xa002;
	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to not being active.\n",
		    type);
		goto out_decr;
	}

	tmr->stopped_from = 0xa003;
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/*
		 * Release reference so that association can be freed if
		 * necessary below. This is safe now that we have acquired
		 * the lock.
		 */
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		released_asoc_reference = true;
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d handler exiting due to CLOSED association.\n",
			    type);
			goto out;
		}
	} else if (inp != NULL) {
		SCTP_INP_WLOCK(inp);
	} else {
		SCTP_WQ_ADDR_LOCK();
	}

	/* Record in stopped_from which timeout occurred. */
	tmr->stopped_from = type;
	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_SEND:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		did_output = true;
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * Safeguard. If there are some on the sent queue
			 * somewhere but no timers running, something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
1851 */ 1852 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1853 if (chk->whoTo != NULL) { 1854 break; 1855 } 1856 } 1857 if (chk != NULL) { 1858 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1859 } 1860 } 1861 break; 1862 case SCTP_TIMER_TYPE_INIT: 1863 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1864 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1865 type, inp, stcb, net)); 1866 SCTP_STAT_INCR(sctps_timoinit); 1867 stcb->asoc.timoinit++; 1868 if (sctp_t1init_timer(inp, stcb, net)) { 1869 /* no need to unlock on tcb its gone */ 1870 goto out_decr; 1871 } 1872 did_output = false; 1873 break; 1874 case SCTP_TIMER_TYPE_RECV: 1875 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1876 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1877 type, inp, stcb, net)); 1878 SCTP_STAT_INCR(sctps_timosack); 1879 stcb->asoc.timosack++; 1880 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1881 #ifdef SCTP_AUDITING_ENABLED 1882 sctp_auditing(4, inp, stcb, NULL); 1883 #endif 1884 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1885 did_output = true; 1886 break; 1887 case SCTP_TIMER_TYPE_SHUTDOWN: 1888 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1889 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1890 type, inp, stcb, net)); 1891 SCTP_STAT_INCR(sctps_timoshutdown); 1892 stcb->asoc.timoshutdown++; 1893 if (sctp_shutdown_timer(inp, stcb, net)) { 1894 /* no need to unlock on tcb its gone */ 1895 goto out_decr; 1896 } 1897 #ifdef SCTP_AUDITING_ENABLED 1898 sctp_auditing(4, inp, stcb, net); 1899 #endif 1900 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1901 did_output = true; 1902 break; 1903 case SCTP_TIMER_TYPE_HEARTBEAT: 1904 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1905 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1906 type, inp, stcb, net)); 1907 SCTP_STAT_INCR(sctps_timoheartbeat); 1908 stcb->asoc.timoheartbeat++; 1909 if (sctp_heartbeat_timer(inp, stcb, net)) { 1910 /* no need to unlock on tcb its gone */ 1911 goto out_decr; 1912 } 1913 #ifdef SCTP_AUDITING_ENABLED 1914 sctp_auditing(4, inp, stcb, net); 1915 #endif 1916 if ((net->dest_state & SCTP_ADDR_NOHB) == 0) { 1917 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1918 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1919 did_output = true; 1920 } else { 1921 did_output = false; 1922 } 1923 break; 1924 case SCTP_TIMER_TYPE_COOKIE: 1925 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1926 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1927 type, inp, stcb, net)); 1928 SCTP_STAT_INCR(sctps_timocookie); 1929 stcb->asoc.timocookie++; 1930 if (sctp_cookie_timer(inp, stcb, net)) { 1931 /* no need to unlock on tcb its gone */ 1932 goto out_decr; 1933 } 1934 #ifdef SCTP_AUDITING_ENABLED 1935 sctp_auditing(4, inp, stcb, net); 1936 #endif 1937 /* 1938 * We consider T3 and Cookie timer pretty much the same with 1939 * respect to where from in chunk_output. 
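		 * Both therefore pass SCTP_OUTPUT_FROM_T3 below, so
		 * sctp_chunk_output() handles a COOKIE-ECHO retransmission
		 * essentially the same way as a T3 retransmission.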
1940 */ 1941 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1942 did_output = true; 1943 break; 1944 case SCTP_TIMER_TYPE_NEWCOOKIE: 1945 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1946 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1947 type, inp, stcb, net)); 1948 SCTP_STAT_INCR(sctps_timosecret); 1949 (void)SCTP_GETTIME_TIMEVAL(&tv); 1950 inp->sctp_ep.time_of_secret_change = (unsigned int)tv.tv_sec; 1951 inp->sctp_ep.last_secret_number = 1952 inp->sctp_ep.current_secret_number; 1953 inp->sctp_ep.current_secret_number++; 1954 if (inp->sctp_ep.current_secret_number >= 1955 SCTP_HOW_MANY_SECRETS) { 1956 inp->sctp_ep.current_secret_number = 0; 1957 } 1958 secret = (int)inp->sctp_ep.current_secret_number; 1959 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1960 inp->sctp_ep.secret_key[secret][i] = 1961 sctp_select_initial_TSN(&inp->sctp_ep); 1962 } 1963 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1964 did_output = false; 1965 break; 1966 case SCTP_TIMER_TYPE_PATHMTURAISE: 1967 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1968 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1969 type, inp, stcb, net)); 1970 SCTP_STAT_INCR(sctps_timopathmtu); 1971 sctp_pathmtu_timer(inp, stcb, net); 1972 did_output = false; 1973 break; 1974 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1975 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1976 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1977 type, inp, stcb, net)); 1978 if (sctp_shutdownack_timer(inp, stcb, net)) { 1979 /* no need to unlock on tcb its gone */ 1980 goto out_decr; 1981 } 1982 SCTP_STAT_INCR(sctps_timoshutdownack); 1983 stcb->asoc.timoshutdownack++; 1984 #ifdef SCTP_AUDITING_ENABLED 1985 sctp_auditing(4, inp, stcb, net); 1986 #endif 1987 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1988 did_output = true; 1989 break; 1990 case SCTP_TIMER_TYPE_ASCONF: 1991 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1992 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1993 type, inp, stcb, net)); 1994 SCTP_STAT_INCR(sctps_timoasconf); 1995 if (sctp_asconf_timer(inp, stcb, net)) { 1996 /* no need to unlock on tcb its gone */ 1997 goto out_decr; 1998 } 1999 #ifdef SCTP_AUDITING_ENABLED 2000 sctp_auditing(4, inp, stcb, net); 2001 #endif 2002 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 2003 did_output = true; 2004 break; 2005 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2006 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2007 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2008 type, inp, stcb, net)); 2009 SCTP_STAT_INCR(sctps_timoshutdownguard); 2010 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 2011 "Shutdown guard timer expired"); 2012 sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED); 2013 /* no need to unlock on tcb its gone */ 2014 goto out_decr; 2015 case SCTP_TIMER_TYPE_AUTOCLOSE: 2016 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2017 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2018 type, inp, stcb, net)); 2019 SCTP_STAT_INCR(sctps_timoautoclose); 2020 sctp_autoclose_timer(inp, stcb); 2021 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2022 did_output = true; 2023 break; 2024 case SCTP_TIMER_TYPE_STRRESET: 2025 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2026 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2027 type, inp, stcb, net)); 2028 SCTP_STAT_INCR(sctps_timostrmrst); 2029 if 
(sctp_strreset_timer(inp, stcb)) { 2030 /* no need to unlock on tcb its gone */ 2031 goto out_decr; 2032 } 2033 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2034 did_output = true; 2035 break; 2036 case SCTP_TIMER_TYPE_INPKILL: 2037 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2038 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2039 type, inp, stcb, net)); 2040 SCTP_STAT_INCR(sctps_timoinpkill); 2041 /* 2042 * special case, take away our increment since WE are the 2043 * killer 2044 */ 2045 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2046 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2047 SCTP_INP_DECR_REF(inp); 2048 SCTP_INP_WUNLOCK(inp); 2049 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2050 SCTP_CALLED_FROM_INPKILL_TIMER); 2051 inp = NULL; 2052 goto out_decr; 2053 case SCTP_TIMER_TYPE_ASOCKILL: 2054 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2055 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2056 type, inp, stcb, net)); 2057 SCTP_STAT_INCR(sctps_timoassockill); 2058 /* Can we free it yet? */ 2059 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2060 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2061 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2062 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2063 /* 2064 * free asoc, always unlocks (or destroy's) so prevent 2065 * duplicate unlock or unlock of a free mtx :-0 2066 */ 2067 stcb = NULL; 2068 goto out_decr; 2069 case SCTP_TIMER_TYPE_ADDR_WQ: 2070 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2071 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2072 type, inp, stcb, net)); 2073 sctp_handle_addr_wq(); 2074 did_output = true; 2075 break; 2076 case SCTP_TIMER_TYPE_PRIM_DELETED: 2077 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2078 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2079 type, inp, stcb, net)); 2080 SCTP_STAT_INCR(sctps_timodelprim); 2081 sctp_delete_prim_timer(inp, stcb); 2082 did_output = false; 2083 break; 2084 default: 2085 #ifdef INVARIANTS 2086 panic("Unknown timer type %d", type); 2087 #else 2088 goto out; 2089 #endif 2090 } 2091 #ifdef SCTP_AUDITING_ENABLED 2092 sctp_audit_log(0xF1, (uint8_t)type); 2093 if (inp != NULL) 2094 sctp_auditing(5, inp, stcb, net); 2095 #endif 2096 if (did_output && (stcb != NULL)) { 2097 /* 2098 * Now we need to clean up the control chunk chain if an 2099 * ECNE is on it. It must be marked as UNSENT again so next 2100 * call will continue to send it until such time that we get 2101 * a CWR, to remove it. It is, however, less likely that we 2102 * will find a ecn echo on the chain though. 2103 */ 2104 sctp_fix_ecn_echo(&stcb->asoc); 2105 } 2106 out: 2107 if (stcb != NULL) { 2108 SCTP_TCB_UNLOCK(stcb); 2109 } else if (inp != NULL) { 2110 SCTP_INP_WUNLOCK(inp); 2111 } else { 2112 SCTP_WQ_ADDR_UNLOCK(); 2113 } 2114 2115 out_decr: 2116 /* These reference counts were incremented in sctp_timer_start(). */ 2117 if (inp != NULL) { 2118 SCTP_INP_DECR_REF(inp); 2119 } 2120 if ((stcb != NULL) && !released_asoc_reference) { 2121 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2122 } 2123 if (net != NULL) { 2124 sctp_free_remote_addr(net); 2125 } 2126 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2127 CURVNET_RESTORE(); 2128 NET_EPOCH_EXIT(et); 2129 } 2130 2131 /*- 2132 * The following table shows which parameters must be provided 2133 * when calling sctp_timer_start(). For parameters not being 2134 * provided, NULL must be used. 
2135 * 2136 * |Name |inp |stcb|net | 2137 * |-----------------------------|----|----|----| 2138 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2139 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2140 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2141 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2142 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2143 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2144 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2145 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2146 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2147 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2148 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2149 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2150 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2151 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2152 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2153 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2154 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2155 * 2156 */ 2157 2158 void 2159 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2160 struct sctp_nets *net) 2161 { 2162 struct sctp_timer *tmr; 2163 uint32_t to_ticks; 2164 uint32_t rndval, jitter; 2165 2166 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2167 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2168 t_type, stcb, stcb->sctp_ep)); 2169 tmr = NULL; 2170 if (stcb != NULL) { 2171 SCTP_TCB_LOCK_ASSERT(stcb); 2172 } else if (inp != NULL) { 2173 SCTP_INP_WLOCK_ASSERT(inp); 2174 } else { 2175 SCTP_WQ_ADDR_LOCK_ASSERT(); 2176 } 2177 if (stcb != NULL) { 2178 /* 2179 * Don't restart timer on association that's about to be 2180 * killed. 2181 */ 2182 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2183 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2184 SCTPDBG(SCTP_DEBUG_TIMER2, 2185 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2186 t_type, inp, stcb, net); 2187 return; 2188 } 2189 /* Don't restart timer on net that's been removed. */ 2190 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2191 SCTPDBG(SCTP_DEBUG_TIMER2, 2192 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2193 t_type, inp, stcb, net); 2194 return; 2195 } 2196 } 2197 switch (t_type) { 2198 case SCTP_TIMER_TYPE_SEND: 2199 /* Here we use the RTO timer. */ 2200 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2201 #ifdef INVARIANTS 2202 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2203 t_type, inp, stcb, net); 2204 #else 2205 return; 2206 #endif 2207 } 2208 tmr = &net->rxt_timer; 2209 if (net->RTO == 0) { 2210 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2211 } else { 2212 to_ticks = sctp_msecs_to_ticks(net->RTO); 2213 } 2214 break; 2215 case SCTP_TIMER_TYPE_INIT: 2216 /* 2217 * Here we use the INIT timer default usually about 1 2218 * second. 2219 */ 2220 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2221 #ifdef INVARIANTS 2222 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2223 t_type, inp, stcb, net); 2224 #else 2225 return; 2226 #endif 2227 } 2228 tmr = &net->rxt_timer; 2229 if (net->RTO == 0) { 2230 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2231 } else { 2232 to_ticks = sctp_msecs_to_ticks(net->RTO); 2233 } 2234 break; 2235 case SCTP_TIMER_TYPE_RECV: 2236 /* 2237 * Here we use the Delayed-Ack timer value from the inp, 2238 * usually about 200ms. 
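		 * The value lives in stcb->asoc.delayed_ack and can be tuned
		 * per association from user space.  Illustrative sketch
		 * (user-space code, not part of this file) using the RFC 6458
		 * SCTP_DELAYED_SACK socket option; fd and assoc_id are
		 * placeholders and error handling is omitted:
		 *
		 *	struct sctp_sack_info si;
		 *
		 *	memset(&si, 0, sizeof(si));
		 *	si.sack_assoc_id = assoc_id;
		 *	si.sack_delay = 100;		// delayed SACK time in ms
		 *	si.sack_freq = 2;		// SACK every 2nd packet
		 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK,
		 *	    &si, sizeof(si));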
2239 */ 2240 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2241 #ifdef INVARIANTS 2242 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2243 t_type, inp, stcb, net); 2244 #else 2245 return; 2246 #endif 2247 } 2248 tmr = &stcb->asoc.dack_timer; 2249 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2250 break; 2251 case SCTP_TIMER_TYPE_SHUTDOWN: 2252 /* Here we use the RTO of the destination. */ 2253 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2254 #ifdef INVARIANTS 2255 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2256 t_type, inp, stcb, net); 2257 #else 2258 return; 2259 #endif 2260 } 2261 tmr = &net->rxt_timer; 2262 if (net->RTO == 0) { 2263 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2264 } else { 2265 to_ticks = sctp_msecs_to_ticks(net->RTO); 2266 } 2267 break; 2268 case SCTP_TIMER_TYPE_HEARTBEAT: 2269 /* 2270 * The net is used here so that we can add in the RTO. Even 2271 * though we use a different timer. We also add the HB timer 2272 * PLUS a random jitter. 2273 */ 2274 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2275 #ifdef INVARIANTS 2276 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2277 t_type, inp, stcb, net); 2278 #else 2279 return; 2280 #endif 2281 } 2282 if ((net->dest_state & SCTP_ADDR_NOHB) && 2283 ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) { 2284 SCTPDBG(SCTP_DEBUG_TIMER2, 2285 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2286 t_type, inp, stcb, net); 2287 return; 2288 } 2289 tmr = &net->hb_timer; 2290 if (net->RTO == 0) { 2291 to_ticks = stcb->asoc.initial_rto; 2292 } else { 2293 to_ticks = net->RTO; 2294 } 2295 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2296 jitter = rndval % to_ticks; 2297 if (to_ticks > 1) { 2298 to_ticks >>= 1; 2299 } 2300 if (jitter < (UINT32_MAX - to_ticks)) { 2301 to_ticks += jitter; 2302 } else { 2303 to_ticks = UINT32_MAX; 2304 } 2305 if (!((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2306 (net->dest_state & SCTP_ADDR_REACHABLE)) && 2307 ((net->dest_state & SCTP_ADDR_PF) == 0)) { 2308 if (net->heart_beat_delay < (UINT32_MAX - to_ticks)) { 2309 to_ticks += net->heart_beat_delay; 2310 } else { 2311 to_ticks = UINT32_MAX; 2312 } 2313 } 2314 /* 2315 * Now we must convert the to_ticks that are now in ms to 2316 * ticks. 2317 */ 2318 to_ticks = sctp_msecs_to_ticks(to_ticks); 2319 break; 2320 case SCTP_TIMER_TYPE_COOKIE: 2321 /* 2322 * Here we can use the RTO timer from the network since one 2323 * RTT was complete. If a retransmission happened then we 2324 * will be using the RTO initial value. 2325 */ 2326 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2327 #ifdef INVARIANTS 2328 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2329 t_type, inp, stcb, net); 2330 #else 2331 return; 2332 #endif 2333 } 2334 tmr = &net->rxt_timer; 2335 if (net->RTO == 0) { 2336 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2337 } else { 2338 to_ticks = sctp_msecs_to_ticks(net->RTO); 2339 } 2340 break; 2341 case SCTP_TIMER_TYPE_NEWCOOKIE: 2342 /* 2343 * Nothing needed but the endpoint here usually about 60 2344 * minutes. 
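		 * When it fires, sctp_timeout_handler() advances
		 * inp->sctp_ep.current_secret_number, remembers the old index
		 * in last_secret_number and refills the new slot with random
		 * words, so cookies signed with the previous secret can still
		 * be accepted until the next rotation.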
2345 */ 2346 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2347 #ifdef INVARIANTS 2348 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2349 t_type, inp, stcb, net); 2350 #else 2351 return; 2352 #endif 2353 } 2354 tmr = &inp->sctp_ep.signature_change; 2355 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2356 break; 2357 case SCTP_TIMER_TYPE_PATHMTURAISE: 2358 /* 2359 * Here we use the value found in the EP for PMTUD, usually 2360 * about 10 minutes. 2361 */ 2362 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2363 #ifdef INVARIANTS 2364 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2365 t_type, inp, stcb, net); 2366 #else 2367 return; 2368 #endif 2369 } 2370 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2371 SCTPDBG(SCTP_DEBUG_TIMER2, 2372 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2373 t_type, inp, stcb, net); 2374 return; 2375 } 2376 tmr = &net->pmtu_timer; 2377 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2378 break; 2379 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2380 /* Here we use the RTO of the destination. */ 2381 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2382 #ifdef INVARIANTS 2383 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2384 t_type, inp, stcb, net); 2385 #else 2386 return; 2387 #endif 2388 } 2389 tmr = &net->rxt_timer; 2390 if (net->RTO == 0) { 2391 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2392 } else { 2393 to_ticks = sctp_msecs_to_ticks(net->RTO); 2394 } 2395 break; 2396 case SCTP_TIMER_TYPE_ASCONF: 2397 /* 2398 * Here the timer comes from the stcb but its value is from 2399 * the net's RTO. 2400 */ 2401 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2402 #ifdef INVARIANTS 2403 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2404 t_type, inp, stcb, net); 2405 #else 2406 return; 2407 #endif 2408 } 2409 tmr = &stcb->asoc.asconf_timer; 2410 if (net->RTO == 0) { 2411 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2412 } else { 2413 to_ticks = sctp_msecs_to_ticks(net->RTO); 2414 } 2415 break; 2416 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2417 /* 2418 * Here we use the endpoints shutdown guard timer usually 2419 * about 3 minutes. 2420 */ 2421 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2422 #ifdef INVARIANTS 2423 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2424 t_type, inp, stcb, net); 2425 #else 2426 return; 2427 #endif 2428 } 2429 tmr = &stcb->asoc.shut_guard_timer; 2430 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2431 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2432 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2433 } else { 2434 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2435 } 2436 } else { 2437 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2438 } 2439 break; 2440 case SCTP_TIMER_TYPE_AUTOCLOSE: 2441 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2442 #ifdef INVARIANTS 2443 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2444 t_type, inp, stcb, net); 2445 #else 2446 return; 2447 #endif 2448 } 2449 tmr = &stcb->asoc.autoclose_timer; 2450 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2451 break; 2452 case SCTP_TIMER_TYPE_STRRESET: 2453 /* 2454 * Here the timer comes from the stcb but its value is from 2455 * the net's RTO. 
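		 * As the tables above and below show, the net is only used to
		 * pick the timeout value here; it is not stored in the timer
		 * (tmr->net is set to NULL further down), which is why
		 * sctp_timer_stop() is called with net == NULL for this type.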
2456 */ 2457 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2458 #ifdef INVARIANTS 2459 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2460 t_type, inp, stcb, net); 2461 #else 2462 return; 2463 #endif 2464 } 2465 tmr = &stcb->asoc.strreset_timer; 2466 if (net->RTO == 0) { 2467 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2468 } else { 2469 to_ticks = sctp_msecs_to_ticks(net->RTO); 2470 } 2471 break; 2472 case SCTP_TIMER_TYPE_INPKILL: 2473 /* 2474 * The inp is setup to die. We re-use the signature_change 2475 * timer since that has stopped and we are in the GONE 2476 * state. 2477 */ 2478 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2479 #ifdef INVARIANTS 2480 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2481 t_type, inp, stcb, net); 2482 #else 2483 return; 2484 #endif 2485 } 2486 tmr = &inp->sctp_ep.signature_change; 2487 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2488 break; 2489 case SCTP_TIMER_TYPE_ASOCKILL: 2490 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2491 #ifdef INVARIANTS 2492 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2493 t_type, inp, stcb, net); 2494 #else 2495 return; 2496 #endif 2497 } 2498 tmr = &stcb->asoc.strreset_timer; 2499 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2500 break; 2501 case SCTP_TIMER_TYPE_ADDR_WQ: 2502 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2503 #ifdef INVARIANTS 2504 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2505 t_type, inp, stcb, net); 2506 #else 2507 return; 2508 #endif 2509 } 2510 /* Only 1 tick away :-) */ 2511 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2512 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2513 break; 2514 case SCTP_TIMER_TYPE_PRIM_DELETED: 2515 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2516 #ifdef INVARIANTS 2517 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2518 t_type, inp, stcb, net); 2519 #else 2520 return; 2521 #endif 2522 } 2523 tmr = &stcb->asoc.delete_prim_timer; 2524 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2525 break; 2526 default: 2527 #ifdef INVARIANTS 2528 panic("Unknown timer type %d", t_type); 2529 #else 2530 return; 2531 #endif 2532 } 2533 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2534 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2535 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2536 /* 2537 * We do NOT allow you to have it already running. If it is, 2538 * we leave the current one up unchanged. 2539 */ 2540 SCTPDBG(SCTP_DEBUG_TIMER2, 2541 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2542 t_type, inp, stcb, net); 2543 return; 2544 } 2545 /* At this point we can proceed. */ 2546 if (t_type == SCTP_TIMER_TYPE_SEND) { 2547 stcb->asoc.num_send_timers_up++; 2548 } 2549 tmr->stopped_from = 0; 2550 tmr->type = t_type; 2551 tmr->ep = (void *)inp; 2552 tmr->tcb = (void *)stcb; 2553 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2554 tmr->net = NULL; 2555 } else { 2556 tmr->net = (void *)net; 2557 } 2558 tmr->self = (void *)tmr; 2559 tmr->vnet = (void *)curvnet; 2560 tmr->ticks = sctp_get_tick_count(); 2561 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2562 SCTPDBG(SCTP_DEBUG_TIMER2, 2563 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2564 t_type, to_ticks, inp, stcb, net); 2565 /* 2566 * If this is a newly scheduled callout, as opposed to a 2567 * rescheduled one, increment relevant reference counts. 
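		 * These increments are paired with the SCTP_INP_DECR_REF(),
		 * atomic_subtract_int(&stcb->asoc.refcnt, 1) and
		 * sctp_free_remote_addr() calls in the out_decr path of
		 * sctp_timeout_handler() and in sctp_timer_stop(), so an
		 * armed timer holds one reference on each object it points at.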
2568 */ 2569 if (tmr->ep != NULL) { 2570 SCTP_INP_INCR_REF(inp); 2571 } 2572 if (tmr->tcb != NULL) { 2573 atomic_add_int(&stcb->asoc.refcnt, 1); 2574 } 2575 if (tmr->net != NULL) { 2576 atomic_add_int(&net->ref_count, 1); 2577 } 2578 } else { 2579 /* 2580 * This should not happen, since we checked for pending 2581 * above. 2582 */ 2583 SCTPDBG(SCTP_DEBUG_TIMER2, 2584 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2585 t_type, to_ticks, inp, stcb, net); 2586 } 2587 return; 2588 } 2589 2590 /*- 2591 * The following table shows which parameters must be provided 2592 * when calling sctp_timer_stop(). For parameters not being 2593 * provided, NULL must be used. 2594 * 2595 * |Name |inp |stcb|net | 2596 * |-----------------------------|----|----|----| 2597 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2598 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2599 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2600 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2601 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2602 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2603 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2604 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2605 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2606 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2607 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2608 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2609 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2610 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2611 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2612 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2613 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2614 * 2615 */ 2616 2617 void 2618 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2619 struct sctp_nets *net, uint32_t from) 2620 { 2621 struct sctp_timer *tmr; 2622 2623 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2624 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2625 t_type, stcb, stcb->sctp_ep)); 2626 if (stcb != NULL) { 2627 SCTP_TCB_LOCK_ASSERT(stcb); 2628 } else if (inp != NULL) { 2629 SCTP_INP_WLOCK_ASSERT(inp); 2630 } else { 2631 SCTP_WQ_ADDR_LOCK_ASSERT(); 2632 } 2633 tmr = NULL; 2634 switch (t_type) { 2635 case SCTP_TIMER_TYPE_SEND: 2636 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2637 #ifdef INVARIANTS 2638 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2639 t_type, inp, stcb, net); 2640 #else 2641 return; 2642 #endif 2643 } 2644 tmr = &net->rxt_timer; 2645 break; 2646 case SCTP_TIMER_TYPE_INIT: 2647 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2648 #ifdef INVARIANTS 2649 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2650 t_type, inp, stcb, net); 2651 #else 2652 return; 2653 #endif 2654 } 2655 tmr = &net->rxt_timer; 2656 break; 2657 case SCTP_TIMER_TYPE_RECV: 2658 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2659 #ifdef INVARIANTS 2660 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2661 t_type, inp, stcb, net); 2662 #else 2663 return; 2664 #endif 2665 } 2666 tmr = &stcb->asoc.dack_timer; 2667 break; 2668 case SCTP_TIMER_TYPE_SHUTDOWN: 2669 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2670 #ifdef INVARIANTS 2671 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2672 t_type, inp, stcb, net); 2673 #else 2674 return; 2675 #endif 2676 } 2677 tmr = &net->rxt_timer; 2678 break; 2679 case SCTP_TIMER_TYPE_HEARTBEAT: 2680 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2681 #ifdef INVARIANTS 2682 panic("sctp_timer_stop of type %d: inp = 
%p, stcb = %p, net = %p", 2683 t_type, inp, stcb, net); 2684 #else 2685 return; 2686 #endif 2687 } 2688 tmr = &net->hb_timer; 2689 break; 2690 case SCTP_TIMER_TYPE_COOKIE: 2691 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2692 #ifdef INVARIANTS 2693 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2694 t_type, inp, stcb, net); 2695 #else 2696 return; 2697 #endif 2698 } 2699 tmr = &net->rxt_timer; 2700 break; 2701 case SCTP_TIMER_TYPE_NEWCOOKIE: 2702 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2703 #ifdef INVARIANTS 2704 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2705 t_type, inp, stcb, net); 2706 #else 2707 return; 2708 #endif 2709 } 2710 tmr = &inp->sctp_ep.signature_change; 2711 break; 2712 case SCTP_TIMER_TYPE_PATHMTURAISE: 2713 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2714 #ifdef INVARIANTS 2715 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2716 t_type, inp, stcb, net); 2717 #else 2718 return; 2719 #endif 2720 } 2721 tmr = &net->pmtu_timer; 2722 break; 2723 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2724 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2725 #ifdef INVARIANTS 2726 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2727 t_type, inp, stcb, net); 2728 #else 2729 return; 2730 #endif 2731 } 2732 tmr = &net->rxt_timer; 2733 break; 2734 case SCTP_TIMER_TYPE_ASCONF: 2735 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2736 #ifdef INVARIANTS 2737 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2738 t_type, inp, stcb, net); 2739 #else 2740 return; 2741 #endif 2742 } 2743 tmr = &stcb->asoc.asconf_timer; 2744 break; 2745 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2746 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2747 #ifdef INVARIANTS 2748 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2749 t_type, inp, stcb, net); 2750 #else 2751 return; 2752 #endif 2753 } 2754 tmr = &stcb->asoc.shut_guard_timer; 2755 break; 2756 case SCTP_TIMER_TYPE_AUTOCLOSE: 2757 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2758 #ifdef INVARIANTS 2759 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2760 t_type, inp, stcb, net); 2761 #else 2762 return; 2763 #endif 2764 } 2765 tmr = &stcb->asoc.autoclose_timer; 2766 break; 2767 case SCTP_TIMER_TYPE_STRRESET: 2768 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2769 #ifdef INVARIANTS 2770 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2771 t_type, inp, stcb, net); 2772 #else 2773 return; 2774 #endif 2775 } 2776 tmr = &stcb->asoc.strreset_timer; 2777 break; 2778 case SCTP_TIMER_TYPE_INPKILL: 2779 /* 2780 * The inp is setup to die. We re-use the signature_change 2781 * timer since that has stopped and we are in the GONE 2782 * state. 
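		 * Note that the INPKILL case of sctp_timeout_handler() stops
		 * this timer on itself (to drop the reference it would
		 * otherwise keep) right before freeing the endpoint with
		 * sctp_inpcb_free().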
2783 */ 2784 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2785 #ifdef INVARIANTS 2786 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2787 t_type, inp, stcb, net); 2788 #else 2789 return; 2790 #endif 2791 } 2792 tmr = &inp->sctp_ep.signature_change; 2793 break; 2794 case SCTP_TIMER_TYPE_ASOCKILL: 2795 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2796 #ifdef INVARIANTS 2797 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2798 t_type, inp, stcb, net); 2799 #else 2800 return; 2801 #endif 2802 } 2803 tmr = &stcb->asoc.strreset_timer; 2804 break; 2805 case SCTP_TIMER_TYPE_ADDR_WQ: 2806 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2807 #ifdef INVARIANTS 2808 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2809 t_type, inp, stcb, net); 2810 #else 2811 return; 2812 #endif 2813 } 2814 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2815 break; 2816 case SCTP_TIMER_TYPE_PRIM_DELETED: 2817 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2818 #ifdef INVARIANTS 2819 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2820 t_type, inp, stcb, net); 2821 #else 2822 return; 2823 #endif 2824 } 2825 tmr = &stcb->asoc.delete_prim_timer; 2826 break; 2827 default: 2828 #ifdef INVARIANTS 2829 panic("Unknown timer type %d", t_type); 2830 #else 2831 return; 2832 #endif 2833 } 2834 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2835 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2836 (tmr->type != t_type)) { 2837 /* 2838 * Ok we have a timer that is under joint use. Cookie timer 2839 * per chance with the SEND timer. We therefore are NOT 2840 * running the timer that the caller wants stopped. So just 2841 * return. 2842 */ 2843 SCTPDBG(SCTP_DEBUG_TIMER2, 2844 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2845 t_type, inp, stcb, net); 2846 return; 2847 } 2848 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2849 stcb->asoc.num_send_timers_up--; 2850 if (stcb->asoc.num_send_timers_up < 0) { 2851 stcb->asoc.num_send_timers_up = 0; 2852 } 2853 } 2854 tmr->self = NULL; 2855 tmr->stopped_from = from; 2856 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2857 KASSERT(tmr->ep == inp, 2858 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2859 t_type, inp, tmr->ep)); 2860 KASSERT(tmr->tcb == stcb, 2861 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2862 t_type, stcb, tmr->tcb)); 2863 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2864 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2865 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2866 t_type, net, tmr->net)); 2867 SCTPDBG(SCTP_DEBUG_TIMER2, 2868 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2869 t_type, inp, stcb, net); 2870 /* 2871 * If the timer was actually stopped, decrement reference 2872 * counts that were incremented in sctp_timer_start(). 2873 */ 2874 if (tmr->ep != NULL) { 2875 tmr->ep = NULL; 2876 SCTP_INP_DECR_REF(inp); 2877 } 2878 if (tmr->tcb != NULL) { 2879 tmr->tcb = NULL; 2880 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2881 } 2882 if (tmr->net != NULL) { 2883 struct sctp_nets *tmr_net; 2884 2885 /* 2886 * Can't use net, since it doesn't work for 2887 * SCTP_TIMER_TYPE_ASCONF. 
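			 * Per the table above, the ASCONF timer is stopped
			 * with net == NULL while tmr->net still holds the
			 * reference taken in sctp_timer_start(), so the stored
			 * pointer, not the argument, must be released.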
2888 */ 2889 tmr_net = tmr->net; 2890 tmr->net = NULL; 2891 sctp_free_remote_addr(tmr_net); 2892 } 2893 } else { 2894 SCTPDBG(SCTP_DEBUG_TIMER2, 2895 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2896 t_type, inp, stcb, net); 2897 } 2898 return; 2899 } 2900 2901 uint32_t 2902 sctp_calculate_len(struct mbuf *m) 2903 { 2904 struct mbuf *at; 2905 uint32_t tlen; 2906 2907 tlen = 0; 2908 for (at = m; at != NULL; at = SCTP_BUF_NEXT(at)) { 2909 tlen += SCTP_BUF_LEN(at); 2910 } 2911 return (tlen); 2912 } 2913 2914 /* 2915 * Given an association and starting time of the current RTT period, update 2916 * RTO in number of msecs. net should point to the current network. 2917 * Return 1, if an RTO update was performed, return 0 if no update was 2918 * performed due to invalid starting point. 2919 */ 2920 2921 int 2922 sctp_calculate_rto(struct sctp_tcb *stcb, 2923 struct sctp_association *asoc, 2924 struct sctp_nets *net, 2925 struct timeval *old, 2926 int rtt_from_sack) 2927 { 2928 struct timeval now; 2929 uint64_t rtt_us; /* RTT in us */ 2930 int32_t rtt; /* RTT in ms */ 2931 uint32_t new_rto; 2932 int first_measure = 0; 2933 2934 /************************/ 2935 /* 1. calculate new RTT */ 2936 /************************/ 2937 /* get the current time */ 2938 if (stcb->asoc.use_precise_time) { 2939 (void)SCTP_GETPTIME_TIMEVAL(&now); 2940 } else { 2941 (void)SCTP_GETTIME_TIMEVAL(&now); 2942 } 2943 if ((old->tv_sec > now.tv_sec) || 2944 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2945 /* The starting point is in the future. */ 2946 return (0); 2947 } 2948 timevalsub(&now, old); 2949 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2950 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2951 /* The RTT is larger than a sane value. */ 2952 return (0); 2953 } 2954 /* store the current RTT in us */ 2955 net->rtt = rtt_us; 2956 /* compute rtt in ms */ 2957 rtt = (int32_t)(net->rtt / 1000); 2958 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2959 /* 2960 * Tell the CC module that a new update has just occurred 2961 * from a sack 2962 */ 2963 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2964 } 2965 /* 2966 * Do we need to determine the lan? We do this only on sacks i.e. 2967 * RTT being determined from data not non-data (HB/INIT->INITACK). 2968 */ 2969 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2970 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2971 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2972 net->lan_type = SCTP_LAN_INTERNET; 2973 } else { 2974 net->lan_type = SCTP_LAN_LOCAL; 2975 } 2976 } 2977 2978 /***************************/ 2979 /* 2. update RTTVAR & SRTT */ 2980 /***************************/ 2981 /*- 2982 * Compute the scaled average lastsa and the 2983 * scaled variance lastsv as described in van Jacobson 2984 * Paper "Congestion Avoidance and Control", Annex A. 
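	 * In un-scaled terms the update below is
	 *
	 *	SRTT   <- SRTT   + (RTT - SRTT) / 2^SCTP_RTT_SHIFT
	 *	RTTVAR <- RTTVAR + (|RTT - SRTT| - RTTVAR) / 2^SCTP_RTT_VAR_SHIFT
	 *	RTO     = SRTT   + RTTVAR * 2^SCTP_RTT_VAR_SHIFT
	 *
	 * With the customary shift values (3 and 2) this is the RFC 4960
	 * rule RTO = SRTT + 4 * RTTVAR; lastsa and lastsv simply keep SRTT
	 * and RTTVAR pre-multiplied by those powers of two so the whole
	 * computation stays in integer arithmetic.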
2985 * 2986 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2987 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2988 */ 2989 if (net->RTO_measured) { 2990 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2991 net->lastsa += rtt; 2992 if (rtt < 0) { 2993 rtt = -rtt; 2994 } 2995 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 2996 net->lastsv += rtt; 2997 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2998 rto_logging(net, SCTP_LOG_RTTVAR); 2999 } 3000 } else { 3001 /* First RTO measurement */ 3002 net->RTO_measured = 1; 3003 first_measure = 1; 3004 net->lastsa = rtt << SCTP_RTT_SHIFT; 3005 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3006 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3007 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3008 } 3009 } 3010 if (net->lastsv == 0) { 3011 net->lastsv = SCTP_CLOCK_GRANULARITY; 3012 } 3013 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3014 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3015 (stcb->asoc.sat_network_lockout == 0)) { 3016 stcb->asoc.sat_network = 1; 3017 } else if ((!first_measure) && stcb->asoc.sat_network) { 3018 stcb->asoc.sat_network = 0; 3019 stcb->asoc.sat_network_lockout = 1; 3020 } 3021 /* bound it, per C6/C7 in Section 5.3.1 */ 3022 if (new_rto < stcb->asoc.minrto) { 3023 new_rto = stcb->asoc.minrto; 3024 } 3025 if (new_rto > stcb->asoc.maxrto) { 3026 new_rto = stcb->asoc.maxrto; 3027 } 3028 net->RTO = new_rto; 3029 return (1); 3030 } 3031 3032 /* 3033 * return a pointer to a contiguous piece of data from the given mbuf chain 3034 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3035 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 3036 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 3037 */ 3038 caddr_t 3039 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3040 { 3041 uint32_t count; 3042 uint8_t *ptr; 3043 3044 ptr = in_ptr; 3045 if ((off < 0) || (len <= 0)) 3046 return (NULL); 3047 3048 /* find the desired start location */ 3049 while ((m != NULL) && (off > 0)) { 3050 if (off < SCTP_BUF_LEN(m)) 3051 break; 3052 off -= SCTP_BUF_LEN(m); 3053 m = SCTP_BUF_NEXT(m); 3054 } 3055 if (m == NULL) 3056 return (NULL); 3057 3058 /* is the current mbuf large enough (eg. contiguous)? */ 3059 if ((SCTP_BUF_LEN(m) - off) >= len) { 3060 return (mtod(m, caddr_t)+off); 3061 } else { 3062 /* else, it spans more than one mbuf, so save a temp copy... */ 3063 while ((m != NULL) && (len > 0)) { 3064 count = min(SCTP_BUF_LEN(m) - off, len); 3065 memcpy(ptr, mtod(m, caddr_t)+off, count); 3066 len -= count; 3067 ptr += count; 3068 off = 0; 3069 m = SCTP_BUF_NEXT(m); 3070 } 3071 if ((m == NULL) && (len > 0)) 3072 return (NULL); 3073 else 3074 return ((caddr_t)in_ptr); 3075 } 3076 } 3077 3078 struct sctp_paramhdr * 3079 sctp_get_next_param(struct mbuf *m, 3080 int offset, 3081 struct sctp_paramhdr *pull, 3082 int pull_limit) 3083 { 3084 /* This just provides a typed signature to Peter's Pull routine */ 3085 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3086 (uint8_t *)pull)); 3087 } 3088 3089 struct mbuf * 3090 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3091 { 3092 struct mbuf *m_last; 3093 caddr_t dp; 3094 3095 if (padlen > 3) { 3096 return (NULL); 3097 } 3098 if (padlen <= M_TRAILINGSPACE(m)) { 3099 /* 3100 * The easy way. 
We hope the majority of the time we hit 3101 * here :) 3102 */ 3103 m_last = m; 3104 } else { 3105 /* Hard way we must grow the mbuf chain */ 3106 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3107 if (m_last == NULL) { 3108 return (NULL); 3109 } 3110 SCTP_BUF_LEN(m_last) = 0; 3111 SCTP_BUF_NEXT(m_last) = NULL; 3112 SCTP_BUF_NEXT(m) = m_last; 3113 } 3114 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3115 SCTP_BUF_LEN(m_last) += padlen; 3116 memset(dp, 0, padlen); 3117 return (m_last); 3118 } 3119 3120 struct mbuf * 3121 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3122 { 3123 /* find the last mbuf in chain and pad it */ 3124 struct mbuf *m_at; 3125 3126 if (last_mbuf != NULL) { 3127 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3128 } else { 3129 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3130 if (SCTP_BUF_NEXT(m_at) == NULL) { 3131 return (sctp_add_pad_tombuf(m_at, padval)); 3132 } 3133 } 3134 } 3135 return (NULL); 3136 } 3137 3138 static void 3139 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3140 uint16_t error, struct sctp_abort_chunk *abort, 3141 bool from_peer, bool timedout, int so_locked) 3142 { 3143 struct mbuf *m_notify; 3144 struct sctp_assoc_change *sac; 3145 struct sctp_queued_to_read *control; 3146 unsigned int notif_len; 3147 uint16_t abort_len; 3148 unsigned int i; 3149 3150 KASSERT(abort == NULL || from_peer, 3151 ("sctp_notify_assoc_change: ABORT chunk provided for local termination")); 3152 KASSERT(!from_peer || !timedout, 3153 ("sctp_notify_assoc_change: timeouts can only be local")); 3154 if (stcb == NULL) { 3155 return; 3156 } 3157 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3158 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3159 if (abort != NULL) { 3160 abort_len = ntohs(abort->ch.chunk_length); 3161 /* 3162 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3163 * contiguous. 3164 */ 3165 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3166 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3167 } 3168 } else { 3169 abort_len = 0; 3170 } 3171 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3172 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3173 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3174 notif_len += abort_len; 3175 } 3176 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3177 if (m_notify == NULL) { 3178 /* Retry with smaller value. 
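			 * That is, fall back to the bare sctp_assoc_change
			 * header and drop the optional sac_info payload.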
*/ 3179 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3180 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3181 if (m_notify == NULL) { 3182 goto set_error; 3183 } 3184 } 3185 SCTP_BUF_NEXT(m_notify) = NULL; 3186 sac = mtod(m_notify, struct sctp_assoc_change *); 3187 memset(sac, 0, notif_len); 3188 sac->sac_type = SCTP_ASSOC_CHANGE; 3189 sac->sac_flags = 0; 3190 sac->sac_length = sizeof(struct sctp_assoc_change); 3191 sac->sac_state = state; 3192 sac->sac_error = error; 3193 if (state == SCTP_CANT_STR_ASSOC) { 3194 sac->sac_outbound_streams = 0; 3195 sac->sac_inbound_streams = 0; 3196 } else { 3197 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3198 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3199 } 3200 sac->sac_assoc_id = sctp_get_associd(stcb); 3201 if (notif_len > sizeof(struct sctp_assoc_change)) { 3202 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3203 i = 0; 3204 if (stcb->asoc.prsctp_supported == 1) { 3205 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3206 } 3207 if (stcb->asoc.auth_supported == 1) { 3208 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3209 } 3210 if (stcb->asoc.asconf_supported == 1) { 3211 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3212 } 3213 if (stcb->asoc.idata_supported == 1) { 3214 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3215 } 3216 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3217 if (stcb->asoc.reconfig_supported == 1) { 3218 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3219 } 3220 sac->sac_length += i; 3221 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3222 memcpy(sac->sac_info, abort, abort_len); 3223 sac->sac_length += abort_len; 3224 } 3225 } 3226 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3227 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3228 0, 0, stcb->asoc.context, 0, 0, 0, 3229 m_notify); 3230 if (control != NULL) { 3231 control->length = SCTP_BUF_LEN(m_notify); 3232 control->spec_flags = M_NOTIFICATION; 3233 /* not that we need this */ 3234 control->tail_mbuf = m_notify; 3235 sctp_add_to_readq(stcb->sctp_ep, stcb, 3236 control, 3237 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3238 so_locked); 3239 } else { 3240 sctp_m_freem(m_notify); 3241 } 3242 } 3243 /* 3244 * For 1-to-1 style sockets, we send up and error when an ABORT 3245 * comes in. 
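	 * The mapping below is: ECONNREFUSED if the peer aborted while we
	 * were still in COOKIE_WAIT, ECONNRESET for a peer ABORT of an
	 * established association, ETIMEDOUT for a local timeout and
	 * ECONNABORTED for any other local termination.  Illustrative sketch
	 * of what a one-to-one style application would then see (user-space
	 * code, not part of this file; fd is a placeholder for the SCTP
	 * socket):
	 *
	 *	char buf[1024];
	 *	ssize_t n;
	 *
	 *	n = recv(fd, buf, sizeof(buf), 0);
	 *	if (n < 0 && errno == ECONNRESET) {
	 *		// the peer ABORTed the established association
	 *	}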
3246 */ 3247 set_error: 3248 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3249 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3250 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3251 SOCK_LOCK(stcb->sctp_socket); 3252 if (from_peer) { 3253 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3254 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3255 stcb->sctp_socket->so_error = ECONNREFUSED; 3256 } else { 3257 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3258 stcb->sctp_socket->so_error = ECONNRESET; 3259 } 3260 } else { 3261 if (timedout) { 3262 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3263 stcb->sctp_socket->so_error = ETIMEDOUT; 3264 } else { 3265 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3266 stcb->sctp_socket->so_error = ECONNABORTED; 3267 } 3268 } 3269 SOCK_UNLOCK(stcb->sctp_socket); 3270 } 3271 /* Wake ANY sleepers */ 3272 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3273 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3274 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3275 socantrcvmore(stcb->sctp_socket); 3276 } 3277 sorwakeup(stcb->sctp_socket); 3278 sowwakeup(stcb->sctp_socket); 3279 } 3280 3281 static void 3282 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3283 struct sockaddr *sa, uint32_t error, int so_locked) 3284 { 3285 struct mbuf *m_notify; 3286 struct sctp_paddr_change *spc; 3287 struct sctp_queued_to_read *control; 3288 3289 if ((stcb == NULL) || 3290 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3291 /* event not enabled */ 3292 return; 3293 } 3294 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3295 if (m_notify == NULL) 3296 return; 3297 SCTP_BUF_LEN(m_notify) = 0; 3298 spc = mtod(m_notify, struct sctp_paddr_change *); 3299 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3300 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3301 spc->spc_flags = 0; 3302 spc->spc_length = sizeof(struct sctp_paddr_change); 3303 switch (sa->sa_family) { 3304 #ifdef INET 3305 case AF_INET: 3306 #ifdef INET6 3307 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3308 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3309 (struct sockaddr_in6 *)&spc->spc_aaddr); 3310 } else { 3311 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3312 } 3313 #else 3314 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3315 #endif 3316 break; 3317 #endif 3318 #ifdef INET6 3319 case AF_INET6: 3320 { 3321 struct sockaddr_in6 *sin6; 3322 3323 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3324 3325 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3326 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3327 if (sin6->sin6_scope_id == 0) { 3328 /* recover scope_id for user */ 3329 (void)sa6_recoverscope(sin6); 3330 } else { 3331 /* clear embedded scope_id for user */ 3332 in6_clearscope(&sin6->sin6_addr); 3333 } 3334 } 3335 break; 3336 } 3337 #endif 3338 default: 3339 /* TSNH */ 3340 break; 3341 } 3342 spc->spc_state = state; 3343 spc->spc_error = error; 3344 spc->spc_assoc_id = sctp_get_associd(stcb); 3345 3346 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3347 SCTP_BUF_NEXT(m_notify) = NULL; 3348 3349 /* append to socket */ 3350 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3351 0, 0, stcb->asoc.context, 0, 0, 0, 3352 m_notify); 3353 if 
(control == NULL) { 3354 /* no memory */ 3355 sctp_m_freem(m_notify); 3356 return; 3357 } 3358 control->length = SCTP_BUF_LEN(m_notify); 3359 control->spec_flags = M_NOTIFICATION; 3360 /* not that we need this */ 3361 control->tail_mbuf = m_notify; 3362 sctp_add_to_readq(stcb->sctp_ep, stcb, 3363 control, 3364 &stcb->sctp_socket->so_rcv, 1, 3365 SCTP_READ_LOCK_NOT_HELD, 3366 so_locked); 3367 } 3368 3369 static void 3370 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3371 struct sctp_tmit_chunk *chk, int so_locked) 3372 { 3373 struct mbuf *m_notify; 3374 struct sctp_send_failed *ssf; 3375 struct sctp_send_failed_event *ssfe; 3376 struct sctp_queued_to_read *control; 3377 struct sctp_chunkhdr *chkhdr; 3378 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3379 3380 if ((stcb == NULL) || 3381 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3382 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3383 /* event not enabled */ 3384 return; 3385 } 3386 3387 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3388 notifhdr_len = sizeof(struct sctp_send_failed_event); 3389 } else { 3390 notifhdr_len = sizeof(struct sctp_send_failed); 3391 } 3392 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3393 if (m_notify == NULL) 3394 /* no space left */ 3395 return; 3396 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3397 if (stcb->asoc.idata_supported) { 3398 chkhdr_len = sizeof(struct sctp_idata_chunk); 3399 } else { 3400 chkhdr_len = sizeof(struct sctp_data_chunk); 3401 } 3402 /* Use some defaults in case we can't access the chunk header */ 3403 if (chk->send_size >= chkhdr_len) { 3404 payload_len = chk->send_size - chkhdr_len; 3405 } else { 3406 payload_len = 0; 3407 } 3408 padding_len = 0; 3409 if (chk->data != NULL) { 3410 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3411 if (chkhdr != NULL) { 3412 chk_len = ntohs(chkhdr->chunk_length); 3413 if ((chk_len >= chkhdr_len) && 3414 (chk->send_size >= chk_len) && 3415 (chk->send_size - chk_len < 4)) { 3416 padding_len = chk->send_size - chk_len; 3417 payload_len = chk->send_size - chkhdr_len - padding_len; 3418 } 3419 } 3420 } 3421 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3422 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3423 memset(ssfe, 0, notifhdr_len); 3424 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3425 if (sent) { 3426 ssfe->ssfe_flags = SCTP_DATA_SENT; 3427 } else { 3428 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3429 } 3430 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3431 ssfe->ssfe_error = error; 3432 /* not exactly what the user sent in, but should be close :) */ 3433 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3434 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3435 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3436 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3437 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3438 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3439 } else { 3440 ssf = mtod(m_notify, struct sctp_send_failed *); 3441 memset(ssf, 0, notifhdr_len); 3442 ssf->ssf_type = SCTP_SEND_FAILED; 3443 if (sent) { 3444 ssf->ssf_flags = SCTP_DATA_SENT; 3445 } else { 3446 ssf->ssf_flags = SCTP_DATA_UNSENT; 3447 } 3448 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3449 ssf->ssf_error = error; 3450 /* not exactly what the user sent in, but should be close :) */ 3451 
ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3452 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3453 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3454 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3455 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3456 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3457 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3458 } 3459 if (chk->data != NULL) { 3460 /* Trim off the sctp chunk header (it should be there) */ 3461 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3462 m_adj(chk->data, chkhdr_len); 3463 m_adj(chk->data, -padding_len); 3464 sctp_mbuf_crush(chk->data); 3465 chk->send_size -= (chkhdr_len + padding_len); 3466 } 3467 } 3468 SCTP_BUF_NEXT(m_notify) = chk->data; 3469 /* Steal off the mbuf */ 3470 chk->data = NULL; 3471 /* 3472 * For this case, we check the actual socket buffer, since the assoc 3473 * is going away we don't want to overfill the socket buffer for a 3474 * non-reader 3475 */ 3476 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3477 sctp_m_freem(m_notify); 3478 return; 3479 } 3480 /* append to socket */ 3481 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3482 0, 0, stcb->asoc.context, 0, 0, 0, 3483 m_notify); 3484 if (control == NULL) { 3485 /* no memory */ 3486 sctp_m_freem(m_notify); 3487 return; 3488 } 3489 control->length = SCTP_BUF_LEN(m_notify); 3490 control->spec_flags = M_NOTIFICATION; 3491 /* not that we need this */ 3492 control->tail_mbuf = m_notify; 3493 sctp_add_to_readq(stcb->sctp_ep, stcb, 3494 control, 3495 &stcb->sctp_socket->so_rcv, 1, 3496 SCTP_READ_LOCK_NOT_HELD, 3497 so_locked); 3498 } 3499 3500 static void 3501 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3502 struct sctp_stream_queue_pending *sp, int so_locked) 3503 { 3504 struct mbuf *m_notify; 3505 struct sctp_send_failed *ssf; 3506 struct sctp_send_failed_event *ssfe; 3507 struct sctp_queued_to_read *control; 3508 int notifhdr_len; 3509 3510 if ((stcb == NULL) || 3511 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3512 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3513 /* event not enabled */ 3514 return; 3515 } 3516 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3517 notifhdr_len = sizeof(struct sctp_send_failed_event); 3518 } else { 3519 notifhdr_len = sizeof(struct sctp_send_failed); 3520 } 3521 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3522 if (m_notify == NULL) { 3523 /* no space left */ 3524 return; 3525 } 3526 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3527 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3528 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3529 memset(ssfe, 0, notifhdr_len); 3530 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3531 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3532 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3533 ssfe->ssfe_error = error; 3534 /* not exactly what the user sent in, but should be close :) */ 3535 ssfe->ssfe_info.snd_sid = sp->sid; 3536 if (sp->some_taken) { 3537 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3538 } else { 3539 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3540 } 3541 ssfe->ssfe_info.snd_ppid = sp->ppid; 3542 ssfe->ssfe_info.snd_context = sp->context; 3543 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3544 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3545 } else { 
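		/*
		 * Old-style sctp_send_failed notification, kept for
		 * applications that enabled only the deprecated
		 * SCTP_SEND_FAILED event and not SCTP_SEND_FAILED_EVENT.
		 */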
3546 ssf = mtod(m_notify, struct sctp_send_failed *); 3547 memset(ssf, 0, notifhdr_len); 3548 ssf->ssf_type = SCTP_SEND_FAILED; 3549 ssf->ssf_flags = SCTP_DATA_UNSENT; 3550 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3551 ssf->ssf_error = error; 3552 /* not exactly what the user sent in, but should be close :) */ 3553 ssf->ssf_info.sinfo_stream = sp->sid; 3554 ssf->ssf_info.sinfo_ssn = 0; 3555 if (sp->some_taken) { 3556 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3557 } else { 3558 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3559 } 3560 ssf->ssf_info.sinfo_ppid = sp->ppid; 3561 ssf->ssf_info.sinfo_context = sp->context; 3562 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3563 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3564 } 3565 SCTP_BUF_NEXT(m_notify) = sp->data; 3566 3567 /* Steal off the mbuf */ 3568 sp->data = NULL; 3569 /* 3570 * For this case, we check the actual socket buffer, since the assoc 3571 * is going away we don't want to overfill the socket buffer for a 3572 * non-reader 3573 */ 3574 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3575 sctp_m_freem(m_notify); 3576 return; 3577 } 3578 /* append to socket */ 3579 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3580 0, 0, stcb->asoc.context, 0, 0, 0, 3581 m_notify); 3582 if (control == NULL) { 3583 /* no memory */ 3584 sctp_m_freem(m_notify); 3585 return; 3586 } 3587 control->length = SCTP_BUF_LEN(m_notify); 3588 control->spec_flags = M_NOTIFICATION; 3589 /* not that we need this */ 3590 control->tail_mbuf = m_notify; 3591 sctp_add_to_readq(stcb->sctp_ep, stcb, 3592 control, 3593 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3594 } 3595 3596 static void 3597 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3598 { 3599 struct mbuf *m_notify; 3600 struct sctp_adaptation_event *sai; 3601 struct sctp_queued_to_read *control; 3602 3603 if ((stcb == NULL) || 3604 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3605 /* event not enabled */ 3606 return; 3607 } 3608 3609 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3610 if (m_notify == NULL) 3611 /* no space left */ 3612 return; 3613 SCTP_BUF_LEN(m_notify) = 0; 3614 sai = mtod(m_notify, struct sctp_adaptation_event *); 3615 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3616 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3617 sai->sai_flags = 0; 3618 sai->sai_length = sizeof(struct sctp_adaptation_event); 3619 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3620 sai->sai_assoc_id = sctp_get_associd(stcb); 3621 3622 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3623 SCTP_BUF_NEXT(m_notify) = NULL; 3624 3625 /* append to socket */ 3626 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3627 0, 0, stcb->asoc.context, 0, 0, 0, 3628 m_notify); 3629 if (control == NULL) { 3630 /* no memory */ 3631 sctp_m_freem(m_notify); 3632 return; 3633 } 3634 control->length = SCTP_BUF_LEN(m_notify); 3635 control->spec_flags = M_NOTIFICATION; 3636 /* not that we need this */ 3637 control->tail_mbuf = m_notify; 3638 sctp_add_to_readq(stcb->sctp_ep, stcb, 3639 control, 3640 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3641 } 3642 3643 /* This always must be called with the read-queue LOCKED in the INP */ 3644 static void 3645 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3646 uint32_t val, int 
so_locked) 3647 { 3648 struct mbuf *m_notify; 3649 struct sctp_pdapi_event *pdapi; 3650 struct sctp_queued_to_read *control; 3651 struct sockbuf *sb; 3652 3653 if ((stcb == NULL) || 3654 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3655 /* event not enabled */ 3656 return; 3657 } 3658 3659 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3660 if (m_notify == NULL) 3661 /* no space left */ 3662 return; 3663 SCTP_BUF_LEN(m_notify) = 0; 3664 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3665 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3666 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3667 pdapi->pdapi_flags = 0; 3668 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3669 pdapi->pdapi_indication = error; 3670 pdapi->pdapi_stream = (val >> 16); 3671 pdapi->pdapi_seq = (val & 0x0000ffff); 3672 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3673 3674 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3675 SCTP_BUF_NEXT(m_notify) = NULL; 3676 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3677 0, 0, stcb->asoc.context, 0, 0, 0, 3678 m_notify); 3679 if (control == NULL) { 3680 /* no memory */ 3681 sctp_m_freem(m_notify); 3682 return; 3683 } 3684 control->length = SCTP_BUF_LEN(m_notify); 3685 control->spec_flags = M_NOTIFICATION; 3686 /* not that we need this */ 3687 control->tail_mbuf = m_notify; 3688 sb = &stcb->sctp_socket->so_rcv; 3689 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3690 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3691 } 3692 sctp_sballoc(stcb, sb, m_notify); 3693 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3694 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3695 } 3696 control->end_added = 1; 3697 if (stcb->asoc.control_pdapi) 3698 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3699 else { 3700 /* we really should not see this case */ 3701 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3702 } 3703 if (stcb->sctp_ep && stcb->sctp_socket) { 3704 /* This should always be the case */ 3705 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3706 } 3707 } 3708 3709 static void 3710 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3711 { 3712 struct mbuf *m_notify; 3713 struct sctp_shutdown_event *sse; 3714 struct sctp_queued_to_read *control; 3715 3716 /* 3717 * For TCP model AND UDP connected sockets we will send an error up 3718 * when an SHUTDOWN completes 3719 */ 3720 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3721 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3722 /* mark socket closed for read/write and wakeup! 
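socantsendmore() marks the send side closed, so further writes fail immediately and any thread blocked on the send buffer is woken up.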
*/ 3723 socantsendmore(stcb->sctp_socket); 3724 } 3725 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3726 /* event not enabled */ 3727 return; 3728 } 3729 3730 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3731 if (m_notify == NULL) 3732 /* no space left */ 3733 return; 3734 sse = mtod(m_notify, struct sctp_shutdown_event *); 3735 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3736 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3737 sse->sse_flags = 0; 3738 sse->sse_length = sizeof(struct sctp_shutdown_event); 3739 sse->sse_assoc_id = sctp_get_associd(stcb); 3740 3741 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3742 SCTP_BUF_NEXT(m_notify) = NULL; 3743 3744 /* append to socket */ 3745 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3746 0, 0, stcb->asoc.context, 0, 0, 0, 3747 m_notify); 3748 if (control == NULL) { 3749 /* no memory */ 3750 sctp_m_freem(m_notify); 3751 return; 3752 } 3753 control->length = SCTP_BUF_LEN(m_notify); 3754 control->spec_flags = M_NOTIFICATION; 3755 /* not that we need this */ 3756 control->tail_mbuf = m_notify; 3757 sctp_add_to_readq(stcb->sctp_ep, stcb, 3758 control, 3759 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3760 } 3761 3762 static void 3763 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3764 int so_locked) 3765 { 3766 struct mbuf *m_notify; 3767 struct sctp_sender_dry_event *event; 3768 struct sctp_queued_to_read *control; 3769 3770 if ((stcb == NULL) || 3771 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3772 /* event not enabled */ 3773 return; 3774 } 3775 3776 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3777 if (m_notify == NULL) { 3778 /* no space left */ 3779 return; 3780 } 3781 SCTP_BUF_LEN(m_notify) = 0; 3782 event = mtod(m_notify, struct sctp_sender_dry_event *); 3783 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3784 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3785 event->sender_dry_flags = 0; 3786 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3787 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3788 3789 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3790 SCTP_BUF_NEXT(m_notify) = NULL; 3791 3792 /* append to socket */ 3793 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3794 0, 0, stcb->asoc.context, 0, 0, 0, 3795 m_notify); 3796 if (control == NULL) { 3797 /* no memory */ 3798 sctp_m_freem(m_notify); 3799 return; 3800 } 3801 control->length = SCTP_BUF_LEN(m_notify); 3802 control->spec_flags = M_NOTIFICATION; 3803 /* not that we need this */ 3804 control->tail_mbuf = m_notify; 3805 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3806 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3807 } 3808 3809 void 3810 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3811 { 3812 struct mbuf *m_notify; 3813 struct sctp_queued_to_read *control; 3814 struct sctp_stream_change_event *stradd; 3815 3816 if ((stcb == NULL) || 3817 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3818 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3819 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3820 /* If the socket is gone we are out of here. 
*/ 3821 return; 3822 } 3823 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT)) { 3824 /* event not enabled */ 3825 return; 3826 } 3827 3828 if ((stcb->asoc.peer_req_out) && flag) { 3829 /* Peer made the request, don't tell the local user */ 3830 stcb->asoc.peer_req_out = 0; 3831 return; 3832 } 3833 stcb->asoc.peer_req_out = 0; 3834 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3835 if (m_notify == NULL) 3836 /* no space left */ 3837 return; 3838 SCTP_BUF_LEN(m_notify) = 0; 3839 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3840 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3841 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3842 stradd->strchange_flags = flag; 3843 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3844 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3845 stradd->strchange_instrms = numberin; 3846 stradd->strchange_outstrms = numberout; 3847 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3848 SCTP_BUF_NEXT(m_notify) = NULL; 3849 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3850 /* no space */ 3851 sctp_m_freem(m_notify); 3852 return; 3853 } 3854 /* append to socket */ 3855 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3856 0, 0, stcb->asoc.context, 0, 0, 0, 3857 m_notify); 3858 if (control == NULL) { 3859 /* no memory */ 3860 sctp_m_freem(m_notify); 3861 return; 3862 } 3863 control->length = SCTP_BUF_LEN(m_notify); 3864 control->spec_flags = M_NOTIFICATION; 3865 /* not that we need this */ 3866 control->tail_mbuf = m_notify; 3867 sctp_add_to_readq(stcb->sctp_ep, stcb, 3868 control, 3869 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3870 } 3871 3872 void 3873 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3874 { 3875 struct mbuf *m_notify; 3876 struct sctp_queued_to_read *control; 3877 struct sctp_assoc_reset_event *strasoc; 3878 3879 if ((stcb == NULL) || 3880 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3881 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3882 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3883 /* If the socket is gone we are out of here. 
*/ 3884 return; 3885 } 3886 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT)) { 3887 /* event not enabled */ 3888 return; 3889 } 3890 3891 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3892 if (m_notify == NULL) 3893 /* no space left */ 3894 return; 3895 SCTP_BUF_LEN(m_notify) = 0; 3896 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3897 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3898 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3899 strasoc->assocreset_flags = flag; 3900 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3901 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3902 strasoc->assocreset_local_tsn = sending_tsn; 3903 strasoc->assocreset_remote_tsn = recv_tsn; 3904 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3905 SCTP_BUF_NEXT(m_notify) = NULL; 3906 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3907 /* no space */ 3908 sctp_m_freem(m_notify); 3909 return; 3910 } 3911 /* append to socket */ 3912 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3913 0, 0, stcb->asoc.context, 0, 0, 0, 3914 m_notify); 3915 if (control == NULL) { 3916 /* no memory */ 3917 sctp_m_freem(m_notify); 3918 return; 3919 } 3920 control->length = SCTP_BUF_LEN(m_notify); 3921 control->spec_flags = M_NOTIFICATION; 3922 /* not that we need this */ 3923 control->tail_mbuf = m_notify; 3924 sctp_add_to_readq(stcb->sctp_ep, stcb, 3925 control, 3926 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3927 } 3928 3929 static void 3930 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3931 int number_entries, uint16_t *list, int flag) 3932 { 3933 struct mbuf *m_notify; 3934 struct sctp_queued_to_read *control; 3935 struct sctp_stream_reset_event *strreset; 3936 int len; 3937 3938 if ((stcb == NULL) || 3939 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3940 /* event not enabled */ 3941 return; 3942 } 3943 3944 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3945 if (m_notify == NULL) 3946 /* no space left */ 3947 return; 3948 SCTP_BUF_LEN(m_notify) = 0; 3949 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3950 if (len > M_TRAILINGSPACE(m_notify)) { 3951 /* never enough room */ 3952 sctp_m_freem(m_notify); 3953 return; 3954 } 3955 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3956 memset(strreset, 0, len); 3957 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3958 strreset->strreset_flags = flag; 3959 strreset->strreset_length = len; 3960 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3961 if (number_entries) { 3962 int i; 3963 3964 for (i = 0; i < number_entries; i++) { 3965 strreset->strreset_stream_list[i] = ntohs(list[i]); 3966 } 3967 } 3968 SCTP_BUF_LEN(m_notify) = len; 3969 SCTP_BUF_NEXT(m_notify) = NULL; 3970 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3971 /* no space */ 3972 sctp_m_freem(m_notify); 3973 return; 3974 } 3975 /* append to socket */ 3976 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3977 0, 0, stcb->asoc.context, 0, 0, 0, 3978 m_notify); 3979 if (control == NULL) { 3980 /* no memory */ 3981 sctp_m_freem(m_notify); 3982 return; 3983 } 3984 control->length = SCTP_BUF_LEN(m_notify); 3985 control->spec_flags = M_NOTIFICATION; 3986 /* not that we need this */ 3987 
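/* The notification fits in a single mbuf, so that mbuf is both head and tail of the chain. */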
control->tail_mbuf = m_notify; 3988 sctp_add_to_readq(stcb->sctp_ep, stcb, 3989 control, 3990 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3991 } 3992 3993 static void 3994 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3995 { 3996 struct mbuf *m_notify; 3997 struct sctp_remote_error *sre; 3998 struct sctp_queued_to_read *control; 3999 unsigned int notif_len; 4000 uint16_t chunk_len; 4001 4002 if ((stcb == NULL) || 4003 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 4004 return; 4005 } 4006 if (chunk != NULL) { 4007 chunk_len = ntohs(chunk->ch.chunk_length); 4008 /* 4009 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 4010 * contiguous. 4011 */ 4012 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 4013 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4014 } 4015 } else { 4016 chunk_len = 0; 4017 } 4018 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4019 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4020 if (m_notify == NULL) { 4021 /* Retry with smaller value. */ 4022 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4023 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4024 if (m_notify == NULL) { 4025 return; 4026 } 4027 } 4028 SCTP_BUF_NEXT(m_notify) = NULL; 4029 sre = mtod(m_notify, struct sctp_remote_error *); 4030 memset(sre, 0, notif_len); 4031 sre->sre_type = SCTP_REMOTE_ERROR; 4032 sre->sre_flags = 0; 4033 sre->sre_length = sizeof(struct sctp_remote_error); 4034 sre->sre_error = error; 4035 sre->sre_assoc_id = sctp_get_associd(stcb); 4036 if (notif_len > sizeof(struct sctp_remote_error)) { 4037 memcpy(sre->sre_data, chunk, chunk_len); 4038 sre->sre_length += chunk_len; 4039 } 4040 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4041 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4042 0, 0, stcb->asoc.context, 0, 0, 0, 4043 m_notify); 4044 if (control != NULL) { 4045 control->length = SCTP_BUF_LEN(m_notify); 4046 control->spec_flags = M_NOTIFICATION; 4047 /* not that we need this */ 4048 control->tail_mbuf = m_notify; 4049 sctp_add_to_readq(stcb->sctp_ep, stcb, 4050 control, 4051 &stcb->sctp_socket->so_rcv, 1, 4052 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4053 } else { 4054 sctp_m_freem(m_notify); 4055 } 4056 } 4057 4058 void 4059 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4060 uint32_t error, void *data, int so_locked) 4061 { 4062 if ((stcb == NULL) || 4063 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4064 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4065 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4066 /* If the socket is gone we are out of here */ 4067 return; 4068 } 4069 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4070 return; 4071 } 4072 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4073 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4074 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4075 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4076 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4077 /* Don't report these in front states */ 4078 return; 4079 } 4080 } 4081 switch (notification) { 4082 case SCTP_NOTIFY_ASSOC_UP: 4083 if (stcb->asoc.assoc_up_sent == 0) { 4084 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, false, false, so_locked); 4085 stcb->asoc.assoc_up_sent = 1; 4086 } 4087 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4088 
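/* Only deliver the adaptation layer indication if the peer supplied one (adaptation_needed) and it has not already been sent (adaptation_sent). */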
sctp_notify_adaptation_layer(stcb); 4089 } 4090 if (stcb->asoc.auth_supported == 0) { 4091 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4092 NULL, so_locked); 4093 } 4094 break; 4095 case SCTP_NOTIFY_ASSOC_DOWN: 4096 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, false, false, so_locked); 4097 break; 4098 case SCTP_NOTIFY_INTERFACE_DOWN: 4099 { 4100 struct sctp_nets *net; 4101 4102 net = (struct sctp_nets *)data; 4103 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4104 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4105 break; 4106 } 4107 case SCTP_NOTIFY_INTERFACE_UP: 4108 { 4109 struct sctp_nets *net; 4110 4111 net = (struct sctp_nets *)data; 4112 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4113 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4114 break; 4115 } 4116 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4117 { 4118 struct sctp_nets *net; 4119 4120 net = (struct sctp_nets *)data; 4121 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4122 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4123 break; 4124 } 4125 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4126 sctp_notify_send_failed2(stcb, error, 4127 (struct sctp_stream_queue_pending *)data, so_locked); 4128 break; 4129 case SCTP_NOTIFY_SENT_DG_FAIL: 4130 sctp_notify_send_failed(stcb, 1, error, 4131 (struct sctp_tmit_chunk *)data, so_locked); 4132 break; 4133 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4134 sctp_notify_send_failed(stcb, 0, error, 4135 (struct sctp_tmit_chunk *)data, so_locked); 4136 break; 4137 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4138 { 4139 uint32_t val; 4140 4141 val = *((uint32_t *)data); 4142 4143 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4144 break; 4145 } 4146 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4147 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4148 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4149 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, false, so_locked); 4150 } else { 4151 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, false, so_locked); 4152 } 4153 break; 4154 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4155 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4156 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4157 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, true, false, so_locked); 4158 } else { 4159 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, true, false, so_locked); 4160 } 4161 break; 4162 case SCTP_NOTIFY_ASSOC_TIMEDOUT: 4163 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4164 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4165 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, true, so_locked); 4166 } else { 4167 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, true, so_locked); 4168 } 4169 break; 4170 case SCTP_NOTIFY_ASSOC_RESTART: 4171 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, false, false, so_locked); 4172 if (stcb->asoc.auth_supported == 0) { 4173 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4174 NULL, so_locked); 4175 } 4176 break; 4177 case SCTP_NOTIFY_STR_RESET_SEND: 4178 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4179 break; 4180 case SCTP_NOTIFY_STR_RESET_RECV: 4181 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4182 break; 4183 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4184 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4185 
(SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4186 break; 4187 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4188 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4189 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4190 break; 4191 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4192 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4193 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4194 break; 4195 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4196 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4197 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4198 break; 4199 case SCTP_NOTIFY_ASCONF_ADD_IP: 4200 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4201 error, so_locked); 4202 break; 4203 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4204 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4205 error, so_locked); 4206 break; 4207 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4208 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4209 error, so_locked); 4210 break; 4211 case SCTP_NOTIFY_PEER_SHUTDOWN: 4212 sctp_notify_shutdown_event(stcb); 4213 break; 4214 case SCTP_NOTIFY_AUTH_NEW_KEY: 4215 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4216 (uint16_t)(uintptr_t)data, 4217 so_locked); 4218 break; 4219 case SCTP_NOTIFY_AUTH_FREE_KEY: 4220 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4221 (uint16_t)(uintptr_t)data, 4222 so_locked); 4223 break; 4224 case SCTP_NOTIFY_NO_PEER_AUTH: 4225 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4226 (uint16_t)(uintptr_t)data, 4227 so_locked); 4228 break; 4229 case SCTP_NOTIFY_SENDER_DRY: 4230 sctp_notify_sender_dry_event(stcb, so_locked); 4231 break; 4232 case SCTP_NOTIFY_REMOTE_ERROR: 4233 sctp_notify_remote_error(stcb, error, data); 4234 break; 4235 default: 4236 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4237 __func__, notification, notification); 4238 break; 4239 } /* end switch */ 4240 } 4241 4242 void 4243 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4244 { 4245 struct sctp_association *asoc; 4246 struct sctp_stream_out *outs; 4247 struct sctp_tmit_chunk *chk, *nchk; 4248 struct sctp_stream_queue_pending *sp, *nsp; 4249 int i; 4250 4251 if (stcb == NULL) { 4252 return; 4253 } 4254 asoc = &stcb->asoc; 4255 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4256 /* already being freed */ 4257 return; 4258 } 4259 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4260 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4261 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4262 return; 4263 } 4264 /* now through all the gunk freeing chunks */ 4265 /* sent queue SHOULD be empty */ 4266 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4267 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4268 asoc->sent_queue_cnt--; 4269 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4270 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4271 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4272 #ifdef INVARIANTS 4273 } else { 4274 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4275 #endif 4276 } 4277 } 4278 if (chk->data != NULL) { 4279 sctp_free_bufspace(stcb, asoc, chk, 1); 4280 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4281 error, chk, so_locked); 4282 if (chk->data) { 4283 sctp_m_freem(chk->data); 4284 chk->data = NULL; 4285 } 4286 } 4287 sctp_free_a_chunk(stcb, chk, so_locked); 4288 /* sa_ignore FREED_MEMORY */ 4289 } 4290 /* pending send queue 
SHOULD be empty */ 4291 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4292 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4293 asoc->send_queue_cnt--; 4294 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4295 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4296 #ifdef INVARIANTS 4297 } else { 4298 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4299 #endif 4300 } 4301 if (chk->data != NULL) { 4302 sctp_free_bufspace(stcb, asoc, chk, 1); 4303 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4304 error, chk, so_locked); 4305 if (chk->data) { 4306 sctp_m_freem(chk->data); 4307 chk->data = NULL; 4308 } 4309 } 4310 sctp_free_a_chunk(stcb, chk, so_locked); 4311 /* sa_ignore FREED_MEMORY */ 4312 } 4313 for (i = 0; i < asoc->streamoutcnt; i++) { 4314 /* For each stream */ 4315 outs = &asoc->strmout[i]; 4316 /* clean up any sends there */ 4317 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4318 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4319 TAILQ_REMOVE(&outs->outqueue, sp, next); 4320 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp); 4321 sctp_free_spbufspace(stcb, asoc, sp); 4322 if (sp->data) { 4323 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4324 error, (void *)sp, so_locked); 4325 if (sp->data) { 4326 sctp_m_freem(sp->data); 4327 sp->data = NULL; 4328 sp->tail_mbuf = NULL; 4329 sp->length = 0; 4330 } 4331 } 4332 if (sp->net) { 4333 sctp_free_remote_addr(sp->net); 4334 sp->net = NULL; 4335 } 4336 /* Free the chunk */ 4337 sctp_free_a_strmoq(stcb, sp, so_locked); 4338 /* sa_ignore FREED_MEMORY */ 4339 } 4340 } 4341 } 4342 4343 void 4344 sctp_abort_notification(struct sctp_tcb *stcb, bool from_peer, bool timeout, 4345 uint16_t error, struct sctp_abort_chunk *abort, 4346 int so_locked) 4347 { 4348 if (stcb == NULL) { 4349 return; 4350 } 4351 SCTP_TCB_LOCK_ASSERT(stcb); 4352 4353 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4354 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4355 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4356 sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_WAS_ABORTED); 4357 } 4358 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4359 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4360 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4361 return; 4362 } 4363 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4364 /* Tell them we lost the asoc */ 4365 sctp_report_all_outbound(stcb, error, so_locked); 4366 if (from_peer) { 4367 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4368 } else { 4369 if (timeout) { 4370 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_TIMEDOUT, stcb, error, abort, so_locked); 4371 } else { 4372 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4373 } 4374 } 4375 } 4376 4377 void 4378 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4379 struct mbuf *m, int iphlen, 4380 struct sockaddr *src, struct sockaddr *dst, 4381 struct sctphdr *sh, struct mbuf *op_err, 4382 uint8_t mflowtype, uint32_t mflowid, 4383 uint32_t vrf_id, uint16_t port) 4384 { 4385 struct sctp_gen_error_cause *cause; 4386 uint32_t vtag; 4387 uint16_t cause_code; 4388 4389 if (stcb != NULL) { 4390 vtag = stcb->asoc.peer_vtag; 4391 vrf_id = stcb->asoc.vrf_id; 4392 if (op_err != NULL) { 4393 /* Read the cause code from the error cause. 
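The operation error starts with a struct sctp_gen_error_cause header whose code field is in network byte order, hence the ntohs() below.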
*/ 4394 cause = mtod(op_err, struct sctp_gen_error_cause *); 4395 cause_code = ntohs(cause->code); 4396 } else { 4397 cause_code = 0; 4398 } 4399 } else { 4400 vtag = 0; 4401 } 4402 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4403 mflowtype, mflowid, inp->fibnum, 4404 vrf_id, port); 4405 if (stcb != NULL) { 4406 /* We have a TCB to abort, send notification too */ 4407 sctp_abort_notification(stcb, false, false, cause_code, NULL, SCTP_SO_NOT_LOCKED); 4408 /* Ok, now lets free it */ 4409 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4410 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4411 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4412 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4413 } 4414 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4415 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4416 } 4417 } 4418 #ifdef SCTP_ASOCLOG_OF_TSNS 4419 void 4420 sctp_print_out_track_log(struct sctp_tcb *stcb) 4421 { 4422 #ifdef NOSIY_PRINTS 4423 int i; 4424 4425 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4426 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4427 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4428 SCTP_PRINTF("None rcvd\n"); 4429 goto none_in; 4430 } 4431 if (stcb->asoc.tsn_in_wrapped) { 4432 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4433 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4434 stcb->asoc.in_tsnlog[i].tsn, 4435 stcb->asoc.in_tsnlog[i].strm, 4436 stcb->asoc.in_tsnlog[i].seq, 4437 stcb->asoc.in_tsnlog[i].flgs, 4438 stcb->asoc.in_tsnlog[i].sz); 4439 } 4440 } 4441 if (stcb->asoc.tsn_in_at) { 4442 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4443 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4444 stcb->asoc.in_tsnlog[i].tsn, 4445 stcb->asoc.in_tsnlog[i].strm, 4446 stcb->asoc.in_tsnlog[i].seq, 4447 stcb->asoc.in_tsnlog[i].flgs, 4448 stcb->asoc.in_tsnlog[i].sz); 4449 } 4450 } 4451 none_in: 4452 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4453 if ((stcb->asoc.tsn_out_at == 0) && 4454 (stcb->asoc.tsn_out_wrapped == 0)) { 4455 SCTP_PRINTF("None sent\n"); 4456 } 4457 if (stcb->asoc.tsn_out_wrapped) { 4458 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4459 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4460 stcb->asoc.out_tsnlog[i].tsn, 4461 stcb->asoc.out_tsnlog[i].strm, 4462 stcb->asoc.out_tsnlog[i].seq, 4463 stcb->asoc.out_tsnlog[i].flgs, 4464 stcb->asoc.out_tsnlog[i].sz); 4465 } 4466 } 4467 if (stcb->asoc.tsn_out_at) { 4468 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4469 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4470 stcb->asoc.out_tsnlog[i].tsn, 4471 stcb->asoc.out_tsnlog[i].strm, 4472 stcb->asoc.out_tsnlog[i].seq, 4473 stcb->asoc.out_tsnlog[i].flgs, 4474 stcb->asoc.out_tsnlog[i].sz); 4475 } 4476 } 4477 #endif 4478 } 4479 #endif 4480 4481 void 4482 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4483 struct mbuf *op_err, bool timedout, int so_locked) 4484 { 4485 struct sctp_gen_error_cause *cause; 4486 uint16_t cause_code; 4487 4488 if (stcb == NULL) { 4489 /* Got to have a TCB */ 4490 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4491 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4492 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4493 SCTP_CALLED_DIRECTLY_NOCMPSET); 4494 } 4495 } 4496 return; 4497 } 4498 if (op_err != NULL) { 4499 /* Read the cause code from the error cause. 
*/ 4500 cause = mtod(op_err, struct sctp_gen_error_cause *); 4501 cause_code = ntohs(cause->code); 4502 } else { 4503 cause_code = 0; 4504 } 4505 /* notify the peer */ 4506 sctp_send_abort_tcb(stcb, op_err, so_locked); 4507 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4508 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4509 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4510 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4511 } 4512 /* notify the ulp */ 4513 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4514 sctp_abort_notification(stcb, false, timedout, cause_code, NULL, so_locked); 4515 } 4516 /* now free the asoc */ 4517 #ifdef SCTP_ASOCLOG_OF_TSNS 4518 sctp_print_out_track_log(stcb); 4519 #endif 4520 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4521 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4522 } 4523 4524 void 4525 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4526 struct sockaddr *src, struct sockaddr *dst, 4527 struct sctphdr *sh, struct sctp_inpcb *inp, 4528 struct mbuf *cause, 4529 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4530 uint32_t vrf_id, uint16_t port) 4531 { 4532 struct sctp_chunkhdr *ch, chunk_buf; 4533 unsigned int chk_length; 4534 int contains_init_chunk; 4535 4536 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4537 /* Generate a TO address for future reference */ 4538 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4539 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4540 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4541 SCTP_CALLED_DIRECTLY_NOCMPSET); 4542 } 4543 } 4544 contains_init_chunk = 0; 4545 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4546 sizeof(*ch), (uint8_t *)&chunk_buf); 4547 while (ch != NULL) { 4548 chk_length = ntohs(ch->chunk_length); 4549 if (chk_length < sizeof(*ch)) { 4550 /* break to abort land */ 4551 break; 4552 } 4553 switch (ch->chunk_type) { 4554 case SCTP_INIT: 4555 contains_init_chunk = 1; 4556 break; 4557 case SCTP_PACKET_DROPPED: 4558 /* we don't respond to pkt-dropped */ 4559 return; 4560 case SCTP_ABORT_ASSOCIATION: 4561 /* we don't respond with an ABORT to an ABORT */ 4562 return; 4563 case SCTP_SHUTDOWN_COMPLETE: 4564 /* 4565 * we ignore it since we are not waiting for it and 4566 * peer is gone 4567 */ 4568 return; 4569 case SCTP_SHUTDOWN_ACK: 4570 sctp_send_shutdown_complete2(src, dst, sh, 4571 mflowtype, mflowid, fibnum, 4572 vrf_id, port); 4573 return; 4574 default: 4575 break; 4576 } 4577 offset += SCTP_SIZE32(chk_length); 4578 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4579 sizeof(*ch), (uint8_t *)&chunk_buf); 4580 } 4581 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4582 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4583 (contains_init_chunk == 0))) { 4584 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4585 mflowtype, mflowid, fibnum, 4586 vrf_id, port); 4587 } 4588 } 4589 4590 /* 4591 * check the inbound datagram to make sure there is not an abort inside it, 4592 * if there is return 1, else return 0. 
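* While scanning, the routine also picks up the initiate tag from any INIT or INIT-ACK chunk, presumably so the caller can use it as the verification tag for a reply.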
4593 */ 4594 int 4595 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtag) 4596 { 4597 struct sctp_chunkhdr *ch; 4598 struct sctp_init_chunk *init_chk, chunk_buf; 4599 int offset; 4600 unsigned int chk_length; 4601 4602 offset = iphlen + sizeof(struct sctphdr); 4603 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4604 (uint8_t *)&chunk_buf); 4605 while (ch != NULL) { 4606 chk_length = ntohs(ch->chunk_length); 4607 if (chk_length < sizeof(*ch)) { 4608 /* packet is probably corrupt */ 4609 break; 4610 } 4611 /* we seem to be ok, is it an abort? */ 4612 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4613 /* yep, tell them */ 4614 return (1); 4615 } 4616 if ((ch->chunk_type == SCTP_INITIATION) || 4617 (ch->chunk_type == SCTP_INITIATION_ACK)) { 4618 /* need to update the Vtag */ 4619 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4620 offset, sizeof(struct sctp_init_chunk), (uint8_t *)&chunk_buf); 4621 if (init_chk != NULL) { 4622 *vtag = ntohl(init_chk->init.initiate_tag); 4623 } 4624 } 4625 /* Nope, move to the next chunk */ 4626 offset += SCTP_SIZE32(chk_length); 4627 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4628 sizeof(*ch), (uint8_t *)&chunk_buf); 4629 } 4630 return (0); 4631 } 4632 4633 /* 4634 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4635 * set (i.e. it's 0) so, create this function to compare link local scopes 4636 */ 4637 #ifdef INET6 4638 uint32_t 4639 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4640 { 4641 struct sockaddr_in6 a, b; 4642 4643 /* save copies */ 4644 a = *addr1; 4645 b = *addr2; 4646 4647 if (a.sin6_scope_id == 0) 4648 if (sa6_recoverscope(&a)) { 4649 /* can't get scope, so can't match */ 4650 return (0); 4651 } 4652 if (b.sin6_scope_id == 0) 4653 if (sa6_recoverscope(&b)) { 4654 /* can't get scope, so can't match */ 4655 return (0); 4656 } 4657 if (a.sin6_scope_id != b.sin6_scope_id) 4658 return (0); 4659 4660 return (1); 4661 } 4662 4663 /* 4664 * returns a sockaddr_in6 with embedded scope recovered and removed 4665 */ 4666 struct sockaddr_in6 * 4667 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4668 { 4669 /* check and strip embedded scope junk */ 4670 if (addr->sin6_family == AF_INET6) { 4671 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4672 if (addr->sin6_scope_id == 0) { 4673 *store = *addr; 4674 if (!sa6_recoverscope(store)) { 4675 /* use the recovered scope */ 4676 addr = store; 4677 } 4678 } else { 4679 /* else, return the original "to" addr */ 4680 in6_clearscope(&addr->sin6_addr); 4681 } 4682 } 4683 } 4684 return (addr); 4685 } 4686 #endif 4687 4688 /* 4689 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4690 * if same, 0 if not 4691 */ 4692 int 4693 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4694 { 4695 4696 /* must be valid */ 4697 if (sa1 == NULL || sa2 == NULL) 4698 return (0); 4699 4700 /* must be the same family */ 4701 if (sa1->sa_family != sa2->sa_family) 4702 return (0); 4703 4704 switch (sa1->sa_family) { 4705 #ifdef INET6 4706 case AF_INET6: 4707 { 4708 /* IPv6 addresses */ 4709 struct sockaddr_in6 *sin6_1, *sin6_2; 4710 4711 sin6_1 = (struct sockaddr_in6 *)sa1; 4712 sin6_2 = (struct sockaddr_in6 *)sa2; 4713 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4714 sin6_2)); 4715 } 4716 #endif 4717 #ifdef INET 4718 case AF_INET: 4719 { 4720 /* IPv4 addresses */ 4721 struct sockaddr_in *sin_1, *sin_2; 4722 4723 sin_1 = (struct sockaddr_in *)sa1; 4724 sin_2 = (struct sockaddr_in *)sa2; 4725 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4726 } 4727 #endif 4728 default: 4729 /* we don't do these... */ 4730 return (0); 4731 } 4732 } 4733 4734 void 4735 sctp_print_address(struct sockaddr *sa) 4736 { 4737 #ifdef INET6 4738 char ip6buf[INET6_ADDRSTRLEN]; 4739 #endif 4740 4741 switch (sa->sa_family) { 4742 #ifdef INET6 4743 case AF_INET6: 4744 { 4745 struct sockaddr_in6 *sin6; 4746 4747 sin6 = (struct sockaddr_in6 *)sa; 4748 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4749 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4750 ntohs(sin6->sin6_port), 4751 sin6->sin6_scope_id); 4752 break; 4753 } 4754 #endif 4755 #ifdef INET 4756 case AF_INET: 4757 { 4758 struct sockaddr_in *sin; 4759 unsigned char *p; 4760 4761 sin = (struct sockaddr_in *)sa; 4762 p = (unsigned char *)&sin->sin_addr; 4763 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4764 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4765 break; 4766 } 4767 #endif 4768 default: 4769 SCTP_PRINTF("?\n"); 4770 break; 4771 } 4772 } 4773 4774 void 4775 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4776 struct sctp_inpcb *new_inp, 4777 struct sctp_tcb *stcb, 4778 int waitflags) 4779 { 4780 /* 4781 * go through our old INP and pull off any control structures that 4782 * belong to stcb and move then to the new inp. 4783 */ 4784 struct socket *old_so, *new_so; 4785 struct sctp_queued_to_read *control, *nctl; 4786 struct sctp_readhead tmp_queue; 4787 struct mbuf *m; 4788 int error = 0; 4789 4790 old_so = old_inp->sctp_socket; 4791 new_so = new_inp->sctp_socket; 4792 TAILQ_INIT(&tmp_queue); 4793 error = SOCK_IO_RECV_LOCK(old_so, waitflags); 4794 if (error) { 4795 /* 4796 * Gak, can't get I/O lock, we have a problem. data will be 4797 * left stranded.. and we don't dare look at it since the 4798 * other thread may be reading something. Oh well, its a 4799 * screwed up app that does a peeloff OR a accept while 4800 * reading from the main socket... actually its only the 4801 * peeloff() case, since I think read will fail on a 4802 * listening socket.. 4803 */ 4804 return; 4805 } 4806 /* lock the socket buffers */ 4807 SCTP_INP_READ_LOCK(old_inp); 4808 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4809 /* Pull off all for out target stcb */ 4810 if (control->stcb == stcb) { 4811 /* remove it we want it */ 4812 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4813 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4814 m = control->data; 4815 while (m) { 4816 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4817 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4818 } 4819 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4820 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4821 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4822 } 4823 m = SCTP_BUF_NEXT(m); 4824 } 4825 } 4826 } 4827 SCTP_INP_READ_UNLOCK(old_inp); 4828 /* Remove the recv-lock on the old socket */ 4829 SOCK_IO_RECV_UNLOCK(old_so); 4830 /* Now we move them over to the new socket buffer */ 4831 SCTP_INP_READ_LOCK(new_inp); 4832 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4833 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4834 m = control->data; 4835 while (m) { 4836 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4837 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4838 } 4839 sctp_sballoc(stcb, &new_so->so_rcv, m); 4840 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4841 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4842 } 4843 m = SCTP_BUF_NEXT(m); 4844 } 4845 } 4846 SCTP_INP_READ_UNLOCK(new_inp); 4847 } 4848 4849 void 4850 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4851 struct sctp_tcb *stcb, 4852 int so_locked 4853 SCTP_UNUSED 4854 ) 4855 { 4856 if ((inp != NULL) && 4857 (inp->sctp_socket != NULL) && 4858 (((inp->sctp_flags & (SCTP_PCB_FLAGS_TCPTYPE | SCTP_PCB_FLAGS_IN_TCPPOOL)) == 0) || 4859 !SCTP_IS_LISTENING(inp))) { 4860 sctp_sorwakeup(inp, inp->sctp_socket); 4861 } 4862 } 4863 4864 void 4865 sctp_add_to_readq(struct sctp_inpcb *inp, 4866 struct sctp_tcb *stcb, 4867 struct sctp_queued_to_read *control, 4868 struct sockbuf *sb, 4869 int end, 4870 int inp_read_lock_held, 4871 int so_locked) 4872 { 4873 /* 4874 * Here we must place the control on the end of the socket read 4875 * queue AND increment sb_cc so that select will work properly on 4876 * read. 4877 */ 4878 struct mbuf *m, *prev = NULL; 4879 4880 if (inp == NULL) { 4881 /* Gak, TSNH!! */ 4882 #ifdef INVARIANTS 4883 panic("Gak, inp NULL on add_to_readq"); 4884 #endif 4885 return; 4886 } 4887 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4888 SCTP_INP_READ_LOCK(inp); 4889 } 4890 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4891 if (!control->on_strm_q) { 4892 sctp_free_remote_addr(control->whoFrom); 4893 if (control->data) { 4894 sctp_m_freem(control->data); 4895 control->data = NULL; 4896 } 4897 sctp_free_a_readq(stcb, control); 4898 } 4899 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4900 SCTP_INP_READ_UNLOCK(inp); 4901 } 4902 return; 4903 } 4904 if ((control->spec_flags & M_NOTIFICATION) == 0) { 4905 atomic_add_int(&inp->total_recvs, 1); 4906 if (!control->do_not_ref_stcb) { 4907 atomic_add_int(&stcb->total_recvs, 1); 4908 } 4909 } 4910 m = control->data; 4911 control->held_length = 0; 4912 control->length = 0; 4913 while (m != NULL) { 4914 if (SCTP_BUF_LEN(m) == 0) { 4915 /* Skip mbufs with NO length */ 4916 if (prev == NULL) { 4917 /* First one */ 4918 control->data = sctp_m_free(m); 4919 m = control->data; 4920 } else { 4921 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4922 m = SCTP_BUF_NEXT(prev); 4923 } 4924 if (m == NULL) { 4925 control->tail_mbuf = prev; 4926 } 4927 continue; 4928 } 4929 prev = m; 4930 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4931 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4932 } 4933 sctp_sballoc(stcb, sb, m); 4934 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4935 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4936 } 4937 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4938 m = SCTP_BUF_NEXT(m); 4939 } 4940 if (prev != NULL) { 4941 control->tail_mbuf = prev; 4942 } else { 4943 /* Everything got collapsed out?? */ 4944 if (!control->on_strm_q) { 4945 sctp_free_remote_addr(control->whoFrom); 4946 sctp_free_a_readq(stcb, control); 4947 } 4948 if (inp_read_lock_held == 0) 4949 SCTP_INP_READ_UNLOCK(inp); 4950 return; 4951 } 4952 if (end) { 4953 control->end_added = 1; 4954 } 4955 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4956 control->on_read_q = 1; 4957 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4958 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4959 } 4960 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4961 SCTP_INP_READ_UNLOCK(inp); 4962 } 4963 } 4964 4965 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4966 *************ALTERNATE ROUTING CODE 4967 */ 4968 4969 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4970 *************ALTERNATE ROUTING CODE 4971 */ 4972 4973 struct mbuf * 4974 sctp_generate_cause(uint16_t code, char *info) 4975 { 4976 struct mbuf *m; 4977 struct sctp_gen_error_cause *cause; 4978 size_t info_len; 4979 uint16_t len; 4980 4981 if ((code == 0) || (info == NULL)) { 4982 return (NULL); 4983 } 4984 info_len = strlen(info); 4985 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4986 return (NULL); 4987 } 4988 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4989 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4990 if (m != NULL) { 4991 SCTP_BUF_LEN(m) = len; 4992 cause = mtod(m, struct sctp_gen_error_cause *); 4993 cause->code = htons(code); 4994 cause->length = htons(len); 4995 memcpy(cause->info, info, info_len); 4996 } 4997 return (m); 4998 } 4999 5000 struct mbuf * 5001 sctp_generate_no_user_data_cause(uint32_t tsn) 5002 { 5003 struct mbuf *m; 5004 struct sctp_error_no_user_data *no_user_data_cause; 5005 uint16_t len; 5006 5007 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 5008 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 5009 if (m != NULL) { 5010 SCTP_BUF_LEN(m) = len; 5011 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 5012 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 5013 no_user_data_cause->cause.length = htons(len); 5014 no_user_data_cause->tsn = htonl(tsn); 5015 } 5016 return (m); 5017 } 5018 5019 void 5020 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 5021 struct sctp_tmit_chunk *tp1, int chk_cnt) 5022 { 5023 if (tp1->data == NULL) { 5024 return; 5025 } 5026 atomic_subtract_int(&asoc->chunks_on_out_queue, chk_cnt); 5027 #ifdef SCTP_MBCNT_LOGGING 5028 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 5029 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 5030 asoc->total_output_queue_size, 5031 tp1->book_size, 5032 0, 5033 tp1->mbcnt); 5034 } 5035 #endif 5036 if (asoc->total_output_queue_size >= tp1->book_size) { 5037 atomic_subtract_int(&asoc->total_output_queue_size, tp1->book_size); 5038 } else { 5039 asoc->total_output_queue_size = 0; 5040 } 5041 if ((stcb->sctp_socket != NULL) && 5042 (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 5043 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 5044 
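/* Only 1-to-1 style (TCP model) sockets account queued data against so_snd, so the booked space is returned to the send buffer here as well. */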
SCTP_SB_DECR(&stcb->sctp_socket->so_snd, tp1->book_size); 5045 } 5046 } 5047 5048 int 5049 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5050 uint8_t sent, int so_locked) 5051 { 5052 struct sctp_stream_out *strq; 5053 struct sctp_tmit_chunk *chk = NULL, *tp2; 5054 struct sctp_stream_queue_pending *sp; 5055 uint32_t mid; 5056 uint16_t sid; 5057 uint8_t foundeom = 0; 5058 int ret_sz = 0; 5059 int notdone; 5060 int do_wakeup_routine = 0; 5061 5062 SCTP_TCB_LOCK_ASSERT(stcb); 5063 5064 sid = tp1->rec.data.sid; 5065 mid = tp1->rec.data.mid; 5066 if (sent || ((tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0)) { 5067 stcb->asoc.abandoned_sent[0]++; 5068 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5069 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5070 #if defined(SCTP_DETAILED_STR_STATS) 5071 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5072 #endif 5073 } else { 5074 stcb->asoc.abandoned_unsent[0]++; 5075 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5076 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5077 #if defined(SCTP_DETAILED_STR_STATS) 5078 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5079 #endif 5080 } 5081 do { 5082 ret_sz += tp1->book_size; 5083 if (tp1->data != NULL) { 5084 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5085 sctp_flight_size_decrease(tp1); 5086 sctp_total_flight_decrease(stcb, tp1); 5087 } 5088 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5089 stcb->asoc.peers_rwnd += tp1->send_size; 5090 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5091 if (sent) { 5092 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5093 } else { 5094 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5095 } 5096 if (tp1->data) { 5097 sctp_m_freem(tp1->data); 5098 tp1->data = NULL; 5099 } 5100 do_wakeup_routine = 1; 5101 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5102 stcb->asoc.sent_queue_cnt_removeable--; 5103 } 5104 } 5105 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5106 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5107 SCTP_DATA_NOT_FRAG) { 5108 /* not frag'ed we ae done */ 5109 notdone = 0; 5110 foundeom = 1; 5111 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5112 /* end of frag, we are done */ 5113 notdone = 0; 5114 foundeom = 1; 5115 } else { 5116 /* 5117 * Its a begin or middle piece, we must mark all of 5118 * it 5119 */ 5120 notdone = 1; 5121 tp1 = TAILQ_NEXT(tp1, sctp_next); 5122 } 5123 } while (tp1 && notdone); 5124 if (foundeom == 0) { 5125 /* 5126 * The multi-part message was scattered across the send and 5127 * sent queue. 5128 */ 5129 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5130 if ((tp1->rec.data.sid != sid) || 5131 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5132 break; 5133 } 5134 /* 5135 * save to chk in case we have some on stream out 5136 * queue. If so and we have an un-transmitted one we 5137 * don't have to fudge the TSN. 
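* The saved chunk already owns a valid TSN, so the LAST_FRAG mark can later be placed on it instead of allocating a new chunk and consuming a fresh TSN from sending_seq.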
5138 */ 5139 chk = tp1; 5140 ret_sz += tp1->book_size; 5141 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5142 if (sent) { 5143 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5144 } else { 5145 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5146 } 5147 if (tp1->data) { 5148 sctp_m_freem(tp1->data); 5149 tp1->data = NULL; 5150 } 5151 /* No flight involved here book the size to 0 */ 5152 tp1->book_size = 0; 5153 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5154 foundeom = 1; 5155 } 5156 do_wakeup_routine = 1; 5157 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5158 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5159 /* 5160 * on to the sent queue so we can wait for it to be 5161 * passed by. 5162 */ 5163 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5164 sctp_next); 5165 stcb->asoc.send_queue_cnt--; 5166 stcb->asoc.sent_queue_cnt++; 5167 } 5168 } 5169 if (foundeom == 0) { 5170 /* 5171 * Still no eom found. That means there is stuff left on the 5172 * stream out queue.. yuck. 5173 */ 5174 strq = &stcb->asoc.strmout[sid]; 5175 sp = TAILQ_FIRST(&strq->outqueue); 5176 if (sp != NULL) { 5177 sp->discard_rest = 1; 5178 /* 5179 * We may need to put a chunk on the queue that 5180 * holds the TSN that would have been sent with the 5181 * LAST bit. 5182 */ 5183 if (chk == NULL) { 5184 /* Yep, we have to */ 5185 sctp_alloc_a_chunk(stcb, chk); 5186 if (chk == NULL) { 5187 /* 5188 * we are hosed. All we can do is 5189 * nothing.. which will cause an 5190 * abort if the peer is paying 5191 * attention. 5192 */ 5193 goto oh_well; 5194 } 5195 memset(chk, 0, sizeof(*chk)); 5196 chk->rec.data.rcv_flags = 0; 5197 chk->sent = SCTP_FORWARD_TSN_SKIP; 5198 chk->asoc = &stcb->asoc; 5199 if (stcb->asoc.idata_supported == 0) { 5200 if (sp->sinfo_flags & SCTP_UNORDERED) { 5201 chk->rec.data.mid = 0; 5202 } else { 5203 chk->rec.data.mid = strq->next_mid_ordered; 5204 } 5205 } else { 5206 if (sp->sinfo_flags & SCTP_UNORDERED) { 5207 chk->rec.data.mid = strq->next_mid_unordered; 5208 } else { 5209 chk->rec.data.mid = strq->next_mid_ordered; 5210 } 5211 } 5212 chk->rec.data.sid = sp->sid; 5213 chk->rec.data.ppid = sp->ppid; 5214 chk->rec.data.context = sp->context; 5215 chk->flags = sp->act_flags; 5216 chk->whoTo = NULL; 5217 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5218 strq->chunks_on_queues++; 5219 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5220 stcb->asoc.sent_queue_cnt++; 5221 stcb->asoc.pr_sctp_cnt++; 5222 } 5223 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5224 if (sp->sinfo_flags & SCTP_UNORDERED) { 5225 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5226 } 5227 if (stcb->asoc.idata_supported == 0) { 5228 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5229 strq->next_mid_ordered++; 5230 } 5231 } else { 5232 if (sp->sinfo_flags & SCTP_UNORDERED) { 5233 strq->next_mid_unordered++; 5234 } else { 5235 strq->next_mid_ordered++; 5236 } 5237 } 5238 oh_well: 5239 if (sp->data) { 5240 /* 5241 * Pull any data to free up the SB and allow 5242 * sender to "add more" while we will throw 5243 * away :-) 5244 */ 5245 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5246 ret_sz += sp->length; 5247 do_wakeup_routine = 1; 5248 sp->some_taken = 1; 5249 sctp_m_freem(sp->data); 5250 sp->data = NULL; 5251 sp->tail_mbuf = NULL; 5252 sp->length = 0; 5253 } 5254 } 5255 } 5256 if (do_wakeup_routine) { 5257 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5258 } 5259 return (ret_sz); 5260 } 5261 5262 /* 5263 * checks to see if the given address, 
sa, is one that is currently known by 5264 * the kernel note: can't distinguish the same address on multiple interfaces 5265 * and doesn't handle multiple addresses with different zone/scope id's note: 5266 * ifa_ifwithaddr() compares the entire sockaddr struct 5267 */ 5268 struct sctp_ifa * 5269 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5270 int holds_lock) 5271 { 5272 struct sctp_laddr *laddr; 5273 5274 if (holds_lock == 0) { 5275 SCTP_INP_RLOCK(inp); 5276 } 5277 5278 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5279 if (laddr->ifa == NULL) 5280 continue; 5281 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5282 continue; 5283 #ifdef INET 5284 if (addr->sa_family == AF_INET) { 5285 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5286 laddr->ifa->address.sin.sin_addr.s_addr) { 5287 /* found him. */ 5288 break; 5289 } 5290 } 5291 #endif 5292 #ifdef INET6 5293 if (addr->sa_family == AF_INET6) { 5294 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5295 &laddr->ifa->address.sin6)) { 5296 /* found him. */ 5297 break; 5298 } 5299 } 5300 #endif 5301 } 5302 if (holds_lock == 0) { 5303 SCTP_INP_RUNLOCK(inp); 5304 } 5305 if (laddr != NULL) { 5306 return (laddr->ifa); 5307 } else { 5308 return (NULL); 5309 } 5310 } 5311 5312 uint32_t 5313 sctp_get_ifa_hash_val(struct sockaddr *addr) 5314 { 5315 switch (addr->sa_family) { 5316 #ifdef INET 5317 case AF_INET: 5318 { 5319 struct sockaddr_in *sin; 5320 5321 sin = (struct sockaddr_in *)addr; 5322 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5323 } 5324 #endif 5325 #ifdef INET6 5326 case AF_INET6: 5327 { 5328 struct sockaddr_in6 *sin6; 5329 uint32_t hash_of_addr; 5330 5331 sin6 = (struct sockaddr_in6 *)addr; 5332 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5333 sin6->sin6_addr.s6_addr32[1] + 5334 sin6->sin6_addr.s6_addr32[2] + 5335 sin6->sin6_addr.s6_addr32[3]); 5336 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5337 return (hash_of_addr); 5338 } 5339 #endif 5340 default: 5341 break; 5342 } 5343 return (0); 5344 } 5345 5346 struct sctp_ifa * 5347 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5348 { 5349 struct sctp_ifa *sctp_ifap; 5350 struct sctp_vrf *vrf; 5351 struct sctp_ifalist *hash_head; 5352 uint32_t hash_of_addr; 5353 5354 if (holds_lock == 0) { 5355 SCTP_IPI_ADDR_RLOCK(); 5356 } else { 5357 SCTP_IPI_ADDR_LOCK_ASSERT(); 5358 } 5359 5360 vrf = sctp_find_vrf(vrf_id); 5361 if (vrf == NULL) { 5362 if (holds_lock == 0) 5363 SCTP_IPI_ADDR_RUNLOCK(); 5364 return (NULL); 5365 } 5366 5367 hash_of_addr = sctp_get_ifa_hash_val(addr); 5368 5369 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5370 if (hash_head == NULL) { 5371 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5372 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5373 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5374 sctp_print_address(addr); 5375 SCTP_PRINTF("No such bucket for address\n"); 5376 if (holds_lock == 0) 5377 SCTP_IPI_ADDR_RUNLOCK(); 5378 5379 return (NULL); 5380 } 5381 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5382 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5383 continue; 5384 #ifdef INET 5385 if (addr->sa_family == AF_INET) { 5386 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5387 sctp_ifap->address.sin.sin_addr.s_addr) { 5388 /* found him. 
*/ 5389 break; 5390 } 5391 } 5392 #endif 5393 #ifdef INET6 5394 if (addr->sa_family == AF_INET6) { 5395 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5396 &sctp_ifap->address.sin6)) { 5397 /* found him. */ 5398 break; 5399 } 5400 } 5401 #endif 5402 } 5403 if (holds_lock == 0) 5404 SCTP_IPI_ADDR_RUNLOCK(); 5405 return (sctp_ifap); 5406 } 5407 5408 static void 5409 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5410 uint32_t rwnd_req) 5411 { 5412 /* User pulled some data, do we need a rwnd update? */ 5413 struct epoch_tracker et; 5414 int r_unlocked = 0; 5415 uint32_t dif, rwnd; 5416 struct socket *so = NULL; 5417 5418 if (stcb == NULL) 5419 return; 5420 5421 atomic_add_int(&stcb->asoc.refcnt, 1); 5422 5423 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5424 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5425 /* Pre-check If we are freeing no update */ 5426 goto no_lock; 5427 } 5428 SCTP_INP_INCR_REF(stcb->sctp_ep); 5429 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5430 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5431 goto out; 5432 } 5433 so = stcb->sctp_socket; 5434 if (so == NULL) { 5435 goto out; 5436 } 5437 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5438 /* Have you have freed enough to look */ 5439 *freed_so_far = 0; 5440 /* Yep, its worth a look and the lock overhead */ 5441 5442 /* Figure out what the rwnd would be */ 5443 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5444 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5445 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5446 } else { 5447 dif = 0; 5448 } 5449 if (dif >= rwnd_req) { 5450 if (hold_rlock) { 5451 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5452 r_unlocked = 1; 5453 } 5454 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5455 /* 5456 * One last check before we allow the guy possibly 5457 * to get in. There is a race, where the guy has not 5458 * reached the gate. In that case 5459 */ 5460 goto out; 5461 } 5462 SCTP_TCB_LOCK(stcb); 5463 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5464 /* No reports here */ 5465 SCTP_TCB_UNLOCK(stcb); 5466 goto out; 5467 } 5468 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5469 NET_EPOCH_ENTER(et); 5470 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5471 5472 sctp_chunk_output(stcb->sctp_ep, stcb, 5473 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5474 /* make sure no timer is running */ 5475 NET_EPOCH_EXIT(et); 5476 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5477 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5478 SCTP_TCB_UNLOCK(stcb); 5479 } else { 5480 /* Update how much we have pending */ 5481 stcb->freed_by_sorcv_sincelast = dif; 5482 } 5483 out: 5484 if (so && r_unlocked && hold_rlock) { 5485 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5486 } 5487 5488 SCTP_INP_DECR_REF(stcb->sctp_ep); 5489 no_lock: 5490 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5491 return; 5492 } 5493 5494 int 5495 sctp_sorecvmsg(struct socket *so, 5496 struct uio *uio, 5497 struct mbuf **mp, 5498 struct sockaddr *from, 5499 int fromlen, 5500 int *msg_flags, 5501 struct sctp_sndrcvinfo *sinfo, 5502 int filling_sinfo) 5503 { 5504 /* 5505 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5506 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5507 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
5508 * On the way out we may send out any combination of: 5509 * MSG_NOTIFICATION MSG_EOR 5510 * 5511 */ 5512 struct sctp_inpcb *inp = NULL; 5513 ssize_t my_len = 0; 5514 ssize_t cp_len = 0; 5515 int error = 0; 5516 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5517 struct mbuf *m = NULL; 5518 struct sctp_tcb *stcb = NULL; 5519 int wakeup_read_socket = 0; 5520 int freecnt_applied = 0; 5521 int out_flags = 0, in_flags = 0; 5522 int block_allowed = 1; 5523 uint32_t freed_so_far = 0; 5524 ssize_t copied_so_far = 0; 5525 int in_eeor_mode = 0; 5526 int no_rcv_needed = 0; 5527 uint32_t rwnd_req = 0; 5528 int hold_sblock = 0; 5529 int hold_rlock = 0; 5530 ssize_t slen = 0; 5531 uint32_t held_length = 0; 5532 int sockbuf_lock = 0; 5533 5534 if (uio == NULL) { 5535 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5536 return (EINVAL); 5537 } 5538 5539 if (msg_flags) { 5540 in_flags = *msg_flags; 5541 if (in_flags & MSG_PEEK) 5542 SCTP_STAT_INCR(sctps_read_peeks); 5543 } else { 5544 in_flags = 0; 5545 } 5546 slen = uio->uio_resid; 5547 5548 /* Pull in and set up our int flags */ 5549 if (in_flags & MSG_OOB) { 5550 /* Out of band's NOT supported */ 5551 return (EOPNOTSUPP); 5552 } 5553 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5554 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5555 return (EINVAL); 5556 } 5557 if ((in_flags & (MSG_DONTWAIT 5558 | MSG_NBIO 5559 )) || 5560 SCTP_SO_IS_NBIO(so)) { 5561 block_allowed = 0; 5562 } 5563 /* setup the endpoint */ 5564 inp = (struct sctp_inpcb *)so->so_pcb; 5565 if (inp == NULL) { 5566 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5567 return (EFAULT); 5568 } 5569 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5570 /* Must be at least a MTU's worth */ 5571 if (rwnd_req < SCTP_MIN_RWND) 5572 rwnd_req = SCTP_MIN_RWND; 5573 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5574 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5575 sctp_misc_ints(SCTP_SORECV_ENTER, 5576 rwnd_req, in_eeor_mode, SCTP_SBAVAIL(&so->so_rcv), (uint32_t)uio->uio_resid); 5577 } 5578 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5579 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5580 rwnd_req, block_allowed, SCTP_SBAVAIL(&so->so_rcv), (uint32_t)uio->uio_resid); 5581 } 5582 5583 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(in_flags)); 5584 if (error) { 5585 goto release_unlocked; 5586 } 5587 sockbuf_lock = 1; 5588 restart: 5589 5590 restart_nosblocks: 5591 if (hold_sblock == 0) { 5592 SOCKBUF_LOCK(&so->so_rcv); 5593 hold_sblock = 1; 5594 } 5595 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5596 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5597 goto out; 5598 } 5599 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && SCTP_SBAVAIL(&so->so_rcv) == 0) { 5600 if (so->so_error) { 5601 error = so->so_error; 5602 if ((in_flags & MSG_PEEK) == 0) 5603 so->so_error = 0; 5604 goto out; 5605 } else { 5606 if (SCTP_SBAVAIL(&so->so_rcv) == 0) { 5607 /* indicate EOF */ 5608 error = 0; 5609 goto out; 5610 } 5611 } 5612 } 5613 if (SCTP_SBAVAIL(&so->so_rcv) <= held_length) { 5614 if (so->so_error) { 5615 error = so->so_error; 5616 if ((in_flags & MSG_PEEK) == 0) { 5617 so->so_error = 0; 5618 } 5619 goto out; 5620 } 5621 if ((SCTP_SBAVAIL(&so->so_rcv) == 0) && 5622 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5623 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5624 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5625 /* 5626 * 
For active open side clear flags for 5627 * re-use passive open is blocked by 5628 * connect. 5629 */ 5630 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5631 /* 5632 * You were aborted, passive side 5633 * always hits here 5634 */ 5635 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5636 error = ECONNRESET; 5637 } 5638 so->so_state &= ~(SS_ISCONNECTING | 5639 SS_ISDISCONNECTING | 5640 SS_ISCONFIRMING | 5641 SS_ISCONNECTED); 5642 if (error == 0) { 5643 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5644 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5645 error = ENOTCONN; 5646 } 5647 } 5648 goto out; 5649 } 5650 } 5651 if (block_allowed) { 5652 error = sbwait(so, SO_RCV); 5653 if (error) { 5654 goto out; 5655 } 5656 held_length = 0; 5657 goto restart_nosblocks; 5658 } else { 5659 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5660 error = EWOULDBLOCK; 5661 goto out; 5662 } 5663 } 5664 if (hold_sblock == 1) { 5665 SOCKBUF_UNLOCK(&so->so_rcv); 5666 hold_sblock = 0; 5667 } 5668 /* we possibly have data we can read */ 5669 /* sa_ignore FREED_MEMORY */ 5670 control = TAILQ_FIRST(&inp->read_queue); 5671 if (control == NULL) { 5672 /* 5673 * This could be happening since the appender did the 5674 * increment but as not yet did the tailq insert onto the 5675 * read_queue 5676 */ 5677 if (hold_rlock == 0) { 5678 SCTP_INP_READ_LOCK(inp); 5679 } 5680 control = TAILQ_FIRST(&inp->read_queue); 5681 if ((control == NULL) && (SCTP_SBAVAIL(&so->so_rcv) > 0)) { 5682 #ifdef INVARIANTS 5683 panic("Huh, its non zero and nothing on control?"); 5684 #endif 5685 SCTP_SB_CLEAR(so->so_rcv); 5686 } 5687 SCTP_INP_READ_UNLOCK(inp); 5688 hold_rlock = 0; 5689 goto restart; 5690 } 5691 5692 if ((control->length == 0) && 5693 (control->do_not_ref_stcb)) { 5694 /* 5695 * Clean up code for freeing assoc that left behind a 5696 * pdapi.. maybe a peer in EEOR that just closed after 5697 * sending and never indicated a EOR. 5698 */ 5699 if (hold_rlock == 0) { 5700 hold_rlock = 1; 5701 SCTP_INP_READ_LOCK(inp); 5702 } 5703 control->held_length = 0; 5704 if (control->data) { 5705 /* Hmm there is data here .. fix */ 5706 struct mbuf *m_tmp; 5707 int cnt = 0; 5708 5709 m_tmp = control->data; 5710 while (m_tmp) { 5711 cnt += SCTP_BUF_LEN(m_tmp); 5712 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5713 control->tail_mbuf = m_tmp; 5714 control->end_added = 1; 5715 } 5716 m_tmp = SCTP_BUF_NEXT(m_tmp); 5717 } 5718 control->length = cnt; 5719 } else { 5720 /* remove it */ 5721 TAILQ_REMOVE(&inp->read_queue, control, next); 5722 /* Add back any hidden data */ 5723 sctp_free_remote_addr(control->whoFrom); 5724 sctp_free_a_readq(stcb, control); 5725 } 5726 if (hold_rlock) { 5727 hold_rlock = 0; 5728 SCTP_INP_READ_UNLOCK(inp); 5729 } 5730 goto restart; 5731 } 5732 if ((control->length == 0) && 5733 (control->end_added == 1)) { 5734 /* 5735 * Do we also need to check for (control->pdapi_aborted == 5736 * 1)? 5737 */ 5738 if (hold_rlock == 0) { 5739 hold_rlock = 1; 5740 SCTP_INP_READ_LOCK(inp); 5741 } 5742 TAILQ_REMOVE(&inp->read_queue, control, next); 5743 if (control->data) { 5744 #ifdef INVARIANTS 5745 panic("control->data not null but control->length == 0"); 5746 #else 5747 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5748 sctp_m_freem(control->data); 5749 control->data = NULL; 5750 #endif 5751 } 5752 if (control->aux_data) { 5753 sctp_m_free(control->aux_data); 5754 control->aux_data = NULL; 5755 } 5756 #ifdef INVARIANTS 5757 if (control->on_strm_q) { 5758 panic("About to free ctl:%p so:%p and its in %d", 5759 control, so, control->on_strm_q); 5760 } 5761 #endif 5762 sctp_free_remote_addr(control->whoFrom); 5763 sctp_free_a_readq(stcb, control); 5764 if (hold_rlock) { 5765 hold_rlock = 0; 5766 SCTP_INP_READ_UNLOCK(inp); 5767 } 5768 goto restart; 5769 } 5770 if (control->length == 0) { 5771 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5772 (filling_sinfo)) { 5773 /* find a more suitable one then this */ 5774 ctl = TAILQ_NEXT(control, next); 5775 while (ctl) { 5776 if ((ctl->stcb != control->stcb) && (ctl->length) && 5777 (ctl->some_taken || 5778 (ctl->spec_flags & M_NOTIFICATION) || 5779 ((ctl->do_not_ref_stcb == 0) && 5780 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5781 ) { 5782 /*- 5783 * If we have a different TCB next, and there is data 5784 * present. If we have already taken some (pdapi), OR we can 5785 * ref the tcb and no delivery as started on this stream, we 5786 * take it. Note we allow a notification on a different 5787 * assoc to be delivered.. 5788 */ 5789 control = ctl; 5790 goto found_one; 5791 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5792 (ctl->length) && 5793 ((ctl->some_taken) || 5794 ((ctl->do_not_ref_stcb == 0) && 5795 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5796 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5797 /*- 5798 * If we have the same tcb, and there is data present, and we 5799 * have the strm interleave feature present. Then if we have 5800 * taken some (pdapi) or we can refer to tht tcb AND we have 5801 * not started a delivery for this stream, we can take it. 5802 * Note we do NOT allow a notification on the same assoc to 5803 * be delivered. 5804 */ 5805 control = ctl; 5806 goto found_one; 5807 } 5808 ctl = TAILQ_NEXT(ctl, next); 5809 } 5810 } 5811 /* 5812 * if we reach here, not suitable replacement is available 5813 * <or> fragment interleave is NOT on. So stuff the sb_cc 5814 * into the our held count, and its time to sleep again. 5815 */ 5816 held_length = SCTP_SBAVAIL(&so->so_rcv); 5817 control->held_length = SCTP_SBAVAIL(&so->so_rcv); 5818 goto restart; 5819 } 5820 /* Clear the held length since there is something to read */ 5821 control->held_length = 0; 5822 found_one: 5823 /* 5824 * If we reach here, control has a some data for us to read off. 5825 * Note that stcb COULD be NULL. 5826 */ 5827 if (hold_rlock == 0) { 5828 hold_rlock = 1; 5829 SCTP_INP_READ_LOCK(inp); 5830 } 5831 control->some_taken++; 5832 stcb = control->stcb; 5833 if (stcb) { 5834 if ((control->do_not_ref_stcb == 0) && 5835 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5836 if (freecnt_applied == 0) 5837 stcb = NULL; 5838 } else if (control->do_not_ref_stcb == 0) { 5839 /* you can't free it on me please */ 5840 /* 5841 * The lock on the socket buffer protects us so the 5842 * free code will stop. 
But since we used the 5843 * socketbuf lock and the sender uses the tcb_lock 5844 * to increment, we need to use the atomic add to 5845 * the refcnt 5846 */ 5847 if (freecnt_applied) { 5848 #ifdef INVARIANTS 5849 panic("refcnt already incremented"); 5850 #else 5851 SCTP_PRINTF("refcnt already incremented?\n"); 5852 #endif 5853 } else { 5854 atomic_add_int(&stcb->asoc.refcnt, 1); 5855 freecnt_applied = 1; 5856 } 5857 /* 5858 * Setup to remember how much we have not yet told 5859 * the peer our rwnd has opened up. Note we grab the 5860 * value from the tcb from last time. Note too that 5861 * sack sending clears this when a sack is sent, 5862 * which is fine. Once we hit the rwnd_req, we then 5863 * will go to the sctp_user_rcvd() that will not 5864 * lock until it KNOWs it MUST send a WUP-SACK. 5865 */ 5866 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5867 stcb->freed_by_sorcv_sincelast = 0; 5868 } 5869 } 5870 if (stcb && 5871 ((control->spec_flags & M_NOTIFICATION) == 0) && 5872 control->do_not_ref_stcb == 0) { 5873 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5874 } 5875 5876 /* First lets get off the sinfo and sockaddr info */ 5877 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5878 sinfo->sinfo_stream = control->sinfo_stream; 5879 sinfo->sinfo_ssn = (uint16_t)control->mid; 5880 sinfo->sinfo_flags = control->sinfo_flags; 5881 sinfo->sinfo_ppid = control->sinfo_ppid; 5882 sinfo->sinfo_context = control->sinfo_context; 5883 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5884 sinfo->sinfo_tsn = control->sinfo_tsn; 5885 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5886 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5887 nxt = TAILQ_NEXT(control, next); 5888 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5889 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5890 struct sctp_extrcvinfo *s_extra; 5891 5892 s_extra = (struct sctp_extrcvinfo *)sinfo; 5893 if ((nxt) && 5894 (nxt->length)) { 5895 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5896 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5897 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5898 } 5899 if (nxt->spec_flags & M_NOTIFICATION) { 5900 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5901 } 5902 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5903 s_extra->serinfo_next_length = nxt->length; 5904 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5905 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5906 if (nxt->tail_mbuf != NULL) { 5907 if (nxt->end_added) { 5908 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5909 } 5910 } 5911 } else { 5912 /* 5913 * we explicitly 0 this, since the memcpy 5914 * got some other things beyond the older 5915 * sinfo_ that is on the control's structure 5916 * :-D 5917 */ 5918 nxt = NULL; 5919 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5920 s_extra->serinfo_next_aid = 0; 5921 s_extra->serinfo_next_length = 0; 5922 s_extra->serinfo_next_ppid = 0; 5923 s_extra->serinfo_next_stream = 0; 5924 } 5925 } 5926 /* 5927 * update off the real current cum-ack, if we have an stcb. 5928 */ 5929 if ((control->do_not_ref_stcb == 0) && stcb) 5930 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5931 /* 5932 * mask off the high bits, we keep the actual chunk bits in 5933 * there. 
5934 */ 5935 sinfo->sinfo_flags &= 0x00ff; 5936 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5937 sinfo->sinfo_flags |= SCTP_UNORDERED; 5938 } 5939 } 5940 #ifdef SCTP_ASOCLOG_OF_TSNS 5941 { 5942 int index, newindex; 5943 struct sctp_pcbtsn_rlog *entry; 5944 5945 do { 5946 index = inp->readlog_index; 5947 newindex = index + 1; 5948 if (newindex >= SCTP_READ_LOG_SIZE) { 5949 newindex = 0; 5950 } 5951 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5952 entry = &inp->readlog[index]; 5953 entry->vtag = control->sinfo_assoc_id; 5954 entry->strm = control->sinfo_stream; 5955 entry->seq = (uint16_t)control->mid; 5956 entry->sz = control->length; 5957 entry->flgs = control->sinfo_flags; 5958 } 5959 #endif 5960 if ((fromlen > 0) && (from != NULL)) { 5961 union sctp_sockstore store; 5962 size_t len; 5963 5964 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5965 #ifdef INET6 5966 case AF_INET6: 5967 len = sizeof(struct sockaddr_in6); 5968 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5969 store.sin6.sin6_port = control->port_from; 5970 break; 5971 #endif 5972 #ifdef INET 5973 case AF_INET: 5974 #ifdef INET6 5975 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5976 len = sizeof(struct sockaddr_in6); 5977 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5978 &store.sin6); 5979 store.sin6.sin6_port = control->port_from; 5980 } else { 5981 len = sizeof(struct sockaddr_in); 5982 store.sin = control->whoFrom->ro._l_addr.sin; 5983 store.sin.sin_port = control->port_from; 5984 } 5985 #else 5986 len = sizeof(struct sockaddr_in); 5987 store.sin = control->whoFrom->ro._l_addr.sin; 5988 store.sin.sin_port = control->port_from; 5989 #endif 5990 break; 5991 #endif 5992 default: 5993 len = 0; 5994 break; 5995 } 5996 memcpy(from, &store, min((size_t)fromlen, len)); 5997 #ifdef INET6 5998 { 5999 struct sockaddr_in6 lsa6, *from6; 6000 6001 from6 = (struct sockaddr_in6 *)from; 6002 sctp_recover_scope_mac(from6, (&lsa6)); 6003 } 6004 #endif 6005 } 6006 if (hold_rlock) { 6007 SCTP_INP_READ_UNLOCK(inp); 6008 hold_rlock = 0; 6009 } 6010 if (hold_sblock) { 6011 SOCKBUF_UNLOCK(&so->so_rcv); 6012 hold_sblock = 0; 6013 } 6014 /* now copy out what data we can */ 6015 if (mp == NULL) { 6016 /* copy out each mbuf in the chain up to length */ 6017 get_more_data: 6018 m = control->data; 6019 while (m) { 6020 /* Move out all we can */ 6021 cp_len = uio->uio_resid; 6022 my_len = SCTP_BUF_LEN(m); 6023 if (cp_len > my_len) { 6024 /* not enough in this buf */ 6025 cp_len = my_len; 6026 } 6027 if (hold_rlock) { 6028 SCTP_INP_READ_UNLOCK(inp); 6029 hold_rlock = 0; 6030 } 6031 if (cp_len > 0) 6032 error = uiomove(mtod(m, char *), (int)cp_len, uio); 6033 /* re-read */ 6034 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 6035 goto release; 6036 } 6037 6038 if ((control->do_not_ref_stcb == 0) && stcb && 6039 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 6040 no_rcv_needed = 1; 6041 } 6042 if (error) { 6043 /* error we are out of here */ 6044 goto release; 6045 } 6046 SCTP_INP_READ_LOCK(inp); 6047 hold_rlock = 1; 6048 if (cp_len == SCTP_BUF_LEN(m)) { 6049 if ((SCTP_BUF_NEXT(m) == NULL) && 6050 (control->end_added)) { 6051 out_flags |= MSG_EOR; 6052 if ((control->do_not_ref_stcb == 0) && 6053 (control->stcb != NULL) && 6054 ((control->spec_flags & M_NOTIFICATION) == 0)) 6055 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6056 } 6057 if (control->spec_flags & M_NOTIFICATION) { 6058 out_flags |= MSG_NOTIFICATION; 6059 } 6060 /* we ate up the mbuf */ 
6061 if (in_flags & MSG_PEEK) { 6062 /* just looking */ 6063 m = SCTP_BUF_NEXT(m); 6064 copied_so_far += cp_len; 6065 } else { 6066 /* dispose of the mbuf */ 6067 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6068 sctp_sblog(&so->so_rcv, 6069 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6070 } 6071 sctp_sbfree(control, stcb, &so->so_rcv, m); 6072 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6073 sctp_sblog(&so->so_rcv, 6074 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6075 } 6076 copied_so_far += cp_len; 6077 freed_so_far += (uint32_t)cp_len; 6078 freed_so_far += MSIZE; 6079 atomic_subtract_int(&control->length, (int)cp_len); 6080 control->data = sctp_m_free(m); 6081 m = control->data; 6082 /* 6083 * been through it all, must hold sb 6084 * lock ok to null tail 6085 */ 6086 if (control->data == NULL) { 6087 #ifdef INVARIANTS 6088 if ((control->end_added == 0) || 6089 (TAILQ_NEXT(control, next) == NULL)) { 6090 /* 6091 * If the end is not 6092 * added, OR the 6093 * next is NOT null 6094 * we MUST have the 6095 * lock. 6096 */ 6097 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6098 panic("Hmm we don't own the lock?"); 6099 } 6100 } 6101 #endif 6102 control->tail_mbuf = NULL; 6103 #ifdef INVARIANTS 6104 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6105 panic("end_added, nothing left and no MSG_EOR"); 6106 } 6107 #endif 6108 } 6109 } 6110 } else { 6111 /* Do we need to trim the mbuf? */ 6112 if (control->spec_flags & M_NOTIFICATION) { 6113 out_flags |= MSG_NOTIFICATION; 6114 } 6115 if ((in_flags & MSG_PEEK) == 0) { 6116 SCTP_BUF_RESV_UF(m, cp_len); 6117 SCTP_BUF_LEN(m) -= (int)cp_len; 6118 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6119 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6120 } 6121 SCTP_SB_DECR(&so->so_rcv, cp_len); 6122 if ((control->do_not_ref_stcb == 0) && 6123 stcb) { 6124 atomic_subtract_int(&stcb->asoc.sb_cc, (int)cp_len); 6125 } 6126 copied_so_far += cp_len; 6127 freed_so_far += (uint32_t)cp_len; 6128 freed_so_far += MSIZE; 6129 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6130 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6131 SCTP_LOG_SBRESULT, 0); 6132 } 6133 atomic_subtract_int(&control->length, (int)cp_len); 6134 } else { 6135 copied_so_far += cp_len; 6136 } 6137 } 6138 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6139 break; 6140 } 6141 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6142 (control->do_not_ref_stcb == 0) && 6143 (freed_so_far >= rwnd_req)) { 6144 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6145 } 6146 } /* end while(m) */ 6147 /* 6148 * At this point we have looked at it all and we either have 6149 * a MSG_EOR/or read all the user wants... <OR> 6150 * control->length == 0. 6151 */ 6152 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6153 /* we are done with this control */ 6154 if (control->length == 0) { 6155 if (control->data) { 6156 #ifdef INVARIANTS 6157 panic("control->data not null at read eor?"); 6158 #else 6159 SCTP_PRINTF("Strange, data left in the control buffer .. 
invariants would panic?\n"); 6160 sctp_m_freem(control->data); 6161 control->data = NULL; 6162 #endif 6163 } 6164 done_with_control: 6165 if (hold_rlock == 0) { 6166 SCTP_INP_READ_LOCK(inp); 6167 hold_rlock = 1; 6168 } 6169 TAILQ_REMOVE(&inp->read_queue, control, next); 6170 /* Add back any hidden data */ 6171 if (control->held_length) { 6172 held_length = 0; 6173 control->held_length = 0; 6174 wakeup_read_socket = 1; 6175 } 6176 if (control->aux_data) { 6177 sctp_m_free(control->aux_data); 6178 control->aux_data = NULL; 6179 } 6180 no_rcv_needed = control->do_not_ref_stcb; 6181 sctp_free_remote_addr(control->whoFrom); 6182 control->data = NULL; 6183 #ifdef INVARIANTS 6184 if (control->on_strm_q) { 6185 panic("About to free ctl:%p so:%p and its in %d", 6186 control, so, control->on_strm_q); 6187 } 6188 #endif 6189 sctp_free_a_readq(stcb, control); 6190 control = NULL; 6191 if ((freed_so_far >= rwnd_req) && 6192 (no_rcv_needed == 0)) 6193 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6194 6195 } else { 6196 /* 6197 * The user did not read all of this 6198 * message, turn off the returned MSG_EOR 6199 * since we are leaving more behind on the 6200 * control to read. 6201 */ 6202 #ifdef INVARIANTS 6203 if (control->end_added && 6204 (control->data == NULL) && 6205 (control->tail_mbuf == NULL)) { 6206 panic("Gak, control->length is corrupt?"); 6207 } 6208 #endif 6209 no_rcv_needed = control->do_not_ref_stcb; 6210 out_flags &= ~MSG_EOR; 6211 } 6212 } 6213 if (out_flags & MSG_EOR) { 6214 goto release; 6215 } 6216 if ((uio->uio_resid == 0) || 6217 ((in_eeor_mode) && 6218 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6219 goto release; 6220 } 6221 /* 6222 * If I hit here the receiver wants more and this message is 6223 * NOT done (pd-api). So two questions. Can we block? if not 6224 * we are done. Did the user NOT set MSG_WAITALL? 6225 */ 6226 if (block_allowed == 0) { 6227 goto release; 6228 } 6229 /* 6230 * We need to wait for more data a few things: - We don't 6231 * release the I/O lock so we don't get someone else 6232 * reading. - We must be sure to account for the case where 6233 * what is added is NOT to our control when we wakeup. 6234 */ 6235 6236 /* 6237 * Do we need to tell the transport a rwnd update might be 6238 * needed before we go to sleep? 
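 *
 * Sketch of the test sctp_user_rcvd() applies at this point (added for
 * exposition, using only names that appear earlier in this file):
 *
 *	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
 *	dif = (rwnd >= asoc->my_last_reported_rwnd) ?
 *	    (rwnd - asoc->my_last_reported_rwnd) : 0;
 *	if (dif >= rwnd_req)
 *		send a window-update SACK now (sctps_wu_sacks_sent);
 *	else
 *		remember dif in stcb->freed_by_sorcv_sincelast;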
6239 */ 6240 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6241 ((freed_so_far >= rwnd_req) && 6242 (control->do_not_ref_stcb == 0) && 6243 (no_rcv_needed == 0))) { 6244 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6245 } 6246 wait_some_more: 6247 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6248 goto release; 6249 } 6250 6251 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6252 goto release; 6253 6254 if (hold_rlock == 1) { 6255 SCTP_INP_READ_UNLOCK(inp); 6256 hold_rlock = 0; 6257 } 6258 if (hold_sblock == 0) { 6259 SOCKBUF_LOCK(&so->so_rcv); 6260 hold_sblock = 1; 6261 } 6262 if ((copied_so_far) && (control->length == 0) && 6263 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6264 goto release; 6265 } 6266 if (SCTP_SBAVAIL(&so->so_rcv) <= control->held_length) { 6267 error = sbwait(so, SO_RCV); 6268 if (error) { 6269 goto release; 6270 } 6271 control->held_length = 0; 6272 } 6273 if (hold_sblock) { 6274 SOCKBUF_UNLOCK(&so->so_rcv); 6275 hold_sblock = 0; 6276 } 6277 if (control->length == 0) { 6278 /* still nothing here */ 6279 if (control->end_added == 1) { 6280 /* he aborted, or is done i.e.did a shutdown */ 6281 out_flags |= MSG_EOR; 6282 if (control->pdapi_aborted) { 6283 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6284 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6285 6286 out_flags |= MSG_TRUNC; 6287 } else { 6288 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6289 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6290 } 6291 goto done_with_control; 6292 } 6293 if (SCTP_SBAVAIL(&so->so_rcv) > held_length) { 6294 control->held_length = SCTP_SBAVAIL(&so->so_rcv); 6295 held_length = 0; 6296 } 6297 goto wait_some_more; 6298 } else if (control->data == NULL) { 6299 /* 6300 * we must re-sync since data is probably being 6301 * added 6302 */ 6303 SCTP_INP_READ_LOCK(inp); 6304 if ((control->length > 0) && (control->data == NULL)) { 6305 /* 6306 * big trouble.. we have the lock and its 6307 * corrupt? 6308 */ 6309 #ifdef INVARIANTS 6310 panic("Impossible data==NULL length !=0"); 6311 #endif 6312 out_flags |= MSG_EOR; 6313 out_flags |= MSG_TRUNC; 6314 control->length = 0; 6315 SCTP_INP_READ_UNLOCK(inp); 6316 goto done_with_control; 6317 } 6318 SCTP_INP_READ_UNLOCK(inp); 6319 /* We will fall around to get more data */ 6320 } 6321 goto get_more_data; 6322 } else { 6323 /*- 6324 * Give caller back the mbuf chain, 6325 * store in uio_resid the length 6326 */ 6327 wakeup_read_socket = 0; 6328 if ((control->end_added == 0) || 6329 (TAILQ_NEXT(control, next) == NULL)) { 6330 /* Need to get rlock */ 6331 if (hold_rlock == 0) { 6332 SCTP_INP_READ_LOCK(inp); 6333 hold_rlock = 1; 6334 } 6335 } 6336 if (control->end_added) { 6337 out_flags |= MSG_EOR; 6338 if ((control->do_not_ref_stcb == 0) && 6339 (control->stcb != NULL) && 6340 ((control->spec_flags & M_NOTIFICATION) == 0)) 6341 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6342 } 6343 if (control->spec_flags & M_NOTIFICATION) { 6344 out_flags |= MSG_NOTIFICATION; 6345 } 6346 uio->uio_resid = control->length; 6347 *mp = control->data; 6348 m = control->data; 6349 while (m) { 6350 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6351 sctp_sblog(&so->so_rcv, 6352 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6353 } 6354 sctp_sbfree(control, stcb, &so->so_rcv, m); 6355 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6356 freed_so_far += MSIZE; 6357 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6358 sctp_sblog(&so->so_rcv, 6359 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6360 } 6361 m = SCTP_BUF_NEXT(m); 6362 } 6363 control->data = control->tail_mbuf = NULL; 6364 control->length = 0; 6365 if (out_flags & MSG_EOR) { 6366 /* Done with this control */ 6367 goto done_with_control; 6368 } 6369 } 6370 release: 6371 if (hold_rlock == 1) { 6372 SCTP_INP_READ_UNLOCK(inp); 6373 hold_rlock = 0; 6374 } 6375 if (hold_sblock == 1) { 6376 SOCKBUF_UNLOCK(&so->so_rcv); 6377 hold_sblock = 0; 6378 } 6379 6380 SOCK_IO_RECV_UNLOCK(so); 6381 sockbuf_lock = 0; 6382 6383 release_unlocked: 6384 if (hold_sblock) { 6385 SOCKBUF_UNLOCK(&so->so_rcv); 6386 hold_sblock = 0; 6387 } 6388 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6389 if ((freed_so_far >= rwnd_req) && 6390 (control && (control->do_not_ref_stcb == 0)) && 6391 (no_rcv_needed == 0)) 6392 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6393 } 6394 out: 6395 if (msg_flags) { 6396 *msg_flags = out_flags; 6397 } 6398 if (((out_flags & MSG_EOR) == 0) && 6399 ((in_flags & MSG_PEEK) == 0) && 6400 (sinfo) && 6401 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6402 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6403 struct sctp_extrcvinfo *s_extra; 6404 6405 s_extra = (struct sctp_extrcvinfo *)sinfo; 6406 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6407 } 6408 if (hold_rlock == 1) { 6409 SCTP_INP_READ_UNLOCK(inp); 6410 } 6411 if (hold_sblock) { 6412 SOCKBUF_UNLOCK(&so->so_rcv); 6413 } 6414 if (sockbuf_lock) { 6415 SOCK_IO_RECV_UNLOCK(so); 6416 } 6417 6418 if (freecnt_applied) { 6419 /* 6420 * The lock on the socket buffer protects us so the free 6421 * code will stop. But since we used the socketbuf lock and 6422 * the sender uses the tcb_lock to increment, we need to use 6423 * the atomic add to the refcnt. 6424 */ 6425 if (stcb == NULL) { 6426 #ifdef INVARIANTS 6427 panic("stcb for refcnt has gone NULL?"); 6428 goto stage_left; 6429 #else 6430 goto stage_left; 6431 #endif 6432 } 6433 /* Save the value back for next time */ 6434 stcb->freed_by_sorcv_sincelast = freed_so_far; 6435 atomic_subtract_int(&stcb->asoc.refcnt, 1); 6436 } 6437 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6438 if (stcb) { 6439 sctp_misc_ints(SCTP_SORECV_DONE, 6440 freed_so_far, 6441 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6442 stcb->asoc.my_rwnd, 6443 SCTP_SBAVAIL(&so->so_rcv)); 6444 } else { 6445 sctp_misc_ints(SCTP_SORECV_DONE, 6446 freed_so_far, 6447 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6448 0, 6449 SCTP_SBAVAIL(&so->so_rcv)); 6450 } 6451 } 6452 stage_left: 6453 if (wakeup_read_socket) { 6454 sctp_sorwakeup(inp, so); 6455 } 6456 return (error); 6457 } 6458 6459 #ifdef SCTP_MBUF_LOGGING 6460 struct mbuf * 6461 sctp_m_free(struct mbuf *m) 6462 { 6463 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6464 sctp_log_mb(m, SCTP_MBUF_IFREE); 6465 } 6466 return (m_free(m)); 6467 } 6468 6469 void 6470 sctp_m_freem(struct mbuf *mb) 6471 { 6472 while (mb != NULL) 6473 mb = sctp_m_free(mb); 6474 } 6475 6476 #endif 6477 6478 int 6479 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6480 { 6481 /* 6482 * Given a local address. 
For all associations that holds the 6483 * address, request a peer-set-primary. 6484 */ 6485 struct sctp_ifa *ifa; 6486 struct sctp_laddr *wi; 6487 6488 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6489 if (ifa == NULL) { 6490 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6491 return (EADDRNOTAVAIL); 6492 } 6493 /* 6494 * Now that we have the ifa we must awaken the iterator with this 6495 * message. 6496 */ 6497 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6498 if (wi == NULL) { 6499 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6500 return (ENOMEM); 6501 } 6502 /* Now incr the count and int wi structure */ 6503 SCTP_INCR_LADDR_COUNT(); 6504 memset(wi, 0, sizeof(*wi)); 6505 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6506 wi->ifa = ifa; 6507 wi->action = SCTP_SET_PRIM_ADDR; 6508 atomic_add_int(&ifa->refcount, 1); 6509 6510 /* Now add it to the work queue */ 6511 SCTP_WQ_ADDR_LOCK(); 6512 /* 6513 * Should this really be a tailq? As it is we will process the 6514 * newest first :-0 6515 */ 6516 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6517 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6518 (struct sctp_inpcb *)NULL, 6519 (struct sctp_tcb *)NULL, 6520 (struct sctp_nets *)NULL); 6521 SCTP_WQ_ADDR_UNLOCK(); 6522 return (0); 6523 } 6524 6525 int 6526 sctp_soreceive(struct socket *so, 6527 struct sockaddr **psa, 6528 struct uio *uio, 6529 struct mbuf **mp0, 6530 struct mbuf **controlp, 6531 int *flagsp) 6532 { 6533 int error, fromlen; 6534 uint8_t sockbuf[256]; 6535 struct sockaddr *from; 6536 struct sctp_extrcvinfo sinfo; 6537 int filling_sinfo = 1; 6538 int flags; 6539 struct sctp_inpcb *inp; 6540 6541 inp = (struct sctp_inpcb *)so->so_pcb; 6542 /* pickup the assoc we are reading from */ 6543 if (inp == NULL) { 6544 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6545 return (EINVAL); 6546 } 6547 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6548 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6549 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6550 (controlp == NULL)) { 6551 /* user does not want the sndrcv ctl */ 6552 filling_sinfo = 0; 6553 } 6554 if (psa) { 6555 from = (struct sockaddr *)sockbuf; 6556 fromlen = sizeof(sockbuf); 6557 from->sa_len = 0; 6558 } else { 6559 from = NULL; 6560 fromlen = 0; 6561 } 6562 6563 if (filling_sinfo) { 6564 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6565 } 6566 if (flagsp != NULL) { 6567 flags = *flagsp; 6568 } else { 6569 flags = 0; 6570 } 6571 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6572 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6573 if (flagsp != NULL) { 6574 *flagsp = flags; 6575 } 6576 if (controlp != NULL) { 6577 /* copy back the sinfo in a CMSG format */ 6578 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6579 *controlp = sctp_build_ctl_nchunk(inp, 6580 (struct sctp_sndrcvinfo *)&sinfo); 6581 } else { 6582 *controlp = NULL; 6583 } 6584 } 6585 if (psa) { 6586 /* copy back the address info */ 6587 if (from && from->sa_len) { 6588 *psa = sodupsockaddr(from, M_NOWAIT); 6589 } else { 6590 *psa = NULL; 6591 } 6592 } 6593 return (error); 6594 } 6595 6596 int 6597 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6598 int totaddr, int *error) 6599 { 6600 int added = 0; 6601 int i; 6602 struct sctp_inpcb *inp; 6603 struct sockaddr *sa; 6604 size_t incr = 0; 6605 #ifdef INET 6606 struct sockaddr_in *sin; 6607 #endif 6608 #ifdef 
INET6 6609 struct sockaddr_in6 *sin6; 6610 #endif 6611 6612 sa = addr; 6613 inp = stcb->sctp_ep; 6614 *error = 0; 6615 for (i = 0; i < totaddr; i++) { 6616 switch (sa->sa_family) { 6617 #ifdef INET 6618 case AF_INET: 6619 incr = sizeof(struct sockaddr_in); 6620 sin = (struct sockaddr_in *)sa; 6621 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6622 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6623 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6624 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6625 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6626 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6627 *error = EINVAL; 6628 goto out_now; 6629 } 6630 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6631 SCTP_DONOT_SETSCOPE, 6632 SCTP_ADDR_IS_CONFIRMED)) { 6633 /* assoc gone no un-lock */ 6634 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6635 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6636 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6637 *error = ENOBUFS; 6638 goto out_now; 6639 } 6640 added++; 6641 break; 6642 #endif 6643 #ifdef INET6 6644 case AF_INET6: 6645 incr = sizeof(struct sockaddr_in6); 6646 sin6 = (struct sockaddr_in6 *)sa; 6647 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6648 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6649 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6650 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6651 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6652 *error = EINVAL; 6653 goto out_now; 6654 } 6655 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6656 SCTP_DONOT_SETSCOPE, 6657 SCTP_ADDR_IS_CONFIRMED)) { 6658 /* assoc gone no un-lock */ 6659 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6660 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6661 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6662 *error = ENOBUFS; 6663 goto out_now; 6664 } 6665 added++; 6666 break; 6667 #endif 6668 default: 6669 break; 6670 } 6671 sa = (struct sockaddr *)((caddr_t)sa + incr); 6672 } 6673 out_now: 6674 return (added); 6675 } 6676 6677 int 6678 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6679 unsigned int totaddr, 6680 unsigned int *num_v4, unsigned int *num_v6, 6681 unsigned int limit) 6682 { 6683 struct sockaddr *sa; 6684 struct sctp_tcb *stcb; 6685 unsigned int incr, at, i; 6686 6687 at = 0; 6688 sa = addr; 6689 *num_v6 = *num_v4 = 0; 6690 /* account and validate addresses */ 6691 if (totaddr == 0) { 6692 return (EINVAL); 6693 } 6694 for (i = 0; i < totaddr; i++) { 6695 if (at + sizeof(struct sockaddr) > limit) { 6696 return (EINVAL); 6697 } 6698 switch (sa->sa_family) { 6699 #ifdef INET 6700 case AF_INET: 6701 incr = (unsigned int)sizeof(struct sockaddr_in); 6702 if (sa->sa_len != incr) { 6703 return (EINVAL); 6704 } 6705 (*num_v4) += 1; 6706 break; 6707 #endif 6708 #ifdef INET6 6709 case AF_INET6: 6710 { 6711 struct sockaddr_in6 *sin6; 6712 6713 incr = (unsigned int)sizeof(struct sockaddr_in6); 6714 if (sa->sa_len != incr) { 6715 return (EINVAL); 6716 } 6717 sin6 = (struct sockaddr_in6 *)sa; 6718 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6719 /* Must be non-mapped for connectx */ 6720 return (EINVAL); 6721 } 6722 (*num_v6) += 1; 6723 break; 6724 } 6725 #endif 6726 default: 6727 return (EINVAL); 6728 } 6729 if ((at + incr) > limit) { 6730 return (EINVAL); 6731 } 6732 SCTP_INP_INCR_REF(inp); 6733 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6734 if (stcb != NULL) { 6735 SCTP_TCB_UNLOCK(stcb); 6736 return (EALREADY); 6737 } else { 6738 
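			/*
			 * Illustrative sketch (added for exposition, not
			 * part of the original source): the connectx
			 * helpers in this file walk a buffer of sockaddrs
			 * that userland packs back to back, advancing by
			 * the size of each entry, e.g.
			 *
			 *	struct sockaddr_in v4;
			 *	struct sockaddr_in6 v6;
			 *	char packed[sizeof(v4) + sizeof(v6)];
			 *	sctp_assoc_t aid;
			 *
			 *	... fill in v4 (sin_len = sizeof(v4)) and
			 *	v6 (sin6_len = sizeof(v6)) ...
			 *	memcpy(packed, &v4, sizeof(v4));
			 *	memcpy(packed + sizeof(v4), &v6, sizeof(v6));
			 *	(void)sctp_connectx(fd,
			 *	    (struct sockaddr *)packed, 2, &aid);
			 */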
SCTP_INP_DECR_REF(inp); 6739 } 6740 at += incr; 6741 sa = (struct sockaddr *)((caddr_t)sa + incr); 6742 } 6743 return (0); 6744 } 6745 6746 /* 6747 * sctp_bindx(ADD) for one address. 6748 * assumes all arguments are valid/checked by caller. 6749 */ 6750 void 6751 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6752 struct sockaddr *sa, uint32_t vrf_id, int *error, 6753 void *p) 6754 { 6755 #if defined(INET) && defined(INET6) 6756 struct sockaddr_in sin; 6757 #endif 6758 #ifdef INET6 6759 struct sockaddr_in6 *sin6; 6760 #endif 6761 #ifdef INET 6762 struct sockaddr_in *sinp; 6763 #endif 6764 struct sockaddr *addr_to_use; 6765 struct sctp_inpcb *lep; 6766 uint16_t port; 6767 6768 /* see if we're bound all already! */ 6769 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6770 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6771 *error = EINVAL; 6772 return; 6773 } 6774 switch (sa->sa_family) { 6775 #ifdef INET6 6776 case AF_INET6: 6777 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6778 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6779 *error = EINVAL; 6780 return; 6781 } 6782 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6783 /* can only bind v6 on PF_INET6 sockets */ 6784 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6785 *error = EINVAL; 6786 return; 6787 } 6788 sin6 = (struct sockaddr_in6 *)sa; 6789 port = sin6->sin6_port; 6790 #ifdef INET 6791 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6792 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6793 SCTP_IPV6_V6ONLY(inp)) { 6794 /* can't bind v4-mapped on PF_INET sockets */ 6795 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6796 *error = EINVAL; 6797 return; 6798 } 6799 in6_sin6_2_sin(&sin, sin6); 6800 addr_to_use = (struct sockaddr *)&sin; 6801 } else { 6802 addr_to_use = sa; 6803 } 6804 #else 6805 addr_to_use = sa; 6806 #endif 6807 break; 6808 #endif 6809 #ifdef INET 6810 case AF_INET: 6811 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6812 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6813 *error = EINVAL; 6814 return; 6815 } 6816 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6817 SCTP_IPV6_V6ONLY(inp)) { 6818 /* can't bind v4 on PF_INET sockets */ 6819 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6820 *error = EINVAL; 6821 return; 6822 } 6823 sinp = (struct sockaddr_in *)sa; 6824 port = sinp->sin_port; 6825 addr_to_use = sa; 6826 break; 6827 #endif 6828 default: 6829 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6830 *error = EINVAL; 6831 return; 6832 } 6833 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6834 if (p == NULL) { 6835 /* Can't get proc for Net/Open BSD */ 6836 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6837 *error = EINVAL; 6838 return; 6839 } 6840 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6841 return; 6842 } 6843 /* Validate the incoming port. */ 6844 if ((port != 0) && (port != inp->sctp_lport)) { 6845 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6846 *error = EINVAL; 6847 return; 6848 } 6849 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6850 if (lep == NULL) { 6851 /* add the address */ 6852 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6853 SCTP_ADD_IP_ADDRESS, vrf_id); 6854 } else { 6855 if (lep != inp) { 6856 *error = EADDRINUSE; 6857 } 6858 SCTP_INP_DECR_REF(lep); 6859 } 6860 } 6861 6862 /* 6863 * sctp_bindx(DELETE) for one address. 
6864 * assumes all arguments are valid/checked by caller. 6865 */ 6866 void 6867 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6868 struct sockaddr *sa, uint32_t vrf_id, int *error) 6869 { 6870 struct sockaddr *addr_to_use; 6871 #if defined(INET) && defined(INET6) 6872 struct sockaddr_in6 *sin6; 6873 struct sockaddr_in sin; 6874 #endif 6875 6876 /* see if we're bound all already! */ 6877 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6878 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6879 *error = EINVAL; 6880 return; 6881 } 6882 switch (sa->sa_family) { 6883 #ifdef INET6 6884 case AF_INET6: 6885 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6886 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6887 *error = EINVAL; 6888 return; 6889 } 6890 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6891 /* can only bind v6 on PF_INET6 sockets */ 6892 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6893 *error = EINVAL; 6894 return; 6895 } 6896 #ifdef INET 6897 sin6 = (struct sockaddr_in6 *)sa; 6898 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6899 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6900 SCTP_IPV6_V6ONLY(inp)) { 6901 /* can't bind mapped-v4 on PF_INET sockets */ 6902 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6903 *error = EINVAL; 6904 return; 6905 } 6906 in6_sin6_2_sin(&sin, sin6); 6907 addr_to_use = (struct sockaddr *)&sin; 6908 } else { 6909 addr_to_use = sa; 6910 } 6911 #else 6912 addr_to_use = sa; 6913 #endif 6914 break; 6915 #endif 6916 #ifdef INET 6917 case AF_INET: 6918 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6919 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6920 *error = EINVAL; 6921 return; 6922 } 6923 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6924 SCTP_IPV6_V6ONLY(inp)) { 6925 /* can't bind v4 on PF_INET sockets */ 6926 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6927 *error = EINVAL; 6928 return; 6929 } 6930 addr_to_use = sa; 6931 break; 6932 #endif 6933 default: 6934 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6935 *error = EINVAL; 6936 return; 6937 } 6938 /* No lock required mgmt_ep_sa does its own locking. 
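 *
 * Illustrative note (added for exposition, not part of the original
 * source): an application normally reaches this path through
 * sctp_bindx(3), e.g.
 *
 *	struct sockaddr_in sin;
 *
 *	... fill in sin, with sin_len = sizeof(sin) and the port the
 *	endpoint is already bound to ...
 *	if (sctp_bindx(fd, (struct sockaddr *)&sin, 1,
 *	    SCTP_BINDX_REM_ADDR) < 0)
 *		warn("sctp_bindx");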
*/ 6939 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6940 vrf_id); 6941 } 6942 6943 /* 6944 * returns the valid local address count for an assoc, taking into account 6945 * all scoping rules 6946 */ 6947 int 6948 sctp_local_addr_count(struct sctp_tcb *stcb) 6949 { 6950 int loopback_scope; 6951 #if defined(INET) 6952 int ipv4_local_scope, ipv4_addr_legal; 6953 #endif 6954 #if defined(INET6) 6955 int local_scope, site_scope, ipv6_addr_legal; 6956 #endif 6957 struct sctp_vrf *vrf; 6958 struct sctp_ifn *sctp_ifn; 6959 struct sctp_ifa *sctp_ifa; 6960 int count = 0; 6961 6962 /* Turn on all the appropriate scopes */ 6963 loopback_scope = stcb->asoc.scope.loopback_scope; 6964 #if defined(INET) 6965 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6966 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6967 #endif 6968 #if defined(INET6) 6969 local_scope = stcb->asoc.scope.local_scope; 6970 site_scope = stcb->asoc.scope.site_scope; 6971 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6972 #endif 6973 SCTP_IPI_ADDR_RLOCK(); 6974 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6975 if (vrf == NULL) { 6976 /* no vrf, no addresses */ 6977 SCTP_IPI_ADDR_RUNLOCK(); 6978 return (0); 6979 } 6980 6981 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6982 /* 6983 * bound all case: go through all ifns on the vrf 6984 */ 6985 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6986 if ((loopback_scope == 0) && 6987 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6988 continue; 6989 } 6990 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6991 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6992 continue; 6993 switch (sctp_ifa->address.sa.sa_family) { 6994 #ifdef INET 6995 case AF_INET: 6996 if (ipv4_addr_legal) { 6997 struct sockaddr_in *sin; 6998 6999 sin = &sctp_ifa->address.sin; 7000 if (sin->sin_addr.s_addr == 0) { 7001 /* 7002 * skip unspecified 7003 * addrs 7004 */ 7005 continue; 7006 } 7007 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 7008 &sin->sin_addr) != 0) { 7009 continue; 7010 } 7011 if ((ipv4_local_scope == 0) && 7012 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 7013 continue; 7014 } 7015 /* count this one */ 7016 count++; 7017 } else { 7018 continue; 7019 } 7020 break; 7021 #endif 7022 #ifdef INET6 7023 case AF_INET6: 7024 if (ipv6_addr_legal) { 7025 struct sockaddr_in6 *sin6; 7026 7027 sin6 = &sctp_ifa->address.sin6; 7028 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 7029 continue; 7030 } 7031 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 7032 &sin6->sin6_addr) != 0) { 7033 continue; 7034 } 7035 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 7036 if (local_scope == 0) 7037 continue; 7038 if (sin6->sin6_scope_id == 0) { 7039 if (sa6_recoverscope(sin6) != 0) 7040 /* 7041 * 7042 * bad 7043 * link 7044 * 7045 * local 7046 * 7047 * address 7048 */ 7049 continue; 7050 } 7051 } 7052 if ((site_scope == 0) && 7053 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7054 continue; 7055 } 7056 /* count this one */ 7057 count++; 7058 } 7059 break; 7060 #endif 7061 default: 7062 /* TSNH */ 7063 break; 7064 } 7065 } 7066 } 7067 } else { 7068 /* 7069 * subset bound case 7070 */ 7071 struct sctp_laddr *laddr; 7072 7073 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7074 sctp_nxt_addr) { 7075 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7076 continue; 7077 } 7078 /* count this one */ 7079 count++; 7080 } 7081 } 7082 SCTP_IPI_ADDR_RUNLOCK(); 7083 return (count); 7084 } 7085 7086 #if defined(SCTP_LOCAL_TRACE_BUF) 7087 7088 void 7089 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7090 { 7091 uint32_t saveindex, newindex; 7092 7093 do { 7094 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7095 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7096 newindex = 1; 7097 } else { 7098 newindex = saveindex + 1; 7099 } 7100 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7101 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7102 saveindex = 0; 7103 } 7104 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7105 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7106 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7107 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7108 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7109 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7110 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7111 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7112 } 7113 7114 #endif 7115 static bool 7116 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7117 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7118 { 7119 struct ip *iph; 7120 #ifdef INET6 7121 struct ip6_hdr *ip6; 7122 #endif 7123 struct mbuf *sp, *last; 7124 struct udphdr *uhdr; 7125 uint16_t port; 7126 7127 if ((m->m_flags & M_PKTHDR) == 0) { 7128 /* Can't handle one that is not a pkt hdr */ 7129 goto out; 7130 } 7131 /* Pull the src port */ 7132 iph = mtod(m, struct ip *); 7133 uhdr = (struct udphdr *)((caddr_t)iph + off); 7134 port = uhdr->uh_sport; 7135 /* 7136 * Split out the mbuf chain. Leave the IP header in m, place the 7137 * rest in the sp. 7138 */ 7139 sp = m_split(m, off, M_NOWAIT); 7140 if (sp == NULL) { 7141 /* Gak, drop packet, we can't do a split */ 7142 goto out; 7143 } 7144 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7145 /* Gak, packet can't have an SCTP header in it - too small */ 7146 m_freem(sp); 7147 goto out; 7148 } 7149 /* Now pull up the UDP header and SCTP header together */ 7150 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7151 if (sp == NULL) { 7152 /* Gak pullup failed */ 7153 goto out; 7154 } 7155 /* Trim out the UDP header */ 7156 m_adj(sp, sizeof(struct udphdr)); 7157 7158 /* Now reconstruct the mbuf chain */ 7159 for (last = m; last->m_next; last = last->m_next); 7160 last->m_next = sp; 7161 m->m_pkthdr.len += sp->m_pkthdr.len; 7162 /* 7163 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7164 * checksum and it was valid. Since CSUM_DATA_VALID == 7165 * CSUM_SCTP_VALID this would imply that the HW also verified the 7166 * SCTP checksum. Therefore, clear the bit. 
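 *
 * (Added note: with the bit cleared, the SCTP input path falls back to
 * verifying the CRC32c in software, which is what we want here since the
 * NIC only looked at the outer UDP checksum, never at the inner SCTP
 * one.)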
7167 */ 7168 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7169 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7170 m->m_pkthdr.len, 7171 if_name(m->m_pkthdr.rcvif), 7172 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7173 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7174 iph = mtod(m, struct ip *); 7175 switch (iph->ip_v) { 7176 #ifdef INET 7177 case IPVERSION: 7178 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7179 sctp_input_with_port(m, off, port); 7180 break; 7181 #endif 7182 #ifdef INET6 7183 case IPV6_VERSION >> 4: 7184 ip6 = mtod(m, struct ip6_hdr *); 7185 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7186 sctp6_input_with_port(&m, &off, port); 7187 break; 7188 #endif 7189 default: 7190 goto out; 7191 break; 7192 } 7193 return (true); 7194 out: 7195 m_freem(m); 7196 7197 return (true); 7198 } 7199 7200 #ifdef INET 7201 static void 7202 sctp_recv_icmp_tunneled_packet(udp_tun_icmp_param_t param) 7203 { 7204 struct icmp *icmp = param.icmp; 7205 struct ip *outer_ip, *inner_ip; 7206 struct sctphdr *sh; 7207 struct udphdr *udp; 7208 struct sctp_inpcb *inp; 7209 struct sctp_tcb *stcb; 7210 struct sctp_nets *net; 7211 struct sctp_init_chunk *ch; 7212 struct sockaddr_in src, dst; 7213 uint8_t type, code; 7214 7215 inner_ip = &icmp->icmp_ip; 7216 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7217 if (ntohs(outer_ip->ip_len) < 7218 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7219 return; 7220 } 7221 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7222 sh = (struct sctphdr *)(udp + 1); 7223 memset(&src, 0, sizeof(struct sockaddr_in)); 7224 src.sin_family = AF_INET; 7225 src.sin_len = sizeof(struct sockaddr_in); 7226 src.sin_port = sh->src_port; 7227 src.sin_addr = inner_ip->ip_src; 7228 memset(&dst, 0, sizeof(struct sockaddr_in)); 7229 dst.sin_family = AF_INET; 7230 dst.sin_len = sizeof(struct sockaddr_in); 7231 dst.sin_port = sh->dest_port; 7232 dst.sin_addr = inner_ip->ip_dst; 7233 /* 7234 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7235 * holds our local endpoint address. Thus we reverse the dst and the 7236 * src in the lookup. 7237 */ 7238 inp = NULL; 7239 net = NULL; 7240 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7241 (struct sockaddr *)&src, 7242 &inp, &net, 1, 7243 SCTP_DEFAULT_VRFID); 7244 if ((stcb != NULL) && 7245 (net != NULL) && 7246 (inp != NULL)) { 7247 /* Check the UDP port numbers */ 7248 if ((udp->uh_dport != net->port) || 7249 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7250 SCTP_TCB_UNLOCK(stcb); 7251 return; 7252 } 7253 /* Check the verification tag */ 7254 if (ntohl(sh->v_tag) != 0) { 7255 /* 7256 * This must be the verification tag used for 7257 * sending out packets. We don't consider packets 7258 * reflecting the verification tag. 7259 */ 7260 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7261 SCTP_TCB_UNLOCK(stcb); 7262 return; 7263 } 7264 } else { 7265 if (ntohs(outer_ip->ip_len) >= 7266 sizeof(struct ip) + 7267 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7268 /* 7269 * In this case we can check if we got an 7270 * INIT chunk and if the initiate tag 7271 * matches. 
7272 */ 7273 ch = (struct sctp_init_chunk *)(sh + 1); 7274 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7275 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7276 SCTP_TCB_UNLOCK(stcb); 7277 return; 7278 } 7279 } else { 7280 SCTP_TCB_UNLOCK(stcb); 7281 return; 7282 } 7283 } 7284 type = icmp->icmp_type; 7285 code = icmp->icmp_code; 7286 if ((type == ICMP_UNREACH) && 7287 (code == ICMP_UNREACH_PORT)) { 7288 code = ICMP_UNREACH_PROTOCOL; 7289 } 7290 sctp_notify(inp, stcb, net, type, code, 7291 ntohs(inner_ip->ip_len), 7292 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7293 } else { 7294 if ((stcb == NULL) && (inp != NULL)) { 7295 /* reduce ref-count */ 7296 SCTP_INP_WLOCK(inp); 7297 SCTP_INP_DECR_REF(inp); 7298 SCTP_INP_WUNLOCK(inp); 7299 } 7300 if (stcb) { 7301 SCTP_TCB_UNLOCK(stcb); 7302 } 7303 } 7304 return; 7305 } 7306 #endif 7307 7308 #ifdef INET6 7309 static void 7310 sctp_recv_icmp6_tunneled_packet(udp_tun_icmp_param_t param) 7311 { 7312 struct ip6ctlparam *ip6cp = param.ip6cp; 7313 struct sctp_inpcb *inp; 7314 struct sctp_tcb *stcb; 7315 struct sctp_nets *net; 7316 struct sctphdr sh; 7317 struct udphdr udp; 7318 struct sockaddr_in6 src, dst; 7319 uint8_t type, code; 7320 7321 /* 7322 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7323 */ 7324 if (ip6cp->ip6c_m == NULL) { 7325 return; 7326 } 7327 /* 7328 * Check if we can safely examine the ports and the verification tag 7329 * of the SCTP common header. 7330 */ 7331 if (ip6cp->ip6c_m->m_pkthdr.len < 7332 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7333 return; 7334 } 7335 /* Copy out the UDP header. */ 7336 memset(&udp, 0, sizeof(struct udphdr)); 7337 m_copydata(ip6cp->ip6c_m, 7338 ip6cp->ip6c_off, 7339 sizeof(struct udphdr), 7340 (caddr_t)&udp); 7341 /* Copy out the port numbers and the verification tag. */ 7342 memset(&sh, 0, sizeof(struct sctphdr)); 7343 m_copydata(ip6cp->ip6c_m, 7344 ip6cp->ip6c_off + sizeof(struct udphdr), 7345 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7346 (caddr_t)&sh); 7347 memset(&src, 0, sizeof(struct sockaddr_in6)); 7348 src.sin6_family = AF_INET6; 7349 src.sin6_len = sizeof(struct sockaddr_in6); 7350 src.sin6_port = sh.src_port; 7351 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7352 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7353 return; 7354 } 7355 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7356 dst.sin6_family = AF_INET6; 7357 dst.sin6_len = sizeof(struct sockaddr_in6); 7358 dst.sin6_port = sh.dest_port; 7359 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7360 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7361 return; 7362 } 7363 inp = NULL; 7364 net = NULL; 7365 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7366 (struct sockaddr *)&src, 7367 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7368 if ((stcb != NULL) && 7369 (net != NULL) && 7370 (inp != NULL)) { 7371 /* Check the UDP port numbers */ 7372 if ((udp.uh_dport != net->port) || 7373 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7374 SCTP_TCB_UNLOCK(stcb); 7375 return; 7376 } 7377 /* Check the verification tag */ 7378 if (ntohl(sh.v_tag) != 0) { 7379 /* 7380 * This must be the verification tag used for 7381 * sending out packets. We don't consider packets 7382 * reflecting the verification tag. 
7383 */ 7384 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7385 SCTP_TCB_UNLOCK(stcb); 7386 return; 7387 } 7388 } else { 7389 if (ip6cp->ip6c_m->m_pkthdr.len >= 7390 ip6cp->ip6c_off + sizeof(struct udphdr) + 7391 sizeof(struct sctphdr) + 7392 sizeof(struct sctp_chunkhdr) + 7393 offsetof(struct sctp_init, a_rwnd)) { 7394 /* 7395 * In this case we can check if we got an 7396 * INIT chunk and if the initiate tag 7397 * matches. 7398 */ 7399 uint32_t initiate_tag; 7400 uint8_t chunk_type; 7401 7402 m_copydata(ip6cp->ip6c_m, 7403 ip6cp->ip6c_off + 7404 sizeof(struct udphdr) + 7405 sizeof(struct sctphdr), 7406 sizeof(uint8_t), 7407 (caddr_t)&chunk_type); 7408 m_copydata(ip6cp->ip6c_m, 7409 ip6cp->ip6c_off + 7410 sizeof(struct udphdr) + 7411 sizeof(struct sctphdr) + 7412 sizeof(struct sctp_chunkhdr), 7413 sizeof(uint32_t), 7414 (caddr_t)&initiate_tag); 7415 if ((chunk_type != SCTP_INITIATION) || 7416 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7417 SCTP_TCB_UNLOCK(stcb); 7418 return; 7419 } 7420 } else { 7421 SCTP_TCB_UNLOCK(stcb); 7422 return; 7423 } 7424 } 7425 type = ip6cp->ip6c_icmp6->icmp6_type; 7426 code = ip6cp->ip6c_icmp6->icmp6_code; 7427 if ((type == ICMP6_DST_UNREACH) && 7428 (code == ICMP6_DST_UNREACH_NOPORT)) { 7429 type = ICMP6_PARAM_PROB; 7430 code = ICMP6_PARAMPROB_NEXTHEADER; 7431 } 7432 sctp6_notify(inp, stcb, net, type, code, 7433 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7434 } else { 7435 if ((stcb == NULL) && (inp != NULL)) { 7436 /* reduce inp's ref-count */ 7437 SCTP_INP_WLOCK(inp); 7438 SCTP_INP_DECR_REF(inp); 7439 SCTP_INP_WUNLOCK(inp); 7440 } 7441 if (stcb) { 7442 SCTP_TCB_UNLOCK(stcb); 7443 } 7444 } 7445 } 7446 #endif 7447 7448 void 7449 sctp_over_udp_stop(void) 7450 { 7451 /* 7452 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7453 * for writing! 7454 */ 7455 #ifdef INET 7456 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7457 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7458 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7459 } 7460 #endif 7461 #ifdef INET6 7462 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7463 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7464 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7465 } 7466 #endif 7467 } 7468 7469 int 7470 sctp_over_udp_start(void) 7471 { 7472 uint16_t port; 7473 int ret; 7474 #ifdef INET 7475 struct sockaddr_in sin; 7476 #endif 7477 #ifdef INET6 7478 struct sockaddr_in6 sin6; 7479 #endif 7480 /* 7481 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7482 * for writing! 7483 */ 7484 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7485 if (ntohs(port) == 0) { 7486 /* Must have a port set */ 7487 return (EINVAL); 7488 } 7489 #ifdef INET 7490 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7491 /* Already running -- must stop first */ 7492 return (EALREADY); 7493 } 7494 #endif 7495 #ifdef INET6 7496 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7497 /* Already running -- must stop first */ 7498 return (EALREADY); 7499 } 7500 #endif 7501 #ifdef INET 7502 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7503 SOCK_DGRAM, IPPROTO_UDP, 7504 curthread->td_ucred, curthread))) { 7505 sctp_over_udp_stop(); 7506 return (ret); 7507 } 7508 /* Call the special UDP hook. */ 7509 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7510 sctp_recv_udp_tunneled_packet, 7511 sctp_recv_icmp_tunneled_packet, 7512 NULL))) { 7513 sctp_over_udp_stop(); 7514 return (ret); 7515 } 7516 /* Ok, we have a socket, bind it to the port. 
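 *
 * (Added note: this function is normally reached by setting the sysctl
 * net.inet.sctp.udp_tunneling_port, for example to 9899, the port
 * registered for SCTP over UDP encapsulation in RFC 6951; setting the
 * sysctl back to 0 ends up in sctp_over_udp_stop() above.)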
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}

void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}
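
/*
 * sctp_set_state() replaces the primary state of an association while
 * preserving its substate flags; sctp_add_substate() below sets additional
 * substate flags.  Under KDTRACE_HOOKS both fire the state__change probe:
 * the former on any primary-state change other than EMPTY -> INUSE, the
 * latter when ABOUT_TO_BE_FREED or SHUTDOWN_PENDING is newly set.
 */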
void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	    (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}