/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB, from,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE, 0,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT, from,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM, from,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE, action,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK, from,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP, from,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR, from,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF, from,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM, from,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND, from,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(SOCK_MTX(inp->sctp_socket));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_rcv));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_snd));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT, from,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST, from,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND, from,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND, from,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT, from,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT, from,
	    a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE, from,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK, from,
	    sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}

void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size, tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code the time to be positive and
 * know that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
	uint64_t temp;
	uint32_t ticks;

	if (hz == 1000) {
		ticks = msecs;
	} else {
		temp = (((uint64_t)msecs * hz) + 999) / 1000;
		if (temp > UINT32_MAX) {
			ticks = UINT32_MAX;
		} else {
			ticks = (uint32_t)temp;
		}
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t msecs;

	if (hz == 1000) {
		msecs = ticks;
	} else {
		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
		if (temp > UINT32_MAX) {
			msecs = UINT32_MAX;
		} else {
			msecs = (uint32_t)temp;
		}
	}
	return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
	uint64_t temp;
	uint32_t ticks;

	temp = (uint64_t)secs * hz;
	if (temp > UINT32_MAX) {
		ticks = UINT32_MAX;
	} else {
		ticks = (uint32_t)temp;
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t secs;

	temp = ((uint64_t)ticks + (hz - 1)) / hz;
	if (temp > UINT32_MAX) {
		secs = UINT32_MAX;
	} else {
		secs = (uint32_t)temp;
	}
	return (secs);
}
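/*
 * Editorial note, not part of the original file: illustrative examples of
 * the round-up behaviour above, assuming hz = 100 (a 10 ms tick):
 *
 *   sctp_msecs_to_ticks(1)  -> 1 tick   (rounded up from 0.1 ticks)
 *   sctp_ticks_to_msecs(1)  -> 10 msecs
 *   sctp_secs_to_ticks(2)   -> 200 ticks
 *   sctp_ticks_to_secs(150) -> 2 secs   (rounded up from 1.5 secs)
 *
 * Rounding upwards is what guarantees that a positive time never maps to
 * zero ticks.
 */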
/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};

/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4
 * smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	val &= 0xfffffffc;
	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}
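/*
 * Editorial note, not part of the original file: illustrative examples of
 * the table lookups above:
 *
 *   sctp_get_prev_mtu(1500) -> 1492  (largest table entry below 1500)
 *   sctp_get_next_mtu(1500) -> 1536  (smallest table entry above 1500)
 *   sctp_get_prev_mtu(50)   -> 48    (below the table minimum of 68, so the
 *                                     value is just rounded down to a
 *                                     multiple of 4)
 */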
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good random numbers
	 * and our counter. The result becomes our good random numbers and
	 * we then set up to give these out. Note that we do no locking to
	 * protect this. This is OK, since if competing folks call this we
	 * will get more gobbledygook in the random store, which is what we
	 * want. There is a danger that two callers will use the same random
	 * numbers, but that's OK too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use a random selection process to
	 * get the initial stream sequence number, using RFC 1750 as a good
	 * guideline.
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}

int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t initial_tsn, uint32_t vrf_id,
    uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero.
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer.
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	if (override_tag) {
		asoc->init_seq_number = initial_tsn;
	} else {
		asoc->init_seq_number = sctp_select_initial_TSN(&inp->sctp_ep);
	}
	asoc->asconf_seq_out = asoc->init_seq_number;
	asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->sending_seq = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->init_seq_number - 1;
	/* we are optimistic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->init_seq_number - 1;
	asoc->asconf_seq_in = asoc->init_seq_number - 1;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = inp->sctp_frag_point;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.}
	 */
	return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}
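/*
 * Editorial note, not part of the original file: an illustrative example of
 * the expansion above. With a current mapping_array_size of 16 bytes and
 * needed = 20 more TSNs, the new size becomes
 *   16 + (20 + 7) / 8 + SCTP_MAPPING_ARRAY_INCR = 19 + SCTP_MAPPING_ARRAY_INCR
 * bytes; both the renegable and non-renegable arrays are reallocated at the
 * new size, the old contents are copied, and the remainder is zero-filled.
 */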
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	struct epoch_tracker et;
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

	NET_EPOCH_ENTER(et);
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		NET_EPOCH_EXIT(et);
		return;
	}
select_a_new_ep:
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer, it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state &&
		    ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here, huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		    ("%s: stcb %p does not belong to inp %p, but inp %p",
		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer, it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}

static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL, /* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Freeing if we are stopping or put back on the
			 * addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				sctp_asconf_iterator_end(asc, 0);
			} else {
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */

void
sctp_timeout_handler(void *t)
{
	struct epoch_tracker et;
	struct timeval tv;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
	int type;
	int i, secret;
	bool did_output, released_asoc_reference;

	/*
	 * If inp, stcb or net are not NULL, then references to these were
	 * added when the timer was started, and must be released before
	 * this function returns.
	 */
	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	NET_EPOCH_ENTER(et);
	released_asoc_reference = false;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	KASSERT(tmr->self == NULL || tmr->self == tmr,
	    ("sctp_timeout_handler: tmr->self corrupted"));
	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
	    ("sctp_timeout_handler: invalid timer type %d", tmr->type));
	type = tmr->type;
	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
	    type, stcb, stcb->sctp_ep));
	tmr->stopped_from = 0xa001;
	if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to CLOSED association.\n",
		    type);
		goto out_decr;
	}
	tmr->stopped_from = 0xa002;
	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to not being active.\n",
		    type);
		goto out_decr;
	}

	tmr->stopped_from = 0xa003;
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/*
		 * Release reference so that association can be freed if
		 * necessary below. This is safe now that we have acquired
		 * the lock.
		 */
		atomic_add_int(&stcb->asoc.refcnt, -1);
		released_asoc_reference = true;
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d handler exiting due to CLOSED association.\n",
			    type);
			goto out;
		}
	} else if (inp != NULL) {
		SCTP_INP_WLOCK(inp);
	} else {
		SCTP_WQ_ADDR_LOCK();
	}

	/* Record in stopped_from which timeout occurred. */
	tmr->stopped_from = type;
	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_SEND:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		did_output = true;
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * Safeguard. If there are chunks on the sent queue
			 * but no timers running, something is wrong... so
			 * we start a timer on the first chunk on the send
			 * queue on whatever net it is sent to.
			 */
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if (chk->whoTo != NULL) {
					break;
				}
			}
			if (chk != NULL) {
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
			}
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */
			goto out_decr;
		}
		did_output = false;
		break;
	case SCTP_TIMER_TYPE_RECV:
		KASSERT(inp != NULL && stcb != NULL && net == NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, NULL);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		did_output = true;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		did_output = true;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
			did_output = true;
		} else {
			did_output = false;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		did_output = true;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		KASSERT(inp != NULL && stcb == NULL && net == NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timosecret);
		(void)SCTP_GETTIME_TIMEVAL(&tv);
		inp->sctp_ep.time_of_secret_change = tv.tv_sec;
		inp->sctp_ep.last_secret_number =
		    inp->sctp_ep.current_secret_number;
		inp->sctp_ep.current_secret_number++;
		if (inp->sctp_ep.current_secret_number >=
		    SCTP_HOW_MANY_SECRETS) {
			inp->sctp_ep.current_secret_number = 0;
		}
		secret = (int)inp->sctp_ep.current_secret_number;
		for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
			inp->sctp_ep.secret_key[secret][i] =
			    sctp_select_initial_TSN(&inp->sctp_ep);
		}
		sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
		did_output = false;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = false;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		did_output = true;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timoasconf);
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		did_output = true;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		KASSERT(inp != NULL && stcb != NULL && net == NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb, it's gone */
		goto out_decr;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		KASSERT(inp != NULL && stcb != NULL && net == NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = true;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		KASSERT(inp != NULL && stcb != NULL && net == NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timostrmrst);
		if (sctp_strreset_timer(inp,
stcb)) { 2019 /* no need to unlock on tcb its gone */ 2020 goto out_decr; 2021 } 2022 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2023 did_output = true; 2024 break; 2025 case SCTP_TIMER_TYPE_INPKILL: 2026 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2027 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2028 type, inp, stcb, net)); 2029 SCTP_STAT_INCR(sctps_timoinpkill); 2030 /* 2031 * Special case: take away our increment since WE are the 2032 * killer. 2033 */ 2034 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2035 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2036 SCTP_INP_DECR_REF(inp); 2037 SCTP_INP_WUNLOCK(inp); 2038 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2039 SCTP_CALLED_FROM_INPKILL_TIMER); 2040 inp = NULL; 2041 goto out_decr; 2042 case SCTP_TIMER_TYPE_ASOCKILL: 2043 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2044 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2045 type, inp, stcb, net)); 2046 SCTP_STAT_INCR(sctps_timoassockill); 2047 /* Can we free it yet? */ 2048 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2049 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2050 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2051 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2052 /* 2053 * free asoc, always unlocks (or destroys) so prevent 2054 * duplicate unlock or unlock of a freed mtx :-0 2055 */ 2056 stcb = NULL; 2057 goto out_decr; 2058 case SCTP_TIMER_TYPE_ADDR_WQ: 2059 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2060 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2061 type, inp, stcb, net)); 2062 sctp_handle_addr_wq(); 2063 did_output = true; 2064 break; 2065 case SCTP_TIMER_TYPE_PRIM_DELETED: 2066 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2067 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2068 type, inp, stcb, net)); 2069 SCTP_STAT_INCR(sctps_timodelprim); 2070 sctp_delete_prim_timer(inp, stcb); 2071 did_output = false; 2072 break; 2073 default: 2074 #ifdef INVARIANTS 2075 panic("Unknown timer type %d", type); 2076 #else 2077 goto out; 2078 #endif 2079 } 2080 #ifdef SCTP_AUDITING_ENABLED 2081 sctp_audit_log(0xF1, (uint8_t)type); 2082 if (inp != NULL) 2083 sctp_auditing(5, inp, stcb, net); 2084 #endif 2085 if (did_output && (stcb != NULL)) { 2086 /* 2087 * Now we need to clean up the control chunk chain if an 2088 * ECNE is on it. It must be marked as UNSENT again so the next 2089 * call will continue to send it until we get 2090 * a CWR that removes it. It is, however, unlikely that we 2091 * will find an ECN echo on the chain. 2092 */ 2093 sctp_fix_ecn_echo(&stcb->asoc); 2094 } 2095 out: 2096 if (stcb != NULL) { 2097 SCTP_TCB_UNLOCK(stcb); 2098 } else if (inp != NULL) { 2099 SCTP_INP_WUNLOCK(inp); 2100 } else { 2101 SCTP_WQ_ADDR_UNLOCK(); 2102 } 2103 2104 out_decr: 2105 /* These reference counts were incremented in sctp_timer_start(). */ 2106 if (inp != NULL) { 2107 SCTP_INP_DECR_REF(inp); 2108 } 2109 if ((stcb != NULL) && !released_asoc_reference) { 2110 atomic_add_int(&stcb->asoc.refcnt, -1); 2111 } 2112 if (net != NULL) { 2113 sctp_free_remote_addr(net); 2114 } 2115 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2116 CURVNET_RESTORE(); 2117 NET_EPOCH_EXIT(et); 2118 } 2119 2120 /*- 2121 * The following table shows which parameters must be provided 2122 * when calling sctp_timer_start(). For parameters not being 2123 provided, NULL must be used.
2124 * 2125 * |Name |inp |stcb|net | 2126 * |-----------------------------|----|----|----| 2127 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2128 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2129 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2130 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2131 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2132 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2133 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2134 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2135 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2136 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2137 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2138 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2139 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2140 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2141 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2142 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2143 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2144 * 2145 */ 2146 2147 void 2148 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2149 struct sctp_nets *net) 2150 { 2151 struct sctp_timer *tmr; 2152 uint32_t to_ticks; 2153 uint32_t rndval, jitter; 2154 2155 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2156 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2157 t_type, stcb, stcb->sctp_ep)); 2158 tmr = NULL; 2159 if (stcb != NULL) { 2160 SCTP_TCB_LOCK_ASSERT(stcb); 2161 } else if (inp != NULL) { 2162 SCTP_INP_WLOCK_ASSERT(inp); 2163 } else { 2164 SCTP_WQ_ADDR_LOCK_ASSERT(); 2165 } 2166 if (stcb != NULL) { 2167 /* 2168 * Don't restart timer on association that's about to be 2169 * killed. 2170 */ 2171 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2172 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2173 SCTPDBG(SCTP_DEBUG_TIMER2, 2174 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2175 t_type, inp, stcb, net); 2176 return; 2177 } 2178 /* Don't restart timer on net that's been removed. */ 2179 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2180 SCTPDBG(SCTP_DEBUG_TIMER2, 2181 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2182 t_type, inp, stcb, net); 2183 return; 2184 } 2185 } 2186 switch (t_type) { 2187 case SCTP_TIMER_TYPE_SEND: 2188 /* Here we use the RTO timer. */ 2189 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2190 #ifdef INVARIANTS 2191 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2192 t_type, inp, stcb, net); 2193 #else 2194 return; 2195 #endif 2196 } 2197 tmr = &net->rxt_timer; 2198 if (net->RTO == 0) { 2199 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2200 } else { 2201 to_ticks = sctp_msecs_to_ticks(net->RTO); 2202 } 2203 break; 2204 case SCTP_TIMER_TYPE_INIT: 2205 /* 2206 * Here we use the INIT timer default usually about 1 2207 * second. 2208 */ 2209 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2210 #ifdef INVARIANTS 2211 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2212 t_type, inp, stcb, net); 2213 #else 2214 return; 2215 #endif 2216 } 2217 tmr = &net->rxt_timer; 2218 if (net->RTO == 0) { 2219 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2220 } else { 2221 to_ticks = sctp_msecs_to_ticks(net->RTO); 2222 } 2223 break; 2224 case SCTP_TIMER_TYPE_RECV: 2225 /* 2226 * Here we use the Delayed-Ack timer value from the inp, 2227 * ususually about 200ms. 
2228 */ 2229 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2230 #ifdef INVARIANTS 2231 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2232 t_type, inp, stcb, net); 2233 #else 2234 return; 2235 #endif 2236 } 2237 tmr = &stcb->asoc.dack_timer; 2238 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2239 break; 2240 case SCTP_TIMER_TYPE_SHUTDOWN: 2241 /* Here we use the RTO of the destination. */ 2242 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2243 #ifdef INVARIANTS 2244 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2245 t_type, inp, stcb, net); 2246 #else 2247 return; 2248 #endif 2249 } 2250 tmr = &net->rxt_timer; 2251 if (net->RTO == 0) { 2252 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2253 } else { 2254 to_ticks = sctp_msecs_to_ticks(net->RTO); 2255 } 2256 break; 2257 case SCTP_TIMER_TYPE_HEARTBEAT: 2258 /* 2259 * The net is used here so that we can add in the RTO. Even 2260 * though we use a different timer. We also add the HB timer 2261 * PLUS a random jitter. 2262 */ 2263 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2264 #ifdef INVARIANTS 2265 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2266 t_type, inp, stcb, net); 2267 #else 2268 return; 2269 #endif 2270 } 2271 if ((net->dest_state & SCTP_ADDR_NOHB) && 2272 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 2273 SCTPDBG(SCTP_DEBUG_TIMER2, 2274 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2275 t_type, inp, stcb, net); 2276 return; 2277 } 2278 tmr = &net->hb_timer; 2279 if (net->RTO == 0) { 2280 to_ticks = stcb->asoc.initial_rto; 2281 } else { 2282 to_ticks = net->RTO; 2283 } 2284 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2285 jitter = rndval % to_ticks; 2286 if (to_ticks > 1) { 2287 to_ticks >>= 1; 2288 } 2289 if (jitter < (UINT32_MAX - to_ticks)) { 2290 to_ticks += jitter; 2291 } else { 2292 to_ticks = UINT32_MAX; 2293 } 2294 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2295 !(net->dest_state & SCTP_ADDR_PF)) { 2296 if (net->heart_beat_delay < (UINT32_MAX - to_ticks)) { 2297 to_ticks += net->heart_beat_delay; 2298 } else { 2299 to_ticks = UINT32_MAX; 2300 } 2301 } 2302 /* 2303 * Now we must convert the to_ticks that are now in ms to 2304 * ticks. 2305 */ 2306 to_ticks = sctp_msecs_to_ticks(to_ticks); 2307 break; 2308 case SCTP_TIMER_TYPE_COOKIE: 2309 /* 2310 * Here we can use the RTO timer from the network since one 2311 * RTT was complete. If a retransmission happened then we 2312 * will be using the RTO initial value. 2313 */ 2314 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2315 #ifdef INVARIANTS 2316 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2317 t_type, inp, stcb, net); 2318 #else 2319 return; 2320 #endif 2321 } 2322 tmr = &net->rxt_timer; 2323 if (net->RTO == 0) { 2324 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2325 } else { 2326 to_ticks = sctp_msecs_to_ticks(net->RTO); 2327 } 2328 break; 2329 case SCTP_TIMER_TYPE_NEWCOOKIE: 2330 /* 2331 * Nothing needed but the endpoint here ususually about 60 2332 * minutes. 
2333 */ 2334 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2335 #ifdef INVARIANTS 2336 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2337 t_type, inp, stcb, net); 2338 #else 2339 return; 2340 #endif 2341 } 2342 tmr = &inp->sctp_ep.signature_change; 2343 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2344 break; 2345 case SCTP_TIMER_TYPE_PATHMTURAISE: 2346 /* 2347 * Here we use the value found in the EP for PMTUD, 2348 * ususually about 10 minutes. 2349 */ 2350 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2351 #ifdef INVARIANTS 2352 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2353 t_type, inp, stcb, net); 2354 #else 2355 return; 2356 #endif 2357 } 2358 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2359 SCTPDBG(SCTP_DEBUG_TIMER2, 2360 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2361 t_type, inp, stcb, net); 2362 return; 2363 } 2364 tmr = &net->pmtu_timer; 2365 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2366 break; 2367 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2368 /* Here we use the RTO of the destination. */ 2369 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2370 #ifdef INVARIANTS 2371 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2372 t_type, inp, stcb, net); 2373 #else 2374 return; 2375 #endif 2376 } 2377 tmr = &net->rxt_timer; 2378 if (net->RTO == 0) { 2379 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2380 } else { 2381 to_ticks = sctp_msecs_to_ticks(net->RTO); 2382 } 2383 break; 2384 case SCTP_TIMER_TYPE_ASCONF: 2385 /* 2386 * Here the timer comes from the stcb but its value is from 2387 * the net's RTO. 2388 */ 2389 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2390 #ifdef INVARIANTS 2391 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2392 t_type, inp, stcb, net); 2393 #else 2394 return; 2395 #endif 2396 } 2397 tmr = &stcb->asoc.asconf_timer; 2398 if (net->RTO == 0) { 2399 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2400 } else { 2401 to_ticks = sctp_msecs_to_ticks(net->RTO); 2402 } 2403 break; 2404 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2405 /* 2406 * Here we use the endpoints shutdown guard timer usually 2407 * about 3 minutes. 2408 */ 2409 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2410 #ifdef INVARIANTS 2411 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2412 t_type, inp, stcb, net); 2413 #else 2414 return; 2415 #endif 2416 } 2417 tmr = &stcb->asoc.shut_guard_timer; 2418 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2419 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2420 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2421 } else { 2422 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2423 } 2424 } else { 2425 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2426 } 2427 break; 2428 case SCTP_TIMER_TYPE_AUTOCLOSE: 2429 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2430 #ifdef INVARIANTS 2431 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2432 t_type, inp, stcb, net); 2433 #else 2434 return; 2435 #endif 2436 } 2437 tmr = &stcb->asoc.autoclose_timer; 2438 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2439 break; 2440 case SCTP_TIMER_TYPE_STRRESET: 2441 /* 2442 * Here the timer comes from the stcb but its value is from 2443 * the net's RTO. 
2444 */ 2445 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2446 #ifdef INVARIANTS 2447 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2448 t_type, inp, stcb, net); 2449 #else 2450 return; 2451 #endif 2452 } 2453 tmr = &stcb->asoc.strreset_timer; 2454 if (net->RTO == 0) { 2455 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2456 } else { 2457 to_ticks = sctp_msecs_to_ticks(net->RTO); 2458 } 2459 break; 2460 case SCTP_TIMER_TYPE_INPKILL: 2461 /* 2462 * The inp is setup to die. We re-use the signature_chage 2463 * timer since that has stopped and we are in the GONE 2464 * state. 2465 */ 2466 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2467 #ifdef INVARIANTS 2468 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2469 t_type, inp, stcb, net); 2470 #else 2471 return; 2472 #endif 2473 } 2474 tmr = &inp->sctp_ep.signature_change; 2475 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2476 break; 2477 case SCTP_TIMER_TYPE_ASOCKILL: 2478 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2479 #ifdef INVARIANTS 2480 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2481 t_type, inp, stcb, net); 2482 #else 2483 return; 2484 #endif 2485 } 2486 tmr = &stcb->asoc.strreset_timer; 2487 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2488 break; 2489 case SCTP_TIMER_TYPE_ADDR_WQ: 2490 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2491 #ifdef INVARIANTS 2492 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2493 t_type, inp, stcb, net); 2494 #else 2495 return; 2496 #endif 2497 } 2498 /* Only 1 tick away :-) */ 2499 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2500 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2501 break; 2502 case SCTP_TIMER_TYPE_PRIM_DELETED: 2503 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2504 #ifdef INVARIANTS 2505 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2506 t_type, inp, stcb, net); 2507 #else 2508 return; 2509 #endif 2510 } 2511 tmr = &stcb->asoc.delete_prim_timer; 2512 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2513 break; 2514 default: 2515 #ifdef INVARIANTS 2516 panic("Unknown timer type %d", t_type); 2517 #else 2518 return; 2519 #endif 2520 } 2521 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2522 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2523 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2524 /* 2525 * We do NOT allow you to have it already running. If it is, 2526 * we leave the current one up unchanged. 2527 */ 2528 SCTPDBG(SCTP_DEBUG_TIMER2, 2529 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2530 t_type, inp, stcb, net); 2531 return; 2532 } 2533 /* At this point we can proceed. */ 2534 if (t_type == SCTP_TIMER_TYPE_SEND) { 2535 stcb->asoc.num_send_timers_up++; 2536 } 2537 tmr->stopped_from = 0; 2538 tmr->type = t_type; 2539 tmr->ep = (void *)inp; 2540 tmr->tcb = (void *)stcb; 2541 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2542 tmr->net = NULL; 2543 } else { 2544 tmr->net = (void *)net; 2545 } 2546 tmr->self = (void *)tmr; 2547 tmr->vnet = (void *)curvnet; 2548 tmr->ticks = sctp_get_tick_count(); 2549 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2550 SCTPDBG(SCTP_DEBUG_TIMER2, 2551 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2552 t_type, to_ticks, inp, stcb, net); 2553 /* 2554 * If this is a newly scheduled callout, as opposed to a 2555 * rescheduled one, increment relevant reference counts. 
2556 */ 2557 if (tmr->ep != NULL) { 2558 SCTP_INP_INCR_REF(inp); 2559 } 2560 if (tmr->tcb != NULL) { 2561 atomic_add_int(&stcb->asoc.refcnt, 1); 2562 } 2563 if (tmr->net != NULL) { 2564 atomic_add_int(&net->ref_count, 1); 2565 } 2566 } else { 2567 /* 2568 * This should not happen, since we checked for pending 2569 * above. 2570 */ 2571 SCTPDBG(SCTP_DEBUG_TIMER2, 2572 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2573 t_type, to_ticks, inp, stcb, net); 2574 } 2575 return; 2576 } 2577 2578 /*- 2579 * The following table shows which parameters must be provided 2580 * when calling sctp_timer_stop(). For parameters not being 2581 * provided, NULL must be used. 2582 * 2583 * |Name |inp |stcb|net | 2584 * |-----------------------------|----|----|----| 2585 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2586 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2587 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2588 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2589 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2590 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2591 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2592 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2593 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2594 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2595 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2596 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2597 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2598 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2599 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2600 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2601 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2602 * 2603 */ 2604 2605 void 2606 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2607 struct sctp_nets *net, uint32_t from) 2608 { 2609 struct sctp_timer *tmr; 2610 2611 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2612 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2613 t_type, stcb, stcb->sctp_ep)); 2614 if (stcb != NULL) { 2615 SCTP_TCB_LOCK_ASSERT(stcb); 2616 } else if (inp != NULL) { 2617 SCTP_INP_WLOCK_ASSERT(inp); 2618 } else { 2619 SCTP_WQ_ADDR_LOCK_ASSERT(); 2620 } 2621 tmr = NULL; 2622 switch (t_type) { 2623 case SCTP_TIMER_TYPE_SEND: 2624 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2625 #ifdef INVARIANTS 2626 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2627 t_type, inp, stcb, net); 2628 #else 2629 return; 2630 #endif 2631 } 2632 tmr = &net->rxt_timer; 2633 break; 2634 case SCTP_TIMER_TYPE_INIT: 2635 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2636 #ifdef INVARIANTS 2637 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2638 t_type, inp, stcb, net); 2639 #else 2640 return; 2641 #endif 2642 } 2643 tmr = &net->rxt_timer; 2644 break; 2645 case SCTP_TIMER_TYPE_RECV: 2646 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2647 #ifdef INVARIANTS 2648 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2649 t_type, inp, stcb, net); 2650 #else 2651 return; 2652 #endif 2653 } 2654 tmr = &stcb->asoc.dack_timer; 2655 break; 2656 case SCTP_TIMER_TYPE_SHUTDOWN: 2657 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2658 #ifdef INVARIANTS 2659 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2660 t_type, inp, stcb, net); 2661 #else 2662 return; 2663 #endif 2664 } 2665 tmr = &net->rxt_timer; 2666 break; 2667 case SCTP_TIMER_TYPE_HEARTBEAT: 2668 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2669 #ifdef INVARIANTS 2670 panic("sctp_timer_stop of type %d: inp = 
%p, stcb = %p, net = %p", 2671 t_type, inp, stcb, net); 2672 #else 2673 return; 2674 #endif 2675 } 2676 tmr = &net->hb_timer; 2677 break; 2678 case SCTP_TIMER_TYPE_COOKIE: 2679 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2680 #ifdef INVARIANTS 2681 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2682 t_type, inp, stcb, net); 2683 #else 2684 return; 2685 #endif 2686 } 2687 tmr = &net->rxt_timer; 2688 break; 2689 case SCTP_TIMER_TYPE_NEWCOOKIE: 2690 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2691 #ifdef INVARIANTS 2692 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2693 t_type, inp, stcb, net); 2694 #else 2695 return; 2696 #endif 2697 } 2698 tmr = &inp->sctp_ep.signature_change; 2699 break; 2700 case SCTP_TIMER_TYPE_PATHMTURAISE: 2701 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2702 #ifdef INVARIANTS 2703 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2704 t_type, inp, stcb, net); 2705 #else 2706 return; 2707 #endif 2708 } 2709 tmr = &net->pmtu_timer; 2710 break; 2711 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2712 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2713 #ifdef INVARIANTS 2714 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2715 t_type, inp, stcb, net); 2716 #else 2717 return; 2718 #endif 2719 } 2720 tmr = &net->rxt_timer; 2721 break; 2722 case SCTP_TIMER_TYPE_ASCONF: 2723 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2724 #ifdef INVARIANTS 2725 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2726 t_type, inp, stcb, net); 2727 #else 2728 return; 2729 #endif 2730 } 2731 tmr = &stcb->asoc.asconf_timer; 2732 break; 2733 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2734 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2735 #ifdef INVARIANTS 2736 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2737 t_type, inp, stcb, net); 2738 #else 2739 return; 2740 #endif 2741 } 2742 tmr = &stcb->asoc.shut_guard_timer; 2743 break; 2744 case SCTP_TIMER_TYPE_AUTOCLOSE: 2745 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2746 #ifdef INVARIANTS 2747 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2748 t_type, inp, stcb, net); 2749 #else 2750 return; 2751 #endif 2752 } 2753 tmr = &stcb->asoc.autoclose_timer; 2754 break; 2755 case SCTP_TIMER_TYPE_STRRESET: 2756 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2757 #ifdef INVARIANTS 2758 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2759 t_type, inp, stcb, net); 2760 #else 2761 return; 2762 #endif 2763 } 2764 tmr = &stcb->asoc.strreset_timer; 2765 break; 2766 case SCTP_TIMER_TYPE_INPKILL: 2767 /* 2768 * The inp is setup to die. We re-use the signature_chage 2769 * timer since that has stopped and we are in the GONE 2770 * state. 
2771 */ 2772 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2773 #ifdef INVARIANTS 2774 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2775 t_type, inp, stcb, net); 2776 #else 2777 return; 2778 #endif 2779 } 2780 tmr = &inp->sctp_ep.signature_change; 2781 break; 2782 case SCTP_TIMER_TYPE_ASOCKILL: 2783 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2784 #ifdef INVARIANTS 2785 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2786 t_type, inp, stcb, net); 2787 #else 2788 return; 2789 #endif 2790 } 2791 tmr = &stcb->asoc.strreset_timer; 2792 break; 2793 case SCTP_TIMER_TYPE_ADDR_WQ: 2794 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2795 #ifdef INVARIANTS 2796 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2797 t_type, inp, stcb, net); 2798 #else 2799 return; 2800 #endif 2801 } 2802 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2803 break; 2804 case SCTP_TIMER_TYPE_PRIM_DELETED: 2805 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2806 #ifdef INVARIANTS 2807 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2808 t_type, inp, stcb, net); 2809 #else 2810 return; 2811 #endif 2812 } 2813 tmr = &stcb->asoc.delete_prim_timer; 2814 break; 2815 default: 2816 #ifdef INVARIANTS 2817 panic("Unknown timer type %d", t_type); 2818 #else 2819 return; 2820 #endif 2821 } 2822 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2823 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2824 (tmr->type != t_type)) { 2825 /* 2826 * Ok we have a timer that is under joint use. Cookie timer 2827 * per chance with the SEND timer. We therefore are NOT 2828 * running the timer that the caller wants stopped. So just 2829 * return. 2830 */ 2831 SCTPDBG(SCTP_DEBUG_TIMER2, 2832 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2833 t_type, inp, stcb, net); 2834 return; 2835 } 2836 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2837 stcb->asoc.num_send_timers_up--; 2838 if (stcb->asoc.num_send_timers_up < 0) { 2839 stcb->asoc.num_send_timers_up = 0; 2840 } 2841 } 2842 tmr->self = NULL; 2843 tmr->stopped_from = from; 2844 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2845 KASSERT(tmr->ep == inp, 2846 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2847 t_type, inp, tmr->ep)); 2848 KASSERT(tmr->tcb == stcb, 2849 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2850 t_type, stcb, tmr->tcb)); 2851 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2852 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2853 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2854 t_type, net, tmr->net)); 2855 SCTPDBG(SCTP_DEBUG_TIMER2, 2856 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2857 t_type, inp, stcb, net); 2858 /* 2859 * If the timer was actually stopped, decrement reference 2860 * counts that were incremented in sctp_timer_start(). 2861 */ 2862 if (tmr->ep != NULL) { 2863 SCTP_INP_DECR_REF(inp); 2864 tmr->ep = NULL; 2865 } 2866 if (tmr->tcb != NULL) { 2867 atomic_add_int(&stcb->asoc.refcnt, -1); 2868 tmr->tcb = NULL; 2869 } 2870 if (tmr->net != NULL) { 2871 /* 2872 * Can't use net, since it doesn't work for 2873 * SCTP_TIMER_TYPE_ASCONF. 
2874 */ 2875 sctp_free_remote_addr((struct sctp_nets *)tmr->net); 2876 tmr->net = NULL; 2877 } 2878 } else { 2879 SCTPDBG(SCTP_DEBUG_TIMER2, 2880 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2881 t_type, inp, stcb, net); 2882 } 2883 return; 2884 } 2885 2886 uint32_t 2887 sctp_calculate_len(struct mbuf *m) 2888 { 2889 uint32_t tlen = 0; 2890 struct mbuf *at; 2891 2892 at = m; 2893 while (at) { 2894 tlen += SCTP_BUF_LEN(at); 2895 at = SCTP_BUF_NEXT(at); 2896 } 2897 return (tlen); 2898 } 2899 2900 void 2901 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2902 struct sctp_association *asoc, uint32_t mtu) 2903 { 2904 /* 2905 * Reset the P-MTU size on this association, this involves changing 2906 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2907 * allow the DF flag to be cleared. 2908 */ 2909 struct sctp_tmit_chunk *chk; 2910 unsigned int eff_mtu, ovh; 2911 2912 asoc->smallest_mtu = mtu; 2913 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2914 ovh = SCTP_MIN_OVERHEAD; 2915 } else { 2916 ovh = SCTP_MIN_V4_OVERHEAD; 2917 } 2918 eff_mtu = mtu - ovh; 2919 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2920 if (chk->send_size > eff_mtu) { 2921 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2922 } 2923 } 2924 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2925 if (chk->send_size > eff_mtu) { 2926 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2927 } 2928 } 2929 } 2930 2931 /* 2932 * Given an association and starting time of the current RTT period, update 2933 * RTO in number of msecs. net should point to the current network. 2934 * Return 1, if an RTO update was performed, return 0 if no update was 2935 * performed due to invalid starting point. 2936 */ 2937 2938 int 2939 sctp_calculate_rto(struct sctp_tcb *stcb, 2940 struct sctp_association *asoc, 2941 struct sctp_nets *net, 2942 struct timeval *old, 2943 int rtt_from_sack) 2944 { 2945 struct timeval now; 2946 uint64_t rtt_us; /* RTT in us */ 2947 int32_t rtt; /* RTT in ms */ 2948 uint32_t new_rto; 2949 int first_measure = 0; 2950 2951 /************************/ 2952 /* 1. calculate new RTT */ 2953 /************************/ 2954 /* get the current time */ 2955 if (stcb->asoc.use_precise_time) { 2956 (void)SCTP_GETPTIME_TIMEVAL(&now); 2957 } else { 2958 (void)SCTP_GETTIME_TIMEVAL(&now); 2959 } 2960 if ((old->tv_sec > now.tv_sec) || 2961 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2962 /* The starting point is in the future. */ 2963 return (0); 2964 } 2965 timevalsub(&now, old); 2966 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2967 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2968 /* The RTT is larger than a sane value. */ 2969 return (0); 2970 } 2971 /* store the current RTT in us */ 2972 net->rtt = rtt_us; 2973 /* compute rtt in ms */ 2974 rtt = (int32_t)(net->rtt / 1000); 2975 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2976 /* 2977 * Tell the CC module that a new update has just occurred 2978 * from a sack 2979 */ 2980 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2981 } 2982 /* 2983 * Do we need to determine the lan? We do this only on sacks i.e. 2984 * RTT being determined from data not non-data (HB/INIT->INITACK). 2985 */ 2986 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2987 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2988 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2989 net->lan_type = SCTP_LAN_INTERNET; 2990 } else { 2991 net->lan_type = SCTP_LAN_LOCAL; 2992 } 2993 } 2994 2995 /***************************/ 2996 /* 2. 
update RTTVAR & SRTT */ 2997 /***************************/ 2998 /*- 2999 * Compute the scaled average lastsa and the 3000 * scaled variance lastsv as described in Van Jacobson's 3001 * paper "Congestion Avoidance and Control", Annex A. 3002 * 3003 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 3004 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 3005 */ 3006 if (net->RTO_measured) { 3007 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 3008 net->lastsa += rtt; 3009 if (rtt < 0) { 3010 rtt = -rtt; 3011 } 3012 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 3013 net->lastsv += rtt; 3014 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3015 rto_logging(net, SCTP_LOG_RTTVAR); 3016 } 3017 } else { 3018 /* First RTO measurement */ 3019 net->RTO_measured = 1; 3020 first_measure = 1; 3021 net->lastsa = rtt << SCTP_RTT_SHIFT; 3022 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3023 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3024 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3025 } 3026 } 3027 if (net->lastsv == 0) { 3028 net->lastsv = SCTP_CLOCK_GRANULARITY; 3029 } 3030 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3031 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3032 (stcb->asoc.sat_network_lockout == 0)) { 3033 stcb->asoc.sat_network = 1; 3034 } else if ((!first_measure) && stcb->asoc.sat_network) { 3035 stcb->asoc.sat_network = 0; 3036 stcb->asoc.sat_network_lockout = 1; 3037 } 3038 /* bound it, per C6/C7 in Section 5.3.1 */ 3039 if (new_rto < stcb->asoc.minrto) { 3040 new_rto = stcb->asoc.minrto; 3041 } 3042 if (new_rto > stcb->asoc.maxrto) { 3043 new_rto = stcb->asoc.maxrto; 3044 } 3045 net->RTO = new_rto; 3046 return (1); 3047 } 3048 3049 /* 3050 * Return a pointer to a contiguous piece of data from the given mbuf chain 3051 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3052 * one mbuf, a copy is made at 'ptr'. The caller must ensure that the buffer 3053 * size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain. 3054 */ 3055 caddr_t 3056 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3057 { 3058 uint32_t count; 3059 uint8_t *ptr; 3060 3061 ptr = in_ptr; 3062 if ((off < 0) || (len <= 0)) 3063 return (NULL); 3064 3065 /* find the desired start location */ 3066 while ((m != NULL) && (off > 0)) { 3067 if (off < SCTP_BUF_LEN(m)) 3068 break; 3069 off -= SCTP_BUF_LEN(m); 3070 m = SCTP_BUF_NEXT(m); 3071 } 3072 if (m == NULL) 3073 return (NULL); 3074 3075 /* is the current mbuf large enough (e.g. contiguous)? */ 3076 if ((SCTP_BUF_LEN(m) - off) >= len) { 3077 return (mtod(m, caddr_t)+off); 3078 } else { 3079 /* else, it spans more than one mbuf, so save a temp copy...
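into the caller-supplied buffer and return that instead.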
*/ 3080 while ((m != NULL) && (len > 0)) { 3081 count = min(SCTP_BUF_LEN(m) - off, len); 3082 memcpy(ptr, mtod(m, caddr_t)+off, count); 3083 len -= count; 3084 ptr += count; 3085 off = 0; 3086 m = SCTP_BUF_NEXT(m); 3087 } 3088 if ((m == NULL) && (len > 0)) 3089 return (NULL); 3090 else 3091 return ((caddr_t)in_ptr); 3092 } 3093 } 3094 3095 struct sctp_paramhdr * 3096 sctp_get_next_param(struct mbuf *m, 3097 int offset, 3098 struct sctp_paramhdr *pull, 3099 int pull_limit) 3100 { 3101 /* This just provides a typed signature to Peter's Pull routine */ 3102 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3103 (uint8_t *)pull)); 3104 } 3105 3106 struct mbuf * 3107 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3108 { 3109 struct mbuf *m_last; 3110 caddr_t dp; 3111 3112 if (padlen > 3) { 3113 return (NULL); 3114 } 3115 if (padlen <= M_TRAILINGSPACE(m)) { 3116 /* 3117 * The easy way. We hope the majority of the time we hit 3118 * here :) 3119 */ 3120 m_last = m; 3121 } else { 3122 /* Hard way we must grow the mbuf chain */ 3123 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3124 if (m_last == NULL) { 3125 return (NULL); 3126 } 3127 SCTP_BUF_LEN(m_last) = 0; 3128 SCTP_BUF_NEXT(m_last) = NULL; 3129 SCTP_BUF_NEXT(m) = m_last; 3130 } 3131 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3132 SCTP_BUF_LEN(m_last) += padlen; 3133 memset(dp, 0, padlen); 3134 return (m_last); 3135 } 3136 3137 struct mbuf * 3138 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3139 { 3140 /* find the last mbuf in chain and pad it */ 3141 struct mbuf *m_at; 3142 3143 if (last_mbuf != NULL) { 3144 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3145 } else { 3146 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3147 if (SCTP_BUF_NEXT(m_at) == NULL) { 3148 return (sctp_add_pad_tombuf(m_at, padval)); 3149 } 3150 } 3151 } 3152 return (NULL); 3153 } 3154 3155 static void 3156 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3157 uint16_t error, struct sctp_abort_chunk *abort, 3158 bool from_peer, bool timedout, int so_locked) 3159 { 3160 struct mbuf *m_notify; 3161 struct sctp_assoc_change *sac; 3162 struct sctp_queued_to_read *control; 3163 unsigned int notif_len; 3164 uint16_t abort_len; 3165 unsigned int i; 3166 3167 KASSERT(abort == NULL || from_peer, 3168 ("sctp_notify_assoc_change: ABORT chunk provided for local termination")); 3169 KASSERT(!from_peer || !timedout, 3170 ("sctp_notify_assoc_change: timeouts can only be local")); 3171 if (stcb == NULL) { 3172 return; 3173 } 3174 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3175 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3176 if (abort != NULL) { 3177 abort_len = ntohs(abort->ch.chunk_length); 3178 /* 3179 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3180 * contiguous. 3181 */ 3182 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3183 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3184 } 3185 } else { 3186 abort_len = 0; 3187 } 3188 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3189 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3190 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3191 notif_len += abort_len; 3192 } 3193 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3194 if (m_notify == NULL) { 3195 /* Retry with smaller value. 
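(just the base notification, without the optional sac_info data).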
*/ 3196 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3197 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3198 if (m_notify == NULL) { 3199 goto set_error; 3200 } 3201 } 3202 SCTP_BUF_NEXT(m_notify) = NULL; 3203 sac = mtod(m_notify, struct sctp_assoc_change *); 3204 memset(sac, 0, notif_len); 3205 sac->sac_type = SCTP_ASSOC_CHANGE; 3206 sac->sac_flags = 0; 3207 sac->sac_length = sizeof(struct sctp_assoc_change); 3208 sac->sac_state = state; 3209 sac->sac_error = error; 3210 if (state == SCTP_CANT_STR_ASSOC) { 3211 sac->sac_outbound_streams = 0; 3212 sac->sac_inbound_streams = 0; 3213 } else { 3214 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3215 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3216 } 3217 sac->sac_assoc_id = sctp_get_associd(stcb); 3218 if (notif_len > sizeof(struct sctp_assoc_change)) { 3219 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3220 i = 0; 3221 if (stcb->asoc.prsctp_supported == 1) { 3222 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3223 } 3224 if (stcb->asoc.auth_supported == 1) { 3225 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3226 } 3227 if (stcb->asoc.asconf_supported == 1) { 3228 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3229 } 3230 if (stcb->asoc.idata_supported == 1) { 3231 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3232 } 3233 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3234 if (stcb->asoc.reconfig_supported == 1) { 3235 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3236 } 3237 sac->sac_length += i; 3238 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3239 memcpy(sac->sac_info, abort, abort_len); 3240 sac->sac_length += abort_len; 3241 } 3242 } 3243 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3244 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3245 0, 0, stcb->asoc.context, 0, 0, 0, 3246 m_notify); 3247 if (control != NULL) { 3248 control->length = SCTP_BUF_LEN(m_notify); 3249 control->spec_flags = M_NOTIFICATION; 3250 /* not that we need this */ 3251 control->tail_mbuf = m_notify; 3252 sctp_add_to_readq(stcb->sctp_ep, stcb, 3253 control, 3254 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3255 so_locked); 3256 } else { 3257 sctp_m_freem(m_notify); 3258 } 3259 } 3260 /* 3261 * For 1-to-1 style sockets, we send up an error when an ABORT 3262 comes in.
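 * The error is ECONNREFUSED if the association was still in COOKIE_WAIT,
 * ECONNRESET otherwise; locally generated terminations report ETIMEDOUT
 * or ECONNABORTED.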
3263 */ 3264 set_error: 3265 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3266 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3267 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3268 SOCK_LOCK(stcb->sctp_socket); 3269 if (from_peer) { 3270 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3271 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3272 stcb->sctp_socket->so_error = ECONNREFUSED; 3273 } else { 3274 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3275 stcb->sctp_socket->so_error = ECONNRESET; 3276 } 3277 } else { 3278 if (timedout) { 3279 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3280 stcb->sctp_socket->so_error = ETIMEDOUT; 3281 } else { 3282 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3283 stcb->sctp_socket->so_error = ECONNABORTED; 3284 } 3285 } 3286 SOCK_UNLOCK(stcb->sctp_socket); 3287 } 3288 /* Wake ANY sleepers */ 3289 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3290 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3291 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3292 socantrcvmore(stcb->sctp_socket); 3293 } 3294 sorwakeup(stcb->sctp_socket); 3295 sowwakeup(stcb->sctp_socket); 3296 } 3297 3298 static void 3299 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3300 struct sockaddr *sa, uint32_t error, int so_locked) 3301 { 3302 struct mbuf *m_notify; 3303 struct sctp_paddr_change *spc; 3304 struct sctp_queued_to_read *control; 3305 3306 if ((stcb == NULL) || 3307 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3308 /* event not enabled */ 3309 return; 3310 } 3311 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3312 if (m_notify == NULL) 3313 return; 3314 SCTP_BUF_LEN(m_notify) = 0; 3315 spc = mtod(m_notify, struct sctp_paddr_change *); 3316 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3317 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3318 spc->spc_flags = 0; 3319 spc->spc_length = sizeof(struct sctp_paddr_change); 3320 switch (sa->sa_family) { 3321 #ifdef INET 3322 case AF_INET: 3323 #ifdef INET6 3324 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3325 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3326 (struct sockaddr_in6 *)&spc->spc_aaddr); 3327 } else { 3328 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3329 } 3330 #else 3331 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3332 #endif 3333 break; 3334 #endif 3335 #ifdef INET6 3336 case AF_INET6: 3337 { 3338 struct sockaddr_in6 *sin6; 3339 3340 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3341 3342 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3343 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3344 if (sin6->sin6_scope_id == 0) { 3345 /* recover scope_id for user */ 3346 (void)sa6_recoverscope(sin6); 3347 } else { 3348 /* clear embedded scope_id for user */ 3349 in6_clearscope(&sin6->sin6_addr); 3350 } 3351 } 3352 break; 3353 } 3354 #endif 3355 default: 3356 /* TSNH */ 3357 break; 3358 } 3359 spc->spc_state = state; 3360 spc->spc_error = error; 3361 spc->spc_assoc_id = sctp_get_associd(stcb); 3362 3363 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3364 SCTP_BUF_NEXT(m_notify) = NULL; 3365 3366 /* append to socket */ 3367 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3368 0, 0, stcb->asoc.context, 0, 0, 0, 3369 m_notify); 3370 if 
(control == NULL) { 3371 /* no memory */ 3372 sctp_m_freem(m_notify); 3373 return; 3374 } 3375 control->length = SCTP_BUF_LEN(m_notify); 3376 control->spec_flags = M_NOTIFICATION; 3377 /* not that we need this */ 3378 control->tail_mbuf = m_notify; 3379 sctp_add_to_readq(stcb->sctp_ep, stcb, 3380 control, 3381 &stcb->sctp_socket->so_rcv, 1, 3382 SCTP_READ_LOCK_NOT_HELD, 3383 so_locked); 3384 } 3385 3386 static void 3387 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3388 struct sctp_tmit_chunk *chk, int so_locked) 3389 { 3390 struct mbuf *m_notify; 3391 struct sctp_send_failed *ssf; 3392 struct sctp_send_failed_event *ssfe; 3393 struct sctp_queued_to_read *control; 3394 struct sctp_chunkhdr *chkhdr; 3395 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3396 3397 if ((stcb == NULL) || 3398 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3399 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3400 /* event not enabled */ 3401 return; 3402 } 3403 3404 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3405 notifhdr_len = sizeof(struct sctp_send_failed_event); 3406 } else { 3407 notifhdr_len = sizeof(struct sctp_send_failed); 3408 } 3409 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3410 if (m_notify == NULL) 3411 /* no space left */ 3412 return; 3413 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3414 if (stcb->asoc.idata_supported) { 3415 chkhdr_len = sizeof(struct sctp_idata_chunk); 3416 } else { 3417 chkhdr_len = sizeof(struct sctp_data_chunk); 3418 } 3419 /* Use some defaults in case we can't access the chunk header */ 3420 if (chk->send_size >= chkhdr_len) { 3421 payload_len = chk->send_size - chkhdr_len; 3422 } else { 3423 payload_len = 0; 3424 } 3425 padding_len = 0; 3426 if (chk->data != NULL) { 3427 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3428 if (chkhdr != NULL) { 3429 chk_len = ntohs(chkhdr->chunk_length); 3430 if ((chk_len >= chkhdr_len) && 3431 (chk->send_size >= chk_len) && 3432 (chk->send_size - chk_len < 4)) { 3433 padding_len = chk->send_size - chk_len; 3434 payload_len = chk->send_size - chkhdr_len - padding_len; 3435 } 3436 } 3437 } 3438 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3439 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3440 memset(ssfe, 0, notifhdr_len); 3441 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3442 if (sent) { 3443 ssfe->ssfe_flags = SCTP_DATA_SENT; 3444 } else { 3445 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3446 } 3447 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3448 ssfe->ssfe_error = error; 3449 /* not exactly what the user sent in, but should be close :) */ 3450 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3451 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3452 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3453 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3454 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3455 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3456 } else { 3457 ssf = mtod(m_notify, struct sctp_send_failed *); 3458 memset(ssf, 0, notifhdr_len); 3459 ssf->ssf_type = SCTP_SEND_FAILED; 3460 if (sent) { 3461 ssf->ssf_flags = SCTP_DATA_SENT; 3462 } else { 3463 ssf->ssf_flags = SCTP_DATA_UNSENT; 3464 } 3465 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3466 ssf->ssf_error = error; 3467 /* not exactly what the user sent in, but should be close :) */ 3468 
ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3469 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3470 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3471 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3472 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3473 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3474 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3475 } 3476 if (chk->data != NULL) { 3477 /* Trim off the sctp chunk header (it should be there) */ 3478 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3479 m_adj(chk->data, chkhdr_len); 3480 m_adj(chk->data, -padding_len); 3481 sctp_mbuf_crush(chk->data); 3482 chk->send_size -= (chkhdr_len + padding_len); 3483 } 3484 } 3485 SCTP_BUF_NEXT(m_notify) = chk->data; 3486 /* Steal off the mbuf */ 3487 chk->data = NULL; 3488 /* 3489 * For this case, we check the actual socket buffer, since the assoc 3490 * is going away we don't want to overfill the socket buffer for a 3491 * non-reader 3492 */ 3493 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3494 sctp_m_freem(m_notify); 3495 return; 3496 } 3497 /* append to socket */ 3498 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3499 0, 0, stcb->asoc.context, 0, 0, 0, 3500 m_notify); 3501 if (control == NULL) { 3502 /* no memory */ 3503 sctp_m_freem(m_notify); 3504 return; 3505 } 3506 control->length = SCTP_BUF_LEN(m_notify); 3507 control->spec_flags = M_NOTIFICATION; 3508 /* not that we need this */ 3509 control->tail_mbuf = m_notify; 3510 sctp_add_to_readq(stcb->sctp_ep, stcb, 3511 control, 3512 &stcb->sctp_socket->so_rcv, 1, 3513 SCTP_READ_LOCK_NOT_HELD, 3514 so_locked); 3515 } 3516 3517 static void 3518 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3519 struct sctp_stream_queue_pending *sp, int so_locked) 3520 { 3521 struct mbuf *m_notify; 3522 struct sctp_send_failed *ssf; 3523 struct sctp_send_failed_event *ssfe; 3524 struct sctp_queued_to_read *control; 3525 int notifhdr_len; 3526 3527 if ((stcb == NULL) || 3528 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3529 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3530 /* event not enabled */ 3531 return; 3532 } 3533 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3534 notifhdr_len = sizeof(struct sctp_send_failed_event); 3535 } else { 3536 notifhdr_len = sizeof(struct sctp_send_failed); 3537 } 3538 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3539 if (m_notify == NULL) { 3540 /* no space left */ 3541 return; 3542 } 3543 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3544 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3545 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3546 memset(ssfe, 0, notifhdr_len); 3547 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3548 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3549 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3550 ssfe->ssfe_error = error; 3551 /* not exactly what the user sent in, but should be close :) */ 3552 ssfe->ssfe_info.snd_sid = sp->sid; 3553 if (sp->some_taken) { 3554 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3555 } else { 3556 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3557 } 3558 ssfe->ssfe_info.snd_ppid = sp->ppid; 3559 ssfe->ssfe_info.snd_context = sp->context; 3560 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3561 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3562 } else { 
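/* Build the older struct sctp_send_failed notification, used when only SCTP_PCB_FLAGS_RECVSENDFAILEVNT is enabled. */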
3563 ssf = mtod(m_notify, struct sctp_send_failed *); 3564 memset(ssf, 0, notifhdr_len); 3565 ssf->ssf_type = SCTP_SEND_FAILED; 3566 ssf->ssf_flags = SCTP_DATA_UNSENT; 3567 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3568 ssf->ssf_error = error; 3569 /* not exactly what the user sent in, but should be close :) */ 3570 ssf->ssf_info.sinfo_stream = sp->sid; 3571 ssf->ssf_info.sinfo_ssn = 0; 3572 if (sp->some_taken) { 3573 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3574 } else { 3575 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3576 } 3577 ssf->ssf_info.sinfo_ppid = sp->ppid; 3578 ssf->ssf_info.sinfo_context = sp->context; 3579 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3580 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3581 } 3582 SCTP_BUF_NEXT(m_notify) = sp->data; 3583 3584 /* Steal off the mbuf */ 3585 sp->data = NULL; 3586 /* 3587 * For this case, we check the actual socket buffer, since the assoc 3588 * is going away we don't want to overfill the socket buffer for a 3589 * non-reader 3590 */ 3591 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3592 sctp_m_freem(m_notify); 3593 return; 3594 } 3595 /* append to socket */ 3596 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3597 0, 0, stcb->asoc.context, 0, 0, 0, 3598 m_notify); 3599 if (control == NULL) { 3600 /* no memory */ 3601 sctp_m_freem(m_notify); 3602 return; 3603 } 3604 control->length = SCTP_BUF_LEN(m_notify); 3605 control->spec_flags = M_NOTIFICATION; 3606 /* not that we need this */ 3607 control->tail_mbuf = m_notify; 3608 sctp_add_to_readq(stcb->sctp_ep, stcb, 3609 control, 3610 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3611 } 3612 3613 static void 3614 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3615 { 3616 struct mbuf *m_notify; 3617 struct sctp_adaptation_event *sai; 3618 struct sctp_queued_to_read *control; 3619 3620 if ((stcb == NULL) || 3621 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3622 /* event not enabled */ 3623 return; 3624 } 3625 3626 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3627 if (m_notify == NULL) 3628 /* no space left */ 3629 return; 3630 SCTP_BUF_LEN(m_notify) = 0; 3631 sai = mtod(m_notify, struct sctp_adaptation_event *); 3632 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3633 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3634 sai->sai_flags = 0; 3635 sai->sai_length = sizeof(struct sctp_adaptation_event); 3636 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3637 sai->sai_assoc_id = sctp_get_associd(stcb); 3638 3639 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3640 SCTP_BUF_NEXT(m_notify) = NULL; 3641 3642 /* append to socket */ 3643 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3644 0, 0, stcb->asoc.context, 0, 0, 0, 3645 m_notify); 3646 if (control == NULL) { 3647 /* no memory */ 3648 sctp_m_freem(m_notify); 3649 return; 3650 } 3651 control->length = SCTP_BUF_LEN(m_notify); 3652 control->spec_flags = M_NOTIFICATION; 3653 /* not that we need this */ 3654 control->tail_mbuf = m_notify; 3655 sctp_add_to_readq(stcb->sctp_ep, stcb, 3656 control, 3657 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3658 } 3659 3660 /* This always must be called with the read-queue LOCKED in the INP */ 3661 static void 3662 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3663 uint32_t val, int 
so_locked) 3664 { 3665 struct mbuf *m_notify; 3666 struct sctp_pdapi_event *pdapi; 3667 struct sctp_queued_to_read *control; 3668 struct sockbuf *sb; 3669 3670 if ((stcb == NULL) || 3671 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3672 /* event not enabled */ 3673 return; 3674 } 3675 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3676 return; 3677 } 3678 3679 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3680 if (m_notify == NULL) 3681 /* no space left */ 3682 return; 3683 SCTP_BUF_LEN(m_notify) = 0; 3684 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3685 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3686 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3687 pdapi->pdapi_flags = 0; 3688 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3689 pdapi->pdapi_indication = error; 3690 pdapi->pdapi_stream = (val >> 16); 3691 pdapi->pdapi_seq = (val & 0x0000ffff); 3692 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3693 3694 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3695 SCTP_BUF_NEXT(m_notify) = NULL; 3696 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3697 0, 0, stcb->asoc.context, 0, 0, 0, 3698 m_notify); 3699 if (control == NULL) { 3700 /* no memory */ 3701 sctp_m_freem(m_notify); 3702 return; 3703 } 3704 control->length = SCTP_BUF_LEN(m_notify); 3705 control->spec_flags = M_NOTIFICATION; 3706 /* not that we need this */ 3707 control->tail_mbuf = m_notify; 3708 sb = &stcb->sctp_socket->so_rcv; 3709 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3710 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3711 } 3712 sctp_sballoc(stcb, sb, m_notify); 3713 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3714 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3715 } 3716 control->end_added = 1; 3717 if (stcb->asoc.control_pdapi) 3718 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3719 else { 3720 /* we really should not see this case */ 3721 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3722 } 3723 if (stcb->sctp_ep && stcb->sctp_socket) { 3724 /* This should always be the case */ 3725 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3726 } 3727 } 3728 3729 static void 3730 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3731 { 3732 struct mbuf *m_notify; 3733 struct sctp_shutdown_event *sse; 3734 struct sctp_queued_to_read *control; 3735 3736 /* 3737 * For TCP model AND UDP connected sockets we will send an error up 3738 * when an SHUTDOWN completes 3739 */ 3740 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3741 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3742 /* mark socket closed for read/write and wakeup! 
*/ 3743 socantsendmore(stcb->sctp_socket); 3744 } 3745 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3746 /* event not enabled */ 3747 return; 3748 } 3749 3750 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3751 if (m_notify == NULL) 3752 /* no space left */ 3753 return; 3754 sse = mtod(m_notify, struct sctp_shutdown_event *); 3755 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3756 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3757 sse->sse_flags = 0; 3758 sse->sse_length = sizeof(struct sctp_shutdown_event); 3759 sse->sse_assoc_id = sctp_get_associd(stcb); 3760 3761 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3762 SCTP_BUF_NEXT(m_notify) = NULL; 3763 3764 /* append to socket */ 3765 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3766 0, 0, stcb->asoc.context, 0, 0, 0, 3767 m_notify); 3768 if (control == NULL) { 3769 /* no memory */ 3770 sctp_m_freem(m_notify); 3771 return; 3772 } 3773 control->length = SCTP_BUF_LEN(m_notify); 3774 control->spec_flags = M_NOTIFICATION; 3775 /* not that we need this */ 3776 control->tail_mbuf = m_notify; 3777 sctp_add_to_readq(stcb->sctp_ep, stcb, 3778 control, 3779 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3780 } 3781 3782 static void 3783 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3784 int so_locked) 3785 { 3786 struct mbuf *m_notify; 3787 struct sctp_sender_dry_event *event; 3788 struct sctp_queued_to_read *control; 3789 3790 if ((stcb == NULL) || 3791 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3792 /* event not enabled */ 3793 return; 3794 } 3795 3796 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3797 if (m_notify == NULL) { 3798 /* no space left */ 3799 return; 3800 } 3801 SCTP_BUF_LEN(m_notify) = 0; 3802 event = mtod(m_notify, struct sctp_sender_dry_event *); 3803 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3804 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3805 event->sender_dry_flags = 0; 3806 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3807 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3808 3809 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3810 SCTP_BUF_NEXT(m_notify) = NULL; 3811 3812 /* append to socket */ 3813 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3814 0, 0, stcb->asoc.context, 0, 0, 0, 3815 m_notify); 3816 if (control == NULL) { 3817 /* no memory */ 3818 sctp_m_freem(m_notify); 3819 return; 3820 } 3821 control->length = SCTP_BUF_LEN(m_notify); 3822 control->spec_flags = M_NOTIFICATION; 3823 /* not that we need this */ 3824 control->tail_mbuf = m_notify; 3825 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3826 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3827 } 3828 3829 void 3830 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3831 { 3832 struct mbuf *m_notify; 3833 struct sctp_queued_to_read *control; 3834 struct sctp_stream_change_event *stradd; 3835 3836 if ((stcb == NULL) || 3837 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3838 /* event not enabled */ 3839 return; 3840 } 3841 if ((stcb->asoc.peer_req_out) && flag) { 3842 /* Peer made the request, don't tell the local user */ 3843 stcb->asoc.peer_req_out = 0; 3844 return; 3845 } 3846 stcb->asoc.peer_req_out = 0; 3847 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3848 if (m_notify == NULL) 3849 /* no space left */ 3850 return; 3851 SCTP_BUF_LEN(m_notify) = 0; 3852 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3853 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3854 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3855 stradd->strchange_flags = flag; 3856 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3857 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3858 stradd->strchange_instrms = numberin; 3859 stradd->strchange_outstrms = numberout; 3860 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3861 SCTP_BUF_NEXT(m_notify) = NULL; 3862 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3863 /* no space */ 3864 sctp_m_freem(m_notify); 3865 return; 3866 } 3867 /* append to socket */ 3868 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3869 0, 0, stcb->asoc.context, 0, 0, 0, 3870 m_notify); 3871 if (control == NULL) { 3872 /* no memory */ 3873 sctp_m_freem(m_notify); 3874 return; 3875 } 3876 control->length = SCTP_BUF_LEN(m_notify); 3877 control->spec_flags = M_NOTIFICATION; 3878 /* not that we need this */ 3879 control->tail_mbuf = m_notify; 3880 sctp_add_to_readq(stcb->sctp_ep, stcb, 3881 control, 3882 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3883 } 3884 3885 void 3886 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3887 { 3888 struct mbuf *m_notify; 3889 struct sctp_queued_to_read *control; 3890 struct sctp_assoc_reset_event *strasoc; 3891 3892 if ((stcb == NULL) || 3893 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3894 /* event not enabled */ 3895 return; 3896 } 3897 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3898 if (m_notify == NULL) 3899 /* no space left */ 3900 return; 3901 SCTP_BUF_LEN(m_notify) = 0; 3902 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3903 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3904 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3905 strasoc->assocreset_flags = flag; 3906 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3907 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3908 strasoc->assocreset_local_tsn = sending_tsn; 3909 strasoc->assocreset_remote_tsn = recv_tsn; 3910 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3911 SCTP_BUF_NEXT(m_notify) = NULL; 3912 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3913 /* no space */ 3914 sctp_m_freem(m_notify); 3915 return; 3916 } 3917 /* append to socket */ 3918 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3919 0, 0, stcb->asoc.context, 0, 0, 0, 3920 m_notify); 3921 if (control == NULL) { 3922 /* no memory */ 3923 sctp_m_freem(m_notify); 3924 return; 3925 } 3926 control->length = SCTP_BUF_LEN(m_notify); 3927 control->spec_flags = M_NOTIFICATION; 3928 /* not that we need this */ 3929 control->tail_mbuf = m_notify; 3930 sctp_add_to_readq(stcb->sctp_ep, stcb, 3931 control, 3932 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3933 } 3934 3935 static void 3936 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3937 int number_entries, uint16_t *list, int flag) 3938 { 3939 struct mbuf *m_notify; 3940 struct sctp_queued_to_read 
*control; 3941 struct sctp_stream_reset_event *strreset; 3942 int len; 3943 3944 if ((stcb == NULL) || 3945 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3946 /* event not enabled */ 3947 return; 3948 } 3949 3950 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3951 if (m_notify == NULL) 3952 /* no space left */ 3953 return; 3954 SCTP_BUF_LEN(m_notify) = 0; 3955 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3956 if (len > M_TRAILINGSPACE(m_notify)) { 3957 /* never enough room */ 3958 sctp_m_freem(m_notify); 3959 return; 3960 } 3961 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3962 memset(strreset, 0, len); 3963 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3964 strreset->strreset_flags = flag; 3965 strreset->strreset_length = len; 3966 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3967 if (number_entries) { 3968 int i; 3969 3970 for (i = 0; i < number_entries; i++) { 3971 strreset->strreset_stream_list[i] = ntohs(list[i]); 3972 } 3973 } 3974 SCTP_BUF_LEN(m_notify) = len; 3975 SCTP_BUF_NEXT(m_notify) = NULL; 3976 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3977 /* no space */ 3978 sctp_m_freem(m_notify); 3979 return; 3980 } 3981 /* append to socket */ 3982 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3983 0, 0, stcb->asoc.context, 0, 0, 0, 3984 m_notify); 3985 if (control == NULL) { 3986 /* no memory */ 3987 sctp_m_freem(m_notify); 3988 return; 3989 } 3990 control->length = SCTP_BUF_LEN(m_notify); 3991 control->spec_flags = M_NOTIFICATION; 3992 /* not that we need this */ 3993 control->tail_mbuf = m_notify; 3994 sctp_add_to_readq(stcb->sctp_ep, stcb, 3995 control, 3996 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3997 } 3998 3999 static void 4000 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 4001 { 4002 struct mbuf *m_notify; 4003 struct sctp_remote_error *sre; 4004 struct sctp_queued_to_read *control; 4005 unsigned int notif_len; 4006 uint16_t chunk_len; 4007 4008 if ((stcb == NULL) || 4009 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 4010 return; 4011 } 4012 if (chunk != NULL) { 4013 chunk_len = ntohs(chunk->ch.chunk_length); 4014 /* 4015 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 4016 * contiguous. 4017 */ 4018 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 4019 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4020 } 4021 } else { 4022 chunk_len = 0; 4023 } 4024 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4025 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4026 if (m_notify == NULL) { 4027 /* Retry with smaller value. 
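 * The error chunk payload is dropped here; only the fixed-size notification is delivered.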
*/ 4028 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4029 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4030 if (m_notify == NULL) { 4031 return; 4032 } 4033 } 4034 SCTP_BUF_NEXT(m_notify) = NULL; 4035 sre = mtod(m_notify, struct sctp_remote_error *); 4036 memset(sre, 0, notif_len); 4037 sre->sre_type = SCTP_REMOTE_ERROR; 4038 sre->sre_flags = 0; 4039 sre->sre_length = sizeof(struct sctp_remote_error); 4040 sre->sre_error = error; 4041 sre->sre_assoc_id = sctp_get_associd(stcb); 4042 if (notif_len > sizeof(struct sctp_remote_error)) { 4043 memcpy(sre->sre_data, chunk, chunk_len); 4044 sre->sre_length += chunk_len; 4045 } 4046 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4047 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4048 0, 0, stcb->asoc.context, 0, 0, 0, 4049 m_notify); 4050 if (control != NULL) { 4051 control->length = SCTP_BUF_LEN(m_notify); 4052 control->spec_flags = M_NOTIFICATION; 4053 /* not that we need this */ 4054 control->tail_mbuf = m_notify; 4055 sctp_add_to_readq(stcb->sctp_ep, stcb, 4056 control, 4057 &stcb->sctp_socket->so_rcv, 1, 4058 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4059 } else { 4060 sctp_m_freem(m_notify); 4061 } 4062 } 4063 4064 void 4065 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4066 uint32_t error, void *data, int so_locked) 4067 { 4068 if ((stcb == NULL) || 4069 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4070 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4071 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4072 /* If the socket is gone we are out of here */ 4073 return; 4074 } 4075 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4076 return; 4077 } 4078 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4079 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4080 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4081 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4082 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4083 /* Don't report these in front states */ 4084 return; 4085 } 4086 } 4087 switch (notification) { 4088 case SCTP_NOTIFY_ASSOC_UP: 4089 if (stcb->asoc.assoc_up_sent == 0) { 4090 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, false, false, so_locked); 4091 stcb->asoc.assoc_up_sent = 1; 4092 } 4093 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4094 sctp_notify_adaptation_layer(stcb); 4095 } 4096 if (stcb->asoc.auth_supported == 0) { 4097 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4098 NULL, so_locked); 4099 } 4100 break; 4101 case SCTP_NOTIFY_ASSOC_DOWN: 4102 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, false, false, so_locked); 4103 break; 4104 case SCTP_NOTIFY_INTERFACE_DOWN: 4105 { 4106 struct sctp_nets *net; 4107 4108 net = (struct sctp_nets *)data; 4109 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4110 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4111 break; 4112 } 4113 case SCTP_NOTIFY_INTERFACE_UP: 4114 { 4115 struct sctp_nets *net; 4116 4117 net = (struct sctp_nets *)data; 4118 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4119 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4120 break; 4121 } 4122 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4123 { 4124 struct sctp_nets *net; 4125 4126 net = (struct sctp_nets *)data; 4127 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4128 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4129 break; 4130 } 4131 case 
SCTP_NOTIFY_SPECIAL_SP_FAIL: 4132 sctp_notify_send_failed2(stcb, error, 4133 (struct sctp_stream_queue_pending *)data, so_locked); 4134 break; 4135 case SCTP_NOTIFY_SENT_DG_FAIL: 4136 sctp_notify_send_failed(stcb, 1, error, 4137 (struct sctp_tmit_chunk *)data, so_locked); 4138 break; 4139 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4140 sctp_notify_send_failed(stcb, 0, error, 4141 (struct sctp_tmit_chunk *)data, so_locked); 4142 break; 4143 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4144 { 4145 uint32_t val; 4146 4147 val = *((uint32_t *)data); 4148 4149 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4150 break; 4151 } 4152 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4153 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4154 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4155 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, false, so_locked); 4156 } else { 4157 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, false, so_locked); 4158 } 4159 break; 4160 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4161 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4162 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4163 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, true, false, so_locked); 4164 } else { 4165 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, true, false, so_locked); 4166 } 4167 break; 4168 case SCTP_NOTIFY_ASSOC_TIMEDOUT: 4169 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4170 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4171 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, true, so_locked); 4172 } else { 4173 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, true, so_locked); 4174 } 4175 break; 4176 case SCTP_NOTIFY_ASSOC_RESTART: 4177 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, false, false, so_locked); 4178 if (stcb->asoc.auth_supported == 0) { 4179 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4180 NULL, so_locked); 4181 } 4182 break; 4183 case SCTP_NOTIFY_STR_RESET_SEND: 4184 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4185 break; 4186 case SCTP_NOTIFY_STR_RESET_RECV: 4187 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4188 break; 4189 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4190 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4191 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4192 break; 4193 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4194 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4195 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4196 break; 4197 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4198 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4199 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4200 break; 4201 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4202 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4203 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4204 break; 4205 case SCTP_NOTIFY_ASCONF_ADD_IP: 4206 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4207 error, so_locked); 4208 break; 4209 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4210 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4211 error, so_locked); 4212 break; 4213 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4214 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4215 error, so_locked); 4216 break; 4217 case SCTP_NOTIFY_PEER_SHUTDOWN: 4218 
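/* Peer initiated a shutdown; deliver an SCTP_SHUTDOWN_EVENT if the application enabled it. */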
sctp_notify_shutdown_event(stcb); 4219 break; 4220 case SCTP_NOTIFY_AUTH_NEW_KEY: 4221 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4222 (uint16_t)(uintptr_t)data, 4223 so_locked); 4224 break; 4225 case SCTP_NOTIFY_AUTH_FREE_KEY: 4226 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4227 (uint16_t)(uintptr_t)data, 4228 so_locked); 4229 break; 4230 case SCTP_NOTIFY_NO_PEER_AUTH: 4231 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4232 (uint16_t)(uintptr_t)data, 4233 so_locked); 4234 break; 4235 case SCTP_NOTIFY_SENDER_DRY: 4236 sctp_notify_sender_dry_event(stcb, so_locked); 4237 break; 4238 case SCTP_NOTIFY_REMOTE_ERROR: 4239 sctp_notify_remote_error(stcb, error, data); 4240 break; 4241 default: 4242 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4243 __func__, notification, notification); 4244 break; 4245 } /* end switch */ 4246 } 4247 4248 void 4249 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4250 { 4251 struct sctp_association *asoc; 4252 struct sctp_stream_out *outs; 4253 struct sctp_tmit_chunk *chk, *nchk; 4254 struct sctp_stream_queue_pending *sp, *nsp; 4255 int i; 4256 4257 if (stcb == NULL) { 4258 return; 4259 } 4260 asoc = &stcb->asoc; 4261 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4262 /* already being freed */ 4263 return; 4264 } 4265 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4266 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4267 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4268 return; 4269 } 4270 /* now through all the gunk freeing chunks */ 4271 /* sent queue SHOULD be empty */ 4272 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4273 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4274 asoc->sent_queue_cnt--; 4275 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4276 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4277 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4278 #ifdef INVARIANTS 4279 } else { 4280 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4281 #endif 4282 } 4283 } 4284 if (chk->data != NULL) { 4285 sctp_free_bufspace(stcb, asoc, chk, 1); 4286 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4287 error, chk, so_locked); 4288 if (chk->data) { 4289 sctp_m_freem(chk->data); 4290 chk->data = NULL; 4291 } 4292 } 4293 sctp_free_a_chunk(stcb, chk, so_locked); 4294 /* sa_ignore FREED_MEMORY */ 4295 } 4296 /* pending send queue SHOULD be empty */ 4297 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4298 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4299 asoc->send_queue_cnt--; 4300 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4301 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4302 #ifdef INVARIANTS 4303 } else { 4304 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4305 #endif 4306 } 4307 if (chk->data != NULL) { 4308 sctp_free_bufspace(stcb, asoc, chk, 1); 4309 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4310 error, chk, so_locked); 4311 if (chk->data) { 4312 sctp_m_freem(chk->data); 4313 chk->data = NULL; 4314 } 4315 } 4316 sctp_free_a_chunk(stcb, chk, so_locked); 4317 /* sa_ignore FREED_MEMORY */ 4318 } 4319 for (i = 0; i < asoc->streamoutcnt; i++) { 4320 /* For each stream */ 4321 outs = &asoc->strmout[i]; 4322 /* clean up any sends there */ 4323 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4324 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4325 TAILQ_REMOVE(&outs->outqueue, sp, next); 4326 
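/* Let the stream scheduler forget this message before notifying the ULP and freeing it. */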
stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1); 4327 sctp_free_spbufspace(stcb, asoc, sp); 4328 if (sp->data) { 4329 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4330 error, (void *)sp, so_locked); 4331 if (sp->data) { 4332 sctp_m_freem(sp->data); 4333 sp->data = NULL; 4334 sp->tail_mbuf = NULL; 4335 sp->length = 0; 4336 } 4337 } 4338 if (sp->net) { 4339 sctp_free_remote_addr(sp->net); 4340 sp->net = NULL; 4341 } 4342 /* Free the chunk */ 4343 sctp_free_a_strmoq(stcb, sp, so_locked); 4344 /* sa_ignore FREED_MEMORY */ 4345 } 4346 } 4347 } 4348 4349 void 4350 sctp_abort_notification(struct sctp_tcb *stcb, bool from_peer, bool timeout, 4351 uint16_t error, struct sctp_abort_chunk *abort, 4352 int so_locked) 4353 { 4354 if (stcb == NULL) { 4355 return; 4356 } 4357 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4358 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4359 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4360 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 4361 } 4362 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4363 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4364 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4365 return; 4366 } 4367 SCTP_TCB_SEND_LOCK(stcb); 4368 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4369 /* Tell them we lost the asoc */ 4370 sctp_report_all_outbound(stcb, error, so_locked); 4371 SCTP_TCB_SEND_UNLOCK(stcb); 4372 if (from_peer) { 4373 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4374 } else { 4375 if (timeout) { 4376 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_TIMEDOUT, stcb, error, abort, so_locked); 4377 } else { 4378 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4379 } 4380 } 4381 } 4382 4383 void 4384 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4385 struct mbuf *m, int iphlen, 4386 struct sockaddr *src, struct sockaddr *dst, 4387 struct sctphdr *sh, struct mbuf *op_err, 4388 uint8_t mflowtype, uint32_t mflowid, 4389 uint32_t vrf_id, uint16_t port) 4390 { 4391 struct sctp_gen_error_cause *cause; 4392 uint32_t vtag; 4393 uint16_t cause_code; 4394 4395 if (stcb != NULL) { 4396 vtag = stcb->asoc.peer_vtag; 4397 vrf_id = stcb->asoc.vrf_id; 4398 if (op_err != NULL) { 4399 /* Read the cause code from the error cause. 
*/ 4400 cause = mtod(op_err, struct sctp_gen_error_cause *); 4401 cause_code = ntohs(cause->code); 4402 } else { 4403 cause_code = 0; 4404 } 4405 } else { 4406 vtag = 0; 4407 } 4408 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4409 mflowtype, mflowid, inp->fibnum, 4410 vrf_id, port); 4411 if (stcb != NULL) { 4412 /* We have a TCB to abort, send notification too */ 4413 sctp_abort_notification(stcb, false, false, cause_code, NULL, SCTP_SO_NOT_LOCKED); 4414 /* Ok, now lets free it */ 4415 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4416 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4417 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4418 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4419 } 4420 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4421 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4422 } 4423 } 4424 #ifdef SCTP_ASOCLOG_OF_TSNS 4425 void 4426 sctp_print_out_track_log(struct sctp_tcb *stcb) 4427 { 4428 #ifdef NOSIY_PRINTS 4429 int i; 4430 4431 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4432 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4433 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4434 SCTP_PRINTF("None rcvd\n"); 4435 goto none_in; 4436 } 4437 if (stcb->asoc.tsn_in_wrapped) { 4438 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4439 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4440 stcb->asoc.in_tsnlog[i].tsn, 4441 stcb->asoc.in_tsnlog[i].strm, 4442 stcb->asoc.in_tsnlog[i].seq, 4443 stcb->asoc.in_tsnlog[i].flgs, 4444 stcb->asoc.in_tsnlog[i].sz); 4445 } 4446 } 4447 if (stcb->asoc.tsn_in_at) { 4448 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4449 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4450 stcb->asoc.in_tsnlog[i].tsn, 4451 stcb->asoc.in_tsnlog[i].strm, 4452 stcb->asoc.in_tsnlog[i].seq, 4453 stcb->asoc.in_tsnlog[i].flgs, 4454 stcb->asoc.in_tsnlog[i].sz); 4455 } 4456 } 4457 none_in: 4458 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4459 if ((stcb->asoc.tsn_out_at == 0) && 4460 (stcb->asoc.tsn_out_wrapped == 0)) { 4461 SCTP_PRINTF("None sent\n"); 4462 } 4463 if (stcb->asoc.tsn_out_wrapped) { 4464 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4465 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4466 stcb->asoc.out_tsnlog[i].tsn, 4467 stcb->asoc.out_tsnlog[i].strm, 4468 stcb->asoc.out_tsnlog[i].seq, 4469 stcb->asoc.out_tsnlog[i].flgs, 4470 stcb->asoc.out_tsnlog[i].sz); 4471 } 4472 } 4473 if (stcb->asoc.tsn_out_at) { 4474 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4475 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4476 stcb->asoc.out_tsnlog[i].tsn, 4477 stcb->asoc.out_tsnlog[i].strm, 4478 stcb->asoc.out_tsnlog[i].seq, 4479 stcb->asoc.out_tsnlog[i].flgs, 4480 stcb->asoc.out_tsnlog[i].sz); 4481 } 4482 } 4483 #endif 4484 } 4485 #endif 4486 4487 void 4488 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4489 struct mbuf *op_err, bool timedout, int so_locked) 4490 { 4491 struct sctp_gen_error_cause *cause; 4492 uint16_t cause_code; 4493 4494 if (stcb == NULL) { 4495 /* Got to have a TCB */ 4496 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4497 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4498 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4499 SCTP_CALLED_DIRECTLY_NOCMPSET); 4500 } 4501 } 4502 return; 4503 } 4504 if (op_err != NULL) { 4505 /* Read the cause code from the error cause. 
*/ 4506 cause = mtod(op_err, struct sctp_gen_error_cause *); 4507 cause_code = ntohs(cause->code); 4508 } else { 4509 cause_code = 0; 4510 } 4511 /* notify the peer */ 4512 sctp_send_abort_tcb(stcb, op_err, so_locked); 4513 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4514 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4515 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4516 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4517 } 4518 /* notify the ulp */ 4519 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4520 sctp_abort_notification(stcb, false, timedout, cause_code, NULL, so_locked); 4521 } 4522 /* now free the asoc */ 4523 #ifdef SCTP_ASOCLOG_OF_TSNS 4524 sctp_print_out_track_log(stcb); 4525 #endif 4526 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4527 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4528 } 4529 4530 void 4531 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4532 struct sockaddr *src, struct sockaddr *dst, 4533 struct sctphdr *sh, struct sctp_inpcb *inp, 4534 struct mbuf *cause, 4535 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4536 uint32_t vrf_id, uint16_t port) 4537 { 4538 struct sctp_chunkhdr *ch, chunk_buf; 4539 unsigned int chk_length; 4540 int contains_init_chunk; 4541 4542 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4543 /* Generate a TO address for future reference */ 4544 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4545 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4546 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4547 SCTP_CALLED_DIRECTLY_NOCMPSET); 4548 } 4549 } 4550 contains_init_chunk = 0; 4551 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4552 sizeof(*ch), (uint8_t *)&chunk_buf); 4553 while (ch != NULL) { 4554 chk_length = ntohs(ch->chunk_length); 4555 if (chk_length < sizeof(*ch)) { 4556 /* break to abort land */ 4557 break; 4558 } 4559 switch (ch->chunk_type) { 4560 case SCTP_INIT: 4561 contains_init_chunk = 1; 4562 break; 4563 case SCTP_PACKET_DROPPED: 4564 /* we don't respond to pkt-dropped */ 4565 return; 4566 case SCTP_ABORT_ASSOCIATION: 4567 /* we don't respond with an ABORT to an ABORT */ 4568 return; 4569 case SCTP_SHUTDOWN_COMPLETE: 4570 /* 4571 * we ignore it since we are not waiting for it and 4572 * peer is gone 4573 */ 4574 return; 4575 case SCTP_SHUTDOWN_ACK: 4576 sctp_send_shutdown_complete2(src, dst, sh, 4577 mflowtype, mflowid, fibnum, 4578 vrf_id, port); 4579 return; 4580 default: 4581 break; 4582 } 4583 offset += SCTP_SIZE32(chk_length); 4584 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4585 sizeof(*ch), (uint8_t *)&chunk_buf); 4586 } 4587 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4588 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4589 (contains_init_chunk == 0))) { 4590 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4591 mflowtype, mflowid, fibnum, 4592 vrf_id, port); 4593 } 4594 } 4595 4596 /* 4597 * check the inbound datagram to make sure there is not an abort inside it, 4598 * if there is return 1, else return 0. 
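 * As a side effect, *vtag is updated with the initiate tag of any INIT or INIT-ACK chunk found.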
4599 */ 4600 int 4601 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtag) 4602 { 4603 struct sctp_chunkhdr *ch; 4604 struct sctp_init_chunk *init_chk, chunk_buf; 4605 int offset; 4606 unsigned int chk_length; 4607 4608 offset = iphlen + sizeof(struct sctphdr); 4609 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4610 (uint8_t *)&chunk_buf); 4611 while (ch != NULL) { 4612 chk_length = ntohs(ch->chunk_length); 4613 if (chk_length < sizeof(*ch)) { 4614 /* packet is probably corrupt */ 4615 break; 4616 } 4617 /* we seem to be ok, is it an abort? */ 4618 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4619 /* yep, tell them */ 4620 return (1); 4621 } 4622 if ((ch->chunk_type == SCTP_INITIATION) || 4623 (ch->chunk_type == SCTP_INITIATION_ACK)) { 4624 /* need to update the Vtag */ 4625 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4626 offset, sizeof(struct sctp_init_chunk), (uint8_t *)&chunk_buf); 4627 if (init_chk != NULL) { 4628 *vtag = ntohl(init_chk->init.initiate_tag); 4629 } 4630 } 4631 /* Nope, move to the next chunk */ 4632 offset += SCTP_SIZE32(chk_length); 4633 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4634 sizeof(*ch), (uint8_t *)&chunk_buf); 4635 } 4636 return (0); 4637 } 4638 4639 /* 4640 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4641 * set (i.e. it's 0) so, create this function to compare link local scopes 4642 */ 4643 #ifdef INET6 4644 uint32_t 4645 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4646 { 4647 struct sockaddr_in6 a, b; 4648 4649 /* save copies */ 4650 a = *addr1; 4651 b = *addr2; 4652 4653 if (a.sin6_scope_id == 0) 4654 if (sa6_recoverscope(&a)) { 4655 /* can't get scope, so can't match */ 4656 return (0); 4657 } 4658 if (b.sin6_scope_id == 0) 4659 if (sa6_recoverscope(&b)) { 4660 /* can't get scope, so can't match */ 4661 return (0); 4662 } 4663 if (a.sin6_scope_id != b.sin6_scope_id) 4664 return (0); 4665 4666 return (1); 4667 } 4668 4669 /* 4670 * returns a sockaddr_in6 with embedded scope recovered and removed 4671 */ 4672 struct sockaddr_in6 * 4673 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4674 { 4675 /* check and strip embedded scope junk */ 4676 if (addr->sin6_family == AF_INET6) { 4677 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4678 if (addr->sin6_scope_id == 0) { 4679 *store = *addr; 4680 if (!sa6_recoverscope(store)) { 4681 /* use the recovered scope */ 4682 addr = store; 4683 } 4684 } else { 4685 /* else, return the original "to" addr */ 4686 in6_clearscope(&addr->sin6_addr); 4687 } 4688 } 4689 } 4690 return (addr); 4691 } 4692 #endif 4693 4694 /* 4695 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4696 * if same, 0 if not 4697 */ 4698 int 4699 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4700 { 4701 4702 /* must be valid */ 4703 if (sa1 == NULL || sa2 == NULL) 4704 return (0); 4705 4706 /* must be the same family */ 4707 if (sa1->sa_family != sa2->sa_family) 4708 return (0); 4709 4710 switch (sa1->sa_family) { 4711 #ifdef INET6 4712 case AF_INET6: 4713 { 4714 /* IPv6 addresses */ 4715 struct sockaddr_in6 *sin6_1, *sin6_2; 4716 4717 sin6_1 = (struct sockaddr_in6 *)sa1; 4718 sin6_2 = (struct sockaddr_in6 *)sa2; 4719 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4720 sin6_2)); 4721 } 4722 #endif 4723 #ifdef INET 4724 case AF_INET: 4725 { 4726 /* IPv4 addresses */ 4727 struct sockaddr_in *sin_1, *sin_2; 4728 4729 sin_1 = (struct sockaddr_in *)sa1; 4730 sin_2 = (struct sockaddr_in *)sa2; 4731 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4732 } 4733 #endif 4734 default: 4735 /* we don't do these... */ 4736 return (0); 4737 } 4738 } 4739 4740 void 4741 sctp_print_address(struct sockaddr *sa) 4742 { 4743 #ifdef INET6 4744 char ip6buf[INET6_ADDRSTRLEN]; 4745 #endif 4746 4747 switch (sa->sa_family) { 4748 #ifdef INET6 4749 case AF_INET6: 4750 { 4751 struct sockaddr_in6 *sin6; 4752 4753 sin6 = (struct sockaddr_in6 *)sa; 4754 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4755 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4756 ntohs(sin6->sin6_port), 4757 sin6->sin6_scope_id); 4758 break; 4759 } 4760 #endif 4761 #ifdef INET 4762 case AF_INET: 4763 { 4764 struct sockaddr_in *sin; 4765 unsigned char *p; 4766 4767 sin = (struct sockaddr_in *)sa; 4768 p = (unsigned char *)&sin->sin_addr; 4769 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4770 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4771 break; 4772 } 4773 #endif 4774 default: 4775 SCTP_PRINTF("?\n"); 4776 break; 4777 } 4778 } 4779 4780 void 4781 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4782 struct sctp_inpcb *new_inp, 4783 struct sctp_tcb *stcb, 4784 int waitflags) 4785 { 4786 /* 4787 * go through our old INP and pull off any control structures that 4788 * belong to stcb and move then to the new inp. 4789 */ 4790 struct socket *old_so, *new_so; 4791 struct sctp_queued_to_read *control, *nctl; 4792 struct sctp_readhead tmp_queue; 4793 struct mbuf *m; 4794 int error = 0; 4795 4796 old_so = old_inp->sctp_socket; 4797 new_so = new_inp->sctp_socket; 4798 TAILQ_INIT(&tmp_queue); 4799 error = sblock(&old_so->so_rcv, waitflags); 4800 if (error) { 4801 /* 4802 * Gak, can't get sblock, we have a problem. data will be 4803 * left stranded.. and we don't dare look at it since the 4804 * other thread may be reading something. Oh well, its a 4805 * screwed up app that does a peeloff OR a accept while 4806 * reading from the main socket... actually its only the 4807 * peeloff() case, since I think read will fail on a 4808 * listening socket.. 4809 */ 4810 return; 4811 } 4812 /* lock the socket buffers */ 4813 SCTP_INP_READ_LOCK(old_inp); 4814 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4815 /* Pull off all for out target stcb */ 4816 if (control->stcb == stcb) { 4817 /* remove it we want it */ 4818 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4819 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4820 m = control->data; 4821 while (m) { 4822 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4823 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4824 } 4825 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4826 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4827 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4828 } 4829 m = SCTP_BUF_NEXT(m); 4830 } 4831 } 4832 } 4833 SCTP_INP_READ_UNLOCK(old_inp); 4834 /* Remove the sb-lock on the old socket */ 4835 4836 sbunlock(&old_so->so_rcv); 4837 /* Now we move them over to the new socket buffer */ 4838 SCTP_INP_READ_LOCK(new_inp); 4839 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4840 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4841 m = control->data; 4842 while (m) { 4843 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4844 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4845 } 4846 sctp_sballoc(stcb, &new_so->so_rcv, m); 4847 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4848 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4849 } 4850 m = SCTP_BUF_NEXT(m); 4851 } 4852 } 4853 SCTP_INP_READ_UNLOCK(new_inp); 4854 } 4855 4856 void 4857 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4858 struct sctp_tcb *stcb, 4859 int so_locked 4860 SCTP_UNUSED 4861 ) 4862 { 4863 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4864 sctp_sorwakeup(inp, inp->sctp_socket); 4865 } 4866 } 4867 4868 void 4869 sctp_add_to_readq(struct sctp_inpcb *inp, 4870 struct sctp_tcb *stcb, 4871 struct sctp_queued_to_read *control, 4872 struct sockbuf *sb, 4873 int end, 4874 int inp_read_lock_held, 4875 int so_locked) 4876 { 4877 /* 4878 * Here we must place the control on the end of the socket read 4879 * queue AND increment sb_cc so that select will work properly on 4880 * read. 4881 */ 4882 struct mbuf *m, *prev = NULL; 4883 4884 if (inp == NULL) { 4885 /* Gak, TSNH!! */ 4886 #ifdef INVARIANTS 4887 panic("Gak, inp NULL on add_to_readq"); 4888 #endif 4889 return; 4890 } 4891 if (inp_read_lock_held == 0) 4892 SCTP_INP_READ_LOCK(inp); 4893 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4894 if (!control->on_strm_q) { 4895 sctp_free_remote_addr(control->whoFrom); 4896 if (control->data) { 4897 sctp_m_freem(control->data); 4898 control->data = NULL; 4899 } 4900 sctp_free_a_readq(stcb, control); 4901 } 4902 if (inp_read_lock_held == 0) 4903 SCTP_INP_READ_UNLOCK(inp); 4904 return; 4905 } 4906 if (!(control->spec_flags & M_NOTIFICATION)) { 4907 atomic_add_int(&inp->total_recvs, 1); 4908 if (!control->do_not_ref_stcb) { 4909 atomic_add_int(&stcb->total_recvs, 1); 4910 } 4911 } 4912 m = control->data; 4913 control->held_length = 0; 4914 control->length = 0; 4915 while (m) { 4916 if (SCTP_BUF_LEN(m) == 0) { 4917 /* Skip mbufs with NO length */ 4918 if (prev == NULL) { 4919 /* First one */ 4920 control->data = sctp_m_free(m); 4921 m = control->data; 4922 } else { 4923 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4924 m = SCTP_BUF_NEXT(prev); 4925 } 4926 if (m == NULL) { 4927 control->tail_mbuf = prev; 4928 } 4929 continue; 4930 } 4931 prev = m; 4932 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4933 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4934 } 4935 sctp_sballoc(stcb, sb, m); 4936 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4937 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 4938 } 4939 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4940 m = SCTP_BUF_NEXT(m); 4941 } 4942 if (prev != NULL) { 4943 control->tail_mbuf = prev; 4944 } else { 4945 /* Everything got collapsed out?? */ 4946 if (!control->on_strm_q) { 4947 sctp_free_remote_addr(control->whoFrom); 4948 sctp_free_a_readq(stcb, control); 4949 } 4950 if (inp_read_lock_held == 0) 4951 SCTP_INP_READ_UNLOCK(inp); 4952 return; 4953 } 4954 if (end) { 4955 control->end_added = 1; 4956 } 4957 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4958 control->on_read_q = 1; 4959 if (inp_read_lock_held == 0) 4960 SCTP_INP_READ_UNLOCK(inp); 4961 if (inp && inp->sctp_socket) { 4962 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4963 } 4964 } 4965 4966 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4967 *************ALTERNATE ROUTING CODE 4968 */ 4969 4970 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4971 *************ALTERNATE ROUTING CODE 4972 */ 4973 4974 struct mbuf * 4975 sctp_generate_cause(uint16_t code, char *info) 4976 { 4977 struct mbuf *m; 4978 struct sctp_gen_error_cause *cause; 4979 size_t info_len; 4980 uint16_t len; 4981 4982 if ((code == 0) || (info == NULL)) { 4983 return (NULL); 4984 } 4985 info_len = strlen(info); 4986 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4987 return (NULL); 4988 } 4989 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4990 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4991 if (m != NULL) { 4992 SCTP_BUF_LEN(m) = len; 4993 cause = mtod(m, struct sctp_gen_error_cause *); 4994 cause->code = htons(code); 4995 cause->length = htons(len); 4996 memcpy(cause->info, info, info_len); 4997 } 4998 return (m); 4999 } 5000 5001 struct mbuf * 5002 sctp_generate_no_user_data_cause(uint32_t tsn) 5003 { 5004 struct mbuf *m; 5005 struct sctp_error_no_user_data *no_user_data_cause; 5006 uint16_t len; 5007 5008 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 5009 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 5010 if (m != NULL) { 5011 SCTP_BUF_LEN(m) = len; 5012 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 5013 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 5014 no_user_data_cause->cause.length = htons(len); 5015 no_user_data_cause->tsn = htonl(tsn); 5016 } 5017 return (m); 5018 } 5019 5020 #ifdef SCTP_MBCNT_LOGGING 5021 void 5022 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 5023 struct sctp_tmit_chunk *tp1, int chk_cnt) 5024 { 5025 if (tp1->data == NULL) { 5026 return; 5027 } 5028 asoc->chunks_on_out_queue -= chk_cnt; 5029 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 5030 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 5031 asoc->total_output_queue_size, 5032 tp1->book_size, 5033 0, 5034 tp1->mbcnt); 5035 } 5036 if (asoc->total_output_queue_size >= tp1->book_size) { 5037 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 5038 } else { 5039 asoc->total_output_queue_size = 0; 5040 } 5041 5042 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 5043 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 5044 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 5045 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 5046 } else { 5047 stcb->sctp_socket->so_snd.sb_cc = 0; 5048 } 5049 } 5050 } 5051 5052 #endif 5053 5054 int 5055 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5056 uint8_t sent, int so_locked) 
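/* Abandon a PR-SCTP message: mark its fragments SCTP_FORWARD_TSN_SKIP, notify the ULP, and return the number of bytes released. */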
5057 { 5058 struct sctp_stream_out *strq; 5059 struct sctp_tmit_chunk *chk = NULL, *tp2; 5060 struct sctp_stream_queue_pending *sp; 5061 uint32_t mid; 5062 uint16_t sid; 5063 uint8_t foundeom = 0; 5064 int ret_sz = 0; 5065 int notdone; 5066 int do_wakeup_routine = 0; 5067 5068 sid = tp1->rec.data.sid; 5069 mid = tp1->rec.data.mid; 5070 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5071 stcb->asoc.abandoned_sent[0]++; 5072 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5073 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5074 #if defined(SCTP_DETAILED_STR_STATS) 5075 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5076 #endif 5077 } else { 5078 stcb->asoc.abandoned_unsent[0]++; 5079 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5080 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5081 #if defined(SCTP_DETAILED_STR_STATS) 5082 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5083 #endif 5084 } 5085 do { 5086 ret_sz += tp1->book_size; 5087 if (tp1->data != NULL) { 5088 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5089 sctp_flight_size_decrease(tp1); 5090 sctp_total_flight_decrease(stcb, tp1); 5091 } 5092 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5093 stcb->asoc.peers_rwnd += tp1->send_size; 5094 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5095 if (sent) { 5096 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5097 } else { 5098 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5099 } 5100 if (tp1->data) { 5101 sctp_m_freem(tp1->data); 5102 tp1->data = NULL; 5103 } 5104 do_wakeup_routine = 1; 5105 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5106 stcb->asoc.sent_queue_cnt_removeable--; 5107 } 5108 } 5109 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5110 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5111 SCTP_DATA_NOT_FRAG) { 5112 /* not frag'ed we ae done */ 5113 notdone = 0; 5114 foundeom = 1; 5115 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5116 /* end of frag, we are done */ 5117 notdone = 0; 5118 foundeom = 1; 5119 } else { 5120 /* 5121 * Its a begin or middle piece, we must mark all of 5122 * it 5123 */ 5124 notdone = 1; 5125 tp1 = TAILQ_NEXT(tp1, sctp_next); 5126 } 5127 } while (tp1 && notdone); 5128 if (foundeom == 0) { 5129 /* 5130 * The multi-part message was scattered across the send and 5131 * sent queue. 5132 */ 5133 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5134 if ((tp1->rec.data.sid != sid) || 5135 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5136 break; 5137 } 5138 /* 5139 * save to chk in case we have some on stream out 5140 * queue. If so and we have an un-transmitted one we 5141 * don't have to fudge the TSN. 5142 */ 5143 chk = tp1; 5144 ret_sz += tp1->book_size; 5145 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5146 if (sent) { 5147 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5148 } else { 5149 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5150 } 5151 if (tp1->data) { 5152 sctp_m_freem(tp1->data); 5153 tp1->data = NULL; 5154 } 5155 /* No flight involved here book the size to 0 */ 5156 tp1->book_size = 0; 5157 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5158 foundeom = 1; 5159 } 5160 do_wakeup_routine = 1; 5161 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5162 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5163 /* 5164 * on to the sent queue so we can wait for it to be 5165 * passed by. 
5166 */ 5167 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5168 sctp_next); 5169 stcb->asoc.send_queue_cnt--; 5170 stcb->asoc.sent_queue_cnt++; 5171 } 5172 } 5173 if (foundeom == 0) { 5174 /* 5175 * Still no eom found. That means there is stuff left on the 5176 * stream out queue.. yuck. 5177 */ 5178 SCTP_TCB_SEND_LOCK(stcb); 5179 strq = &stcb->asoc.strmout[sid]; 5180 sp = TAILQ_FIRST(&strq->outqueue); 5181 if (sp != NULL) { 5182 sp->discard_rest = 1; 5183 /* 5184 * We may need to put a chunk on the queue that 5185 * holds the TSN that would have been sent with the 5186 * LAST bit. 5187 */ 5188 if (chk == NULL) { 5189 /* Yep, we have to */ 5190 sctp_alloc_a_chunk(stcb, chk); 5191 if (chk == NULL) { 5192 /* 5193 * we are hosed. All we can do is 5194 * nothing.. which will cause an 5195 * abort if the peer is paying 5196 * attention. 5197 */ 5198 goto oh_well; 5199 } 5200 memset(chk, 0, sizeof(*chk)); 5201 chk->rec.data.rcv_flags = 0; 5202 chk->sent = SCTP_FORWARD_TSN_SKIP; 5203 chk->asoc = &stcb->asoc; 5204 if (stcb->asoc.idata_supported == 0) { 5205 if (sp->sinfo_flags & SCTP_UNORDERED) { 5206 chk->rec.data.mid = 0; 5207 } else { 5208 chk->rec.data.mid = strq->next_mid_ordered; 5209 } 5210 } else { 5211 if (sp->sinfo_flags & SCTP_UNORDERED) { 5212 chk->rec.data.mid = strq->next_mid_unordered; 5213 } else { 5214 chk->rec.data.mid = strq->next_mid_ordered; 5215 } 5216 } 5217 chk->rec.data.sid = sp->sid; 5218 chk->rec.data.ppid = sp->ppid; 5219 chk->rec.data.context = sp->context; 5220 chk->flags = sp->act_flags; 5221 chk->whoTo = NULL; 5222 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5223 strq->chunks_on_queues++; 5224 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5225 stcb->asoc.sent_queue_cnt++; 5226 stcb->asoc.pr_sctp_cnt++; 5227 } 5228 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5229 if (sp->sinfo_flags & SCTP_UNORDERED) { 5230 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5231 } 5232 if (stcb->asoc.idata_supported == 0) { 5233 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5234 strq->next_mid_ordered++; 5235 } 5236 } else { 5237 if (sp->sinfo_flags & SCTP_UNORDERED) { 5238 strq->next_mid_unordered++; 5239 } else { 5240 strq->next_mid_ordered++; 5241 } 5242 } 5243 oh_well: 5244 if (sp->data) { 5245 /* 5246 * Pull any data to free up the SB and allow 5247 * sender to "add more" while we will throw 5248 * away :-) 5249 */ 5250 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5251 ret_sz += sp->length; 5252 do_wakeup_routine = 1; 5253 sp->some_taken = 1; 5254 sctp_m_freem(sp->data); 5255 sp->data = NULL; 5256 sp->tail_mbuf = NULL; 5257 sp->length = 0; 5258 } 5259 } 5260 SCTP_TCB_SEND_UNLOCK(stcb); 5261 } 5262 if (do_wakeup_routine) { 5263 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5264 } 5265 return (ret_sz); 5266 } 5267 5268 /* 5269 * checks to see if the given address, sa, is one that is currently known by 5270 * the kernel note: can't distinguish the same address on multiple interfaces 5271 * and doesn't handle multiple addresses with different zone/scope id's note: 5272 * ifa_ifwithaddr() compares the entire sockaddr struct 5273 */ 5274 struct sctp_ifa * 5275 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5276 int holds_lock) 5277 { 5278 struct sctp_laddr *laddr; 5279 5280 if (holds_lock == 0) { 5281 SCTP_INP_RLOCK(inp); 5282 } 5283 5284 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5285 if (laddr->ifa == NULL) 5286 continue; 5287 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5288 continue; 5289 
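/* Families match; now compare the bound address itself. */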
#ifdef INET 5290 if (addr->sa_family == AF_INET) { 5291 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5292 laddr->ifa->address.sin.sin_addr.s_addr) { 5293 /* found him. */ 5294 break; 5295 } 5296 } 5297 #endif 5298 #ifdef INET6 5299 if (addr->sa_family == AF_INET6) { 5300 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5301 &laddr->ifa->address.sin6)) { 5302 /* found him. */ 5303 break; 5304 } 5305 } 5306 #endif 5307 } 5308 if (holds_lock == 0) { 5309 SCTP_INP_RUNLOCK(inp); 5310 } 5311 if (laddr != NULL) { 5312 return (laddr->ifa); 5313 } else { 5314 return (NULL); 5315 } 5316 } 5317 5318 uint32_t 5319 sctp_get_ifa_hash_val(struct sockaddr *addr) 5320 { 5321 switch (addr->sa_family) { 5322 #ifdef INET 5323 case AF_INET: 5324 { 5325 struct sockaddr_in *sin; 5326 5327 sin = (struct sockaddr_in *)addr; 5328 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5329 } 5330 #endif 5331 #ifdef INET6 5332 case AF_INET6: 5333 { 5334 struct sockaddr_in6 *sin6; 5335 uint32_t hash_of_addr; 5336 5337 sin6 = (struct sockaddr_in6 *)addr; 5338 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5339 sin6->sin6_addr.s6_addr32[1] + 5340 sin6->sin6_addr.s6_addr32[2] + 5341 sin6->sin6_addr.s6_addr32[3]); 5342 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5343 return (hash_of_addr); 5344 } 5345 #endif 5346 default: 5347 break; 5348 } 5349 return (0); 5350 } 5351 5352 struct sctp_ifa * 5353 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5354 { 5355 struct sctp_ifa *sctp_ifap; 5356 struct sctp_vrf *vrf; 5357 struct sctp_ifalist *hash_head; 5358 uint32_t hash_of_addr; 5359 5360 if (holds_lock == 0) { 5361 SCTP_IPI_ADDR_RLOCK(); 5362 } else { 5363 SCTP_IPI_ADDR_LOCK_ASSERT(); 5364 } 5365 5366 vrf = sctp_find_vrf(vrf_id); 5367 if (vrf == NULL) { 5368 if (holds_lock == 0) 5369 SCTP_IPI_ADDR_RUNLOCK(); 5370 return (NULL); 5371 } 5372 5373 hash_of_addr = sctp_get_ifa_hash_val(addr); 5374 5375 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5376 if (hash_head == NULL) { 5377 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5378 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5379 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5380 sctp_print_address(addr); 5381 SCTP_PRINTF("No such bucket for address\n"); 5382 if (holds_lock == 0) 5383 SCTP_IPI_ADDR_RUNLOCK(); 5384 5385 return (NULL); 5386 } 5387 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5388 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5389 continue; 5390 #ifdef INET 5391 if (addr->sa_family == AF_INET) { 5392 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5393 sctp_ifap->address.sin.sin_addr.s_addr) { 5394 /* found him. */ 5395 break; 5396 } 5397 } 5398 #endif 5399 #ifdef INET6 5400 if (addr->sa_family == AF_INET6) { 5401 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5402 &sctp_ifap->address.sin6)) { 5403 /* found him. */ 5404 break; 5405 } 5406 } 5407 #endif 5408 } 5409 if (holds_lock == 0) 5410 SCTP_IPI_ADDR_RUNLOCK(); 5411 return (sctp_ifap); 5412 } 5413 5414 static void 5415 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5416 uint32_t rwnd_req) 5417 { 5418 /* User pulled some data, do we need a rwnd update? 
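 * If enough receive space has been freed, send a window-update SACK right away.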
*/ 5419 struct epoch_tracker et; 5420 int r_unlocked = 0; 5421 uint32_t dif, rwnd; 5422 struct socket *so = NULL; 5423 5424 if (stcb == NULL) 5425 return; 5426 5427 atomic_add_int(&stcb->asoc.refcnt, 1); 5428 5429 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5430 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5431 /* Pre-check If we are freeing no update */ 5432 goto no_lock; 5433 } 5434 SCTP_INP_INCR_REF(stcb->sctp_ep); 5435 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5436 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5437 goto out; 5438 } 5439 so = stcb->sctp_socket; 5440 if (so == NULL) { 5441 goto out; 5442 } 5443 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5444 /* Have you have freed enough to look */ 5445 *freed_so_far = 0; 5446 /* Yep, its worth a look and the lock overhead */ 5447 5448 /* Figure out what the rwnd would be */ 5449 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5450 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5451 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5452 } else { 5453 dif = 0; 5454 } 5455 if (dif >= rwnd_req) { 5456 if (hold_rlock) { 5457 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5458 r_unlocked = 1; 5459 } 5460 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5461 /* 5462 * One last check before we allow the guy possibly 5463 * to get in. There is a race, where the guy has not 5464 * reached the gate. In that case 5465 */ 5466 goto out; 5467 } 5468 SCTP_TCB_LOCK(stcb); 5469 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5470 /* No reports here */ 5471 SCTP_TCB_UNLOCK(stcb); 5472 goto out; 5473 } 5474 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5475 NET_EPOCH_ENTER(et); 5476 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5477 5478 sctp_chunk_output(stcb->sctp_ep, stcb, 5479 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5480 /* make sure no timer is running */ 5481 NET_EPOCH_EXIT(et); 5482 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5483 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5484 SCTP_TCB_UNLOCK(stcb); 5485 } else { 5486 /* Update how much we have pending */ 5487 stcb->freed_by_sorcv_sincelast = dif; 5488 } 5489 out: 5490 if (so && r_unlocked && hold_rlock) { 5491 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5492 } 5493 5494 SCTP_INP_DECR_REF(stcb->sctp_ep); 5495 no_lock: 5496 atomic_add_int(&stcb->asoc.refcnt, -1); 5497 return; 5498 } 5499 5500 int 5501 sctp_sorecvmsg(struct socket *so, 5502 struct uio *uio, 5503 struct mbuf **mp, 5504 struct sockaddr *from, 5505 int fromlen, 5506 int *msg_flags, 5507 struct sctp_sndrcvinfo *sinfo, 5508 int filling_sinfo) 5509 { 5510 /* 5511 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5512 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5513 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
5514 * On the way out we may send out any combination of: 5515 * MSG_NOTIFICATION MSG_EOR 5516 * 5517 */ 5518 struct sctp_inpcb *inp = NULL; 5519 ssize_t my_len = 0; 5520 ssize_t cp_len = 0; 5521 int error = 0; 5522 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5523 struct mbuf *m = NULL; 5524 struct sctp_tcb *stcb = NULL; 5525 int wakeup_read_socket = 0; 5526 int freecnt_applied = 0; 5527 int out_flags = 0, in_flags = 0; 5528 int block_allowed = 1; 5529 uint32_t freed_so_far = 0; 5530 ssize_t copied_so_far = 0; 5531 int in_eeor_mode = 0; 5532 int no_rcv_needed = 0; 5533 uint32_t rwnd_req = 0; 5534 int hold_sblock = 0; 5535 int hold_rlock = 0; 5536 ssize_t slen = 0; 5537 uint32_t held_length = 0; 5538 int sockbuf_lock = 0; 5539 5540 if (uio == NULL) { 5541 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5542 return (EINVAL); 5543 } 5544 5545 if (msg_flags) { 5546 in_flags = *msg_flags; 5547 if (in_flags & MSG_PEEK) 5548 SCTP_STAT_INCR(sctps_read_peeks); 5549 } else { 5550 in_flags = 0; 5551 } 5552 slen = uio->uio_resid; 5553 5554 /* Pull in and set up our int flags */ 5555 if (in_flags & MSG_OOB) { 5556 /* Out of band's NOT supported */ 5557 return (EOPNOTSUPP); 5558 } 5559 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5560 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5561 return (EINVAL); 5562 } 5563 if ((in_flags & (MSG_DONTWAIT 5564 | MSG_NBIO 5565 )) || 5566 SCTP_SO_IS_NBIO(so)) { 5567 block_allowed = 0; 5568 } 5569 /* setup the endpoint */ 5570 inp = (struct sctp_inpcb *)so->so_pcb; 5571 if (inp == NULL) { 5572 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5573 return (EFAULT); 5574 } 5575 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5576 /* Must be at least a MTU's worth */ 5577 if (rwnd_req < SCTP_MIN_RWND) 5578 rwnd_req = SCTP_MIN_RWND; 5579 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5580 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5581 sctp_misc_ints(SCTP_SORECV_ENTER, 5582 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5583 } 5584 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5585 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5586 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5587 } 5588 5589 error = sblock(&so->so_rcv, (block_allowed ? 
SBL_WAIT : 0)); 5590 if (error) { 5591 goto release_unlocked; 5592 } 5593 sockbuf_lock = 1; 5594 restart: 5595 5596 restart_nosblocks: 5597 if (hold_sblock == 0) { 5598 SOCKBUF_LOCK(&so->so_rcv); 5599 hold_sblock = 1; 5600 } 5601 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5602 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5603 goto out; 5604 } 5605 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5606 if (so->so_error) { 5607 error = so->so_error; 5608 if ((in_flags & MSG_PEEK) == 0) 5609 so->so_error = 0; 5610 goto out; 5611 } else { 5612 if (so->so_rcv.sb_cc == 0) { 5613 /* indicate EOF */ 5614 error = 0; 5615 goto out; 5616 } 5617 } 5618 } 5619 if (so->so_rcv.sb_cc <= held_length) { 5620 if (so->so_error) { 5621 error = so->so_error; 5622 if ((in_flags & MSG_PEEK) == 0) { 5623 so->so_error = 0; 5624 } 5625 goto out; 5626 } 5627 if ((so->so_rcv.sb_cc == 0) && 5628 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5629 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5630 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5631 /* 5632 * For active open side clear flags for 5633 * re-use passive open is blocked by 5634 * connect. 5635 */ 5636 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5637 /* 5638 * You were aborted, passive side 5639 * always hits here 5640 */ 5641 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5642 error = ECONNRESET; 5643 } 5644 so->so_state &= ~(SS_ISCONNECTING | 5645 SS_ISDISCONNECTING | 5646 SS_ISCONFIRMING | 5647 SS_ISCONNECTED); 5648 if (error == 0) { 5649 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5650 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5651 error = ENOTCONN; 5652 } 5653 } 5654 goto out; 5655 } 5656 } 5657 if (block_allowed) { 5658 error = sbwait(&so->so_rcv); 5659 if (error) { 5660 goto out; 5661 } 5662 held_length = 0; 5663 goto restart_nosblocks; 5664 } else { 5665 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5666 error = EWOULDBLOCK; 5667 goto out; 5668 } 5669 } 5670 if (hold_sblock == 1) { 5671 SOCKBUF_UNLOCK(&so->so_rcv); 5672 hold_sblock = 0; 5673 } 5674 /* we possibly have data we can read */ 5675 /* sa_ignore FREED_MEMORY */ 5676 control = TAILQ_FIRST(&inp->read_queue); 5677 if (control == NULL) { 5678 /* 5679 * This could be happening since the appender did the 5680 * increment but as not yet did the tailq insert onto the 5681 * read_queue 5682 */ 5683 if (hold_rlock == 0) { 5684 SCTP_INP_READ_LOCK(inp); 5685 } 5686 control = TAILQ_FIRST(&inp->read_queue); 5687 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5688 #ifdef INVARIANTS 5689 panic("Huh, its non zero and nothing on control?"); 5690 #endif 5691 so->so_rcv.sb_cc = 0; 5692 } 5693 SCTP_INP_READ_UNLOCK(inp); 5694 hold_rlock = 0; 5695 goto restart; 5696 } 5697 5698 if ((control->length == 0) && 5699 (control->do_not_ref_stcb)) { 5700 /* 5701 * Clean up code for freeing assoc that left behind a 5702 * pdapi.. maybe a peer in EEOR that just closed after 5703 * sending and never indicated a EOR. 5704 */ 5705 if (hold_rlock == 0) { 5706 hold_rlock = 1; 5707 SCTP_INP_READ_LOCK(inp); 5708 } 5709 control->held_length = 0; 5710 if (control->data) { 5711 /* Hmm there is data here .. 
fix */ 5712 struct mbuf *m_tmp; 5713 int cnt = 0; 5714 5715 m_tmp = control->data; 5716 while (m_tmp) { 5717 cnt += SCTP_BUF_LEN(m_tmp); 5718 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5719 control->tail_mbuf = m_tmp; 5720 control->end_added = 1; 5721 } 5722 m_tmp = SCTP_BUF_NEXT(m_tmp); 5723 } 5724 control->length = cnt; 5725 } else { 5726 /* remove it */ 5727 TAILQ_REMOVE(&inp->read_queue, control, next); 5728 /* Add back any hidden data */ 5729 sctp_free_remote_addr(control->whoFrom); 5730 sctp_free_a_readq(stcb, control); 5731 } 5732 if (hold_rlock) { 5733 hold_rlock = 0; 5734 SCTP_INP_READ_UNLOCK(inp); 5735 } 5736 goto restart; 5737 } 5738 if ((control->length == 0) && 5739 (control->end_added == 1)) { 5740 /* 5741 * Do we also need to check for (control->pdapi_aborted == 5742 * 1)? 5743 */ 5744 if (hold_rlock == 0) { 5745 hold_rlock = 1; 5746 SCTP_INP_READ_LOCK(inp); 5747 } 5748 TAILQ_REMOVE(&inp->read_queue, control, next); 5749 if (control->data) { 5750 #ifdef INVARIANTS 5751 panic("control->data not null but control->length == 0"); 5752 #else 5753 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n"); 5754 sctp_m_freem(control->data); 5755 control->data = NULL; 5756 #endif 5757 } 5758 if (control->aux_data) { 5759 sctp_m_free(control->aux_data); 5760 control->aux_data = NULL; 5761 } 5762 #ifdef INVARIANTS 5763 if (control->on_strm_q) { 5764 panic("About to free ctl:%p so:%p and its in %d", 5765 control, so, control->on_strm_q); 5766 } 5767 #endif 5768 sctp_free_remote_addr(control->whoFrom); 5769 sctp_free_a_readq(stcb, control); 5770 if (hold_rlock) { 5771 hold_rlock = 0; 5772 SCTP_INP_READ_UNLOCK(inp); 5773 } 5774 goto restart; 5775 } 5776 if (control->length == 0) { 5777 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5778 (filling_sinfo)) { 5779 /* find a more suitable one than this */ 5780 ctl = TAILQ_NEXT(control, next); 5781 while (ctl) { 5782 if ((ctl->stcb != control->stcb) && (ctl->length) && 5783 (ctl->some_taken || 5784 (ctl->spec_flags & M_NOTIFICATION) || 5785 ((ctl->do_not_ref_stcb == 0) && 5786 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5787 ) { 5788 /*- 5789 * If we have a different TCB next, and there is data 5790 * present. If we have already taken some (pdapi), OR we can 5791 * ref the tcb and no delivery has started on this stream, we 5792 * take it. Note we allow a notification on a different 5793 * assoc to be delivered. 5794 */ 5795 control = ctl; 5796 goto found_one; 5797 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5798 (ctl->length) && 5799 ((ctl->some_taken) || 5800 ((ctl->do_not_ref_stcb == 0) && 5801 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5802 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5803 /*- 5804 * If we have the same tcb, and there is data present, and we 5805 * have the strm interleave feature present. Then if we have 5806 * taken some (pdapi) or we can refer to that tcb AND we have 5807 * not started a delivery for this stream, we can take it. 5808 * Note we do NOT allow a notification on the same assoc to 5809 * be delivered. 5810 */ 5811 control = ctl; 5812 goto found_one; 5813 } 5814 ctl = TAILQ_NEXT(ctl, next); 5815 } 5816 } 5817 /* 5818 * if we reach here, no suitable replacement is available 5819 * <or> fragment interleave is NOT on. So stuff the sb_cc 5820 * into our held count, and it's time to sleep again.
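 * held_length records the current sb_cc so that the wait loop above
 * (so->so_rcv.sb_cc <= held_length) only wakes us once something new has
 * actually been appended to the socket buffer.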
5821 */ 5822 held_length = so->so_rcv.sb_cc; 5823 control->held_length = so->so_rcv.sb_cc; 5824 goto restart; 5825 } 5826 /* Clear the held length since there is something to read */ 5827 control->held_length = 0; 5828 found_one: 5829 /* 5830 * If we reach here, control has a some data for us to read off. 5831 * Note that stcb COULD be NULL. 5832 */ 5833 if (hold_rlock == 0) { 5834 hold_rlock = 1; 5835 SCTP_INP_READ_LOCK(inp); 5836 } 5837 control->some_taken++; 5838 stcb = control->stcb; 5839 if (stcb) { 5840 if ((control->do_not_ref_stcb == 0) && 5841 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5842 if (freecnt_applied == 0) 5843 stcb = NULL; 5844 } else if (control->do_not_ref_stcb == 0) { 5845 /* you can't free it on me please */ 5846 /* 5847 * The lock on the socket buffer protects us so the 5848 * free code will stop. But since we used the 5849 * socketbuf lock and the sender uses the tcb_lock 5850 * to increment, we need to use the atomic add to 5851 * the refcnt 5852 */ 5853 if (freecnt_applied) { 5854 #ifdef INVARIANTS 5855 panic("refcnt already incremented"); 5856 #else 5857 SCTP_PRINTF("refcnt already incremented?\n"); 5858 #endif 5859 } else { 5860 atomic_add_int(&stcb->asoc.refcnt, 1); 5861 freecnt_applied = 1; 5862 } 5863 /* 5864 * Setup to remember how much we have not yet told 5865 * the peer our rwnd has opened up. Note we grab the 5866 * value from the tcb from last time. Note too that 5867 * sack sending clears this when a sack is sent, 5868 * which is fine. Once we hit the rwnd_req, we then 5869 * will go to the sctp_user_rcvd() that will not 5870 * lock until it KNOWs it MUST send a WUP-SACK. 5871 */ 5872 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5873 stcb->freed_by_sorcv_sincelast = 0; 5874 } 5875 } 5876 if (stcb && 5877 ((control->spec_flags & M_NOTIFICATION) == 0) && 5878 control->do_not_ref_stcb == 0) { 5879 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5880 } 5881 5882 /* First lets get off the sinfo and sockaddr info */ 5883 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5884 sinfo->sinfo_stream = control->sinfo_stream; 5885 sinfo->sinfo_ssn = (uint16_t)control->mid; 5886 sinfo->sinfo_flags = control->sinfo_flags; 5887 sinfo->sinfo_ppid = control->sinfo_ppid; 5888 sinfo->sinfo_context = control->sinfo_context; 5889 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5890 sinfo->sinfo_tsn = control->sinfo_tsn; 5891 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5892 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5893 nxt = TAILQ_NEXT(control, next); 5894 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5895 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5896 struct sctp_extrcvinfo *s_extra; 5897 5898 s_extra = (struct sctp_extrcvinfo *)sinfo; 5899 if ((nxt) && 5900 (nxt->length)) { 5901 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5902 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5903 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5904 } 5905 if (nxt->spec_flags & M_NOTIFICATION) { 5906 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5907 } 5908 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5909 s_extra->serinfo_next_length = nxt->length; 5910 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5911 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5912 if (nxt->tail_mbuf != NULL) { 5913 if (nxt->end_added) { 5914 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5915 } 5916 } 5917 } else { 5918 /* 5919 * we explicitly 0 this, since the memcpy 5920 * got some other things 
beyond the older 5921 * sinfo_ that is on the control's structure 5922 * :-D 5923 */ 5924 nxt = NULL; 5925 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5926 s_extra->serinfo_next_aid = 0; 5927 s_extra->serinfo_next_length = 0; 5928 s_extra->serinfo_next_ppid = 0; 5929 s_extra->serinfo_next_stream = 0; 5930 } 5931 } 5932 /* 5933 * update off the real current cum-ack, if we have an stcb. 5934 */ 5935 if ((control->do_not_ref_stcb == 0) && stcb) 5936 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5937 /* 5938 * mask off the high bits, we keep the actual chunk bits in 5939 * there. 5940 */ 5941 sinfo->sinfo_flags &= 0x00ff; 5942 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5943 sinfo->sinfo_flags |= SCTP_UNORDERED; 5944 } 5945 } 5946 #ifdef SCTP_ASOCLOG_OF_TSNS 5947 { 5948 int index, newindex; 5949 struct sctp_pcbtsn_rlog *entry; 5950 5951 do { 5952 index = inp->readlog_index; 5953 newindex = index + 1; 5954 if (newindex >= SCTP_READ_LOG_SIZE) { 5955 newindex = 0; 5956 } 5957 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5958 entry = &inp->readlog[index]; 5959 entry->vtag = control->sinfo_assoc_id; 5960 entry->strm = control->sinfo_stream; 5961 entry->seq = (uint16_t)control->mid; 5962 entry->sz = control->length; 5963 entry->flgs = control->sinfo_flags; 5964 } 5965 #endif 5966 if ((fromlen > 0) && (from != NULL)) { 5967 union sctp_sockstore store; 5968 size_t len; 5969 5970 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5971 #ifdef INET6 5972 case AF_INET6: 5973 len = sizeof(struct sockaddr_in6); 5974 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5975 store.sin6.sin6_port = control->port_from; 5976 break; 5977 #endif 5978 #ifdef INET 5979 case AF_INET: 5980 #ifdef INET6 5981 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5982 len = sizeof(struct sockaddr_in6); 5983 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5984 &store.sin6); 5985 store.sin6.sin6_port = control->port_from; 5986 } else { 5987 len = sizeof(struct sockaddr_in); 5988 store.sin = control->whoFrom->ro._l_addr.sin; 5989 store.sin.sin_port = control->port_from; 5990 } 5991 #else 5992 len = sizeof(struct sockaddr_in); 5993 store.sin = control->whoFrom->ro._l_addr.sin; 5994 store.sin.sin_port = control->port_from; 5995 #endif 5996 break; 5997 #endif 5998 default: 5999 len = 0; 6000 break; 6001 } 6002 memcpy(from, &store, min((size_t)fromlen, len)); 6003 #ifdef INET6 6004 { 6005 struct sockaddr_in6 lsa6, *from6; 6006 6007 from6 = (struct sockaddr_in6 *)from; 6008 sctp_recover_scope_mac(from6, (&lsa6)); 6009 } 6010 #endif 6011 } 6012 if (hold_rlock) { 6013 SCTP_INP_READ_UNLOCK(inp); 6014 hold_rlock = 0; 6015 } 6016 if (hold_sblock) { 6017 SOCKBUF_UNLOCK(&so->so_rcv); 6018 hold_sblock = 0; 6019 } 6020 /* now copy out what data we can */ 6021 if (mp == NULL) { 6022 /* copy out each mbuf in the chain up to length */ 6023 get_more_data: 6024 m = control->data; 6025 while (m) { 6026 /* Move out all we can */ 6027 cp_len = uio->uio_resid; 6028 my_len = SCTP_BUF_LEN(m); 6029 if (cp_len > my_len) { 6030 /* not enough in this buf */ 6031 cp_len = my_len; 6032 } 6033 if (hold_rlock) { 6034 SCTP_INP_READ_UNLOCK(inp); 6035 hold_rlock = 0; 6036 } 6037 if (cp_len > 0) 6038 error = uiomove(mtod(m, char *), (int)cp_len, uio); 6039 /* re-read */ 6040 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 6041 goto release; 6042 } 6043 6044 if ((control->do_not_ref_stcb == 0) && stcb && 6045 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 6046 no_rcv_needed = 1; 6047 } 6048 if 
(error) { 6049 /* error we are out of here */ 6050 goto release; 6051 } 6052 SCTP_INP_READ_LOCK(inp); 6053 hold_rlock = 1; 6054 if (cp_len == SCTP_BUF_LEN(m)) { 6055 if ((SCTP_BUF_NEXT(m) == NULL) && 6056 (control->end_added)) { 6057 out_flags |= MSG_EOR; 6058 if ((control->do_not_ref_stcb == 0) && 6059 (control->stcb != NULL) && 6060 ((control->spec_flags & M_NOTIFICATION) == 0)) 6061 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6062 } 6063 if (control->spec_flags & M_NOTIFICATION) { 6064 out_flags |= MSG_NOTIFICATION; 6065 } 6066 /* we ate up the mbuf */ 6067 if (in_flags & MSG_PEEK) { 6068 /* just looking */ 6069 m = SCTP_BUF_NEXT(m); 6070 copied_so_far += cp_len; 6071 } else { 6072 /* dispose of the mbuf */ 6073 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6074 sctp_sblog(&so->so_rcv, 6075 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6076 } 6077 sctp_sbfree(control, stcb, &so->so_rcv, m); 6078 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6079 sctp_sblog(&so->so_rcv, 6080 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6081 } 6082 copied_so_far += cp_len; 6083 freed_so_far += (uint32_t)cp_len; 6084 freed_so_far += MSIZE; 6085 atomic_subtract_int(&control->length, cp_len); 6086 control->data = sctp_m_free(m); 6087 m = control->data; 6088 /* 6089 * been through it all, must hold sb 6090 * lock ok to null tail 6091 */ 6092 if (control->data == NULL) { 6093 #ifdef INVARIANTS 6094 if ((control->end_added == 0) || 6095 (TAILQ_NEXT(control, next) == NULL)) { 6096 /* 6097 * If the end is not 6098 * added, OR the 6099 * next is NOT null 6100 * we MUST have the 6101 * lock. 6102 */ 6103 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6104 panic("Hmm we don't own the lock?"); 6105 } 6106 } 6107 #endif 6108 control->tail_mbuf = NULL; 6109 #ifdef INVARIANTS 6110 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6111 panic("end_added, nothing left and no MSG_EOR"); 6112 } 6113 #endif 6114 } 6115 } 6116 } else { 6117 /* Do we need to trim the mbuf? */ 6118 if (control->spec_flags & M_NOTIFICATION) { 6119 out_flags |= MSG_NOTIFICATION; 6120 } 6121 if ((in_flags & MSG_PEEK) == 0) { 6122 SCTP_BUF_RESV_UF(m, cp_len); 6123 SCTP_BUF_LEN(m) -= (int)cp_len; 6124 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6125 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6126 } 6127 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 6128 if ((control->do_not_ref_stcb == 0) && 6129 stcb) { 6130 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 6131 } 6132 copied_so_far += cp_len; 6133 freed_so_far += (uint32_t)cp_len; 6134 freed_so_far += MSIZE; 6135 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6136 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6137 SCTP_LOG_SBRESULT, 0); 6138 } 6139 atomic_subtract_int(&control->length, cp_len); 6140 } else { 6141 copied_so_far += cp_len; 6142 } 6143 } 6144 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6145 break; 6146 } 6147 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6148 (control->do_not_ref_stcb == 0) && 6149 (freed_so_far >= rwnd_req)) { 6150 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6151 } 6152 } /* end while(m) */ 6153 /* 6154 * At this point we have looked at it all and we either have 6155 * a MSG_EOR/or read all the user wants... <OR> 6156 * control->length == 0. 
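 * When MSG_EOR is set and we are not peeking: a fully consumed message is
 * removed from the read_queue and freed; if the user stopped short, MSG_EOR
 * is stripped so the remainder can be picked up on a later read. Otherwise
 * we either return what we have, or, for a still-incomplete pd-api message
 * with blocking allowed, wait for more data below.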
6157 */ 6158 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6159 /* we are done with this control */ 6160 if (control->length == 0) { 6161 if (control->data) { 6162 #ifdef INVARIANTS 6163 panic("control->data not null at read eor?"); 6164 #else 6165 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n"); 6166 sctp_m_freem(control->data); 6167 control->data = NULL; 6168 #endif 6169 } 6170 done_with_control: 6171 if (hold_rlock == 0) { 6172 SCTP_INP_READ_LOCK(inp); 6173 hold_rlock = 1; 6174 } 6175 TAILQ_REMOVE(&inp->read_queue, control, next); 6176 /* Add back any hiddend data */ 6177 if (control->held_length) { 6178 held_length = 0; 6179 control->held_length = 0; 6180 wakeup_read_socket = 1; 6181 } 6182 if (control->aux_data) { 6183 sctp_m_free(control->aux_data); 6184 control->aux_data = NULL; 6185 } 6186 no_rcv_needed = control->do_not_ref_stcb; 6187 sctp_free_remote_addr(control->whoFrom); 6188 control->data = NULL; 6189 #ifdef INVARIANTS 6190 if (control->on_strm_q) { 6191 panic("About to free ctl:%p so:%p and its in %d", 6192 control, so, control->on_strm_q); 6193 } 6194 #endif 6195 sctp_free_a_readq(stcb, control); 6196 control = NULL; 6197 if ((freed_so_far >= rwnd_req) && 6198 (no_rcv_needed == 0)) 6199 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6200 6201 } else { 6202 /* 6203 * The user did not read all of this 6204 * message, turn off the returned MSG_EOR 6205 * since we are leaving more behind on the 6206 * control to read. 6207 */ 6208 #ifdef INVARIANTS 6209 if (control->end_added && 6210 (control->data == NULL) && 6211 (control->tail_mbuf == NULL)) { 6212 panic("Gak, control->length is corrupt?"); 6213 } 6214 #endif 6215 no_rcv_needed = control->do_not_ref_stcb; 6216 out_flags &= ~MSG_EOR; 6217 } 6218 } 6219 if (out_flags & MSG_EOR) { 6220 goto release; 6221 } 6222 if ((uio->uio_resid == 0) || 6223 ((in_eeor_mode) && 6224 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6225 goto release; 6226 } 6227 /* 6228 * If I hit here the receiver wants more and this message is 6229 * NOT done (pd-api). So two questions. Can we block? if not 6230 * we are done. Did the user NOT set MSG_WAITALL? 6231 */ 6232 if (block_allowed == 0) { 6233 goto release; 6234 } 6235 /* 6236 * We need to wait for more data a few things: - We don't 6237 * sbunlock() so we don't get someone else reading. - We 6238 * must be sure to account for the case where what is added 6239 * is NOT to our control when we wakeup. 6240 */ 6241 6242 /* 6243 * Do we need to tell the transport a rwnd update might be 6244 * needed before we go to sleep? 
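 * If enough has already been freed (freed_so_far >= rwnd_req) and the stcb
 * can still be referenced safely, call sctp_user_rcvd() now so the peer
 * learns the window has opened, rather than letting it stay closed while
 * this thread sits in sbwait().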
6245 */ 6246 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6247 ((freed_so_far >= rwnd_req) && 6248 (control->do_not_ref_stcb == 0) && 6249 (no_rcv_needed == 0))) { 6250 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6251 } 6252 wait_some_more: 6253 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6254 goto release; 6255 } 6256 6257 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6258 goto release; 6259 6260 if (hold_rlock == 1) { 6261 SCTP_INP_READ_UNLOCK(inp); 6262 hold_rlock = 0; 6263 } 6264 if (hold_sblock == 0) { 6265 SOCKBUF_LOCK(&so->so_rcv); 6266 hold_sblock = 1; 6267 } 6268 if ((copied_so_far) && (control->length == 0) && 6269 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6270 goto release; 6271 } 6272 if (so->so_rcv.sb_cc <= control->held_length) { 6273 error = sbwait(&so->so_rcv); 6274 if (error) { 6275 goto release; 6276 } 6277 control->held_length = 0; 6278 } 6279 if (hold_sblock) { 6280 SOCKBUF_UNLOCK(&so->so_rcv); 6281 hold_sblock = 0; 6282 } 6283 if (control->length == 0) { 6284 /* still nothing here */ 6285 if (control->end_added == 1) { 6286 /* he aborted, or is done i.e.did a shutdown */ 6287 out_flags |= MSG_EOR; 6288 if (control->pdapi_aborted) { 6289 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6290 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6291 6292 out_flags |= MSG_TRUNC; 6293 } else { 6294 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6295 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6296 } 6297 goto done_with_control; 6298 } 6299 if (so->so_rcv.sb_cc > held_length) { 6300 control->held_length = so->so_rcv.sb_cc; 6301 held_length = 0; 6302 } 6303 goto wait_some_more; 6304 } else if (control->data == NULL) { 6305 /* 6306 * we must re-sync since data is probably being 6307 * added 6308 */ 6309 SCTP_INP_READ_LOCK(inp); 6310 if ((control->length > 0) && (control->data == NULL)) { 6311 /* 6312 * big trouble.. we have the lock and its 6313 * corrupt? 6314 */ 6315 #ifdef INVARIANTS 6316 panic("Impossible data==NULL length !=0"); 6317 #endif 6318 out_flags |= MSG_EOR; 6319 out_flags |= MSG_TRUNC; 6320 control->length = 0; 6321 SCTP_INP_READ_UNLOCK(inp); 6322 goto done_with_control; 6323 } 6324 SCTP_INP_READ_UNLOCK(inp); 6325 /* We will fall around to get more data */ 6326 } 6327 goto get_more_data; 6328 } else { 6329 /*- 6330 * Give caller back the mbuf chain, 6331 * store in uio_resid the length 6332 */ 6333 wakeup_read_socket = 0; 6334 if ((control->end_added == 0) || 6335 (TAILQ_NEXT(control, next) == NULL)) { 6336 /* Need to get rlock */ 6337 if (hold_rlock == 0) { 6338 SCTP_INP_READ_LOCK(inp); 6339 hold_rlock = 1; 6340 } 6341 } 6342 if (control->end_added) { 6343 out_flags |= MSG_EOR; 6344 if ((control->do_not_ref_stcb == 0) && 6345 (control->stcb != NULL) && 6346 ((control->spec_flags & M_NOTIFICATION) == 0)) 6347 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6348 } 6349 if (control->spec_flags & M_NOTIFICATION) { 6350 out_flags |= MSG_NOTIFICATION; 6351 } 6352 uio->uio_resid = control->length; 6353 *mp = control->data; 6354 m = control->data; 6355 while (m) { 6356 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6357 sctp_sblog(&so->so_rcv, 6358 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6359 } 6360 sctp_sbfree(control, stcb, &so->so_rcv, m); 6361 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6362 freed_so_far += MSIZE; 6363 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6364 sctp_sblog(&so->so_rcv, 6365 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6366 } 6367 m = SCTP_BUF_NEXT(m); 6368 } 6369 control->data = control->tail_mbuf = NULL; 6370 control->length = 0; 6371 if (out_flags & MSG_EOR) { 6372 /* Done with this control */ 6373 goto done_with_control; 6374 } 6375 } 6376 release: 6377 if (hold_rlock == 1) { 6378 SCTP_INP_READ_UNLOCK(inp); 6379 hold_rlock = 0; 6380 } 6381 if (hold_sblock == 1) { 6382 SOCKBUF_UNLOCK(&so->so_rcv); 6383 hold_sblock = 0; 6384 } 6385 6386 sbunlock(&so->so_rcv); 6387 sockbuf_lock = 0; 6388 6389 release_unlocked: 6390 if (hold_sblock) { 6391 SOCKBUF_UNLOCK(&so->so_rcv); 6392 hold_sblock = 0; 6393 } 6394 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6395 if ((freed_so_far >= rwnd_req) && 6396 (control && (control->do_not_ref_stcb == 0)) && 6397 (no_rcv_needed == 0)) 6398 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6399 } 6400 out: 6401 if (msg_flags) { 6402 *msg_flags = out_flags; 6403 } 6404 if (((out_flags & MSG_EOR) == 0) && 6405 ((in_flags & MSG_PEEK) == 0) && 6406 (sinfo) && 6407 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6408 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6409 struct sctp_extrcvinfo *s_extra; 6410 6411 s_extra = (struct sctp_extrcvinfo *)sinfo; 6412 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6413 } 6414 if (hold_rlock == 1) { 6415 SCTP_INP_READ_UNLOCK(inp); 6416 } 6417 if (hold_sblock) { 6418 SOCKBUF_UNLOCK(&so->so_rcv); 6419 } 6420 if (sockbuf_lock) { 6421 sbunlock(&so->so_rcv); 6422 } 6423 6424 if (freecnt_applied) { 6425 /* 6426 * The lock on the socket buffer protects us so the free 6427 * code will stop. But since we used the socketbuf lock and 6428 * the sender uses the tcb_lock to increment, we need to use 6429 * the atomic add to the refcnt. 6430 */ 6431 if (stcb == NULL) { 6432 #ifdef INVARIANTS 6433 panic("stcb for refcnt has gone NULL?"); 6434 goto stage_left; 6435 #else 6436 goto stage_left; 6437 #endif 6438 } 6439 /* Save the value back for next time */ 6440 stcb->freed_by_sorcv_sincelast = freed_so_far; 6441 atomic_add_int(&stcb->asoc.refcnt, -1); 6442 } 6443 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6444 if (stcb) { 6445 sctp_misc_ints(SCTP_SORECV_DONE, 6446 freed_so_far, 6447 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6448 stcb->asoc.my_rwnd, 6449 so->so_rcv.sb_cc); 6450 } else { 6451 sctp_misc_ints(SCTP_SORECV_DONE, 6452 freed_so_far, 6453 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6454 0, 6455 so->so_rcv.sb_cc); 6456 } 6457 } 6458 stage_left: 6459 if (wakeup_read_socket) { 6460 sctp_sorwakeup(inp, so); 6461 } 6462 return (error); 6463 } 6464 6465 #ifdef SCTP_MBUF_LOGGING 6466 struct mbuf * 6467 sctp_m_free(struct mbuf *m) 6468 { 6469 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6470 sctp_log_mb(m, SCTP_MBUF_IFREE); 6471 } 6472 return (m_free(m)); 6473 } 6474 6475 void 6476 sctp_m_freem(struct mbuf *mb) 6477 { 6478 while (mb != NULL) 6479 mb = sctp_m_free(mb); 6480 } 6481 6482 #endif 6483 6484 int 6485 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6486 { 6487 /* 6488 * Given a local address. For all associations that holds the 6489 * address, request a peer-set-primary. 
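 * The address is resolved to its sctp_ifa, wrapped in a sctp_laddr work
 * item with action SCTP_SET_PRIM_ADDR, pushed onto the address work queue,
 * and the ADDR_WQ timer is started so the iterator processes the request
 * asynchronously.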
6490 */ 6491 struct sctp_ifa *ifa; 6492 struct sctp_laddr *wi; 6493 6494 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6495 if (ifa == NULL) { 6496 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6497 return (EADDRNOTAVAIL); 6498 } 6499 /* 6500 * Now that we have the ifa we must awaken the iterator with this 6501 * message. 6502 */ 6503 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6504 if (wi == NULL) { 6505 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6506 return (ENOMEM); 6507 } 6508 /* Now incr the count and int wi structure */ 6509 SCTP_INCR_LADDR_COUNT(); 6510 memset(wi, 0, sizeof(*wi)); 6511 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6512 wi->ifa = ifa; 6513 wi->action = SCTP_SET_PRIM_ADDR; 6514 atomic_add_int(&ifa->refcount, 1); 6515 6516 /* Now add it to the work queue */ 6517 SCTP_WQ_ADDR_LOCK(); 6518 /* 6519 * Should this really be a tailq? As it is we will process the 6520 * newest first :-0 6521 */ 6522 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6523 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6524 (struct sctp_inpcb *)NULL, 6525 (struct sctp_tcb *)NULL, 6526 (struct sctp_nets *)NULL); 6527 SCTP_WQ_ADDR_UNLOCK(); 6528 return (0); 6529 } 6530 6531 int 6532 sctp_soreceive(struct socket *so, 6533 struct sockaddr **psa, 6534 struct uio *uio, 6535 struct mbuf **mp0, 6536 struct mbuf **controlp, 6537 int *flagsp) 6538 { 6539 int error, fromlen; 6540 uint8_t sockbuf[256]; 6541 struct sockaddr *from; 6542 struct sctp_extrcvinfo sinfo; 6543 int filling_sinfo = 1; 6544 int flags; 6545 struct sctp_inpcb *inp; 6546 6547 inp = (struct sctp_inpcb *)so->so_pcb; 6548 /* pickup the assoc we are reading from */ 6549 if (inp == NULL) { 6550 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6551 return (EINVAL); 6552 } 6553 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6554 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6555 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6556 (controlp == NULL)) { 6557 /* user does not want the sndrcv ctl */ 6558 filling_sinfo = 0; 6559 } 6560 if (psa) { 6561 from = (struct sockaddr *)sockbuf; 6562 fromlen = sizeof(sockbuf); 6563 from->sa_len = 0; 6564 } else { 6565 from = NULL; 6566 fromlen = 0; 6567 } 6568 6569 if (filling_sinfo) { 6570 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6571 } 6572 if (flagsp != NULL) { 6573 flags = *flagsp; 6574 } else { 6575 flags = 0; 6576 } 6577 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6578 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6579 if (flagsp != NULL) { 6580 *flagsp = flags; 6581 } 6582 if (controlp != NULL) { 6583 /* copy back the sinfo in a CMSG format */ 6584 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6585 *controlp = sctp_build_ctl_nchunk(inp, 6586 (struct sctp_sndrcvinfo *)&sinfo); 6587 } else { 6588 *controlp = NULL; 6589 } 6590 } 6591 if (psa) { 6592 /* copy back the address info */ 6593 if (from && from->sa_len) { 6594 *psa = sodupsockaddr(from, M_NOWAIT); 6595 } else { 6596 *psa = NULL; 6597 } 6598 } 6599 return (error); 6600 } 6601 6602 int 6603 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6604 int totaddr, int *error) 6605 { 6606 int added = 0; 6607 int i; 6608 struct sctp_inpcb *inp; 6609 struct sockaddr *sa; 6610 size_t incr = 0; 6611 #ifdef INET 6612 struct sockaddr_in *sin; 6613 #endif 6614 #ifdef INET6 6615 struct sockaddr_in6 *sin6; 6616 #endif 6617 6618 sa = addr; 6619 inp = 
stcb->sctp_ep; 6620 *error = 0; 6621 for (i = 0; i < totaddr; i++) { 6622 switch (sa->sa_family) { 6623 #ifdef INET 6624 case AF_INET: 6625 incr = sizeof(struct sockaddr_in); 6626 sin = (struct sockaddr_in *)sa; 6627 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6628 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6629 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6630 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6631 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6632 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6633 *error = EINVAL; 6634 goto out_now; 6635 } 6636 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6637 SCTP_DONOT_SETSCOPE, 6638 SCTP_ADDR_IS_CONFIRMED)) { 6639 /* assoc gone no un-lock */ 6640 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6641 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6642 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6643 *error = ENOBUFS; 6644 goto out_now; 6645 } 6646 added++; 6647 break; 6648 #endif 6649 #ifdef INET6 6650 case AF_INET6: 6651 incr = sizeof(struct sockaddr_in6); 6652 sin6 = (struct sockaddr_in6 *)sa; 6653 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6654 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6655 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6656 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6657 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6658 *error = EINVAL; 6659 goto out_now; 6660 } 6661 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6662 SCTP_DONOT_SETSCOPE, 6663 SCTP_ADDR_IS_CONFIRMED)) { 6664 /* assoc gone no un-lock */ 6665 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6666 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6667 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6668 *error = ENOBUFS; 6669 goto out_now; 6670 } 6671 added++; 6672 break; 6673 #endif 6674 default: 6675 break; 6676 } 6677 sa = (struct sockaddr *)((caddr_t)sa + incr); 6678 } 6679 out_now: 6680 return (added); 6681 } 6682 6683 int 6684 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6685 unsigned int totaddr, 6686 unsigned int *num_v4, unsigned int *num_v6, 6687 unsigned int limit) 6688 { 6689 struct sockaddr *sa; 6690 struct sctp_tcb *stcb; 6691 unsigned int incr, at, i; 6692 6693 at = 0; 6694 sa = addr; 6695 *num_v6 = *num_v4 = 0; 6696 /* account and validate addresses */ 6697 if (totaddr == 0) { 6698 return (EINVAL); 6699 } 6700 for (i = 0; i < totaddr; i++) { 6701 if (at + sizeof(struct sockaddr) > limit) { 6702 return (EINVAL); 6703 } 6704 switch (sa->sa_family) { 6705 #ifdef INET 6706 case AF_INET: 6707 incr = (unsigned int)sizeof(struct sockaddr_in); 6708 if (sa->sa_len != incr) { 6709 return (EINVAL); 6710 } 6711 (*num_v4) += 1; 6712 break; 6713 #endif 6714 #ifdef INET6 6715 case AF_INET6: 6716 { 6717 struct sockaddr_in6 *sin6; 6718 6719 incr = (unsigned int)sizeof(struct sockaddr_in6); 6720 if (sa->sa_len != incr) { 6721 return (EINVAL); 6722 } 6723 sin6 = (struct sockaddr_in6 *)sa; 6724 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6725 /* Must be non-mapped for connectx */ 6726 return (EINVAL); 6727 } 6728 (*num_v6) += 1; 6729 break; 6730 } 6731 #endif 6732 default: 6733 return (EINVAL); 6734 } 6735 if ((at + incr) > limit) { 6736 return (EINVAL); 6737 } 6738 SCTP_INP_INCR_REF(inp); 6739 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6740 if (stcb != NULL) { 6741 SCTP_TCB_UNLOCK(stcb); 6742 return (EALREADY); 6743 } else { 6744 SCTP_INP_DECR_REF(inp); 6745 } 6746 at += incr; 6747 sa = (struct sockaddr *)((caddr_t)sa + 
incr); 6748 } 6749 return (0); 6750 } 6751 6752 /* 6753 * sctp_bindx(ADD) for one address. 6754 * assumes all arguments are valid/checked by caller. 6755 */ 6756 void 6757 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6758 struct sockaddr *sa, uint32_t vrf_id, int *error, 6759 void *p) 6760 { 6761 #if defined(INET) && defined(INET6) 6762 struct sockaddr_in sin; 6763 #endif 6764 #ifdef INET6 6765 struct sockaddr_in6 *sin6; 6766 #endif 6767 #ifdef INET 6768 struct sockaddr_in *sinp; 6769 #endif 6770 struct sockaddr *addr_to_use; 6771 struct sctp_inpcb *lep; 6772 uint16_t port; 6773 6774 /* see if we're bound all already! */ 6775 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6776 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6777 *error = EINVAL; 6778 return; 6779 } 6780 switch (sa->sa_family) { 6781 #ifdef INET6 6782 case AF_INET6: 6783 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6784 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6785 *error = EINVAL; 6786 return; 6787 } 6788 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6789 /* can only bind v6 on PF_INET6 sockets */ 6790 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6791 *error = EINVAL; 6792 return; 6793 } 6794 sin6 = (struct sockaddr_in6 *)sa; 6795 port = sin6->sin6_port; 6796 #ifdef INET 6797 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6798 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6799 SCTP_IPV6_V6ONLY(inp)) { 6800 /* can't bind v4-mapped on PF_INET sockets */ 6801 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6802 *error = EINVAL; 6803 return; 6804 } 6805 in6_sin6_2_sin(&sin, sin6); 6806 addr_to_use = (struct sockaddr *)&sin; 6807 } else { 6808 addr_to_use = sa; 6809 } 6810 #else 6811 addr_to_use = sa; 6812 #endif 6813 break; 6814 #endif 6815 #ifdef INET 6816 case AF_INET: 6817 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6818 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6819 *error = EINVAL; 6820 return; 6821 } 6822 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6823 SCTP_IPV6_V6ONLY(inp)) { 6824 /* can't bind v4 on PF_INET sockets */ 6825 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6826 *error = EINVAL; 6827 return; 6828 } 6829 sinp = (struct sockaddr_in *)sa; 6830 port = sinp->sin_port; 6831 addr_to_use = sa; 6832 break; 6833 #endif 6834 default: 6835 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6836 *error = EINVAL; 6837 return; 6838 } 6839 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6840 if (p == NULL) { 6841 /* Can't get proc for Net/Open BSD */ 6842 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6843 *error = EINVAL; 6844 return; 6845 } 6846 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6847 return; 6848 } 6849 /* Validate the incoming port. */ 6850 if ((port != 0) && (port != inp->sctp_lport)) { 6851 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6852 *error = EINVAL; 6853 return; 6854 } 6855 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6856 if (lep == NULL) { 6857 /* add the address */ 6858 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6859 SCTP_ADD_IP_ADDRESS, vrf_id); 6860 } else { 6861 if (lep != inp) { 6862 *error = EADDRINUSE; 6863 } 6864 SCTP_INP_DECR_REF(lep); 6865 } 6866 } 6867 6868 /* 6869 * sctp_bindx(DELETE) for one address. 6870 * assumes all arguments are valid/checked by caller. 
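 * Only the bound-all and address family/length sanity checks are repeated
 * here (with v4-mapped addresses converted to plain IPv4 when allowed); the
 * delete itself is handed to sctp_addr_mgmt_ep_sa() with
 * SCTP_DEL_IP_ADDRESS.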
6871 */ 6872 void 6873 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6874 struct sockaddr *sa, uint32_t vrf_id, int *error) 6875 { 6876 struct sockaddr *addr_to_use; 6877 #if defined(INET) && defined(INET6) 6878 struct sockaddr_in6 *sin6; 6879 struct sockaddr_in sin; 6880 #endif 6881 6882 /* see if we're bound all already! */ 6883 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6884 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6885 *error = EINVAL; 6886 return; 6887 } 6888 switch (sa->sa_family) { 6889 #ifdef INET6 6890 case AF_INET6: 6891 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6892 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6893 *error = EINVAL; 6894 return; 6895 } 6896 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6897 /* can only bind v6 on PF_INET6 sockets */ 6898 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6899 *error = EINVAL; 6900 return; 6901 } 6902 #ifdef INET 6903 sin6 = (struct sockaddr_in6 *)sa; 6904 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6905 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6906 SCTP_IPV6_V6ONLY(inp)) { 6907 /* can't bind mapped-v4 on PF_INET sockets */ 6908 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6909 *error = EINVAL; 6910 return; 6911 } 6912 in6_sin6_2_sin(&sin, sin6); 6913 addr_to_use = (struct sockaddr *)&sin; 6914 } else { 6915 addr_to_use = sa; 6916 } 6917 #else 6918 addr_to_use = sa; 6919 #endif 6920 break; 6921 #endif 6922 #ifdef INET 6923 case AF_INET: 6924 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6925 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6926 *error = EINVAL; 6927 return; 6928 } 6929 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6930 SCTP_IPV6_V6ONLY(inp)) { 6931 /* can't bind v4 on PF_INET sockets */ 6932 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6933 *error = EINVAL; 6934 return; 6935 } 6936 addr_to_use = sa; 6937 break; 6938 #endif 6939 default: 6940 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6941 *error = EINVAL; 6942 return; 6943 } 6944 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6945 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6946 vrf_id); 6947 } 6948 6949 /* 6950 * returns the valid local address count for an assoc, taking into account 6951 * all scoping rules 6952 */ 6953 int 6954 sctp_local_addr_count(struct sctp_tcb *stcb) 6955 { 6956 int loopback_scope; 6957 #if defined(INET) 6958 int ipv4_local_scope, ipv4_addr_legal; 6959 #endif 6960 #if defined(INET6) 6961 int local_scope, site_scope, ipv6_addr_legal; 6962 #endif 6963 struct sctp_vrf *vrf; 6964 struct sctp_ifn *sctp_ifn; 6965 struct sctp_ifa *sctp_ifa; 6966 int count = 0; 6967 6968 /* Turn on all the appropriate scopes */ 6969 loopback_scope = stcb->asoc.scope.loopback_scope; 6970 #if defined(INET) 6971 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6972 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6973 #endif 6974 #if defined(INET6) 6975 local_scope = stcb->asoc.scope.local_scope; 6976 site_scope = stcb->asoc.scope.site_scope; 6977 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6978 #endif 6979 SCTP_IPI_ADDR_RLOCK(); 6980 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6981 if (vrf == NULL) { 6982 /* no vrf, no addresses */ 6983 SCTP_IPI_ADDR_RUNLOCK(); 6984 return (0); 6985 } 6986 6987 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6988 /* 6989 * bound all case: go through all ifns on the vrf 6990 */ 6991 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6992 if ((loopback_scope == 0) && 6993 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6994 continue; 6995 } 6996 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6997 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6998 continue; 6999 switch (sctp_ifa->address.sa.sa_family) { 7000 #ifdef INET 7001 case AF_INET: 7002 if (ipv4_addr_legal) { 7003 struct sockaddr_in *sin; 7004 7005 sin = &sctp_ifa->address.sin; 7006 if (sin->sin_addr.s_addr == 0) { 7007 /* 7008 * skip unspecified 7009 * addrs 7010 */ 7011 continue; 7012 } 7013 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 7014 &sin->sin_addr) != 0) { 7015 continue; 7016 } 7017 if ((ipv4_local_scope == 0) && 7018 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 7019 continue; 7020 } 7021 /* count this one */ 7022 count++; 7023 } else { 7024 continue; 7025 } 7026 break; 7027 #endif 7028 #ifdef INET6 7029 case AF_INET6: 7030 if (ipv6_addr_legal) { 7031 struct sockaddr_in6 *sin6; 7032 7033 sin6 = &sctp_ifa->address.sin6; 7034 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 7035 continue; 7036 } 7037 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 7038 &sin6->sin6_addr) != 0) { 7039 continue; 7040 } 7041 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 7042 if (local_scope == 0) 7043 continue; 7044 if (sin6->sin6_scope_id == 0) { 7045 if (sa6_recoverscope(sin6) != 0) 7046 /* 7047 * 7048 * bad 7049 * link 7050 * 7051 * local 7052 * 7053 * address 7054 */ 7055 continue; 7056 } 7057 } 7058 if ((site_scope == 0) && 7059 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7060 continue; 7061 } 7062 /* count this one */ 7063 count++; 7064 } 7065 break; 7066 #endif 7067 default: 7068 /* TSNH */ 7069 break; 7070 } 7071 } 7072 } 7073 } else { 7074 /* 7075 * subset bound case 7076 */ 7077 struct sctp_laddr *laddr; 7078 7079 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7080 sctp_nxt_addr) { 7081 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7082 continue; 7083 } 7084 /* count this one */ 7085 count++; 7086 } 7087 } 7088 SCTP_IPI_ADDR_RUNLOCK(); 7089 return (count); 7090 } 7091 7092 #if defined(SCTP_LOCAL_TRACE_BUF) 7093 7094 void 7095 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7096 { 7097 uint32_t saveindex, newindex; 7098 7099 do { 7100 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7101 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7102 newindex = 1; 7103 } else { 7104 newindex = saveindex + 1; 7105 } 7106 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7107 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7108 saveindex = 0; 7109 } 7110 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7111 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7112 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7113 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7114 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7115 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7116 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7117 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7118 } 7119 7120 #endif 7121 static void 7122 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7123 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7124 { 7125 struct ip *iph; 7126 #ifdef INET6 7127 struct ip6_hdr *ip6; 7128 #endif 7129 struct mbuf *sp, *last; 7130 struct udphdr *uhdr; 7131 uint16_t port; 7132 7133 if ((m->m_flags & M_PKTHDR) == 0) { 7134 /* Can't handle one that is not a pkt hdr */ 7135 goto out; 7136 } 7137 /* Pull the src port */ 7138 iph = mtod(m, struct ip *); 7139 uhdr = (struct udphdr *)((caddr_t)iph + off); 7140 port = uhdr->uh_sport; 7141 /* 7142 * Split out the mbuf chain. Leave the IP header in m, place the 7143 * rest in the sp. 7144 */ 7145 sp = m_split(m, off, M_NOWAIT); 7146 if (sp == NULL) { 7147 /* Gak, drop packet, we can't do a split */ 7148 goto out; 7149 } 7150 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7151 /* Gak, packet can't have an SCTP header in it - too small */ 7152 m_freem(sp); 7153 goto out; 7154 } 7155 /* Now pull up the UDP header and SCTP header together */ 7156 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7157 if (sp == NULL) { 7158 /* Gak pullup failed */ 7159 goto out; 7160 } 7161 /* Trim out the UDP header */ 7162 m_adj(sp, sizeof(struct udphdr)); 7163 7164 /* Now reconstruct the mbuf chain */ 7165 for (last = m; last->m_next; last = last->m_next); 7166 last->m_next = sp; 7167 m->m_pkthdr.len += sp->m_pkthdr.len; 7168 /* 7169 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7170 * checksum and it was valid. Since CSUM_DATA_VALID == 7171 * CSUM_SCTP_VALID this would imply that the HW also verified the 7172 * SCTP checksum. Therefore, clear the bit. 
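 * After the debug printout the flag is cleared and the IPv4 ip_len or IPv6
 * ip6_plen is reduced by the size of the UDP header before the packet is
 * handed to sctp_input_with_port() or sctp6_input_with_port().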
7173 */ 7174 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7175 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7176 m->m_pkthdr.len, 7177 if_name(m->m_pkthdr.rcvif), 7178 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7179 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7180 iph = mtod(m, struct ip *); 7181 switch (iph->ip_v) { 7182 #ifdef INET 7183 case IPVERSION: 7184 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7185 sctp_input_with_port(m, off, port); 7186 break; 7187 #endif 7188 #ifdef INET6 7189 case IPV6_VERSION >> 4: 7190 ip6 = mtod(m, struct ip6_hdr *); 7191 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7192 sctp6_input_with_port(&m, &off, port); 7193 break; 7194 #endif 7195 default: 7196 goto out; 7197 break; 7198 } 7199 return; 7200 out: 7201 m_freem(m); 7202 } 7203 7204 #ifdef INET 7205 static void 7206 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7207 { 7208 struct ip *outer_ip, *inner_ip; 7209 struct sctphdr *sh; 7210 struct icmp *icmp; 7211 struct udphdr *udp; 7212 struct sctp_inpcb *inp; 7213 struct sctp_tcb *stcb; 7214 struct sctp_nets *net; 7215 struct sctp_init_chunk *ch; 7216 struct sockaddr_in src, dst; 7217 uint8_t type, code; 7218 7219 inner_ip = (struct ip *)vip; 7220 icmp = (struct icmp *)((caddr_t)inner_ip - 7221 (sizeof(struct icmp) - sizeof(struct ip))); 7222 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7223 if (ntohs(outer_ip->ip_len) < 7224 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7225 return; 7226 } 7227 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7228 sh = (struct sctphdr *)(udp + 1); 7229 memset(&src, 0, sizeof(struct sockaddr_in)); 7230 src.sin_family = AF_INET; 7231 src.sin_len = sizeof(struct sockaddr_in); 7232 src.sin_port = sh->src_port; 7233 src.sin_addr = inner_ip->ip_src; 7234 memset(&dst, 0, sizeof(struct sockaddr_in)); 7235 dst.sin_family = AF_INET; 7236 dst.sin_len = sizeof(struct sockaddr_in); 7237 dst.sin_port = sh->dest_port; 7238 dst.sin_addr = inner_ip->ip_dst; 7239 /* 7240 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7241 * holds our local endpoint address. Thus we reverse the dst and the 7242 * src in the lookup. 7243 */ 7244 inp = NULL; 7245 net = NULL; 7246 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7247 (struct sockaddr *)&src, 7248 &inp, &net, 1, 7249 SCTP_DEFAULT_VRFID); 7250 if ((stcb != NULL) && 7251 (net != NULL) && 7252 (inp != NULL)) { 7253 /* Check the UDP port numbers */ 7254 if ((udp->uh_dport != net->port) || 7255 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7256 SCTP_TCB_UNLOCK(stcb); 7257 return; 7258 } 7259 /* Check the verification tag */ 7260 if (ntohl(sh->v_tag) != 0) { 7261 /* 7262 * This must be the verification tag used for 7263 * sending out packets. We don't consider packets 7264 * reflecting the verification tag. 7265 */ 7266 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7267 SCTP_TCB_UNLOCK(stcb); 7268 return; 7269 } 7270 } else { 7271 if (ntohs(outer_ip->ip_len) >= 7272 sizeof(struct ip) + 7273 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7274 /* 7275 * In this case we can check if we got an 7276 * INIT chunk and if the initiate tag 7277 * matches. 
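 * A zero verification tag is only legal on an INIT, so the embedded chunk
 * must be an INIT whose initiate_tag matches our own my_vtag; anything else
 * is ignored.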
7278 */ 7279 ch = (struct sctp_init_chunk *)(sh + 1); 7280 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7281 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7282 SCTP_TCB_UNLOCK(stcb); 7283 return; 7284 } 7285 } else { 7286 SCTP_TCB_UNLOCK(stcb); 7287 return; 7288 } 7289 } 7290 type = icmp->icmp_type; 7291 code = icmp->icmp_code; 7292 if ((type == ICMP_UNREACH) && 7293 (code == ICMP_UNREACH_PORT)) { 7294 code = ICMP_UNREACH_PROTOCOL; 7295 } 7296 sctp_notify(inp, stcb, net, type, code, 7297 ntohs(inner_ip->ip_len), 7298 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7299 } else { 7300 if ((stcb == NULL) && (inp != NULL)) { 7301 /* reduce ref-count */ 7302 SCTP_INP_WLOCK(inp); 7303 SCTP_INP_DECR_REF(inp); 7304 SCTP_INP_WUNLOCK(inp); 7305 } 7306 if (stcb) { 7307 SCTP_TCB_UNLOCK(stcb); 7308 } 7309 } 7310 return; 7311 } 7312 #endif 7313 7314 #ifdef INET6 7315 static void 7316 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7317 { 7318 struct ip6ctlparam *ip6cp; 7319 struct sctp_inpcb *inp; 7320 struct sctp_tcb *stcb; 7321 struct sctp_nets *net; 7322 struct sctphdr sh; 7323 struct udphdr udp; 7324 struct sockaddr_in6 src, dst; 7325 uint8_t type, code; 7326 7327 ip6cp = (struct ip6ctlparam *)d; 7328 /* 7329 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7330 */ 7331 if (ip6cp->ip6c_m == NULL) { 7332 return; 7333 } 7334 /* 7335 * Check if we can safely examine the ports and the verification tag 7336 * of the SCTP common header. 7337 */ 7338 if (ip6cp->ip6c_m->m_pkthdr.len < 7339 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7340 return; 7341 } 7342 /* Copy out the UDP header. */ 7343 memset(&udp, 0, sizeof(struct udphdr)); 7344 m_copydata(ip6cp->ip6c_m, 7345 ip6cp->ip6c_off, 7346 sizeof(struct udphdr), 7347 (caddr_t)&udp); 7348 /* Copy out the port numbers and the verification tag. */ 7349 memset(&sh, 0, sizeof(struct sctphdr)); 7350 m_copydata(ip6cp->ip6c_m, 7351 ip6cp->ip6c_off + sizeof(struct udphdr), 7352 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7353 (caddr_t)&sh); 7354 memset(&src, 0, sizeof(struct sockaddr_in6)); 7355 src.sin6_family = AF_INET6; 7356 src.sin6_len = sizeof(struct sockaddr_in6); 7357 src.sin6_port = sh.src_port; 7358 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7359 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7360 return; 7361 } 7362 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7363 dst.sin6_family = AF_INET6; 7364 dst.sin6_len = sizeof(struct sockaddr_in6); 7365 dst.sin6_port = sh.dest_port; 7366 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7367 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7368 return; 7369 } 7370 inp = NULL; 7371 net = NULL; 7372 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7373 (struct sockaddr *)&src, 7374 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7375 if ((stcb != NULL) && 7376 (net != NULL) && 7377 (inp != NULL)) { 7378 /* Check the UDP port numbers */ 7379 if ((udp.uh_dport != net->port) || 7380 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7381 SCTP_TCB_UNLOCK(stcb); 7382 return; 7383 } 7384 /* Check the verification tag */ 7385 if (ntohl(sh.v_tag) != 0) { 7386 /* 7387 * This must be the verification tag used for 7388 * sending out packets. We don't consider packets 7389 * reflecting the verification tag. 
7390 */ 7391 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7392 SCTP_TCB_UNLOCK(stcb); 7393 return; 7394 } 7395 } else { 7396 if (ip6cp->ip6c_m->m_pkthdr.len >= 7397 ip6cp->ip6c_off + sizeof(struct udphdr) + 7398 sizeof(struct sctphdr) + 7399 sizeof(struct sctp_chunkhdr) + 7400 offsetof(struct sctp_init, a_rwnd)) { 7401 /* 7402 * In this case we can check if we got an 7403 * INIT chunk and if the initiate tag 7404 * matches. 7405 */ 7406 uint32_t initiate_tag; 7407 uint8_t chunk_type; 7408 7409 m_copydata(ip6cp->ip6c_m, 7410 ip6cp->ip6c_off + 7411 sizeof(struct udphdr) + 7412 sizeof(struct sctphdr), 7413 sizeof(uint8_t), 7414 (caddr_t)&chunk_type); 7415 m_copydata(ip6cp->ip6c_m, 7416 ip6cp->ip6c_off + 7417 sizeof(struct udphdr) + 7418 sizeof(struct sctphdr) + 7419 sizeof(struct sctp_chunkhdr), 7420 sizeof(uint32_t), 7421 (caddr_t)&initiate_tag); 7422 if ((chunk_type != SCTP_INITIATION) || 7423 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7424 SCTP_TCB_UNLOCK(stcb); 7425 return; 7426 } 7427 } else { 7428 SCTP_TCB_UNLOCK(stcb); 7429 return; 7430 } 7431 } 7432 type = ip6cp->ip6c_icmp6->icmp6_type; 7433 code = ip6cp->ip6c_icmp6->icmp6_code; 7434 if ((type == ICMP6_DST_UNREACH) && 7435 (code == ICMP6_DST_UNREACH_NOPORT)) { 7436 type = ICMP6_PARAM_PROB; 7437 code = ICMP6_PARAMPROB_NEXTHEADER; 7438 } 7439 sctp6_notify(inp, stcb, net, type, code, 7440 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7441 } else { 7442 if ((stcb == NULL) && (inp != NULL)) { 7443 /* reduce inp's ref-count */ 7444 SCTP_INP_WLOCK(inp); 7445 SCTP_INP_DECR_REF(inp); 7446 SCTP_INP_WUNLOCK(inp); 7447 } 7448 if (stcb) { 7449 SCTP_TCB_UNLOCK(stcb); 7450 } 7451 } 7452 } 7453 #endif 7454 7455 void 7456 sctp_over_udp_stop(void) 7457 { 7458 /* 7459 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7460 * for writting! 7461 */ 7462 #ifdef INET 7463 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7464 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7465 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7466 } 7467 #endif 7468 #ifdef INET6 7469 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7470 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7471 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7472 } 7473 #endif 7474 } 7475 7476 int 7477 sctp_over_udp_start(void) 7478 { 7479 uint16_t port; 7480 int ret; 7481 #ifdef INET 7482 struct sockaddr_in sin; 7483 #endif 7484 #ifdef INET6 7485 struct sockaddr_in6 sin6; 7486 #endif 7487 /* 7488 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7489 * for writting! 7490 */ 7491 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7492 if (ntohs(port) == 0) { 7493 /* Must have a port set */ 7494 return (EINVAL); 7495 } 7496 #ifdef INET 7497 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7498 /* Already running -- must stop first */ 7499 return (EALREADY); 7500 } 7501 #endif 7502 #ifdef INET6 7503 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7504 /* Already running -- must stop first */ 7505 return (EALREADY); 7506 } 7507 #endif 7508 #ifdef INET 7509 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7510 SOCK_DGRAM, IPPROTO_UDP, 7511 curthread->td_ucred, curthread))) { 7512 sctp_over_udp_stop(); 7513 return (ret); 7514 } 7515 /* Call the special UDP hook. */ 7516 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7517 sctp_recv_udp_tunneled_packet, 7518 sctp_recv_icmp_tunneled_packet, 7519 NULL))) { 7520 sctp_over_udp_stop(); 7521 return (ret); 7522 } 7523 /* Ok, we have a socket, bind it to the port. 
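 * Only the port (the configured sctp_udp_tunneling_port) is filled in, so
 * the tunnel socket binds to the wildcard address; if the bind fails,
 * sctp_over_udp_stop() tears down whatever has been set up so far.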
void
sctp_over_udp_stop(void)
{
    /*
     * This function assumes sysctl caller holds sctp_sysctl_info_lock()
     * for writing!
     */
#ifdef INET
    if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
        soclose(SCTP_BASE_INFO(udp4_tun_socket));
        SCTP_BASE_INFO(udp4_tun_socket) = NULL;
    }
#endif
#ifdef INET6
    if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
        soclose(SCTP_BASE_INFO(udp6_tun_socket));
        SCTP_BASE_INFO(udp6_tun_socket) = NULL;
    }
#endif
}

int
sctp_over_udp_start(void)
{
    uint16_t port;
    int ret;
#ifdef INET
    struct sockaddr_in sin;
#endif
#ifdef INET6
    struct sockaddr_in6 sin6;
#endif
    /*
     * This function assumes sysctl caller holds sctp_sysctl_info_lock()
     * for writing!
     */
    port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
    if (ntohs(port) == 0) {
        /* Must have a port set */
        return (EINVAL);
    }
#ifdef INET
    if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
        /* Already running -- must stop first */
        return (EALREADY);
    }
#endif
#ifdef INET6
    if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
        /* Already running -- must stop first */
        return (EALREADY);
    }
#endif
#ifdef INET
    if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
        SOCK_DGRAM, IPPROTO_UDP,
        curthread->td_ucred, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Call the special UDP hook. */
    if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
        sctp_recv_udp_tunneled_packet,
        sctp_recv_icmp_tunneled_packet,
        NULL))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Ok, we have a socket, bind it to the port. */
    memset(&sin, 0, sizeof(struct sockaddr_in));
    sin.sin_len = sizeof(struct sockaddr_in);
    sin.sin_family = AF_INET;
    sin.sin_port = htons(port);
    if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
        (struct sockaddr *)&sin, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
#endif
#ifdef INET6
    if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
        SOCK_DGRAM, IPPROTO_UDP,
        curthread->td_ucred, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Call the special UDP hook. */
    if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
        sctp_recv_udp_tunneled_packet,
        sctp_recv_icmp6_tunneled_packet,
        NULL))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Ok, we have a socket, bind it to the port. */
    memset(&sin6, 0, sizeof(struct sockaddr_in6));
    sin6.sin6_len = sizeof(struct sockaddr_in6);
    sin6.sin6_family = AF_INET6;
    sin6.sin6_port = htons(port);
    if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
        (struct sockaddr *)&sin6, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
#endif
    return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
    if (mtu1 > 0) {
        if (mtu2 > 0) {
            if (mtu3 > 0) {
                return (min(mtu1, min(mtu2, mtu3)));
            } else {
                return (min(mtu1, mtu2));
            }
        } else {
            if (mtu3 > 0) {
                return (min(mtu1, mtu3));
            } else {
                return (mtu1);
            }
        }
    } else {
        if (mtu2 > 0) {
            if (mtu3 > 0) {
                return (min(mtu2, mtu3));
            } else {
                return (mtu2);
            }
        } else {
            return (mtu3);
        }
    }
}
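/*
 * Illustrative example (not taken from the source): with an interface
 * MTU of 1500, an unknown host-cache value (0), and a discovered path
 * MTU of 1280,
 *
 *     sctp_min_mtu(1500, 0, 1280)
 *
 * skips the zero argument and returns 1280, while sctp_min_mtu(0, 0, 0)
 * returns 0.
 */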
void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
    struct in_conninfo inc;

    memset(&inc, 0, sizeof(struct in_conninfo));
    inc.inc_fibnum = fibnum;
    switch (addr->sa.sa_family) {
#ifdef INET
    case AF_INET:
        inc.inc_faddr = addr->sin.sin_addr;
        break;
#endif
#ifdef INET6
    case AF_INET6:
        inc.inc_flags |= INC_ISIPV6;
        inc.inc6_faddr = addr->sin6.sin6_addr;
        break;
#endif
    default:
        return;
    }
    tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
    struct in_conninfo inc;

    memset(&inc, 0, sizeof(struct in_conninfo));
    inc.inc_fibnum = fibnum;
    switch (addr->sa.sa_family) {
#ifdef INET
    case AF_INET:
        inc.inc_faddr = addr->sin.sin_addr;
        break;
#endif
#ifdef INET6
    case AF_INET6:
        inc.inc_flags |= INC_ISIPV6;
        inc.inc6_faddr = addr->sin6.sin6_addr;
        break;
#endif
    default:
        return (0);
    }
    return ((uint32_t)tcp_hc_getmtu(&inc));
}

void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
    int old_state = stcb->asoc.state;
#endif

    KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
        ("sctp_set_state: Can't set substate (new_state = %x)",
        new_state));
    stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
    if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
        (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
        (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
        SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
    }
#if defined(KDTRACE_HOOKS)
    if (((old_state & SCTP_STATE_MASK) != new_state) &&
        !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
        (new_state == SCTP_STATE_INUSE))) {
        SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
    }
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
    int old_state = stcb->asoc.state;
#endif

    KASSERT((substate & SCTP_STATE_MASK) == 0,
        ("sctp_add_substate: Can't set state (substate = %x)",
        substate));
    stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
    if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
        ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
        ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
        ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
        SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
    }
#endif
}
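/*
 * sctp_set_state() replaces the bits covered by SCTP_STATE_MASK and, when
 * entering one of the SHUTDOWN states, clears the SHUTDOWN_PENDING
 * substate; sctp_add_substate() only ORs in flag bits outside the mask.
 * Under KDTRACE_HOOKS both fire the state__change DTrace probe only for
 * transitions considered interesting: a real state change (other than the
 * initial EMPTY to INUSE transition), or the ABOUT_TO_BE_FREED /
 * SHUTDOWN_PENDING substates being newly set.  A hypothetical caller
 * (illustrative only, not from the source) would use them like:
 *
 *     sctp_set_state(stcb, SCTP_STATE_OPEN);
 *     sctp_add_substate(stcb, SCTP_STATE_SHUTDOWN_PENDING);
 */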