/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = SCTP_SBAVAIL(sb);
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(SOCK_MTX(inp->sctp_socket));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_rcv));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_snd));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}
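/*
 * The audit routine below cross-checks the association's retransmission
 * count and flight-size bookkeeping against the sent queue and the
 * per-net flight sizes, corrects any drift it finds, and dumps the
 * audit trail whenever a mismatch is detected.
 */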
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code the time to be positive and
 * know that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
	uint64_t temp;
	uint32_t ticks;

	if (hz == 1000) {
		ticks = msecs;
	} else {
		temp = (((uint64_t)msecs * hz) + 999) / 1000;
		if (temp > UINT32_MAX) {
			ticks = UINT32_MAX;
		} else {
			ticks = (uint32_t)temp;
		}
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t msecs;

	if (hz == 1000) {
		msecs = ticks;
	} else {
		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
		if (temp > UINT32_MAX) {
			msecs = UINT32_MAX;
		} else {
			msecs = (uint32_t)temp;
		}
	}
	return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
	uint64_t temp;
	uint32_t ticks;

	temp = (uint64_t)secs * hz;
	if (temp > UINT32_MAX) {
		ticks = UINT32_MAX;
	} else {
		ticks = (uint32_t)temp;
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t secs;

	temp = ((uint64_t)ticks + (hz - 1)) / hz;
	if (temp > UINT32_MAX) {
		secs = UINT32_MAX;
	} else {
		secs = (uint32_t)temp;
	}
	return (secs);
}

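/*
 * Worked example of the rounding above (illustrative values): with
 * hz = 100 (10 ms per tick), sctp_msecs_to_ticks(1) = (1 * 100 + 999) / 1000
 * = 1 tick rather than 0, and sctp_msecs_to_ticks(15) = (15 * 100 + 999) /
 * 1000 = 2 ticks (20 ms). A non-zero input therefore never collapses to a
 * zero timeout.
 */
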
/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};

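/*
 * Example of the two lookups below: a request for 1499 is first masked
 * down to 1496; sctp_get_prev_mtu(1499) then returns 1492 (the largest
 * table entry not above 1496) and sctp_get_next_mtu(1499) returns 1500
 * (the smallest table entry above 1496).
 */
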
/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4
 * smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	val &= 0xfffffffc;
	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two callers will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}

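/*
 * Map the internal kernel association state bits onto the user-visible
 * association states reported through the socket API.
 */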
int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t initial_tsn, uint32_t vrf_id,
    uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;
#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	if (override_tag) {
		asoc->init_seq_number = initial_tsn;
	} else {
		asoc->init_seq_number = sctp_select_initial_TSN(&inp->sctp_ep);
	}
	asoc->asconf_seq_out = asoc->init_seq_number;
	asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->sending_seq = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->init_seq_number - 1;
	/* we are optimistic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->init_seq_number - 1;
	asoc->asconf_seq_in = asoc->init_seq_number - 1;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = 0;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

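	/*
	 * Congestion control and stream scheduling are pluggable: the
	 * endpoint's default module indices above select which function
	 * vectors from sctp_cc_functions[] and sctp_ss_functions[] this
	 * association will use.
	 */
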
	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	SCTP_TCB_LOCK(stcb);
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc);
	SCTP_TCB_UNLOCK(stcb);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}

static void
sctp_iterator_work(struct sctp_iterator *it)
{
	struct epoch_tracker et;
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

	NET_EPOCH_ENTER(et);
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		NET_EPOCH_EXIT(et);
		return;
	}
select_a_new_ep:
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb != NULL) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_subtract_int(&it->stcb->asoc.refcnt, 1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_subtract_int(&it->stcb->asoc.refcnt, 1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		    ("%s: stcb %p does not belong to inp %p, but inp %p",
		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));
		SCTP_INP_RLOCK_ASSERT(it->inp);
		SCTP_TCB_LOCK_ASSERT(it->stcb);

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
		SCTP_INP_RLOCK_ASSERT(it->inp);
		SCTP_TCB_LOCK_ASSERT(it->stcb);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0) {
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
			SCTP_INP_RLOCK_ASSERT(it->inp);
			SCTP_TCB_LOCK_ASSERT(it->stcb);
		}

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}

static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL, /* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Freeing if we are stopping or put back on the
			 * addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				sctp_asconf_iterator_end(asc, 0);
			} else {
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */

void
sctp_timeout_handler(void *t)
{
	struct epoch_tracker et;
	struct timeval tv;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
	int type;
	int i, secret;
	bool did_output, released_asoc_reference;

	/*
	 * If inp, stcb or net are not NULL, then references to these were
	 * added when the timer was started, and must be released before
	 * this function returns.
	 */
	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	NET_EPOCH_ENTER(et);
	released_asoc_reference = false;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	KASSERT(tmr->self == NULL || tmr->self == tmr,
	    ("sctp_timeout_handler: tmr->self corrupted"));
	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
	    ("sctp_timeout_handler: invalid timer type %d", tmr->type));
	type = tmr->type;
	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
	    type, stcb, stcb->sctp_ep));
	tmr->stopped_from = 0xa001;
	if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to CLOSED association.\n",
		    type);
		goto out_decr;
	}
	tmr->stopped_from = 0xa002;
	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to not being active.\n",
		    type);
		goto out_decr;
	}

	tmr->stopped_from = 0xa003;
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/*
		 * Release reference so that association can be freed if
		 * necessary below. This is safe now that we have acquired
		 * the lock.
		 */
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		released_asoc_reference = true;
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d handler exiting due to CLOSED association.\n",
			    type);
			goto out;
		}
	} else if (inp != NULL) {
		SCTP_INP_WLOCK(inp);
	} else {
		SCTP_WQ_ADDR_LOCK();
	}

	/* Record in stopped_from which timeout occurred. */
	tmr->stopped_from = type;
	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_SEND:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		did_output = true;
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * Safeguard. If there are chunks on the sent queue
			 * somewhere but no timers running, something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if (chk->whoTo != NULL) {
					break;
				}
			}
			if (chk != NULL) {
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
			}
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		did_output = false;
		break;
	case SCTP_TIMER_TYPE_RECV:
		KASSERT(inp != NULL && stcb != NULL && net == NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, NULL);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		did_output = true;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		did_output = true;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if ((net->dest_state & SCTP_ADDR_NOHB) == 0) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
			did_output = true;
		} else {
			did_output = false;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		did_output = true;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		KASSERT(inp != NULL && stcb == NULL && net == NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timosecret);
		(void)SCTP_GETTIME_TIMEVAL(&tv);
		inp->sctp_ep.time_of_secret_change = (unsigned int)tv.tv_sec;
		inp->sctp_ep.last_secret_number =
		    inp->sctp_ep.current_secret_number;
		inp->sctp_ep.current_secret_number++;
		if (inp->sctp_ep.current_secret_number >=
		    SCTP_HOW_MANY_SECRETS) {
			inp->sctp_ep.current_secret_number = 0;
		}
		secret = (int)inp->sctp_ep.current_secret_number;
		for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
			inp->sctp_ep.secret_key[secret][i] =
			    sctp_select_initial_TSN(&inp->sctp_ep);
		}
		sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
		did_output = false;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = false;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		did_output = true;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timoasconf);
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		did_output = true;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		KASSERT(inp != NULL && stcb != NULL && net == NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		KASSERT(inp != NULL && stcb != NULL && net == NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = true;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		KASSERT(inp != NULL && stcb != NULL && net == NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timostrmrst);
		if
(sctp_strreset_timer(inp, stcb)) { 2028 /* no need to unlock on tcb its gone */ 2029 goto out_decr; 2030 } 2031 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2032 did_output = true; 2033 break; 2034 case SCTP_TIMER_TYPE_INPKILL: 2035 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2036 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2037 type, inp, stcb, net)); 2038 SCTP_STAT_INCR(sctps_timoinpkill); 2039 /* 2040 * special case, take away our increment since WE are the 2041 * killer 2042 */ 2043 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2044 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2045 SCTP_INP_DECR_REF(inp); 2046 SCTP_INP_WUNLOCK(inp); 2047 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2048 SCTP_CALLED_FROM_INPKILL_TIMER); 2049 inp = NULL; 2050 goto out_decr; 2051 case SCTP_TIMER_TYPE_ASOCKILL: 2052 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2053 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2054 type, inp, stcb, net)); 2055 SCTP_STAT_INCR(sctps_timoassockill); 2056 /* Can we free it yet? */ 2057 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2058 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2059 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2060 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2061 /* 2062 * free asoc, always unlocks (or destroy's) so prevent 2063 * duplicate unlock or unlock of a free mtx :-0 2064 */ 2065 stcb = NULL; 2066 goto out_decr; 2067 case SCTP_TIMER_TYPE_ADDR_WQ: 2068 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2069 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2070 type, inp, stcb, net)); 2071 sctp_handle_addr_wq(); 2072 did_output = true; 2073 break; 2074 case SCTP_TIMER_TYPE_PRIM_DELETED: 2075 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2076 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2077 type, inp, stcb, net)); 2078 SCTP_STAT_INCR(sctps_timodelprim); 2079 sctp_delete_prim_timer(inp, stcb); 2080 did_output = false; 2081 break; 2082 default: 2083 #ifdef INVARIANTS 2084 panic("Unknown timer type %d", type); 2085 #else 2086 goto out; 2087 #endif 2088 } 2089 #ifdef SCTP_AUDITING_ENABLED 2090 sctp_audit_log(0xF1, (uint8_t)type); 2091 if (inp != NULL) 2092 sctp_auditing(5, inp, stcb, net); 2093 #endif 2094 if (did_output && (stcb != NULL)) { 2095 /* 2096 * Now we need to clean up the control chunk chain if an 2097 * ECNE is on it. It must be marked as UNSENT again so next 2098 * call will continue to send it until such time that we get 2099 * a CWR, to remove it. It is, however, less likely that we 2100 * will find a ecn echo on the chain though. 2101 */ 2102 sctp_fix_ecn_echo(&stcb->asoc); 2103 } 2104 out: 2105 if (stcb != NULL) { 2106 SCTP_TCB_UNLOCK(stcb); 2107 } else if (inp != NULL) { 2108 SCTP_INP_WUNLOCK(inp); 2109 } else { 2110 SCTP_WQ_ADDR_UNLOCK(); 2111 } 2112 2113 out_decr: 2114 /* These reference counts were incremented in sctp_timer_start(). */ 2115 if (inp != NULL) { 2116 SCTP_INP_DECR_REF(inp); 2117 } 2118 if ((stcb != NULL) && !released_asoc_reference) { 2119 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2120 } 2121 if (net != NULL) { 2122 sctp_free_remote_addr(net); 2123 } 2124 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2125 CURVNET_RESTORE(); 2126 NET_EPOCH_EXIT(et); 2127 } 2128 2129 /*- 2130 * The following table shows which parameters must be provided 2131 * when calling sctp_timer_start(). For parameters not being 2132 * provided, NULL must be used. 
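 *
 * As an illustrative example (matching the table below), arming the
 * retransmission timer for a destination looks like
 *
 *     sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
 *
 * whereas the address work-queue timer takes no pointers at all:
 *
 *     sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, NULL, NULL, NULL);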
2133 * 2134 * |Name |inp |stcb|net | 2135 * |-----------------------------|----|----|----| 2136 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2137 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2138 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2139 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2140 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2141 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2142 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2143 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2144 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2145 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2146 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2147 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2148 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2149 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2150 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2151 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2152 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2153 * 2154 */ 2155 2156 void 2157 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2158 struct sctp_nets *net) 2159 { 2160 struct sctp_timer *tmr; 2161 uint32_t to_ticks; 2162 uint32_t rndval, jitter; 2163 2164 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2165 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2166 t_type, stcb, stcb->sctp_ep)); 2167 tmr = NULL; 2168 if (stcb != NULL) { 2169 SCTP_TCB_LOCK_ASSERT(stcb); 2170 } else if (inp != NULL) { 2171 SCTP_INP_WLOCK_ASSERT(inp); 2172 } else { 2173 SCTP_WQ_ADDR_LOCK_ASSERT(); 2174 } 2175 if (stcb != NULL) { 2176 /* 2177 * Don't restart timer on association that's about to be 2178 * killed. 2179 */ 2180 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2181 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2182 SCTPDBG(SCTP_DEBUG_TIMER2, 2183 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2184 t_type, inp, stcb, net); 2185 return; 2186 } 2187 /* Don't restart timer on net that's been removed. */ 2188 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2189 SCTPDBG(SCTP_DEBUG_TIMER2, 2190 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2191 t_type, inp, stcb, net); 2192 return; 2193 } 2194 } 2195 switch (t_type) { 2196 case SCTP_TIMER_TYPE_SEND: 2197 /* Here we use the RTO timer. */ 2198 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2199 #ifdef INVARIANTS 2200 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2201 t_type, inp, stcb, net); 2202 #else 2203 return; 2204 #endif 2205 } 2206 tmr = &net->rxt_timer; 2207 if (net->RTO == 0) { 2208 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2209 } else { 2210 to_ticks = sctp_msecs_to_ticks(net->RTO); 2211 } 2212 break; 2213 case SCTP_TIMER_TYPE_INIT: 2214 /* 2215 * Here we use the INIT timer default usually about 1 2216 * second. 2217 */ 2218 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2219 #ifdef INVARIANTS 2220 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2221 t_type, inp, stcb, net); 2222 #else 2223 return; 2224 #endif 2225 } 2226 tmr = &net->rxt_timer; 2227 if (net->RTO == 0) { 2228 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2229 } else { 2230 to_ticks = sctp_msecs_to_ticks(net->RTO); 2231 } 2232 break; 2233 case SCTP_TIMER_TYPE_RECV: 2234 /* 2235 * Here we use the Delayed-Ack timer value from the inp, 2236 * usually about 200ms. 
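 * For example, assuming the typical 200 ms value and hz = 1000 (both
 * assumed here purely for illustration), the conversion below gives
 * sctp_msecs_to_ticks(200) == 200 ticks.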
2237 */ 2238 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2239 #ifdef INVARIANTS 2240 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2241 t_type, inp, stcb, net); 2242 #else 2243 return; 2244 #endif 2245 } 2246 tmr = &stcb->asoc.dack_timer; 2247 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2248 break; 2249 case SCTP_TIMER_TYPE_SHUTDOWN: 2250 /* Here we use the RTO of the destination. */ 2251 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2252 #ifdef INVARIANTS 2253 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2254 t_type, inp, stcb, net); 2255 #else 2256 return; 2257 #endif 2258 } 2259 tmr = &net->rxt_timer; 2260 if (net->RTO == 0) { 2261 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2262 } else { 2263 to_ticks = sctp_msecs_to_ticks(net->RTO); 2264 } 2265 break; 2266 case SCTP_TIMER_TYPE_HEARTBEAT: 2267 /* 2268 * The net is used here so that we can add in the RTO. Even 2269 * though we use a different timer. We also add the HB timer 2270 * PLUS a random jitter. 2271 */ 2272 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2273 #ifdef INVARIANTS 2274 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2275 t_type, inp, stcb, net); 2276 #else 2277 return; 2278 #endif 2279 } 2280 if ((net->dest_state & SCTP_ADDR_NOHB) && 2281 ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) { 2282 SCTPDBG(SCTP_DEBUG_TIMER2, 2283 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2284 t_type, inp, stcb, net); 2285 return; 2286 } 2287 tmr = &net->hb_timer; 2288 if (net->RTO == 0) { 2289 to_ticks = stcb->asoc.initial_rto; 2290 } else { 2291 to_ticks = net->RTO; 2292 } 2293 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2294 jitter = rndval % to_ticks; 2295 if (to_ticks > 1) { 2296 to_ticks >>= 1; 2297 } 2298 if (jitter < (UINT32_MAX - to_ticks)) { 2299 to_ticks += jitter; 2300 } else { 2301 to_ticks = UINT32_MAX; 2302 } 2303 if (!((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2304 (net->dest_state & SCTP_ADDR_REACHABLE)) && 2305 ((net->dest_state & SCTP_ADDR_PF) == 0)) { 2306 if (net->heart_beat_delay < (UINT32_MAX - to_ticks)) { 2307 to_ticks += net->heart_beat_delay; 2308 } else { 2309 to_ticks = UINT32_MAX; 2310 } 2311 } 2312 /* 2313 * Now we must convert the to_ticks that are now in ms to 2314 * ticks. 2315 */ 2316 to_ticks = sctp_msecs_to_ticks(to_ticks); 2317 break; 2318 case SCTP_TIMER_TYPE_COOKIE: 2319 /* 2320 * Here we can use the RTO timer from the network since one 2321 * RTT was complete. If a retransmission happened then we 2322 * will be using the RTO initial value. 2323 */ 2324 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2325 #ifdef INVARIANTS 2326 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2327 t_type, inp, stcb, net); 2328 #else 2329 return; 2330 #endif 2331 } 2332 tmr = &net->rxt_timer; 2333 if (net->RTO == 0) { 2334 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2335 } else { 2336 to_ticks = sctp_msecs_to_ticks(net->RTO); 2337 } 2338 break; 2339 case SCTP_TIMER_TYPE_NEWCOOKIE: 2340 /* 2341 * Nothing needed but the endpoint here usually about 60 2342 * minutes. 
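 * The interval is taken from sctp_timeoutticks[SCTP_TIMER_SIGNATURE]
 * just below and paces the cookie secret rotation performed in the
 * SCTP_TIMER_TYPE_NEWCOOKIE case of sctp_timeout_handler() above.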
2343 */ 2344 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2345 #ifdef INVARIANTS 2346 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2347 t_type, inp, stcb, net); 2348 #else 2349 return; 2350 #endif 2351 } 2352 tmr = &inp->sctp_ep.signature_change; 2353 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2354 break; 2355 case SCTP_TIMER_TYPE_PATHMTURAISE: 2356 /* 2357 * Here we use the value found in the EP for PMTUD, usually 2358 * about 10 minutes. 2359 */ 2360 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2361 #ifdef INVARIANTS 2362 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2363 t_type, inp, stcb, net); 2364 #else 2365 return; 2366 #endif 2367 } 2368 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2369 SCTPDBG(SCTP_DEBUG_TIMER2, 2370 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2371 t_type, inp, stcb, net); 2372 return; 2373 } 2374 tmr = &net->pmtu_timer; 2375 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2376 break; 2377 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2378 /* Here we use the RTO of the destination. */ 2379 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2380 #ifdef INVARIANTS 2381 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2382 t_type, inp, stcb, net); 2383 #else 2384 return; 2385 #endif 2386 } 2387 tmr = &net->rxt_timer; 2388 if (net->RTO == 0) { 2389 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2390 } else { 2391 to_ticks = sctp_msecs_to_ticks(net->RTO); 2392 } 2393 break; 2394 case SCTP_TIMER_TYPE_ASCONF: 2395 /* 2396 * Here the timer comes from the stcb but its value is from 2397 * the net's RTO. 2398 */ 2399 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2400 #ifdef INVARIANTS 2401 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2402 t_type, inp, stcb, net); 2403 #else 2404 return; 2405 #endif 2406 } 2407 tmr = &stcb->asoc.asconf_timer; 2408 if (net->RTO == 0) { 2409 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2410 } else { 2411 to_ticks = sctp_msecs_to_ticks(net->RTO); 2412 } 2413 break; 2414 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2415 /* 2416 * Here we use the endpoints shutdown guard timer usually 2417 * about 3 minutes. 2418 */ 2419 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2420 #ifdef INVARIANTS 2421 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2422 t_type, inp, stcb, net); 2423 #else 2424 return; 2425 #endif 2426 } 2427 tmr = &stcb->asoc.shut_guard_timer; 2428 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2429 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2430 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2431 } else { 2432 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2433 } 2434 } else { 2435 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2436 } 2437 break; 2438 case SCTP_TIMER_TYPE_AUTOCLOSE: 2439 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2440 #ifdef INVARIANTS 2441 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2442 t_type, inp, stcb, net); 2443 #else 2444 return; 2445 #endif 2446 } 2447 tmr = &stcb->asoc.autoclose_timer; 2448 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2449 break; 2450 case SCTP_TIMER_TYPE_STRRESET: 2451 /* 2452 * Here the timer comes from the stcb but its value is from 2453 * the net's RTO. 
2454 */ 2455 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2456 #ifdef INVARIANTS 2457 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2458 t_type, inp, stcb, net); 2459 #else 2460 return; 2461 #endif 2462 } 2463 tmr = &stcb->asoc.strreset_timer; 2464 if (net->RTO == 0) { 2465 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2466 } else { 2467 to_ticks = sctp_msecs_to_ticks(net->RTO); 2468 } 2469 break; 2470 case SCTP_TIMER_TYPE_INPKILL: 2471 /* 2472 * The inp is setup to die. We re-use the signature_change 2473 * timer since that has stopped and we are in the GONE 2474 * state. 2475 */ 2476 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2477 #ifdef INVARIANTS 2478 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2479 t_type, inp, stcb, net); 2480 #else 2481 return; 2482 #endif 2483 } 2484 tmr = &inp->sctp_ep.signature_change; 2485 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2486 break; 2487 case SCTP_TIMER_TYPE_ASOCKILL: 2488 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2489 #ifdef INVARIANTS 2490 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2491 t_type, inp, stcb, net); 2492 #else 2493 return; 2494 #endif 2495 } 2496 tmr = &stcb->asoc.strreset_timer; 2497 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2498 break; 2499 case SCTP_TIMER_TYPE_ADDR_WQ: 2500 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2501 #ifdef INVARIANTS 2502 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2503 t_type, inp, stcb, net); 2504 #else 2505 return; 2506 #endif 2507 } 2508 /* Only 1 tick away :-) */ 2509 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2510 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2511 break; 2512 case SCTP_TIMER_TYPE_PRIM_DELETED: 2513 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2514 #ifdef INVARIANTS 2515 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2516 t_type, inp, stcb, net); 2517 #else 2518 return; 2519 #endif 2520 } 2521 tmr = &stcb->asoc.delete_prim_timer; 2522 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2523 break; 2524 default: 2525 #ifdef INVARIANTS 2526 panic("Unknown timer type %d", t_type); 2527 #else 2528 return; 2529 #endif 2530 } 2531 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2532 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2533 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2534 /* 2535 * We do NOT allow you to have it already running. If it is, 2536 * we leave the current one up unchanged. 2537 */ 2538 SCTPDBG(SCTP_DEBUG_TIMER2, 2539 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2540 t_type, inp, stcb, net); 2541 return; 2542 } 2543 /* At this point we can proceed. */ 2544 if (t_type == SCTP_TIMER_TYPE_SEND) { 2545 stcb->asoc.num_send_timers_up++; 2546 } 2547 tmr->stopped_from = 0; 2548 tmr->type = t_type; 2549 tmr->ep = (void *)inp; 2550 tmr->tcb = (void *)stcb; 2551 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2552 tmr->net = NULL; 2553 } else { 2554 tmr->net = (void *)net; 2555 } 2556 tmr->self = (void *)tmr; 2557 tmr->vnet = (void *)curvnet; 2558 tmr->ticks = sctp_get_tick_count(); 2559 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2560 SCTPDBG(SCTP_DEBUG_TIMER2, 2561 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2562 t_type, to_ticks, inp, stcb, net); 2563 /* 2564 * If this is a newly scheduled callout, as opposed to a 2565 * rescheduled one, increment relevant reference counts. 
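 * Each reference taken here is paired with a release either in
 * sctp_timeout_handler() when the callout fires, or in
 * sctp_timer_stop() when a pending callout is cancelled.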
2566 */ 2567 if (tmr->ep != NULL) { 2568 SCTP_INP_INCR_REF(inp); 2569 } 2570 if (tmr->tcb != NULL) { 2571 atomic_add_int(&stcb->asoc.refcnt, 1); 2572 } 2573 if (tmr->net != NULL) { 2574 atomic_add_int(&net->ref_count, 1); 2575 } 2576 } else { 2577 /* 2578 * This should not happen, since we checked for pending 2579 * above. 2580 */ 2581 SCTPDBG(SCTP_DEBUG_TIMER2, 2582 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2583 t_type, to_ticks, inp, stcb, net); 2584 } 2585 return; 2586 } 2587 2588 /*- 2589 * The following table shows which parameters must be provided 2590 * when calling sctp_timer_stop(). For parameters not being 2591 * provided, NULL must be used. 2592 * 2593 * |Name |inp |stcb|net | 2594 * |-----------------------------|----|----|----| 2595 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2596 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2597 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2598 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2599 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2600 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2601 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2602 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2603 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2604 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2605 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2606 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2607 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2608 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2609 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2610 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2611 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2612 * 2613 */ 2614 2615 void 2616 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2617 struct sctp_nets *net, uint32_t from) 2618 { 2619 struct sctp_timer *tmr; 2620 2621 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2622 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2623 t_type, stcb, stcb->sctp_ep)); 2624 if (stcb != NULL) { 2625 SCTP_TCB_LOCK_ASSERT(stcb); 2626 } else if (inp != NULL) { 2627 SCTP_INP_WLOCK_ASSERT(inp); 2628 } else { 2629 SCTP_WQ_ADDR_LOCK_ASSERT(); 2630 } 2631 tmr = NULL; 2632 switch (t_type) { 2633 case SCTP_TIMER_TYPE_SEND: 2634 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2635 #ifdef INVARIANTS 2636 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2637 t_type, inp, stcb, net); 2638 #else 2639 return; 2640 #endif 2641 } 2642 tmr = &net->rxt_timer; 2643 break; 2644 case SCTP_TIMER_TYPE_INIT: 2645 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2646 #ifdef INVARIANTS 2647 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2648 t_type, inp, stcb, net); 2649 #else 2650 return; 2651 #endif 2652 } 2653 tmr = &net->rxt_timer; 2654 break; 2655 case SCTP_TIMER_TYPE_RECV: 2656 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2657 #ifdef INVARIANTS 2658 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2659 t_type, inp, stcb, net); 2660 #else 2661 return; 2662 #endif 2663 } 2664 tmr = &stcb->asoc.dack_timer; 2665 break; 2666 case SCTP_TIMER_TYPE_SHUTDOWN: 2667 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2668 #ifdef INVARIANTS 2669 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2670 t_type, inp, stcb, net); 2671 #else 2672 return; 2673 #endif 2674 } 2675 tmr = &net->rxt_timer; 2676 break; 2677 case SCTP_TIMER_TYPE_HEARTBEAT: 2678 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2679 #ifdef INVARIANTS 2680 panic("sctp_timer_stop of type %d: inp = 
%p, stcb = %p, net = %p", 2681 t_type, inp, stcb, net); 2682 #else 2683 return; 2684 #endif 2685 } 2686 tmr = &net->hb_timer; 2687 break; 2688 case SCTP_TIMER_TYPE_COOKIE: 2689 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2690 #ifdef INVARIANTS 2691 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2692 t_type, inp, stcb, net); 2693 #else 2694 return; 2695 #endif 2696 } 2697 tmr = &net->rxt_timer; 2698 break; 2699 case SCTP_TIMER_TYPE_NEWCOOKIE: 2700 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2701 #ifdef INVARIANTS 2702 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2703 t_type, inp, stcb, net); 2704 #else 2705 return; 2706 #endif 2707 } 2708 tmr = &inp->sctp_ep.signature_change; 2709 break; 2710 case SCTP_TIMER_TYPE_PATHMTURAISE: 2711 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2712 #ifdef INVARIANTS 2713 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2714 t_type, inp, stcb, net); 2715 #else 2716 return; 2717 #endif 2718 } 2719 tmr = &net->pmtu_timer; 2720 break; 2721 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2722 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2723 #ifdef INVARIANTS 2724 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2725 t_type, inp, stcb, net); 2726 #else 2727 return; 2728 #endif 2729 } 2730 tmr = &net->rxt_timer; 2731 break; 2732 case SCTP_TIMER_TYPE_ASCONF: 2733 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2734 #ifdef INVARIANTS 2735 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2736 t_type, inp, stcb, net); 2737 #else 2738 return; 2739 #endif 2740 } 2741 tmr = &stcb->asoc.asconf_timer; 2742 break; 2743 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2744 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2745 #ifdef INVARIANTS 2746 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2747 t_type, inp, stcb, net); 2748 #else 2749 return; 2750 #endif 2751 } 2752 tmr = &stcb->asoc.shut_guard_timer; 2753 break; 2754 case SCTP_TIMER_TYPE_AUTOCLOSE: 2755 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2756 #ifdef INVARIANTS 2757 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2758 t_type, inp, stcb, net); 2759 #else 2760 return; 2761 #endif 2762 } 2763 tmr = &stcb->asoc.autoclose_timer; 2764 break; 2765 case SCTP_TIMER_TYPE_STRRESET: 2766 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2767 #ifdef INVARIANTS 2768 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2769 t_type, inp, stcb, net); 2770 #else 2771 return; 2772 #endif 2773 } 2774 tmr = &stcb->asoc.strreset_timer; 2775 break; 2776 case SCTP_TIMER_TYPE_INPKILL: 2777 /* 2778 * The inp is setup to die. We re-use the signature_change 2779 * timer since that has stopped and we are in the GONE 2780 * state. 
2781 */ 2782 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2783 #ifdef INVARIANTS 2784 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2785 t_type, inp, stcb, net); 2786 #else 2787 return; 2788 #endif 2789 } 2790 tmr = &inp->sctp_ep.signature_change; 2791 break; 2792 case SCTP_TIMER_TYPE_ASOCKILL: 2793 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2794 #ifdef INVARIANTS 2795 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2796 t_type, inp, stcb, net); 2797 #else 2798 return; 2799 #endif 2800 } 2801 tmr = &stcb->asoc.strreset_timer; 2802 break; 2803 case SCTP_TIMER_TYPE_ADDR_WQ: 2804 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2805 #ifdef INVARIANTS 2806 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2807 t_type, inp, stcb, net); 2808 #else 2809 return; 2810 #endif 2811 } 2812 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2813 break; 2814 case SCTP_TIMER_TYPE_PRIM_DELETED: 2815 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2816 #ifdef INVARIANTS 2817 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2818 t_type, inp, stcb, net); 2819 #else 2820 return; 2821 #endif 2822 } 2823 tmr = &stcb->asoc.delete_prim_timer; 2824 break; 2825 default: 2826 #ifdef INVARIANTS 2827 panic("Unknown timer type %d", t_type); 2828 #else 2829 return; 2830 #endif 2831 } 2832 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2833 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2834 (tmr->type != t_type)) { 2835 /* 2836 * Ok we have a timer that is under joint use. Cookie timer 2837 * per chance with the SEND timer. We therefore are NOT 2838 * running the timer that the caller wants stopped. So just 2839 * return. 2840 */ 2841 SCTPDBG(SCTP_DEBUG_TIMER2, 2842 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2843 t_type, inp, stcb, net); 2844 return; 2845 } 2846 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2847 stcb->asoc.num_send_timers_up--; 2848 if (stcb->asoc.num_send_timers_up < 0) { 2849 stcb->asoc.num_send_timers_up = 0; 2850 } 2851 } 2852 tmr->self = NULL; 2853 tmr->stopped_from = from; 2854 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2855 KASSERT(tmr->ep == inp, 2856 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2857 t_type, inp, tmr->ep)); 2858 KASSERT(tmr->tcb == stcb, 2859 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2860 t_type, stcb, tmr->tcb)); 2861 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2862 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2863 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2864 t_type, net, tmr->net)); 2865 SCTPDBG(SCTP_DEBUG_TIMER2, 2866 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2867 t_type, inp, stcb, net); 2868 /* 2869 * If the timer was actually stopped, decrement reference 2870 * counts that were incremented in sctp_timer_start(). 2871 */ 2872 if (tmr->ep != NULL) { 2873 tmr->ep = NULL; 2874 SCTP_INP_DECR_REF(inp); 2875 } 2876 if (tmr->tcb != NULL) { 2877 tmr->tcb = NULL; 2878 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2879 } 2880 if (tmr->net != NULL) { 2881 struct sctp_nets *tmr_net; 2882 2883 /* 2884 * Can't use net, since it doesn't work for 2885 * SCTP_TIMER_TYPE_ASCONF. 
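 * For ASCONF the caller passes net == NULL while tmr->net still
 * holds the destination captured at start time, so the reference
 * must be dropped via tmr->net rather than via the argument.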
2886 */ 2887 tmr_net = tmr->net; 2888 tmr->net = NULL; 2889 sctp_free_remote_addr(tmr_net); 2890 } 2891 } else { 2892 SCTPDBG(SCTP_DEBUG_TIMER2, 2893 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2894 t_type, inp, stcb, net); 2895 } 2896 return; 2897 } 2898 2899 uint32_t 2900 sctp_calculate_len(struct mbuf *m) 2901 { 2902 struct mbuf *at; 2903 uint32_t tlen; 2904 2905 tlen = 0; 2906 for (at = m; at != NULL; at = SCTP_BUF_NEXT(at)) { 2907 tlen += SCTP_BUF_LEN(at); 2908 } 2909 return (tlen); 2910 } 2911 2912 /* 2913 * Given an association and starting time of the current RTT period, update 2914 * RTO in number of msecs. net should point to the current network. 2915 * Return 1, if an RTO update was performed, return 0 if no update was 2916 * performed due to invalid starting point. 2917 */ 2918 2919 int 2920 sctp_calculate_rto(struct sctp_tcb *stcb, 2921 struct sctp_association *asoc, 2922 struct sctp_nets *net, 2923 struct timeval *old, 2924 int rtt_from_sack) 2925 { 2926 struct timeval now; 2927 uint64_t rtt_us; /* RTT in us */ 2928 int32_t rtt; /* RTT in ms */ 2929 uint32_t new_rto; 2930 int first_measure = 0; 2931 2932 /************************/ 2933 /* 1. calculate new RTT */ 2934 /************************/ 2935 /* get the current time */ 2936 if (stcb->asoc.use_precise_time) { 2937 (void)SCTP_GETPTIME_TIMEVAL(&now); 2938 } else { 2939 (void)SCTP_GETTIME_TIMEVAL(&now); 2940 } 2941 if ((old->tv_sec > now.tv_sec) || 2942 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2943 /* The starting point is in the future. */ 2944 return (0); 2945 } 2946 timevalsub(&now, old); 2947 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2948 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2949 /* The RTT is larger than a sane value. */ 2950 return (0); 2951 } 2952 /* store the current RTT in us */ 2953 net->rtt = rtt_us; 2954 /* compute rtt in ms */ 2955 rtt = (int32_t)(net->rtt / 1000); 2956 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2957 /* 2958 * Tell the CC module that a new update has just occurred 2959 * from a sack 2960 */ 2961 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2962 } 2963 /* 2964 * Do we need to determine the lan? We do this only on sacks i.e. 2965 * RTT being determined from data not non-data (HB/INIT->INITACK). 2966 */ 2967 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2968 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2969 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2970 net->lan_type = SCTP_LAN_INTERNET; 2971 } else { 2972 net->lan_type = SCTP_LAN_LOCAL; 2973 } 2974 } 2975 2976 /***************************/ 2977 /* 2. update RTTVAR & SRTT */ 2978 /***************************/ 2979 /*- 2980 * Compute the scaled average lastsa and the 2981 * scaled variance lastsv as described in van Jacobson 2982 * Paper "Congestion Avoidance and Control", Annex A. 
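 *
 * In un-scaled form this is the usual recurrence (with the
 * alpha = 1/8 and beta = 1/4 recommended by RFC 4960, Section 6.3.1):
 *
 *     SRTT   <- (1 - alpha) * SRTT   + alpha * R'
 *     RTTVAR <- (1 - beta)  * RTTVAR + beta  * |SRTT - R'|
 *     RTO     = SRTT + 4 * RTTVAR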
2983 * 2984 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2985 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2986 */ 2987 if (net->RTO_measured) { 2988 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2989 net->lastsa += rtt; 2990 if (rtt < 0) { 2991 rtt = -rtt; 2992 } 2993 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 2994 net->lastsv += rtt; 2995 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2996 rto_logging(net, SCTP_LOG_RTTVAR); 2997 } 2998 } else { 2999 /* First RTO measurement */ 3000 net->RTO_measured = 1; 3001 first_measure = 1; 3002 net->lastsa = rtt << SCTP_RTT_SHIFT; 3003 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3004 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3005 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3006 } 3007 } 3008 if (net->lastsv == 0) { 3009 net->lastsv = SCTP_CLOCK_GRANULARITY; 3010 } 3011 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3012 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3013 (stcb->asoc.sat_network_lockout == 0)) { 3014 stcb->asoc.sat_network = 1; 3015 } else if ((!first_measure) && stcb->asoc.sat_network) { 3016 stcb->asoc.sat_network = 0; 3017 stcb->asoc.sat_network_lockout = 1; 3018 } 3019 /* bound it, per C6/C7 in Section 5.3.1 */ 3020 if (new_rto < stcb->asoc.minrto) { 3021 new_rto = stcb->asoc.minrto; 3022 } 3023 if (new_rto > stcb->asoc.maxrto) { 3024 new_rto = stcb->asoc.maxrto; 3025 } 3026 net->RTO = new_rto; 3027 return (1); 3028 } 3029 3030 /* 3031 * return a pointer to a contiguous piece of data from the given mbuf chain 3032 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3033 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 3034 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 3035 */ 3036 caddr_t 3037 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3038 { 3039 uint32_t count; 3040 uint8_t *ptr; 3041 3042 ptr = in_ptr; 3043 if ((off < 0) || (len <= 0)) 3044 return (NULL); 3045 3046 /* find the desired start location */ 3047 while ((m != NULL) && (off > 0)) { 3048 if (off < SCTP_BUF_LEN(m)) 3049 break; 3050 off -= SCTP_BUF_LEN(m); 3051 m = SCTP_BUF_NEXT(m); 3052 } 3053 if (m == NULL) 3054 return (NULL); 3055 3056 /* is the current mbuf large enough (eg. contiguous)? */ 3057 if ((SCTP_BUF_LEN(m) - off) >= len) { 3058 return (mtod(m, caddr_t)+off); 3059 } else { 3060 /* else, it spans more than one mbuf, so save a temp copy... */ 3061 while ((m != NULL) && (len > 0)) { 3062 count = min(SCTP_BUF_LEN(m) - off, len); 3063 memcpy(ptr, mtod(m, caddr_t)+off, count); 3064 len -= count; 3065 ptr += count; 3066 off = 0; 3067 m = SCTP_BUF_NEXT(m); 3068 } 3069 if ((m == NULL) && (len > 0)) 3070 return (NULL); 3071 else 3072 return ((caddr_t)in_ptr); 3073 } 3074 } 3075 3076 struct sctp_paramhdr * 3077 sctp_get_next_param(struct mbuf *m, 3078 int offset, 3079 struct sctp_paramhdr *pull, 3080 int pull_limit) 3081 { 3082 /* This just provides a typed signature to Peter's Pull routine */ 3083 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3084 (uint8_t *)pull)); 3085 } 3086 3087 struct mbuf * 3088 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3089 { 3090 struct mbuf *m_last; 3091 caddr_t dp; 3092 3093 if (padlen > 3) { 3094 return (NULL); 3095 } 3096 if (padlen <= M_TRAILINGSPACE(m)) { 3097 /* 3098 * The easy way. 
We hope the majority of the time we hit 3099 * here :) 3100 */ 3101 m_last = m; 3102 } else { 3103 /* Hard way we must grow the mbuf chain */ 3104 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3105 if (m_last == NULL) { 3106 return (NULL); 3107 } 3108 SCTP_BUF_LEN(m_last) = 0; 3109 SCTP_BUF_NEXT(m_last) = NULL; 3110 SCTP_BUF_NEXT(m) = m_last; 3111 } 3112 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3113 SCTP_BUF_LEN(m_last) += padlen; 3114 memset(dp, 0, padlen); 3115 return (m_last); 3116 } 3117 3118 struct mbuf * 3119 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3120 { 3121 /* find the last mbuf in chain and pad it */ 3122 struct mbuf *m_at; 3123 3124 if (last_mbuf != NULL) { 3125 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3126 } else { 3127 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3128 if (SCTP_BUF_NEXT(m_at) == NULL) { 3129 return (sctp_add_pad_tombuf(m_at, padval)); 3130 } 3131 } 3132 } 3133 return (NULL); 3134 } 3135 3136 static void 3137 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3138 uint16_t error, struct sctp_abort_chunk *abort, 3139 bool from_peer, bool timedout, int so_locked) 3140 { 3141 struct mbuf *m_notify; 3142 struct sctp_assoc_change *sac; 3143 struct sctp_queued_to_read *control; 3144 unsigned int notif_len; 3145 uint16_t abort_len; 3146 unsigned int i; 3147 3148 KASSERT(abort == NULL || from_peer, 3149 ("sctp_notify_assoc_change: ABORT chunk provided for local termination")); 3150 KASSERT(!from_peer || !timedout, 3151 ("sctp_notify_assoc_change: timeouts can only be local")); 3152 if (stcb == NULL) { 3153 return; 3154 } 3155 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3156 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3157 if (abort != NULL) { 3158 abort_len = ntohs(abort->ch.chunk_length); 3159 /* 3160 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3161 * contiguous. 3162 */ 3163 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3164 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3165 } 3166 } else { 3167 abort_len = 0; 3168 } 3169 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3170 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3171 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3172 notif_len += abort_len; 3173 } 3174 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3175 if (m_notify == NULL) { 3176 /* Retry with smaller value. 
*/ 3177 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3178 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3179 if (m_notify == NULL) { 3180 goto set_error; 3181 } 3182 } 3183 SCTP_BUF_NEXT(m_notify) = NULL; 3184 sac = mtod(m_notify, struct sctp_assoc_change *); 3185 memset(sac, 0, notif_len); 3186 sac->sac_type = SCTP_ASSOC_CHANGE; 3187 sac->sac_flags = 0; 3188 sac->sac_length = sizeof(struct sctp_assoc_change); 3189 sac->sac_state = state; 3190 sac->sac_error = error; 3191 if (state == SCTP_CANT_STR_ASSOC) { 3192 sac->sac_outbound_streams = 0; 3193 sac->sac_inbound_streams = 0; 3194 } else { 3195 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3196 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3197 } 3198 sac->sac_assoc_id = sctp_get_associd(stcb); 3199 if (notif_len > sizeof(struct sctp_assoc_change)) { 3200 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3201 i = 0; 3202 if (stcb->asoc.prsctp_supported == 1) { 3203 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3204 } 3205 if (stcb->asoc.auth_supported == 1) { 3206 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3207 } 3208 if (stcb->asoc.asconf_supported == 1) { 3209 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3210 } 3211 if (stcb->asoc.idata_supported == 1) { 3212 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3213 } 3214 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3215 if (stcb->asoc.reconfig_supported == 1) { 3216 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3217 } 3218 sac->sac_length += i; 3219 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3220 memcpy(sac->sac_info, abort, abort_len); 3221 sac->sac_length += abort_len; 3222 } 3223 } 3224 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3225 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3226 0, 0, stcb->asoc.context, 0, 0, 0, 3227 m_notify); 3228 if (control != NULL) { 3229 control->length = SCTP_BUF_LEN(m_notify); 3230 control->spec_flags = M_NOTIFICATION; 3231 /* not that we need this */ 3232 control->tail_mbuf = m_notify; 3233 sctp_add_to_readq(stcb->sctp_ep, stcb, 3234 control, 3235 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3236 so_locked); 3237 } else { 3238 sctp_m_freem(m_notify); 3239 } 3240 } 3241 /* 3242 * For 1-to-1 style sockets, we send up and error when an ABORT 3243 * comes in. 
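 * The error reported below is ECONNREFUSED if the association was
 * still in COOKIE_WAIT, ECONNRESET for other peer-initiated aborts,
 * and ETIMEDOUT or ECONNABORTED for locally generated terminations.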
3244 */ 3245 set_error: 3246 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3247 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3248 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3249 SOCK_LOCK(stcb->sctp_socket); 3250 if (from_peer) { 3251 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3252 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3253 stcb->sctp_socket->so_error = ECONNREFUSED; 3254 } else { 3255 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3256 stcb->sctp_socket->so_error = ECONNRESET; 3257 } 3258 } else { 3259 if (timedout) { 3260 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3261 stcb->sctp_socket->so_error = ETIMEDOUT; 3262 } else { 3263 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3264 stcb->sctp_socket->so_error = ECONNABORTED; 3265 } 3266 } 3267 SOCK_UNLOCK(stcb->sctp_socket); 3268 } 3269 /* Wake ANY sleepers */ 3270 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3271 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3272 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3273 socantrcvmore(stcb->sctp_socket); 3274 } 3275 sorwakeup(stcb->sctp_socket); 3276 sowwakeup(stcb->sctp_socket); 3277 } 3278 3279 static void 3280 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3281 struct sockaddr *sa, uint32_t error, int so_locked) 3282 { 3283 struct mbuf *m_notify; 3284 struct sctp_paddr_change *spc; 3285 struct sctp_queued_to_read *control; 3286 3287 if ((stcb == NULL) || 3288 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3289 /* event not enabled */ 3290 return; 3291 } 3292 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3293 if (m_notify == NULL) 3294 return; 3295 SCTP_BUF_LEN(m_notify) = 0; 3296 spc = mtod(m_notify, struct sctp_paddr_change *); 3297 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3298 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3299 spc->spc_flags = 0; 3300 spc->spc_length = sizeof(struct sctp_paddr_change); 3301 switch (sa->sa_family) { 3302 #ifdef INET 3303 case AF_INET: 3304 #ifdef INET6 3305 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3306 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3307 (struct sockaddr_in6 *)&spc->spc_aaddr); 3308 } else { 3309 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3310 } 3311 #else 3312 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3313 #endif 3314 break; 3315 #endif 3316 #ifdef INET6 3317 case AF_INET6: 3318 { 3319 struct sockaddr_in6 *sin6; 3320 3321 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3322 3323 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3324 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3325 if (sin6->sin6_scope_id == 0) { 3326 /* recover scope_id for user */ 3327 (void)sa6_recoverscope(sin6); 3328 } else { 3329 /* clear embedded scope_id for user */ 3330 in6_clearscope(&sin6->sin6_addr); 3331 } 3332 } 3333 break; 3334 } 3335 #endif 3336 default: 3337 /* TSNH */ 3338 break; 3339 } 3340 spc->spc_state = state; 3341 spc->spc_error = error; 3342 spc->spc_assoc_id = sctp_get_associd(stcb); 3343 3344 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3345 SCTP_BUF_NEXT(m_notify) = NULL; 3346 3347 /* append to socket */ 3348 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3349 0, 0, stcb->asoc.context, 0, 0, 0, 3350 m_notify); 3351 if 
(control == NULL) { 3352 /* no memory */ 3353 sctp_m_freem(m_notify); 3354 return; 3355 } 3356 control->length = SCTP_BUF_LEN(m_notify); 3357 control->spec_flags = M_NOTIFICATION; 3358 /* not that we need this */ 3359 control->tail_mbuf = m_notify; 3360 sctp_add_to_readq(stcb->sctp_ep, stcb, 3361 control, 3362 &stcb->sctp_socket->so_rcv, 1, 3363 SCTP_READ_LOCK_NOT_HELD, 3364 so_locked); 3365 } 3366 3367 static void 3368 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3369 struct sctp_tmit_chunk *chk, int so_locked) 3370 { 3371 struct mbuf *m_notify; 3372 struct sctp_send_failed *ssf; 3373 struct sctp_send_failed_event *ssfe; 3374 struct sctp_queued_to_read *control; 3375 struct sctp_chunkhdr *chkhdr; 3376 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3377 3378 if ((stcb == NULL) || 3379 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3380 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3381 /* event not enabled */ 3382 return; 3383 } 3384 3385 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3386 notifhdr_len = sizeof(struct sctp_send_failed_event); 3387 } else { 3388 notifhdr_len = sizeof(struct sctp_send_failed); 3389 } 3390 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3391 if (m_notify == NULL) 3392 /* no space left */ 3393 return; 3394 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3395 if (stcb->asoc.idata_supported) { 3396 chkhdr_len = sizeof(struct sctp_idata_chunk); 3397 } else { 3398 chkhdr_len = sizeof(struct sctp_data_chunk); 3399 } 3400 /* Use some defaults in case we can't access the chunk header */ 3401 if (chk->send_size >= chkhdr_len) { 3402 payload_len = chk->send_size - chkhdr_len; 3403 } else { 3404 payload_len = 0; 3405 } 3406 padding_len = 0; 3407 if (chk->data != NULL) { 3408 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3409 if (chkhdr != NULL) { 3410 chk_len = ntohs(chkhdr->chunk_length); 3411 if ((chk_len >= chkhdr_len) && 3412 (chk->send_size >= chk_len) && 3413 (chk->send_size - chk_len < 4)) { 3414 padding_len = chk->send_size - chk_len; 3415 payload_len = chk->send_size - chkhdr_len - padding_len; 3416 } 3417 } 3418 } 3419 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3420 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3421 memset(ssfe, 0, notifhdr_len); 3422 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3423 if (sent) { 3424 ssfe->ssfe_flags = SCTP_DATA_SENT; 3425 } else { 3426 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3427 } 3428 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3429 ssfe->ssfe_error = error; 3430 /* not exactly what the user sent in, but should be close :) */ 3431 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3432 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3433 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3434 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3435 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3436 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3437 } else { 3438 ssf = mtod(m_notify, struct sctp_send_failed *); 3439 memset(ssf, 0, notifhdr_len); 3440 ssf->ssf_type = SCTP_SEND_FAILED; 3441 if (sent) { 3442 ssf->ssf_flags = SCTP_DATA_SENT; 3443 } else { 3444 ssf->ssf_flags = SCTP_DATA_UNSENT; 3445 } 3446 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3447 ssf->ssf_error = error; 3448 /* not exactly what the user sent in, but should be close :) */ 3449 
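/*
 * Note: chk->rec.data.mid is cast down to the legacy 16-bit
 * sinfo_ssn field just below, so a 32-bit I-DATA message
 * identifier is truncated here.
 */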
ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3450 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3451 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3452 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3453 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3454 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3455 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3456 } 3457 if (chk->data != NULL) { 3458 /* Trim off the sctp chunk header (it should be there) */ 3459 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3460 m_adj(chk->data, chkhdr_len); 3461 m_adj(chk->data, -padding_len); 3462 sctp_mbuf_crush(chk->data); 3463 chk->send_size -= (chkhdr_len + padding_len); 3464 } 3465 } 3466 SCTP_BUF_NEXT(m_notify) = chk->data; 3467 /* Steal off the mbuf */ 3468 chk->data = NULL; 3469 /* 3470 * For this case, we check the actual socket buffer, since the assoc 3471 * is going away we don't want to overfill the socket buffer for a 3472 * non-reader 3473 */ 3474 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3475 sctp_m_freem(m_notify); 3476 return; 3477 } 3478 /* append to socket */ 3479 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3480 0, 0, stcb->asoc.context, 0, 0, 0, 3481 m_notify); 3482 if (control == NULL) { 3483 /* no memory */ 3484 sctp_m_freem(m_notify); 3485 return; 3486 } 3487 control->length = SCTP_BUF_LEN(m_notify); 3488 control->spec_flags = M_NOTIFICATION; 3489 /* not that we need this */ 3490 control->tail_mbuf = m_notify; 3491 sctp_add_to_readq(stcb->sctp_ep, stcb, 3492 control, 3493 &stcb->sctp_socket->so_rcv, 1, 3494 SCTP_READ_LOCK_NOT_HELD, 3495 so_locked); 3496 } 3497 3498 static void 3499 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3500 struct sctp_stream_queue_pending *sp, int so_locked) 3501 { 3502 struct mbuf *m_notify; 3503 struct sctp_send_failed *ssf; 3504 struct sctp_send_failed_event *ssfe; 3505 struct sctp_queued_to_read *control; 3506 int notifhdr_len; 3507 3508 if ((stcb == NULL) || 3509 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3510 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3511 /* event not enabled */ 3512 return; 3513 } 3514 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3515 notifhdr_len = sizeof(struct sctp_send_failed_event); 3516 } else { 3517 notifhdr_len = sizeof(struct sctp_send_failed); 3518 } 3519 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3520 if (m_notify == NULL) { 3521 /* no space left */ 3522 return; 3523 } 3524 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3525 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3526 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3527 memset(ssfe, 0, notifhdr_len); 3528 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3529 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3530 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3531 ssfe->ssfe_error = error; 3532 /* not exactly what the user sent in, but should be close :) */ 3533 ssfe->ssfe_info.snd_sid = sp->sid; 3534 if (sp->some_taken) { 3535 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3536 } else { 3537 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3538 } 3539 ssfe->ssfe_info.snd_ppid = sp->ppid; 3540 ssfe->ssfe_info.snd_context = sp->context; 3541 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3542 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3543 } else { 
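/*
 * Fall back to the older SCTP_SEND_FAILED notification; the
 * sctp_send_failed_event layout filled in above is the newer
 * RFC 6458 form.
 */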
3544 ssf = mtod(m_notify, struct sctp_send_failed *); 3545 memset(ssf, 0, notifhdr_len); 3546 ssf->ssf_type = SCTP_SEND_FAILED; 3547 ssf->ssf_flags = SCTP_DATA_UNSENT; 3548 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3549 ssf->ssf_error = error; 3550 /* not exactly what the user sent in, but should be close :) */ 3551 ssf->ssf_info.sinfo_stream = sp->sid; 3552 ssf->ssf_info.sinfo_ssn = 0; 3553 if (sp->some_taken) { 3554 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3555 } else { 3556 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3557 } 3558 ssf->ssf_info.sinfo_ppid = sp->ppid; 3559 ssf->ssf_info.sinfo_context = sp->context; 3560 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3561 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3562 } 3563 SCTP_BUF_NEXT(m_notify) = sp->data; 3564 3565 /* Steal off the mbuf */ 3566 sp->data = NULL; 3567 /* 3568 * For this case, we check the actual socket buffer, since the assoc 3569 * is going away we don't want to overfill the socket buffer for a 3570 * non-reader 3571 */ 3572 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3573 sctp_m_freem(m_notify); 3574 return; 3575 } 3576 /* append to socket */ 3577 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3578 0, 0, stcb->asoc.context, 0, 0, 0, 3579 m_notify); 3580 if (control == NULL) { 3581 /* no memory */ 3582 sctp_m_freem(m_notify); 3583 return; 3584 } 3585 control->length = SCTP_BUF_LEN(m_notify); 3586 control->spec_flags = M_NOTIFICATION; 3587 /* not that we need this */ 3588 control->tail_mbuf = m_notify; 3589 sctp_add_to_readq(stcb->sctp_ep, stcb, 3590 control, 3591 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3592 } 3593 3594 static void 3595 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3596 { 3597 struct mbuf *m_notify; 3598 struct sctp_adaptation_event *sai; 3599 struct sctp_queued_to_read *control; 3600 3601 if ((stcb == NULL) || 3602 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3603 /* event not enabled */ 3604 return; 3605 } 3606 3607 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3608 if (m_notify == NULL) 3609 /* no space left */ 3610 return; 3611 SCTP_BUF_LEN(m_notify) = 0; 3612 sai = mtod(m_notify, struct sctp_adaptation_event *); 3613 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3614 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3615 sai->sai_flags = 0; 3616 sai->sai_length = sizeof(struct sctp_adaptation_event); 3617 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3618 sai->sai_assoc_id = sctp_get_associd(stcb); 3619 3620 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3621 SCTP_BUF_NEXT(m_notify) = NULL; 3622 3623 /* append to socket */ 3624 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3625 0, 0, stcb->asoc.context, 0, 0, 0, 3626 m_notify); 3627 if (control == NULL) { 3628 /* no memory */ 3629 sctp_m_freem(m_notify); 3630 return; 3631 } 3632 control->length = SCTP_BUF_LEN(m_notify); 3633 control->spec_flags = M_NOTIFICATION; 3634 /* not that we need this */ 3635 control->tail_mbuf = m_notify; 3636 sctp_add_to_readq(stcb->sctp_ep, stcb, 3637 control, 3638 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3639 } 3640 3641 /* This always must be called with the read-queue LOCKED in the INP */ 3642 static void 3643 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3644 uint32_t val, int 
so_locked) 3645 { 3646 struct mbuf *m_notify; 3647 struct sctp_pdapi_event *pdapi; 3648 struct sctp_queued_to_read *control; 3649 struct sockbuf *sb; 3650 3651 if ((stcb == NULL) || 3652 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3653 /* event not enabled */ 3654 return; 3655 } 3656 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3657 return; 3658 } 3659 3660 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3661 if (m_notify == NULL) 3662 /* no space left */ 3663 return; 3664 SCTP_BUF_LEN(m_notify) = 0; 3665 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3666 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3667 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3668 pdapi->pdapi_flags = 0; 3669 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3670 pdapi->pdapi_indication = error; 3671 pdapi->pdapi_stream = (val >> 16); 3672 pdapi->pdapi_seq = (val & 0x0000ffff); 3673 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3674 3675 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3676 SCTP_BUF_NEXT(m_notify) = NULL; 3677 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3678 0, 0, stcb->asoc.context, 0, 0, 0, 3679 m_notify); 3680 if (control == NULL) { 3681 /* no memory */ 3682 sctp_m_freem(m_notify); 3683 return; 3684 } 3685 control->length = SCTP_BUF_LEN(m_notify); 3686 control->spec_flags = M_NOTIFICATION; 3687 /* not that we need this */ 3688 control->tail_mbuf = m_notify; 3689 sb = &stcb->sctp_socket->so_rcv; 3690 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3691 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3692 } 3693 sctp_sballoc(stcb, sb, m_notify); 3694 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3695 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3696 } 3697 control->end_added = 1; 3698 if (stcb->asoc.control_pdapi) 3699 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3700 else { 3701 /* we really should not see this case */ 3702 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3703 } 3704 if (stcb->sctp_ep && stcb->sctp_socket) { 3705 /* This should always be the case */ 3706 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3707 } 3708 } 3709 3710 static void 3711 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3712 { 3713 struct mbuf *m_notify; 3714 struct sctp_shutdown_event *sse; 3715 struct sctp_queued_to_read *control; 3716 3717 /* 3718 * For TCP model AND UDP connected sockets we will send an error up 3719 * when an SHUTDOWN completes 3720 */ 3721 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3722 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3723 /* mark socket closed for read/write and wakeup! 
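 (Strictly, socantsendmore() below only marks the send side closed; the receive side stays readable so any already-queued data can still be drained.)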
*/ 3724 socantsendmore(stcb->sctp_socket); 3725 } 3726 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3727 /* event not enabled */ 3728 return; 3729 } 3730 3731 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3732 if (m_notify == NULL) 3733 /* no space left */ 3734 return; 3735 sse = mtod(m_notify, struct sctp_shutdown_event *); 3736 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3737 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3738 sse->sse_flags = 0; 3739 sse->sse_length = sizeof(struct sctp_shutdown_event); 3740 sse->sse_assoc_id = sctp_get_associd(stcb); 3741 3742 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3743 SCTP_BUF_NEXT(m_notify) = NULL; 3744 3745 /* append to socket */ 3746 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3747 0, 0, stcb->asoc.context, 0, 0, 0, 3748 m_notify); 3749 if (control == NULL) { 3750 /* no memory */ 3751 sctp_m_freem(m_notify); 3752 return; 3753 } 3754 control->length = SCTP_BUF_LEN(m_notify); 3755 control->spec_flags = M_NOTIFICATION; 3756 /* not that we need this */ 3757 control->tail_mbuf = m_notify; 3758 sctp_add_to_readq(stcb->sctp_ep, stcb, 3759 control, 3760 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3761 } 3762 3763 static void 3764 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3765 int so_locked) 3766 { 3767 struct mbuf *m_notify; 3768 struct sctp_sender_dry_event *event; 3769 struct sctp_queued_to_read *control; 3770 3771 if ((stcb == NULL) || 3772 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3773 /* event not enabled */ 3774 return; 3775 } 3776 3777 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3778 if (m_notify == NULL) { 3779 /* no space left */ 3780 return; 3781 } 3782 SCTP_BUF_LEN(m_notify) = 0; 3783 event = mtod(m_notify, struct sctp_sender_dry_event *); 3784 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3785 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3786 event->sender_dry_flags = 0; 3787 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3788 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3789 3790 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3791 SCTP_BUF_NEXT(m_notify) = NULL; 3792 3793 /* append to socket */ 3794 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3795 0, 0, stcb->asoc.context, 0, 0, 0, 3796 m_notify); 3797 if (control == NULL) { 3798 /* no memory */ 3799 sctp_m_freem(m_notify); 3800 return; 3801 } 3802 control->length = SCTP_BUF_LEN(m_notify); 3803 control->spec_flags = M_NOTIFICATION; 3804 /* not that we need this */ 3805 control->tail_mbuf = m_notify; 3806 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3807 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3808 } 3809 3810 void 3811 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3812 { 3813 struct mbuf *m_notify; 3814 struct sctp_queued_to_read *control; 3815 struct sctp_stream_change_event *stradd; 3816 3817 if ((stcb == NULL) || 3818 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3819 /* event not enabled */ 3820 return; 3821 } 3822 if ((stcb->asoc.peer_req_out) && flag) { 3823 /* Peer made the request, don't tell the local user */ 3824 stcb->asoc.peer_req_out = 0; 3825 return; 3826 } 3827 stcb->asoc.peer_req_out = 0; 3828 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3829 if (m_notify == NULL) 3830 /* no space left */ 3831 return; 3832 SCTP_BUF_LEN(m_notify) = 0; 3833 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3834 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3835 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3836 stradd->strchange_flags = flag; 3837 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3838 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3839 stradd->strchange_instrms = numberin; 3840 stradd->strchange_outstrms = numberout; 3841 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3842 SCTP_BUF_NEXT(m_notify) = NULL; 3843 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3844 /* no space */ 3845 sctp_m_freem(m_notify); 3846 return; 3847 } 3848 /* append to socket */ 3849 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3850 0, 0, stcb->asoc.context, 0, 0, 0, 3851 m_notify); 3852 if (control == NULL) { 3853 /* no memory */ 3854 sctp_m_freem(m_notify); 3855 return; 3856 } 3857 control->length = SCTP_BUF_LEN(m_notify); 3858 control->spec_flags = M_NOTIFICATION; 3859 /* not that we need this */ 3860 control->tail_mbuf = m_notify; 3861 sctp_add_to_readq(stcb->sctp_ep, stcb, 3862 control, 3863 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3864 } 3865 3866 void 3867 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3868 { 3869 struct mbuf *m_notify; 3870 struct sctp_queued_to_read *control; 3871 struct sctp_assoc_reset_event *strasoc; 3872 3873 if ((stcb == NULL) || 3874 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3875 /* event not enabled */ 3876 return; 3877 } 3878 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3879 if (m_notify == NULL) 3880 /* no space left */ 3881 return; 3882 SCTP_BUF_LEN(m_notify) = 0; 3883 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3884 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3885 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3886 strasoc->assocreset_flags = flag; 3887 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3888 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3889 strasoc->assocreset_local_tsn = sending_tsn; 3890 strasoc->assocreset_remote_tsn = recv_tsn; 3891 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3892 SCTP_BUF_NEXT(m_notify) = NULL; 3893 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3894 /* no space */ 3895 sctp_m_freem(m_notify); 3896 return; 3897 } 3898 /* append to socket */ 3899 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3900 0, 0, stcb->asoc.context, 0, 0, 0, 3901 m_notify); 3902 if (control == NULL) { 3903 /* no memory */ 3904 sctp_m_freem(m_notify); 3905 return; 3906 } 3907 control->length = SCTP_BUF_LEN(m_notify); 3908 control->spec_flags = M_NOTIFICATION; 3909 /* not that we need this */ 3910 control->tail_mbuf = m_notify; 3911 sctp_add_to_readq(stcb->sctp_ep, stcb, 3912 control, 3913 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3914 } 3915 3916 static void 3917 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3918 int number_entries, uint16_t *list, int flag) 3919 { 3920 struct mbuf *m_notify; 3921 struct sctp_queued_to_read 
*control; 3922 struct sctp_stream_reset_event *strreset; 3923 int len; 3924 3925 if ((stcb == NULL) || 3926 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3927 /* event not enabled */ 3928 return; 3929 } 3930 3931 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3932 if (m_notify == NULL) 3933 /* no space left */ 3934 return; 3935 SCTP_BUF_LEN(m_notify) = 0; 3936 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3937 if (len > M_TRAILINGSPACE(m_notify)) { 3938 /* never enough room */ 3939 sctp_m_freem(m_notify); 3940 return; 3941 } 3942 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3943 memset(strreset, 0, len); 3944 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3945 strreset->strreset_flags = flag; 3946 strreset->strreset_length = len; 3947 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3948 if (number_entries) { 3949 int i; 3950 3951 for (i = 0; i < number_entries; i++) { 3952 strreset->strreset_stream_list[i] = ntohs(list[i]); 3953 } 3954 } 3955 SCTP_BUF_LEN(m_notify) = len; 3956 SCTP_BUF_NEXT(m_notify) = NULL; 3957 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3958 /* no space */ 3959 sctp_m_freem(m_notify); 3960 return; 3961 } 3962 /* append to socket */ 3963 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3964 0, 0, stcb->asoc.context, 0, 0, 0, 3965 m_notify); 3966 if (control == NULL) { 3967 /* no memory */ 3968 sctp_m_freem(m_notify); 3969 return; 3970 } 3971 control->length = SCTP_BUF_LEN(m_notify); 3972 control->spec_flags = M_NOTIFICATION; 3973 /* not that we need this */ 3974 control->tail_mbuf = m_notify; 3975 sctp_add_to_readq(stcb->sctp_ep, stcb, 3976 control, 3977 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3978 } 3979 3980 static void 3981 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3982 { 3983 struct mbuf *m_notify; 3984 struct sctp_remote_error *sre; 3985 struct sctp_queued_to_read *control; 3986 unsigned int notif_len; 3987 uint16_t chunk_len; 3988 3989 if ((stcb == NULL) || 3990 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3991 return; 3992 } 3993 if (chunk != NULL) { 3994 chunk_len = ntohs(chunk->ch.chunk_length); 3995 /* 3996 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3997 * contiguous. 3998 */ 3999 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 4000 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4001 } 4002 } else { 4003 chunk_len = 0; 4004 } 4005 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4006 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4007 if (m_notify == NULL) { 4008 /* Retry with smaller value. 
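 Without room to copy the peer's error chunk we still deliver the notification header with the cause code; only the sre_data payload is omitted.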
*/ 4009 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4010 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4011 if (m_notify == NULL) { 4012 return; 4013 } 4014 } 4015 SCTP_BUF_NEXT(m_notify) = NULL; 4016 sre = mtod(m_notify, struct sctp_remote_error *); 4017 memset(sre, 0, notif_len); 4018 sre->sre_type = SCTP_REMOTE_ERROR; 4019 sre->sre_flags = 0; 4020 sre->sre_length = sizeof(struct sctp_remote_error); 4021 sre->sre_error = error; 4022 sre->sre_assoc_id = sctp_get_associd(stcb); 4023 if (notif_len > sizeof(struct sctp_remote_error)) { 4024 memcpy(sre->sre_data, chunk, chunk_len); 4025 sre->sre_length += chunk_len; 4026 } 4027 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4028 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4029 0, 0, stcb->asoc.context, 0, 0, 0, 4030 m_notify); 4031 if (control != NULL) { 4032 control->length = SCTP_BUF_LEN(m_notify); 4033 control->spec_flags = M_NOTIFICATION; 4034 /* not that we need this */ 4035 control->tail_mbuf = m_notify; 4036 sctp_add_to_readq(stcb->sctp_ep, stcb, 4037 control, 4038 &stcb->sctp_socket->so_rcv, 1, 4039 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4040 } else { 4041 sctp_m_freem(m_notify); 4042 } 4043 } 4044 4045 void 4046 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4047 uint32_t error, void *data, int so_locked) 4048 { 4049 if ((stcb == NULL) || 4050 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4051 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4052 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4053 /* If the socket is gone we are out of here */ 4054 return; 4055 } 4056 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4057 return; 4058 } 4059 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4060 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4061 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4062 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4063 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4064 /* Don't report these in front states */ 4065 return; 4066 } 4067 } 4068 switch (notification) { 4069 case SCTP_NOTIFY_ASSOC_UP: 4070 if (stcb->asoc.assoc_up_sent == 0) { 4071 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, false, false, so_locked); 4072 stcb->asoc.assoc_up_sent = 1; 4073 } 4074 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4075 sctp_notify_adaptation_layer(stcb); 4076 } 4077 if (stcb->asoc.auth_supported == 0) { 4078 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4079 NULL, so_locked); 4080 } 4081 break; 4082 case SCTP_NOTIFY_ASSOC_DOWN: 4083 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, false, false, so_locked); 4084 break; 4085 case SCTP_NOTIFY_INTERFACE_DOWN: 4086 { 4087 struct sctp_nets *net; 4088 4089 net = (struct sctp_nets *)data; 4090 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4091 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4092 break; 4093 } 4094 case SCTP_NOTIFY_INTERFACE_UP: 4095 { 4096 struct sctp_nets *net; 4097 4098 net = (struct sctp_nets *)data; 4099 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4100 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4101 break; 4102 } 4103 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4104 { 4105 struct sctp_nets *net; 4106 4107 net = (struct sctp_nets *)data; 4108 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4109 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4110 break; 4111 } 4112 case 
SCTP_NOTIFY_SPECIAL_SP_FAIL: 4113 sctp_notify_send_failed2(stcb, error, 4114 (struct sctp_stream_queue_pending *)data, so_locked); 4115 break; 4116 case SCTP_NOTIFY_SENT_DG_FAIL: 4117 sctp_notify_send_failed(stcb, 1, error, 4118 (struct sctp_tmit_chunk *)data, so_locked); 4119 break; 4120 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4121 sctp_notify_send_failed(stcb, 0, error, 4122 (struct sctp_tmit_chunk *)data, so_locked); 4123 break; 4124 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4125 { 4126 uint32_t val; 4127 4128 val = *((uint32_t *)data); 4129 4130 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4131 break; 4132 } 4133 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4134 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4135 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4136 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, false, so_locked); 4137 } else { 4138 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, false, so_locked); 4139 } 4140 break; 4141 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4142 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4143 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4144 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, true, false, so_locked); 4145 } else { 4146 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, true, false, so_locked); 4147 } 4148 break; 4149 case SCTP_NOTIFY_ASSOC_TIMEDOUT: 4150 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4151 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4152 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, true, so_locked); 4153 } else { 4154 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, true, so_locked); 4155 } 4156 break; 4157 case SCTP_NOTIFY_ASSOC_RESTART: 4158 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, false, false, so_locked); 4159 if (stcb->asoc.auth_supported == 0) { 4160 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4161 NULL, so_locked); 4162 } 4163 break; 4164 case SCTP_NOTIFY_STR_RESET_SEND: 4165 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4166 break; 4167 case SCTP_NOTIFY_STR_RESET_RECV: 4168 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4169 break; 4170 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4171 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4172 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4173 break; 4174 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4175 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4176 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4177 break; 4178 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4179 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4180 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4181 break; 4182 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4183 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4184 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4185 break; 4186 case SCTP_NOTIFY_ASCONF_ADD_IP: 4187 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4188 error, so_locked); 4189 break; 4190 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4191 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4192 error, so_locked); 4193 break; 4194 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4195 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4196 error, so_locked); 4197 break; 4198 case SCTP_NOTIFY_PEER_SHUTDOWN: 4199 
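/*
 * Queue an SCTP_SHUTDOWN_EVENT; the handler first marks the send side
 * closed for TCP-model sockets, then only queues the event if the
 * application subscribed to it.
 */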
sctp_notify_shutdown_event(stcb); 4200 break; 4201 case SCTP_NOTIFY_AUTH_NEW_KEY: 4202 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4203 (uint16_t)(uintptr_t)data, 4204 so_locked); 4205 break; 4206 case SCTP_NOTIFY_AUTH_FREE_KEY: 4207 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4208 (uint16_t)(uintptr_t)data, 4209 so_locked); 4210 break; 4211 case SCTP_NOTIFY_NO_PEER_AUTH: 4212 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4213 (uint16_t)(uintptr_t)data, 4214 so_locked); 4215 break; 4216 case SCTP_NOTIFY_SENDER_DRY: 4217 sctp_notify_sender_dry_event(stcb, so_locked); 4218 break; 4219 case SCTP_NOTIFY_REMOTE_ERROR: 4220 sctp_notify_remote_error(stcb, error, data); 4221 break; 4222 default: 4223 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4224 __func__, notification, notification); 4225 break; 4226 } /* end switch */ 4227 } 4228 4229 void 4230 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4231 { 4232 struct sctp_association *asoc; 4233 struct sctp_stream_out *outs; 4234 struct sctp_tmit_chunk *chk, *nchk; 4235 struct sctp_stream_queue_pending *sp, *nsp; 4236 int i; 4237 4238 if (stcb == NULL) { 4239 return; 4240 } 4241 asoc = &stcb->asoc; 4242 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4243 /* already being freed */ 4244 return; 4245 } 4246 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4247 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4248 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4249 return; 4250 } 4251 /* now through all the gunk freeing chunks */ 4252 /* sent queue SHOULD be empty */ 4253 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4254 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4255 asoc->sent_queue_cnt--; 4256 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4257 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4258 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4259 #ifdef INVARIANTS 4260 } else { 4261 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4262 #endif 4263 } 4264 } 4265 if (chk->data != NULL) { 4266 sctp_free_bufspace(stcb, asoc, chk, 1); 4267 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4268 error, chk, so_locked); 4269 if (chk->data) { 4270 sctp_m_freem(chk->data); 4271 chk->data = NULL; 4272 } 4273 } 4274 sctp_free_a_chunk(stcb, chk, so_locked); 4275 /* sa_ignore FREED_MEMORY */ 4276 } 4277 /* pending send queue SHOULD be empty */ 4278 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4279 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4280 asoc->send_queue_cnt--; 4281 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4282 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4283 #ifdef INVARIANTS 4284 } else { 4285 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4286 #endif 4287 } 4288 if (chk->data != NULL) { 4289 sctp_free_bufspace(stcb, asoc, chk, 1); 4290 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4291 error, chk, so_locked); 4292 if (chk->data) { 4293 sctp_m_freem(chk->data); 4294 chk->data = NULL; 4295 } 4296 } 4297 sctp_free_a_chunk(stcb, chk, so_locked); 4298 /* sa_ignore FREED_MEMORY */ 4299 } 4300 for (i = 0; i < asoc->streamoutcnt; i++) { 4301 /* For each stream */ 4302 outs = &asoc->strmout[i]; 4303 /* clean up any sends there */ 4304 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4305 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4306 TAILQ_REMOVE(&outs->outqueue, sp, next); 4307 
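/*
 * Let the stream scheduler drop its reference first; the never-sent
 * data is then reported to the ULP as a send-failed notification
 * below and freed.
 */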
stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp); 4308 sctp_free_spbufspace(stcb, asoc, sp); 4309 if (sp->data) { 4310 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4311 error, (void *)sp, so_locked); 4312 if (sp->data) { 4313 sctp_m_freem(sp->data); 4314 sp->data = NULL; 4315 sp->tail_mbuf = NULL; 4316 sp->length = 0; 4317 } 4318 } 4319 if (sp->net) { 4320 sctp_free_remote_addr(sp->net); 4321 sp->net = NULL; 4322 } 4323 /* Free the chunk */ 4324 sctp_free_a_strmoq(stcb, sp, so_locked); 4325 /* sa_ignore FREED_MEMORY */ 4326 } 4327 } 4328 } 4329 4330 void 4331 sctp_abort_notification(struct sctp_tcb *stcb, bool from_peer, bool timeout, 4332 uint16_t error, struct sctp_abort_chunk *abort, 4333 int so_locked) 4334 { 4335 if (stcb == NULL) { 4336 return; 4337 } 4338 SCTP_TCB_LOCK_ASSERT(stcb); 4339 4340 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4341 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4342 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4343 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 4344 } 4345 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4346 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4347 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4348 return; 4349 } 4350 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4351 /* Tell them we lost the asoc */ 4352 sctp_report_all_outbound(stcb, error, so_locked); 4353 if (from_peer) { 4354 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4355 } else { 4356 if (timeout) { 4357 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_TIMEDOUT, stcb, error, abort, so_locked); 4358 } else { 4359 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4360 } 4361 } 4362 } 4363 4364 void 4365 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4366 struct mbuf *m, int iphlen, 4367 struct sockaddr *src, struct sockaddr *dst, 4368 struct sctphdr *sh, struct mbuf *op_err, 4369 uint8_t mflowtype, uint32_t mflowid, 4370 uint32_t vrf_id, uint16_t port) 4371 { 4372 struct sctp_gen_error_cause *cause; 4373 uint32_t vtag; 4374 uint16_t cause_code; 4375 4376 if (stcb != NULL) { 4377 vtag = stcb->asoc.peer_vtag; 4378 vrf_id = stcb->asoc.vrf_id; 4379 if (op_err != NULL) { 4380 /* Read the cause code from the error cause. 
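 The cause code is the leading 16-bit field of the operational error and is carried in network byte order, hence the ntohs() below.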
*/ 4381 cause = mtod(op_err, struct sctp_gen_error_cause *); 4382 cause_code = ntohs(cause->code); 4383 } else { 4384 cause_code = 0; 4385 } 4386 } else { 4387 vtag = 0; 4388 } 4389 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4390 mflowtype, mflowid, inp->fibnum, 4391 vrf_id, port); 4392 if (stcb != NULL) { 4393 /* We have a TCB to abort, send notification too */ 4394 sctp_abort_notification(stcb, false, false, cause_code, NULL, SCTP_SO_NOT_LOCKED); 4395 /* Ok, now lets free it */ 4396 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4397 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4398 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4399 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4400 } 4401 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4402 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4403 } 4404 } 4405 #ifdef SCTP_ASOCLOG_OF_TSNS 4406 void 4407 sctp_print_out_track_log(struct sctp_tcb *stcb) 4408 { 4409 #ifdef NOSIY_PRINTS 4410 int i; 4411 4412 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4413 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4414 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4415 SCTP_PRINTF("None rcvd\n"); 4416 goto none_in; 4417 } 4418 if (stcb->asoc.tsn_in_wrapped) { 4419 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4420 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4421 stcb->asoc.in_tsnlog[i].tsn, 4422 stcb->asoc.in_tsnlog[i].strm, 4423 stcb->asoc.in_tsnlog[i].seq, 4424 stcb->asoc.in_tsnlog[i].flgs, 4425 stcb->asoc.in_tsnlog[i].sz); 4426 } 4427 } 4428 if (stcb->asoc.tsn_in_at) { 4429 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4430 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4431 stcb->asoc.in_tsnlog[i].tsn, 4432 stcb->asoc.in_tsnlog[i].strm, 4433 stcb->asoc.in_tsnlog[i].seq, 4434 stcb->asoc.in_tsnlog[i].flgs, 4435 stcb->asoc.in_tsnlog[i].sz); 4436 } 4437 } 4438 none_in: 4439 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4440 if ((stcb->asoc.tsn_out_at == 0) && 4441 (stcb->asoc.tsn_out_wrapped == 0)) { 4442 SCTP_PRINTF("None sent\n"); 4443 } 4444 if (stcb->asoc.tsn_out_wrapped) { 4445 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4446 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4447 stcb->asoc.out_tsnlog[i].tsn, 4448 stcb->asoc.out_tsnlog[i].strm, 4449 stcb->asoc.out_tsnlog[i].seq, 4450 stcb->asoc.out_tsnlog[i].flgs, 4451 stcb->asoc.out_tsnlog[i].sz); 4452 } 4453 } 4454 if (stcb->asoc.tsn_out_at) { 4455 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4456 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4457 stcb->asoc.out_tsnlog[i].tsn, 4458 stcb->asoc.out_tsnlog[i].strm, 4459 stcb->asoc.out_tsnlog[i].seq, 4460 stcb->asoc.out_tsnlog[i].flgs, 4461 stcb->asoc.out_tsnlog[i].sz); 4462 } 4463 } 4464 #endif 4465 } 4466 #endif 4467 4468 void 4469 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4470 struct mbuf *op_err, bool timedout, int so_locked) 4471 { 4472 struct sctp_gen_error_cause *cause; 4473 uint16_t cause_code; 4474 4475 if (stcb == NULL) { 4476 /* Got to have a TCB */ 4477 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4478 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4479 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4480 SCTP_CALLED_DIRECTLY_NOCMPSET); 4481 } 4482 } 4483 return; 4484 } 4485 if (op_err != NULL) { 4486 /* Read the cause code from the error cause. 
*/ 4487 cause = mtod(op_err, struct sctp_gen_error_cause *); 4488 cause_code = ntohs(cause->code); 4489 } else { 4490 cause_code = 0; 4491 } 4492 /* notify the peer */ 4493 sctp_send_abort_tcb(stcb, op_err, so_locked); 4494 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4495 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4496 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4497 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4498 } 4499 /* notify the ulp */ 4500 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4501 sctp_abort_notification(stcb, false, timedout, cause_code, NULL, so_locked); 4502 } 4503 /* now free the asoc */ 4504 #ifdef SCTP_ASOCLOG_OF_TSNS 4505 sctp_print_out_track_log(stcb); 4506 #endif 4507 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4508 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4509 } 4510 4511 void 4512 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4513 struct sockaddr *src, struct sockaddr *dst, 4514 struct sctphdr *sh, struct sctp_inpcb *inp, 4515 struct mbuf *cause, 4516 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4517 uint32_t vrf_id, uint16_t port) 4518 { 4519 struct sctp_chunkhdr *ch, chunk_buf; 4520 unsigned int chk_length; 4521 int contains_init_chunk; 4522 4523 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4524 /* Generate a TO address for future reference */ 4525 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4526 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4527 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4528 SCTP_CALLED_DIRECTLY_NOCMPSET); 4529 } 4530 } 4531 contains_init_chunk = 0; 4532 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4533 sizeof(*ch), (uint8_t *)&chunk_buf); 4534 while (ch != NULL) { 4535 chk_length = ntohs(ch->chunk_length); 4536 if (chk_length < sizeof(*ch)) { 4537 /* break to abort land */ 4538 break; 4539 } 4540 switch (ch->chunk_type) { 4541 case SCTP_INIT: 4542 contains_init_chunk = 1; 4543 break; 4544 case SCTP_PACKET_DROPPED: 4545 /* we don't respond to pkt-dropped */ 4546 return; 4547 case SCTP_ABORT_ASSOCIATION: 4548 /* we don't respond with an ABORT to an ABORT */ 4549 return; 4550 case SCTP_SHUTDOWN_COMPLETE: 4551 /* 4552 * we ignore it since we are not waiting for it and 4553 * peer is gone 4554 */ 4555 return; 4556 case SCTP_SHUTDOWN_ACK: 4557 sctp_send_shutdown_complete2(src, dst, sh, 4558 mflowtype, mflowid, fibnum, 4559 vrf_id, port); 4560 return; 4561 default: 4562 break; 4563 } 4564 offset += SCTP_SIZE32(chk_length); 4565 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4566 sizeof(*ch), (uint8_t *)&chunk_buf); 4567 } 4568 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4569 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4570 (contains_init_chunk == 0))) { 4571 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4572 mflowtype, mflowid, fibnum, 4573 vrf_id, port); 4574 } 4575 } 4576 4577 /* 4578 * check the inbound datagram to make sure there is not an abort inside it, 4579 * if there is return 1, else return 0. 
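* As a side effect, when an INIT or INIT-ACK chunk is walked over, its initiate tag is copied out through *vtag, typically so the caller can use it as the verification tag of an ABORT sent in response.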
4580 */ 4581 int 4582 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtag) 4583 { 4584 struct sctp_chunkhdr *ch; 4585 struct sctp_init_chunk *init_chk, chunk_buf; 4586 int offset; 4587 unsigned int chk_length; 4588 4589 offset = iphlen + sizeof(struct sctphdr); 4590 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4591 (uint8_t *)&chunk_buf); 4592 while (ch != NULL) { 4593 chk_length = ntohs(ch->chunk_length); 4594 if (chk_length < sizeof(*ch)) { 4595 /* packet is probably corrupt */ 4596 break; 4597 } 4598 /* we seem to be ok, is it an abort? */ 4599 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4600 /* yep, tell them */ 4601 return (1); 4602 } 4603 if ((ch->chunk_type == SCTP_INITIATION) || 4604 (ch->chunk_type == SCTP_INITIATION_ACK)) { 4605 /* need to update the Vtag */ 4606 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4607 offset, sizeof(struct sctp_init_chunk), (uint8_t *)&chunk_buf); 4608 if (init_chk != NULL) { 4609 *vtag = ntohl(init_chk->init.initiate_tag); 4610 } 4611 } 4612 /* Nope, move to the next chunk */ 4613 offset += SCTP_SIZE32(chk_length); 4614 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4615 sizeof(*ch), (uint8_t *)&chunk_buf); 4616 } 4617 return (0); 4618 } 4619 4620 /* 4621 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4622 * set (i.e. it's 0) so, create this function to compare link local scopes 4623 */ 4624 #ifdef INET6 4625 uint32_t 4626 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4627 { 4628 struct sockaddr_in6 a, b; 4629 4630 /* save copies */ 4631 a = *addr1; 4632 b = *addr2; 4633 4634 if (a.sin6_scope_id == 0) 4635 if (sa6_recoverscope(&a)) { 4636 /* can't get scope, so can't match */ 4637 return (0); 4638 } 4639 if (b.sin6_scope_id == 0) 4640 if (sa6_recoverscope(&b)) { 4641 /* can't get scope, so can't match */ 4642 return (0); 4643 } 4644 if (a.sin6_scope_id != b.sin6_scope_id) 4645 return (0); 4646 4647 return (1); 4648 } 4649 4650 /* 4651 * returns a sockaddr_in6 with embedded scope recovered and removed 4652 */ 4653 struct sockaddr_in6 * 4654 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4655 { 4656 /* check and strip embedded scope junk */ 4657 if (addr->sin6_family == AF_INET6) { 4658 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4659 if (addr->sin6_scope_id == 0) { 4660 *store = *addr; 4661 if (!sa6_recoverscope(store)) { 4662 /* use the recovered scope */ 4663 addr = store; 4664 } 4665 } else { 4666 /* else, return the original "to" addr */ 4667 in6_clearscope(&addr->sin6_addr); 4668 } 4669 } 4670 } 4671 return (addr); 4672 } 4673 #endif 4674 4675 /* 4676 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4677 * if same, 0 if not 4678 */ 4679 int 4680 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4681 { 4682 4683 /* must be valid */ 4684 if (sa1 == NULL || sa2 == NULL) 4685 return (0); 4686 4687 /* must be the same family */ 4688 if (sa1->sa_family != sa2->sa_family) 4689 return (0); 4690 4691 switch (sa1->sa_family) { 4692 #ifdef INET6 4693 case AF_INET6: 4694 { 4695 /* IPv6 addresses */ 4696 struct sockaddr_in6 *sin6_1, *sin6_2; 4697 4698 sin6_1 = (struct sockaddr_in6 *)sa1; 4699 sin6_2 = (struct sockaddr_in6 *)sa2; 4700 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4701 sin6_2)); 4702 } 4703 #endif 4704 #ifdef INET 4705 case AF_INET: 4706 { 4707 /* IPv4 addresses */ 4708 struct sockaddr_in *sin_1, *sin_2; 4709 4710 sin_1 = (struct sockaddr_in *)sa1; 4711 sin_2 = (struct sockaddr_in *)sa2; 4712 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4713 } 4714 #endif 4715 default: 4716 /* we don't do these... */ 4717 return (0); 4718 } 4719 } 4720 4721 void 4722 sctp_print_address(struct sockaddr *sa) 4723 { 4724 #ifdef INET6 4725 char ip6buf[INET6_ADDRSTRLEN]; 4726 #endif 4727 4728 switch (sa->sa_family) { 4729 #ifdef INET6 4730 case AF_INET6: 4731 { 4732 struct sockaddr_in6 *sin6; 4733 4734 sin6 = (struct sockaddr_in6 *)sa; 4735 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4736 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4737 ntohs(sin6->sin6_port), 4738 sin6->sin6_scope_id); 4739 break; 4740 } 4741 #endif 4742 #ifdef INET 4743 case AF_INET: 4744 { 4745 struct sockaddr_in *sin; 4746 unsigned char *p; 4747 4748 sin = (struct sockaddr_in *)sa; 4749 p = (unsigned char *)&sin->sin_addr; 4750 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4751 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4752 break; 4753 } 4754 #endif 4755 default: 4756 SCTP_PRINTF("?\n"); 4757 break; 4758 } 4759 } 4760 4761 void 4762 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4763 struct sctp_inpcb *new_inp, 4764 struct sctp_tcb *stcb, 4765 int waitflags) 4766 { 4767 /* 4768 * go through our old INP and pull off any control structures that 4769 * belong to stcb and move them to the new inp. 4770 */ 4771 struct socket *old_so, *new_so; 4772 struct sctp_queued_to_read *control, *nctl; 4773 struct sctp_readhead tmp_queue; 4774 struct mbuf *m; 4775 int error = 0; 4776 4777 old_so = old_inp->sctp_socket; 4778 new_so = new_inp->sctp_socket; 4779 TAILQ_INIT(&tmp_queue); 4780 error = SOCK_IO_RECV_LOCK(old_so, waitflags); 4781 if (error) { 4782 /* 4783 * Gak, can't get the I/O lock, we have a problem. Data will be 4784 * left stranded.. and we don't dare look at it since the 4785 * other thread may be reading something. Oh well, it's a 4786 * screwed up app that does a peeloff OR an accept while 4787 * reading from the main socket... actually it's only the 4788 * peeloff() case, since I think read will fail on a 4789 * listening socket.. 4790 */ 4791 return; 4792 } 4793 /* lock the socket buffers */ 4794 SCTP_INP_READ_LOCK(old_inp); 4795 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4796 /* Pull off all for our target stcb */ 4797 if (control->stcb == stcb) { 4798 /* remove it, we want it */ 4799 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4800 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4801 m = control->data; 4802 while (m) { 4803 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4804 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ?
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4805 } 4806 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4807 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4808 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4809 } 4810 m = SCTP_BUF_NEXT(m); 4811 } 4812 } 4813 } 4814 SCTP_INP_READ_UNLOCK(old_inp); 4815 /* Remove the recv-lock on the old socket */ 4816 SOCK_IO_RECV_UNLOCK(old_so); 4817 /* Now we move them over to the new socket buffer */ 4818 SCTP_INP_READ_LOCK(new_inp); 4819 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4820 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4821 m = control->data; 4822 while (m) { 4823 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4824 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4825 } 4826 sctp_sballoc(stcb, &new_so->so_rcv, m); 4827 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4828 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4829 } 4830 m = SCTP_BUF_NEXT(m); 4831 } 4832 } 4833 SCTP_INP_READ_UNLOCK(new_inp); 4834 } 4835 4836 void 4837 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4838 struct sctp_tcb *stcb, 4839 int so_locked 4840 SCTP_UNUSED 4841 ) 4842 { 4843 if ((inp != NULL) && 4844 (inp->sctp_socket != NULL) && 4845 (((inp->sctp_flags & (SCTP_PCB_FLAGS_TCPTYPE | SCTP_PCB_FLAGS_IN_TCPPOOL)) == 0) || 4846 !SCTP_IS_LISTENING(inp))) { 4847 sctp_sorwakeup(inp, inp->sctp_socket); 4848 } 4849 } 4850 4851 void 4852 sctp_add_to_readq(struct sctp_inpcb *inp, 4853 struct sctp_tcb *stcb, 4854 struct sctp_queued_to_read *control, 4855 struct sockbuf *sb, 4856 int end, 4857 int inp_read_lock_held, 4858 int so_locked) 4859 { 4860 /* 4861 * Here we must place the control on the end of the socket read 4862 * queue AND increment sb_cc so that select will work properly on 4863 * read. 4864 */ 4865 struct mbuf *m, *prev = NULL; 4866 4867 if (inp == NULL) { 4868 /* Gak, TSNH!! */ 4869 #ifdef INVARIANTS 4870 panic("Gak, inp NULL on add_to_readq"); 4871 #endif 4872 return; 4873 } 4874 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4875 SCTP_INP_READ_LOCK(inp); 4876 } 4877 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4878 if (!control->on_strm_q) { 4879 sctp_free_remote_addr(control->whoFrom); 4880 if (control->data) { 4881 sctp_m_freem(control->data); 4882 control->data = NULL; 4883 } 4884 sctp_free_a_readq(stcb, control); 4885 } 4886 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4887 SCTP_INP_READ_UNLOCK(inp); 4888 } 4889 return; 4890 } 4891 if ((control->spec_flags & M_NOTIFICATION) == 0) { 4892 atomic_add_int(&inp->total_recvs, 1); 4893 if (!control->do_not_ref_stcb) { 4894 atomic_add_int(&stcb->total_recvs, 1); 4895 } 4896 } 4897 m = control->data; 4898 control->held_length = 0; 4899 control->length = 0; 4900 while (m != NULL) { 4901 if (SCTP_BUF_LEN(m) == 0) { 4902 /* Skip mbufs with NO length */ 4903 if (prev == NULL) { 4904 /* First one */ 4905 control->data = sctp_m_free(m); 4906 m = control->data; 4907 } else { 4908 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4909 m = SCTP_BUF_NEXT(prev); 4910 } 4911 if (m == NULL) { 4912 control->tail_mbuf = prev; 4913 } 4914 continue; 4915 } 4916 prev = m; 4917 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4918 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4919 } 4920 sctp_sballoc(stcb, sb, m); 4921 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4922 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4923 } 4924 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4925 m = SCTP_BUF_NEXT(m); 4926 } 4927 if (prev != NULL) { 4928 control->tail_mbuf = prev; 4929 } else { 4930 /* Everything got collapsed out?? */ 4931 if (!control->on_strm_q) { 4932 sctp_free_remote_addr(control->whoFrom); 4933 sctp_free_a_readq(stcb, control); 4934 } 4935 if (inp_read_lock_held == 0) 4936 SCTP_INP_READ_UNLOCK(inp); 4937 return; 4938 } 4939 if (end) { 4940 control->end_added = 1; 4941 } 4942 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4943 control->on_read_q = 1; 4944 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4945 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4946 } 4947 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4948 SCTP_INP_READ_UNLOCK(inp); 4949 } 4950 } 4951 4952 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4953 *************ALTERNATE ROUTING CODE 4954 */ 4955 4956 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4957 *************ALTERNATE ROUTING CODE 4958 */ 4959 4960 struct mbuf * 4961 sctp_generate_cause(uint16_t code, char *info) 4962 { 4963 struct mbuf *m; 4964 struct sctp_gen_error_cause *cause; 4965 size_t info_len; 4966 uint16_t len; 4967 4968 if ((code == 0) || (info == NULL)) { 4969 return (NULL); 4970 } 4971 info_len = strlen(info); 4972 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4973 return (NULL); 4974 } 4975 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4976 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4977 if (m != NULL) { 4978 SCTP_BUF_LEN(m) = len; 4979 cause = mtod(m, struct sctp_gen_error_cause *); 4980 cause->code = htons(code); 4981 cause->length = htons(len); 4982 memcpy(cause->info, info, info_len); 4983 } 4984 return (m); 4985 } 4986 4987 struct mbuf * 4988 sctp_generate_no_user_data_cause(uint32_t tsn) 4989 { 4990 struct mbuf *m; 4991 struct sctp_error_no_user_data *no_user_data_cause; 4992 uint16_t len; 4993 4994 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 4995 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4996 if (m != NULL) { 4997 SCTP_BUF_LEN(m) = len; 4998 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 4999 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 5000 no_user_data_cause->cause.length = htons(len); 5001 no_user_data_cause->tsn = htonl(tsn); 5002 } 5003 return (m); 5004 } 5005 5006 #ifdef SCTP_MBCNT_LOGGING 5007 void 5008 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 5009 struct sctp_tmit_chunk *tp1, int chk_cnt) 5010 { 5011 if (tp1->data == NULL) { 5012 return; 5013 } 5014 asoc->chunks_on_out_queue -= chk_cnt; 5015 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 5016 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 5017 asoc->total_output_queue_size, 5018 tp1->book_size, 5019 0, 5020 tp1->mbcnt); 5021 } 5022 if (asoc->total_output_queue_size >= tp1->book_size) { 5023 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 5024 } else { 5025 asoc->total_output_queue_size = 0; 5026 } 5027 5028 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 5029 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 5030 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 5031 
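/*
 * Release the chunk's rounded-up accounting size (book_size) from the
 * send socket buffer count, clamping at zero so the counter cannot
 * underflow if the accounting ever got out of sync.
 */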
stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 5032 } else { 5033 stcb->sctp_socket->so_snd.sb_cc = 0; 5034 } 5035 } 5036 } 5037 5038 #endif 5039 5040 int 5041 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5042 uint8_t sent, int so_locked) 5043 { 5044 struct sctp_stream_out *strq; 5045 struct sctp_tmit_chunk *chk = NULL, *tp2; 5046 struct sctp_stream_queue_pending *sp; 5047 uint32_t mid; 5048 uint16_t sid; 5049 uint8_t foundeom = 0; 5050 int ret_sz = 0; 5051 int notdone; 5052 int do_wakeup_routine = 0; 5053 5054 SCTP_TCB_LOCK_ASSERT(stcb); 5055 5056 sid = tp1->rec.data.sid; 5057 mid = tp1->rec.data.mid; 5058 if (sent || ((tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0)) { 5059 stcb->asoc.abandoned_sent[0]++; 5060 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5061 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5062 #if defined(SCTP_DETAILED_STR_STATS) 5063 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5064 #endif 5065 } else { 5066 stcb->asoc.abandoned_unsent[0]++; 5067 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5068 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5069 #if defined(SCTP_DETAILED_STR_STATS) 5070 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5071 #endif 5072 } 5073 do { 5074 ret_sz += tp1->book_size; 5075 if (tp1->data != NULL) { 5076 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5077 sctp_flight_size_decrease(tp1); 5078 sctp_total_flight_decrease(stcb, tp1); 5079 } 5080 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5081 stcb->asoc.peers_rwnd += tp1->send_size; 5082 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5083 if (sent) { 5084 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5085 } else { 5086 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5087 } 5088 if (tp1->data) { 5089 sctp_m_freem(tp1->data); 5090 tp1->data = NULL; 5091 } 5092 do_wakeup_routine = 1; 5093 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5094 stcb->asoc.sent_queue_cnt_removeable--; 5095 } 5096 } 5097 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5098 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5099 SCTP_DATA_NOT_FRAG) { 5100 /* not frag'ed we ae done */ 5101 notdone = 0; 5102 foundeom = 1; 5103 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5104 /* end of frag, we are done */ 5105 notdone = 0; 5106 foundeom = 1; 5107 } else { 5108 /* 5109 * Its a begin or middle piece, we must mark all of 5110 * it 5111 */ 5112 notdone = 1; 5113 tp1 = TAILQ_NEXT(tp1, sctp_next); 5114 } 5115 } while (tp1 && notdone); 5116 if (foundeom == 0) { 5117 /* 5118 * The multi-part message was scattered across the send and 5119 * sent queue. 5120 */ 5121 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5122 if ((tp1->rec.data.sid != sid) || 5123 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5124 break; 5125 } 5126 /* 5127 * save to chk in case we have some on stream out 5128 * queue. If so and we have an un-transmitted one we 5129 * don't have to fudge the TSN. 
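* (To "fudge" the TSN means allocating a dummy chunk further down just so a TSN/MID exists to carry the SCTP_DATA_LAST_FRAG marker for the FORWARD-TSN bookkeeping.)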
5130 */ 5131 chk = tp1; 5132 ret_sz += tp1->book_size; 5133 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5134 if (sent) { 5135 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5136 } else { 5137 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5138 } 5139 if (tp1->data) { 5140 sctp_m_freem(tp1->data); 5141 tp1->data = NULL; 5142 } 5143 /* No flight involved here book the size to 0 */ 5144 tp1->book_size = 0; 5145 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5146 foundeom = 1; 5147 } 5148 do_wakeup_routine = 1; 5149 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5150 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5151 /* 5152 * on to the sent queue so we can wait for it to be 5153 * passed by. 5154 */ 5155 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5156 sctp_next); 5157 stcb->asoc.send_queue_cnt--; 5158 stcb->asoc.sent_queue_cnt++; 5159 } 5160 } 5161 if (foundeom == 0) { 5162 /* 5163 * Still no eom found. That means there is stuff left on the 5164 * stream out queue.. yuck. 5165 */ 5166 strq = &stcb->asoc.strmout[sid]; 5167 sp = TAILQ_FIRST(&strq->outqueue); 5168 if (sp != NULL) { 5169 sp->discard_rest = 1; 5170 /* 5171 * We may need to put a chunk on the queue that 5172 * holds the TSN that would have been sent with the 5173 * LAST bit. 5174 */ 5175 if (chk == NULL) { 5176 /* Yep, we have to */ 5177 sctp_alloc_a_chunk(stcb, chk); 5178 if (chk == NULL) { 5179 /* 5180 * we are hosed. All we can do is 5181 * nothing.. which will cause an 5182 * abort if the peer is paying 5183 * attention. 5184 */ 5185 goto oh_well; 5186 } 5187 memset(chk, 0, sizeof(*chk)); 5188 chk->rec.data.rcv_flags = 0; 5189 chk->sent = SCTP_FORWARD_TSN_SKIP; 5190 chk->asoc = &stcb->asoc; 5191 if (stcb->asoc.idata_supported == 0) { 5192 if (sp->sinfo_flags & SCTP_UNORDERED) { 5193 chk->rec.data.mid = 0; 5194 } else { 5195 chk->rec.data.mid = strq->next_mid_ordered; 5196 } 5197 } else { 5198 if (sp->sinfo_flags & SCTP_UNORDERED) { 5199 chk->rec.data.mid = strq->next_mid_unordered; 5200 } else { 5201 chk->rec.data.mid = strq->next_mid_ordered; 5202 } 5203 } 5204 chk->rec.data.sid = sp->sid; 5205 chk->rec.data.ppid = sp->ppid; 5206 chk->rec.data.context = sp->context; 5207 chk->flags = sp->act_flags; 5208 chk->whoTo = NULL; 5209 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5210 strq->chunks_on_queues++; 5211 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5212 stcb->asoc.sent_queue_cnt++; 5213 stcb->asoc.pr_sctp_cnt++; 5214 } 5215 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5216 if (sp->sinfo_flags & SCTP_UNORDERED) { 5217 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5218 } 5219 if (stcb->asoc.idata_supported == 0) { 5220 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5221 strq->next_mid_ordered++; 5222 } 5223 } else { 5224 if (sp->sinfo_flags & SCTP_UNORDERED) { 5225 strq->next_mid_unordered++; 5226 } else { 5227 strq->next_mid_ordered++; 5228 } 5229 } 5230 oh_well: 5231 if (sp->data) { 5232 /* 5233 * Pull any data to free up the SB and allow 5234 * sender to "add more" while we will throw 5235 * away :-) 5236 */ 5237 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5238 ret_sz += sp->length; 5239 do_wakeup_routine = 1; 5240 sp->some_taken = 1; 5241 sctp_m_freem(sp->data); 5242 sp->data = NULL; 5243 sp->tail_mbuf = NULL; 5244 sp->length = 0; 5245 } 5246 } 5247 } 5248 if (do_wakeup_routine) { 5249 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5250 } 5251 return (ret_sz); 5252 } 5253 5254 /* 5255 * checks to see if the given address, 
sa, is one that is currently known by 5256 * the kernel note: can't distinguish the same address on multiple interfaces 5257 * and doesn't handle multiple addresses with different zone/scope id's note: 5258 * ifa_ifwithaddr() compares the entire sockaddr struct 5259 */ 5260 struct sctp_ifa * 5261 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5262 int holds_lock) 5263 { 5264 struct sctp_laddr *laddr; 5265 5266 if (holds_lock == 0) { 5267 SCTP_INP_RLOCK(inp); 5268 } 5269 5270 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5271 if (laddr->ifa == NULL) 5272 continue; 5273 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5274 continue; 5275 #ifdef INET 5276 if (addr->sa_family == AF_INET) { 5277 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5278 laddr->ifa->address.sin.sin_addr.s_addr) { 5279 /* found him. */ 5280 break; 5281 } 5282 } 5283 #endif 5284 #ifdef INET6 5285 if (addr->sa_family == AF_INET6) { 5286 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5287 &laddr->ifa->address.sin6)) { 5288 /* found him. */ 5289 break; 5290 } 5291 } 5292 #endif 5293 } 5294 if (holds_lock == 0) { 5295 SCTP_INP_RUNLOCK(inp); 5296 } 5297 if (laddr != NULL) { 5298 return (laddr->ifa); 5299 } else { 5300 return (NULL); 5301 } 5302 } 5303 5304 uint32_t 5305 sctp_get_ifa_hash_val(struct sockaddr *addr) 5306 { 5307 switch (addr->sa_family) { 5308 #ifdef INET 5309 case AF_INET: 5310 { 5311 struct sockaddr_in *sin; 5312 5313 sin = (struct sockaddr_in *)addr; 5314 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5315 } 5316 #endif 5317 #ifdef INET6 5318 case AF_INET6: 5319 { 5320 struct sockaddr_in6 *sin6; 5321 uint32_t hash_of_addr; 5322 5323 sin6 = (struct sockaddr_in6 *)addr; 5324 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5325 sin6->sin6_addr.s6_addr32[1] + 5326 sin6->sin6_addr.s6_addr32[2] + 5327 sin6->sin6_addr.s6_addr32[3]); 5328 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5329 return (hash_of_addr); 5330 } 5331 #endif 5332 default: 5333 break; 5334 } 5335 return (0); 5336 } 5337 5338 struct sctp_ifa * 5339 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5340 { 5341 struct sctp_ifa *sctp_ifap; 5342 struct sctp_vrf *vrf; 5343 struct sctp_ifalist *hash_head; 5344 uint32_t hash_of_addr; 5345 5346 if (holds_lock == 0) { 5347 SCTP_IPI_ADDR_RLOCK(); 5348 } else { 5349 SCTP_IPI_ADDR_LOCK_ASSERT(); 5350 } 5351 5352 vrf = sctp_find_vrf(vrf_id); 5353 if (vrf == NULL) { 5354 if (holds_lock == 0) 5355 SCTP_IPI_ADDR_RUNLOCK(); 5356 return (NULL); 5357 } 5358 5359 hash_of_addr = sctp_get_ifa_hash_val(addr); 5360 5361 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5362 if (hash_head == NULL) { 5363 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5364 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5365 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5366 sctp_print_address(addr); 5367 SCTP_PRINTF("No such bucket for address\n"); 5368 if (holds_lock == 0) 5369 SCTP_IPI_ADDR_RUNLOCK(); 5370 5371 return (NULL); 5372 } 5373 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5374 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5375 continue; 5376 #ifdef INET 5377 if (addr->sa_family == AF_INET) { 5378 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5379 sctp_ifap->address.sin.sin_addr.s_addr) { 5380 /* found him. 
*/ 5381 break; 5382 } 5383 } 5384 #endif 5385 #ifdef INET6 5386 if (addr->sa_family == AF_INET6) { 5387 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5388 &sctp_ifap->address.sin6)) { 5389 /* found him. */ 5390 break; 5391 } 5392 } 5393 #endif 5394 } 5395 if (holds_lock == 0) 5396 SCTP_IPI_ADDR_RUNLOCK(); 5397 return (sctp_ifap); 5398 } 5399 5400 static void 5401 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5402 uint32_t rwnd_req) 5403 { 5404 /* User pulled some data, do we need a rwnd update? */ 5405 struct epoch_tracker et; 5406 int r_unlocked = 0; 5407 uint32_t dif, rwnd; 5408 struct socket *so = NULL; 5409 5410 if (stcb == NULL) 5411 return; 5412 5413 atomic_add_int(&stcb->asoc.refcnt, 1); 5414 5415 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5416 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5417 /* Pre-check If we are freeing no update */ 5418 goto no_lock; 5419 } 5420 SCTP_INP_INCR_REF(stcb->sctp_ep); 5421 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5422 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5423 goto out; 5424 } 5425 so = stcb->sctp_socket; 5426 if (so == NULL) { 5427 goto out; 5428 } 5429 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5430 /* Have you have freed enough to look */ 5431 *freed_so_far = 0; 5432 /* Yep, its worth a look and the lock overhead */ 5433 5434 /* Figure out what the rwnd would be */ 5435 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5436 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5437 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5438 } else { 5439 dif = 0; 5440 } 5441 if (dif >= rwnd_req) { 5442 if (hold_rlock) { 5443 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5444 r_unlocked = 1; 5445 } 5446 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5447 /* 5448 * One last check before we allow the guy possibly 5449 * to get in. There is a race, where the guy has not 5450 * reached the gate. In that case 5451 */ 5452 goto out; 5453 } 5454 SCTP_TCB_LOCK(stcb); 5455 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5456 /* No reports here */ 5457 SCTP_TCB_UNLOCK(stcb); 5458 goto out; 5459 } 5460 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5461 NET_EPOCH_ENTER(et); 5462 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5463 5464 sctp_chunk_output(stcb->sctp_ep, stcb, 5465 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5466 /* make sure no timer is running */ 5467 NET_EPOCH_EXIT(et); 5468 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5469 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5470 SCTP_TCB_UNLOCK(stcb); 5471 } else { 5472 /* Update how much we have pending */ 5473 stcb->freed_by_sorcv_sincelast = dif; 5474 } 5475 out: 5476 if (so && r_unlocked && hold_rlock) { 5477 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5478 } 5479 5480 SCTP_INP_DECR_REF(stcb->sctp_ep); 5481 no_lock: 5482 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5483 return; 5484 } 5485 5486 int 5487 sctp_sorecvmsg(struct socket *so, 5488 struct uio *uio, 5489 struct mbuf **mp, 5490 struct sockaddr *from, 5491 int fromlen, 5492 int *msg_flags, 5493 struct sctp_sndrcvinfo *sinfo, 5494 int filling_sinfo) 5495 { 5496 /* 5497 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5498 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5499 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
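*
* A rough userland-side sketch (illustrative only, not part of this file)
* of how a caller built on recvmsg(2) might react to the out-flags
* summarized just below:
*
*	nread = recvmsg(fd, &msg, 0);
*	if (nread > 0 && (msg.msg_flags & MSG_NOTIFICATION)) {
*		the buffer holds a union sctp_notification, not user data;
*	} else if (nread > 0 && (msg.msg_flags & MSG_EOR)) {
*		a complete user message was delivered;
*	} else if (nread > 0) {
*		partial delivery, keep reading for the rest of the message;
*	}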
5500 * On the way out we may send out any combination of: 5501 * MSG_NOTIFICATION MSG_EOR 5502 * 5503 */ 5504 struct sctp_inpcb *inp = NULL; 5505 ssize_t my_len = 0; 5506 ssize_t cp_len = 0; 5507 int error = 0; 5508 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5509 struct mbuf *m = NULL; 5510 struct sctp_tcb *stcb = NULL; 5511 int wakeup_read_socket = 0; 5512 int freecnt_applied = 0; 5513 int out_flags = 0, in_flags = 0; 5514 int block_allowed = 1; 5515 uint32_t freed_so_far = 0; 5516 ssize_t copied_so_far = 0; 5517 int in_eeor_mode = 0; 5518 int no_rcv_needed = 0; 5519 uint32_t rwnd_req = 0; 5520 int hold_sblock = 0; 5521 int hold_rlock = 0; 5522 ssize_t slen = 0; 5523 uint32_t held_length = 0; 5524 int sockbuf_lock = 0; 5525 5526 if (uio == NULL) { 5527 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5528 return (EINVAL); 5529 } 5530 5531 if (msg_flags) { 5532 in_flags = *msg_flags; 5533 if (in_flags & MSG_PEEK) 5534 SCTP_STAT_INCR(sctps_read_peeks); 5535 } else { 5536 in_flags = 0; 5537 } 5538 slen = uio->uio_resid; 5539 5540 /* Pull in and set up our int flags */ 5541 if (in_flags & MSG_OOB) { 5542 /* Out of band's NOT supported */ 5543 return (EOPNOTSUPP); 5544 } 5545 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5546 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5547 return (EINVAL); 5548 } 5549 if ((in_flags & (MSG_DONTWAIT 5550 | MSG_NBIO 5551 )) || 5552 SCTP_SO_IS_NBIO(so)) { 5553 block_allowed = 0; 5554 } 5555 /* setup the endpoint */ 5556 inp = (struct sctp_inpcb *)so->so_pcb; 5557 if (inp == NULL) { 5558 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5559 return (EFAULT); 5560 } 5561 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5562 /* Must be at least a MTU's worth */ 5563 if (rwnd_req < SCTP_MIN_RWND) 5564 rwnd_req = SCTP_MIN_RWND; 5565 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5566 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5567 sctp_misc_ints(SCTP_SORECV_ENTER, 5568 rwnd_req, in_eeor_mode, SCTP_SBAVAIL(&so->so_rcv), (uint32_t)uio->uio_resid); 5569 } 5570 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5571 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5572 rwnd_req, block_allowed, SCTP_SBAVAIL(&so->so_rcv), (uint32_t)uio->uio_resid); 5573 } 5574 5575 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(in_flags)); 5576 if (error) { 5577 goto release_unlocked; 5578 } 5579 sockbuf_lock = 1; 5580 restart: 5581 5582 restart_nosblocks: 5583 if (hold_sblock == 0) { 5584 SOCKBUF_LOCK(&so->so_rcv); 5585 hold_sblock = 1; 5586 } 5587 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5588 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5589 goto out; 5590 } 5591 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && SCTP_SBAVAIL(&so->so_rcv) == 0) { 5592 if (so->so_error) { 5593 error = so->so_error; 5594 if ((in_flags & MSG_PEEK) == 0) 5595 so->so_error = 0; 5596 goto out; 5597 } else { 5598 if (SCTP_SBAVAIL(&so->so_rcv) == 0) { 5599 /* indicate EOF */ 5600 error = 0; 5601 goto out; 5602 } 5603 } 5604 } 5605 if (SCTP_SBAVAIL(&so->so_rcv) <= held_length) { 5606 if (so->so_error) { 5607 error = so->so_error; 5608 if ((in_flags & MSG_PEEK) == 0) { 5609 so->so_error = 0; 5610 } 5611 goto out; 5612 } 5613 if ((SCTP_SBAVAIL(&so->so_rcv) == 0) && 5614 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5615 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5616 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5617 /* 5618 * 
For the active open side, clear flags for 5619 * re-use; a passive open is blocked by 5620 * connect. 5621 */ 5622 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5623 /* 5624 * You were aborted, the passive side 5625 * always hits here 5626 */ 5627 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5628 error = ECONNRESET; 5629 } 5630 so->so_state &= ~(SS_ISCONNECTING | 5631 SS_ISDISCONNECTING | 5632 SS_ISCONFIRMING | 5633 SS_ISCONNECTED); 5634 if (error == 0) { 5635 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5636 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5637 error = ENOTCONN; 5638 } 5639 } 5640 goto out; 5641 } 5642 } 5643 if (block_allowed) { 5644 error = sbwait(so, SO_RCV); 5645 if (error) { 5646 goto out; 5647 } 5648 held_length = 0; 5649 goto restart_nosblocks; 5650 } else { 5651 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5652 error = EWOULDBLOCK; 5653 goto out; 5654 } 5655 } 5656 if (hold_sblock == 1) { 5657 SOCKBUF_UNLOCK(&so->so_rcv); 5658 hold_sblock = 0; 5659 } 5660 /* we possibly have data we can read */ 5661 /* sa_ignore FREED_MEMORY */ 5662 control = TAILQ_FIRST(&inp->read_queue); 5663 if (control == NULL) { 5664 /* 5665 * This could be happening since the appender did the 5666 * increment but has not yet done the tailq insert onto the 5667 * read_queue. 5668 */ 5669 if (hold_rlock == 0) { 5670 SCTP_INP_READ_LOCK(inp); 5671 } 5672 control = TAILQ_FIRST(&inp->read_queue); 5673 if ((control == NULL) && (SCTP_SBAVAIL(&so->so_rcv) > 0)) { 5674 #ifdef INVARIANTS 5675 panic("Huh, its non zero and nothing on control?"); 5676 #endif 5677 SCTP_SB_CLEAR(so->so_rcv); 5678 } 5679 SCTP_INP_READ_UNLOCK(inp); 5680 hold_rlock = 0; 5681 goto restart; 5682 } 5683 5684 if ((control->length == 0) && 5685 (control->do_not_ref_stcb)) { 5686 /* 5687 * Clean-up code for freeing an assoc that left behind a 5688 * pdapi.. maybe a peer in EEOR mode that just closed after 5689 * sending and never indicated an EOR. 5690 */ 5691 if (hold_rlock == 0) { 5692 hold_rlock = 1; 5693 SCTP_INP_READ_LOCK(inp); 5694 } 5695 control->held_length = 0; 5696 if (control->data) { 5697 /* Hmm, there is data here .. fix it up */ 5698 struct mbuf *m_tmp; 5699 int cnt = 0; 5700 5701 m_tmp = control->data; 5702 while (m_tmp) { 5703 cnt += SCTP_BUF_LEN(m_tmp); 5704 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5705 control->tail_mbuf = m_tmp; 5706 control->end_added = 1; 5707 } 5708 m_tmp = SCTP_BUF_NEXT(m_tmp); 5709 } 5710 control->length = cnt; 5711 } else { 5712 /* remove it */ 5713 TAILQ_REMOVE(&inp->read_queue, control, next); 5714 /* Add back any hidden data */ 5715 sctp_free_remote_addr(control->whoFrom); 5716 sctp_free_a_readq(stcb, control); 5717 } 5718 if (hold_rlock) { 5719 hold_rlock = 0; 5720 SCTP_INP_READ_UNLOCK(inp); 5721 } 5722 goto restart; 5723 } 5724 if ((control->length == 0) && 5725 (control->end_added == 1)) { 5726 /* 5727 * Do we also need to check for (control->pdapi_aborted == 5728 * 1)? 5729 */ 5730 if (hold_rlock == 0) { 5731 hold_rlock = 1; 5732 SCTP_INP_READ_LOCK(inp); 5733 } 5734 TAILQ_REMOVE(&inp->read_queue, control, next); 5735 if (control->data) { 5736 #ifdef INVARIANTS 5737 panic("control->data not null but control->length == 0"); 5738 #else 5739 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5740 sctp_m_freem(control->data); 5741 control->data = NULL; 5742 #endif 5743 } 5744 if (control->aux_data) { 5745 sctp_m_free(control->aux_data); 5746 control->aux_data = NULL; 5747 } 5748 #ifdef INVARIANTS 5749 if (control->on_strm_q) { 5750 panic("About to free ctl:%p so:%p and its in %d", 5751 control, so, control->on_strm_q); 5752 } 5753 #endif 5754 sctp_free_remote_addr(control->whoFrom); 5755 sctp_free_a_readq(stcb, control); 5756 if (hold_rlock) { 5757 hold_rlock = 0; 5758 SCTP_INP_READ_UNLOCK(inp); 5759 } 5760 goto restart; 5761 } 5762 if (control->length == 0) { 5763 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5764 (filling_sinfo)) { 5765 /* find a more suitable one than this */ 5766 ctl = TAILQ_NEXT(control, next); 5767 while (ctl) { 5768 if ((ctl->stcb != control->stcb) && (ctl->length) && 5769 (ctl->some_taken || 5770 (ctl->spec_flags & M_NOTIFICATION) || 5771 ((ctl->do_not_ref_stcb == 0) && 5772 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5773 ) { 5774 /*- 5775 * If we have a different TCB next, and there is data 5776 * present: if we have already taken some (pdapi), OR we can 5777 * ref the tcb and no delivery has started on this stream, we 5778 * take it. Note we allow a notification on a different 5779 * assoc to be delivered. 5780 */ 5781 control = ctl; 5782 goto found_one; 5783 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5784 (ctl->length) && 5785 ((ctl->some_taken) || 5786 ((ctl->do_not_ref_stcb == 0) && 5787 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5788 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5789 /*- 5790 * If we have the same tcb, and there is data present, and we 5791 * have the strm interleave feature present. Then if we have 5792 * taken some (pdapi) or we can refer to that tcb AND we have 5793 * not started a delivery for this stream, we can take it. 5794 * Note we do NOT allow a notification on the same assoc to 5795 * be delivered. 5796 */ 5797 control = ctl; 5798 goto found_one; 5799 } 5800 ctl = TAILQ_NEXT(ctl, next); 5801 } 5802 } 5803 /* 5804 * if we reach here, no suitable replacement is available 5805 * <or> fragment interleave is NOT on. So stuff the sb_cc 5806 * into our held count, and it's time to sleep again. 5807 */ 5808 held_length = SCTP_SBAVAIL(&so->so_rcv); 5809 control->held_length = SCTP_SBAVAIL(&so->so_rcv); 5810 goto restart; 5811 } 5812 /* Clear the held length since there is something to read */ 5813 control->held_length = 0; 5814 found_one: 5815 /* 5816 * If we reach here, control has some data for us to read off. 5817 * Note that stcb COULD be NULL. 5818 */ 5819 if (hold_rlock == 0) { 5820 hold_rlock = 1; 5821 SCTP_INP_READ_LOCK(inp); 5822 } 5823 control->some_taken++; 5824 stcb = control->stcb; 5825 if (stcb) { 5826 if ((control->do_not_ref_stcb == 0) && 5827 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5828 if (freecnt_applied == 0) 5829 stcb = NULL; 5830 } else if (control->do_not_ref_stcb == 0) { 5831 /* you can't free it on me please */ 5832 /* 5833 * The lock on the socket buffer protects us so the 5834 * free code will stop. 
But since we used the 5835 * socketbuf lock and the sender uses the tcb_lock 5836 * to increment, we need to use the atomic add to 5837 * the refcnt 5838 */ 5839 if (freecnt_applied) { 5840 #ifdef INVARIANTS 5841 panic("refcnt already incremented"); 5842 #else 5843 SCTP_PRINTF("refcnt already incremented?\n"); 5844 #endif 5845 } else { 5846 atomic_add_int(&stcb->asoc.refcnt, 1); 5847 freecnt_applied = 1; 5848 } 5849 /* 5850 * Setup to remember how much we have not yet told 5851 * the peer our rwnd has opened up. Note we grab the 5852 * value from the tcb from last time. Note too that 5853 * sack sending clears this when a sack is sent, 5854 * which is fine. Once we hit the rwnd_req, we then 5855 * will go to the sctp_user_rcvd() that will not 5856 * lock until it KNOWs it MUST send a WUP-SACK. 5857 */ 5858 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5859 stcb->freed_by_sorcv_sincelast = 0; 5860 } 5861 } 5862 if (stcb && 5863 ((control->spec_flags & M_NOTIFICATION) == 0) && 5864 control->do_not_ref_stcb == 0) { 5865 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5866 } 5867 5868 /* First lets get off the sinfo and sockaddr info */ 5869 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5870 sinfo->sinfo_stream = control->sinfo_stream; 5871 sinfo->sinfo_ssn = (uint16_t)control->mid; 5872 sinfo->sinfo_flags = control->sinfo_flags; 5873 sinfo->sinfo_ppid = control->sinfo_ppid; 5874 sinfo->sinfo_context = control->sinfo_context; 5875 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5876 sinfo->sinfo_tsn = control->sinfo_tsn; 5877 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5878 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5879 nxt = TAILQ_NEXT(control, next); 5880 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5881 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5882 struct sctp_extrcvinfo *s_extra; 5883 5884 s_extra = (struct sctp_extrcvinfo *)sinfo; 5885 if ((nxt) && 5886 (nxt->length)) { 5887 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5888 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5889 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5890 } 5891 if (nxt->spec_flags & M_NOTIFICATION) { 5892 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5893 } 5894 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5895 s_extra->serinfo_next_length = nxt->length; 5896 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5897 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5898 if (nxt->tail_mbuf != NULL) { 5899 if (nxt->end_added) { 5900 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5901 } 5902 } 5903 } else { 5904 /* 5905 * we explicitly 0 this, since the memcpy 5906 * got some other things beyond the older 5907 * sinfo_ that is on the control's structure 5908 * :-D 5909 */ 5910 nxt = NULL; 5911 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5912 s_extra->serinfo_next_aid = 0; 5913 s_extra->serinfo_next_length = 0; 5914 s_extra->serinfo_next_ppid = 0; 5915 s_extra->serinfo_next_stream = 0; 5916 } 5917 } 5918 /* 5919 * update off the real current cum-ack, if we have an stcb. 5920 */ 5921 if ((control->do_not_ref_stcb == 0) && stcb) 5922 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5923 /* 5924 * mask off the high bits, we keep the actual chunk bits in 5925 * there. 
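* The upper byte of sinfo_flags carries the raw DATA chunk flags; if SCTP_DATA_UNORDERED is set there, it is mapped to the SCTP_UNORDERED bit handed back to the application just below.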
5926 */ 5927 sinfo->sinfo_flags &= 0x00ff; 5928 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5929 sinfo->sinfo_flags |= SCTP_UNORDERED; 5930 } 5931 } 5932 #ifdef SCTP_ASOCLOG_OF_TSNS 5933 { 5934 int index, newindex; 5935 struct sctp_pcbtsn_rlog *entry; 5936 5937 do { 5938 index = inp->readlog_index; 5939 newindex = index + 1; 5940 if (newindex >= SCTP_READ_LOG_SIZE) { 5941 newindex = 0; 5942 } 5943 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5944 entry = &inp->readlog[index]; 5945 entry->vtag = control->sinfo_assoc_id; 5946 entry->strm = control->sinfo_stream; 5947 entry->seq = (uint16_t)control->mid; 5948 entry->sz = control->length; 5949 entry->flgs = control->sinfo_flags; 5950 } 5951 #endif 5952 if ((fromlen > 0) && (from != NULL)) { 5953 union sctp_sockstore store; 5954 size_t len; 5955 5956 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5957 #ifdef INET6 5958 case AF_INET6: 5959 len = sizeof(struct sockaddr_in6); 5960 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5961 store.sin6.sin6_port = control->port_from; 5962 break; 5963 #endif 5964 #ifdef INET 5965 case AF_INET: 5966 #ifdef INET6 5967 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5968 len = sizeof(struct sockaddr_in6); 5969 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5970 &store.sin6); 5971 store.sin6.sin6_port = control->port_from; 5972 } else { 5973 len = sizeof(struct sockaddr_in); 5974 store.sin = control->whoFrom->ro._l_addr.sin; 5975 store.sin.sin_port = control->port_from; 5976 } 5977 #else 5978 len = sizeof(struct sockaddr_in); 5979 store.sin = control->whoFrom->ro._l_addr.sin; 5980 store.sin.sin_port = control->port_from; 5981 #endif 5982 break; 5983 #endif 5984 default: 5985 len = 0; 5986 break; 5987 } 5988 memcpy(from, &store, min((size_t)fromlen, len)); 5989 #ifdef INET6 5990 { 5991 struct sockaddr_in6 lsa6, *from6; 5992 5993 from6 = (struct sockaddr_in6 *)from; 5994 sctp_recover_scope_mac(from6, (&lsa6)); 5995 } 5996 #endif 5997 } 5998 if (hold_rlock) { 5999 SCTP_INP_READ_UNLOCK(inp); 6000 hold_rlock = 0; 6001 } 6002 if (hold_sblock) { 6003 SOCKBUF_UNLOCK(&so->so_rcv); 6004 hold_sblock = 0; 6005 } 6006 /* now copy out what data we can */ 6007 if (mp == NULL) { 6008 /* copy out each mbuf in the chain up to length */ 6009 get_more_data: 6010 m = control->data; 6011 while (m) { 6012 /* Move out all we can */ 6013 cp_len = uio->uio_resid; 6014 my_len = SCTP_BUF_LEN(m); 6015 if (cp_len > my_len) { 6016 /* not enough in this buf */ 6017 cp_len = my_len; 6018 } 6019 if (hold_rlock) { 6020 SCTP_INP_READ_UNLOCK(inp); 6021 hold_rlock = 0; 6022 } 6023 if (cp_len > 0) 6024 error = uiomove(mtod(m, char *), (int)cp_len, uio); 6025 /* re-read */ 6026 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 6027 goto release; 6028 } 6029 6030 if ((control->do_not_ref_stcb == 0) && stcb && 6031 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 6032 no_rcv_needed = 1; 6033 } 6034 if (error) { 6035 /* error we are out of here */ 6036 goto release; 6037 } 6038 SCTP_INP_READ_LOCK(inp); 6039 hold_rlock = 1; 6040 if (cp_len == SCTP_BUF_LEN(m)) { 6041 if ((SCTP_BUF_NEXT(m) == NULL) && 6042 (control->end_added)) { 6043 out_flags |= MSG_EOR; 6044 if ((control->do_not_ref_stcb == 0) && 6045 (control->stcb != NULL) && 6046 ((control->spec_flags & M_NOTIFICATION) == 0)) 6047 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6048 } 6049 if (control->spec_flags & M_NOTIFICATION) { 6050 out_flags |= MSG_NOTIFICATION; 6051 } 6052 /* we ate up the mbuf */ 
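/* With MSG_PEEK the mbuf is left on the receive queue and we just step past it; otherwise the mbuf is freed, the socket-buffer accounting is charged back, and the freed bytes are counted toward a possible receive-window update. */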
6053 if (in_flags & MSG_PEEK) { 6054 /* just looking */ 6055 m = SCTP_BUF_NEXT(m); 6056 copied_so_far += cp_len; 6057 } else { 6058 /* dispose of the mbuf */ 6059 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6060 sctp_sblog(&so->so_rcv, 6061 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6062 } 6063 sctp_sbfree(control, stcb, &so->so_rcv, m); 6064 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6065 sctp_sblog(&so->so_rcv, 6066 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6067 } 6068 copied_so_far += cp_len; 6069 freed_so_far += (uint32_t)cp_len; 6070 freed_so_far += MSIZE; 6071 atomic_subtract_int(&control->length, (int)cp_len); 6072 control->data = sctp_m_free(m); 6073 m = control->data; 6074 /* 6075 * been through it all, must hold sb 6076 * lock ok to null tail 6077 */ 6078 if (control->data == NULL) { 6079 #ifdef INVARIANTS 6080 if ((control->end_added == 0) || 6081 (TAILQ_NEXT(control, next) == NULL)) { 6082 /* 6083 * If the end is not 6084 * added, OR the 6085 * next is NOT null 6086 * we MUST have the 6087 * lock. 6088 */ 6089 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6090 panic("Hmm we don't own the lock?"); 6091 } 6092 } 6093 #endif 6094 control->tail_mbuf = NULL; 6095 #ifdef INVARIANTS 6096 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6097 panic("end_added, nothing left and no MSG_EOR"); 6098 } 6099 #endif 6100 } 6101 } 6102 } else { 6103 /* Do we need to trim the mbuf? */ 6104 if (control->spec_flags & M_NOTIFICATION) { 6105 out_flags |= MSG_NOTIFICATION; 6106 } 6107 if ((in_flags & MSG_PEEK) == 0) { 6108 SCTP_BUF_RESV_UF(m, cp_len); 6109 SCTP_BUF_LEN(m) -= (int)cp_len; 6110 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6111 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6112 } 6113 atomic_subtract_int(&so->so_rcv.sb_cc, (int)cp_len); 6114 if ((control->do_not_ref_stcb == 0) && 6115 stcb) { 6116 atomic_subtract_int(&stcb->asoc.sb_cc, (int)cp_len); 6117 } 6118 copied_so_far += cp_len; 6119 freed_so_far += (uint32_t)cp_len; 6120 freed_so_far += MSIZE; 6121 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6122 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6123 SCTP_LOG_SBRESULT, 0); 6124 } 6125 atomic_subtract_int(&control->length, (int)cp_len); 6126 } else { 6127 copied_so_far += cp_len; 6128 } 6129 } 6130 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6131 break; 6132 } 6133 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6134 (control->do_not_ref_stcb == 0) && 6135 (freed_so_far >= rwnd_req)) { 6136 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6137 } 6138 } /* end while(m) */ 6139 /* 6140 * At this point we have looked at it all and we either have 6141 * a MSG_EOR/or read all the user wants... <OR> 6142 * control->length == 0. 6143 */ 6144 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6145 /* we are done with this control */ 6146 if (control->length == 0) { 6147 if (control->data) { 6148 #ifdef INVARIANTS 6149 panic("control->data not null at read eor?"); 6150 #else 6151 SCTP_PRINTF("Strange, data left in the control buffer .. 
invariants would panic?\n"); 6152 sctp_m_freem(control->data); 6153 control->data = NULL; 6154 #endif 6155 } 6156 done_with_control: 6157 if (hold_rlock == 0) { 6158 SCTP_INP_READ_LOCK(inp); 6159 hold_rlock = 1; 6160 } 6161 TAILQ_REMOVE(&inp->read_queue, control, next); 6162 /* Add back any hidden data */ 6163 if (control->held_length) { 6164 held_length = 0; 6165 control->held_length = 0; 6166 wakeup_read_socket = 1; 6167 } 6168 if (control->aux_data) { 6169 sctp_m_free(control->aux_data); 6170 control->aux_data = NULL; 6171 } 6172 no_rcv_needed = control->do_not_ref_stcb; 6173 sctp_free_remote_addr(control->whoFrom); 6174 control->data = NULL; 6175 #ifdef INVARIANTS 6176 if (control->on_strm_q) { 6177 panic("About to free ctl:%p so:%p and its in %d", 6178 control, so, control->on_strm_q); 6179 } 6180 #endif 6181 sctp_free_a_readq(stcb, control); 6182 control = NULL; 6183 if ((freed_so_far >= rwnd_req) && 6184 (no_rcv_needed == 0)) 6185 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6186 6187 } else { 6188 /* 6189 * The user did not read all of this 6190 * message, turn off the returned MSG_EOR 6191 * since we are leaving more behind on the 6192 * control to read. 6193 */ 6194 #ifdef INVARIANTS 6195 if (control->end_added && 6196 (control->data == NULL) && 6197 (control->tail_mbuf == NULL)) { 6198 panic("Gak, control->length is corrupt?"); 6199 } 6200 #endif 6201 no_rcv_needed = control->do_not_ref_stcb; 6202 out_flags &= ~MSG_EOR; 6203 } 6204 } 6205 if (out_flags & MSG_EOR) { 6206 goto release; 6207 } 6208 if ((uio->uio_resid == 0) || 6209 ((in_eeor_mode) && 6210 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6211 goto release; 6212 } 6213 /* 6214 * If I hit here the receiver wants more and this message is 6215 * NOT done (pd-api). So two questions: can we block? If not, 6216 * we are done. Did the user NOT set MSG_WAITALL? 6217 */ 6218 if (block_allowed == 0) { 6219 goto release; 6220 } 6221 /* 6222 * We need to wait for more data. A few things: - We don't 6223 * release the I/O lock, so we don't get someone else 6224 * reading. - We must be sure to account for the case where 6225 * what is added is NOT for our control when we wake up. 6226 */ 6227 6228 /* 6229 * Do we need to tell the transport a rwnd update might be 6230 * needed before we go to sleep? 
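* We only do so when we are not peeking, at least rwnd_req bytes have been freed, the stcb may still be referenced (do_not_ref_stcb == 0) and a window update has not already been made unnecessary (no_rcv_needed == 0).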
6231 */ 6232 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6233 ((freed_so_far >= rwnd_req) && 6234 (control->do_not_ref_stcb == 0) && 6235 (no_rcv_needed == 0))) { 6236 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6237 } 6238 wait_some_more: 6239 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6240 goto release; 6241 } 6242 6243 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6244 goto release; 6245 6246 if (hold_rlock == 1) { 6247 SCTP_INP_READ_UNLOCK(inp); 6248 hold_rlock = 0; 6249 } 6250 if (hold_sblock == 0) { 6251 SOCKBUF_LOCK(&so->so_rcv); 6252 hold_sblock = 1; 6253 } 6254 if ((copied_so_far) && (control->length == 0) && 6255 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6256 goto release; 6257 } 6258 if (SCTP_SBAVAIL(&so->so_rcv) <= control->held_length) { 6259 error = sbwait(so, SO_RCV); 6260 if (error) { 6261 goto release; 6262 } 6263 control->held_length = 0; 6264 } 6265 if (hold_sblock) { 6266 SOCKBUF_UNLOCK(&so->so_rcv); 6267 hold_sblock = 0; 6268 } 6269 if (control->length == 0) { 6270 /* still nothing here */ 6271 if (control->end_added == 1) { 6272 /* he aborted, or is done i.e.did a shutdown */ 6273 out_flags |= MSG_EOR; 6274 if (control->pdapi_aborted) { 6275 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6276 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6277 6278 out_flags |= MSG_TRUNC; 6279 } else { 6280 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6281 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6282 } 6283 goto done_with_control; 6284 } 6285 if (SCTP_SBAVAIL(&so->so_rcv) > held_length) { 6286 control->held_length = SCTP_SBAVAIL(&so->so_rcv); 6287 held_length = 0; 6288 } 6289 goto wait_some_more; 6290 } else if (control->data == NULL) { 6291 /* 6292 * we must re-sync since data is probably being 6293 * added 6294 */ 6295 SCTP_INP_READ_LOCK(inp); 6296 if ((control->length > 0) && (control->data == NULL)) { 6297 /* 6298 * big trouble.. we have the lock and its 6299 * corrupt? 6300 */ 6301 #ifdef INVARIANTS 6302 panic("Impossible data==NULL length !=0"); 6303 #endif 6304 out_flags |= MSG_EOR; 6305 out_flags |= MSG_TRUNC; 6306 control->length = 0; 6307 SCTP_INP_READ_UNLOCK(inp); 6308 goto done_with_control; 6309 } 6310 SCTP_INP_READ_UNLOCK(inp); 6311 /* We will fall around to get more data */ 6312 } 6313 goto get_more_data; 6314 } else { 6315 /*- 6316 * Give caller back the mbuf chain, 6317 * store in uio_resid the length 6318 */ 6319 wakeup_read_socket = 0; 6320 if ((control->end_added == 0) || 6321 (TAILQ_NEXT(control, next) == NULL)) { 6322 /* Need to get rlock */ 6323 if (hold_rlock == 0) { 6324 SCTP_INP_READ_LOCK(inp); 6325 hold_rlock = 1; 6326 } 6327 } 6328 if (control->end_added) { 6329 out_flags |= MSG_EOR; 6330 if ((control->do_not_ref_stcb == 0) && 6331 (control->stcb != NULL) && 6332 ((control->spec_flags & M_NOTIFICATION) == 0)) 6333 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6334 } 6335 if (control->spec_flags & M_NOTIFICATION) { 6336 out_flags |= MSG_NOTIFICATION; 6337 } 6338 uio->uio_resid = control->length; 6339 *mp = control->data; 6340 m = control->data; 6341 while (m) { 6342 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6343 sctp_sblog(&so->so_rcv, 6344 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6345 } 6346 sctp_sbfree(control, stcb, &so->so_rcv, m); 6347 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6348 freed_so_far += MSIZE; 6349 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6350 sctp_sblog(&so->so_rcv, 6351 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6352 } 6353 m = SCTP_BUF_NEXT(m); 6354 } 6355 control->data = control->tail_mbuf = NULL; 6356 control->length = 0; 6357 if (out_flags & MSG_EOR) { 6358 /* Done with this control */ 6359 goto done_with_control; 6360 } 6361 } 6362 release: 6363 if (hold_rlock == 1) { 6364 SCTP_INP_READ_UNLOCK(inp); 6365 hold_rlock = 0; 6366 } 6367 if (hold_sblock == 1) { 6368 SOCKBUF_UNLOCK(&so->so_rcv); 6369 hold_sblock = 0; 6370 } 6371 6372 SOCK_IO_RECV_UNLOCK(so); 6373 sockbuf_lock = 0; 6374 6375 release_unlocked: 6376 if (hold_sblock) { 6377 SOCKBUF_UNLOCK(&so->so_rcv); 6378 hold_sblock = 0; 6379 } 6380 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6381 if ((freed_so_far >= rwnd_req) && 6382 (control && (control->do_not_ref_stcb == 0)) && 6383 (no_rcv_needed == 0)) 6384 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6385 } 6386 out: 6387 if (msg_flags) { 6388 *msg_flags = out_flags; 6389 } 6390 if (((out_flags & MSG_EOR) == 0) && 6391 ((in_flags & MSG_PEEK) == 0) && 6392 (sinfo) && 6393 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6394 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6395 struct sctp_extrcvinfo *s_extra; 6396 6397 s_extra = (struct sctp_extrcvinfo *)sinfo; 6398 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6399 } 6400 if (hold_rlock == 1) { 6401 SCTP_INP_READ_UNLOCK(inp); 6402 } 6403 if (hold_sblock) { 6404 SOCKBUF_UNLOCK(&so->so_rcv); 6405 } 6406 if (sockbuf_lock) { 6407 SOCK_IO_RECV_UNLOCK(so); 6408 } 6409 6410 if (freecnt_applied) { 6411 /* 6412 * The lock on the socket buffer protects us so the free 6413 * code will stop. But since we used the socketbuf lock and 6414 * the sender uses the tcb_lock to increment, we need to use 6415 * the atomic add to the refcnt. 6416 */ 6417 if (stcb == NULL) { 6418 #ifdef INVARIANTS 6419 panic("stcb for refcnt has gone NULL?"); 6420 goto stage_left; 6421 #else 6422 goto stage_left; 6423 #endif 6424 } 6425 /* Save the value back for next time */ 6426 stcb->freed_by_sorcv_sincelast = freed_so_far; 6427 atomic_subtract_int(&stcb->asoc.refcnt, 1); 6428 } 6429 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6430 if (stcb) { 6431 sctp_misc_ints(SCTP_SORECV_DONE, 6432 freed_so_far, 6433 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6434 stcb->asoc.my_rwnd, 6435 SCTP_SBAVAIL(&so->so_rcv)); 6436 } else { 6437 sctp_misc_ints(SCTP_SORECV_DONE, 6438 freed_so_far, 6439 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6440 0, 6441 SCTP_SBAVAIL(&so->so_rcv)); 6442 } 6443 } 6444 stage_left: 6445 if (wakeup_read_socket) { 6446 sctp_sorwakeup(inp, so); 6447 } 6448 return (error); 6449 } 6450 6451 #ifdef SCTP_MBUF_LOGGING 6452 struct mbuf * 6453 sctp_m_free(struct mbuf *m) 6454 { 6455 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6456 sctp_log_mb(m, SCTP_MBUF_IFREE); 6457 } 6458 return (m_free(m)); 6459 } 6460 6461 void 6462 sctp_m_freem(struct mbuf *mb) 6463 { 6464 while (mb != NULL) 6465 mb = sctp_m_free(mb); 6466 } 6467 6468 #endif 6469 6470 int 6471 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6472 { 6473 /* 6474 * Given a local address. 
For all associations that holds the 6475 * address, request a peer-set-primary. 6476 */ 6477 struct sctp_ifa *ifa; 6478 struct sctp_laddr *wi; 6479 6480 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6481 if (ifa == NULL) { 6482 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6483 return (EADDRNOTAVAIL); 6484 } 6485 /* 6486 * Now that we have the ifa we must awaken the iterator with this 6487 * message. 6488 */ 6489 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6490 if (wi == NULL) { 6491 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6492 return (ENOMEM); 6493 } 6494 /* Now incr the count and int wi structure */ 6495 SCTP_INCR_LADDR_COUNT(); 6496 memset(wi, 0, sizeof(*wi)); 6497 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6498 wi->ifa = ifa; 6499 wi->action = SCTP_SET_PRIM_ADDR; 6500 atomic_add_int(&ifa->refcount, 1); 6501 6502 /* Now add it to the work queue */ 6503 SCTP_WQ_ADDR_LOCK(); 6504 /* 6505 * Should this really be a tailq? As it is we will process the 6506 * newest first :-0 6507 */ 6508 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6509 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6510 (struct sctp_inpcb *)NULL, 6511 (struct sctp_tcb *)NULL, 6512 (struct sctp_nets *)NULL); 6513 SCTP_WQ_ADDR_UNLOCK(); 6514 return (0); 6515 } 6516 6517 int 6518 sctp_soreceive(struct socket *so, 6519 struct sockaddr **psa, 6520 struct uio *uio, 6521 struct mbuf **mp0, 6522 struct mbuf **controlp, 6523 int *flagsp) 6524 { 6525 int error, fromlen; 6526 uint8_t sockbuf[256]; 6527 struct sockaddr *from; 6528 struct sctp_extrcvinfo sinfo; 6529 int filling_sinfo = 1; 6530 int flags; 6531 struct sctp_inpcb *inp; 6532 6533 inp = (struct sctp_inpcb *)so->so_pcb; 6534 /* pickup the assoc we are reading from */ 6535 if (inp == NULL) { 6536 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6537 return (EINVAL); 6538 } 6539 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6540 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6541 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6542 (controlp == NULL)) { 6543 /* user does not want the sndrcv ctl */ 6544 filling_sinfo = 0; 6545 } 6546 if (psa) { 6547 from = (struct sockaddr *)sockbuf; 6548 fromlen = sizeof(sockbuf); 6549 from->sa_len = 0; 6550 } else { 6551 from = NULL; 6552 fromlen = 0; 6553 } 6554 6555 if (filling_sinfo) { 6556 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6557 } 6558 if (flagsp != NULL) { 6559 flags = *flagsp; 6560 } else { 6561 flags = 0; 6562 } 6563 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6564 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6565 if (flagsp != NULL) { 6566 *flagsp = flags; 6567 } 6568 if (controlp != NULL) { 6569 /* copy back the sinfo in a CMSG format */ 6570 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6571 *controlp = sctp_build_ctl_nchunk(inp, 6572 (struct sctp_sndrcvinfo *)&sinfo); 6573 } else { 6574 *controlp = NULL; 6575 } 6576 } 6577 if (psa) { 6578 /* copy back the address info */ 6579 if (from && from->sa_len) { 6580 *psa = sodupsockaddr(from, M_NOWAIT); 6581 } else { 6582 *psa = NULL; 6583 } 6584 } 6585 return (error); 6586 } 6587 6588 int 6589 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6590 int totaddr, int *error) 6591 { 6592 int added = 0; 6593 int i; 6594 struct sctp_inpcb *inp; 6595 struct sockaddr *sa; 6596 size_t incr = 0; 6597 #ifdef INET 6598 struct sockaddr_in *sin; 6599 #endif 6600 #ifdef 
INET6 6601 struct sockaddr_in6 *sin6; 6602 #endif 6603 6604 sa = addr; 6605 inp = stcb->sctp_ep; 6606 *error = 0; 6607 for (i = 0; i < totaddr; i++) { 6608 switch (sa->sa_family) { 6609 #ifdef INET 6610 case AF_INET: 6611 incr = sizeof(struct sockaddr_in); 6612 sin = (struct sockaddr_in *)sa; 6613 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6614 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6615 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6616 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6617 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6618 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6619 *error = EINVAL; 6620 goto out_now; 6621 } 6622 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6623 SCTP_DONOT_SETSCOPE, 6624 SCTP_ADDR_IS_CONFIRMED)) { 6625 /* assoc gone no un-lock */ 6626 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6627 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6628 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6629 *error = ENOBUFS; 6630 goto out_now; 6631 } 6632 added++; 6633 break; 6634 #endif 6635 #ifdef INET6 6636 case AF_INET6: 6637 incr = sizeof(struct sockaddr_in6); 6638 sin6 = (struct sockaddr_in6 *)sa; 6639 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6640 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6641 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6642 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6643 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6644 *error = EINVAL; 6645 goto out_now; 6646 } 6647 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6648 SCTP_DONOT_SETSCOPE, 6649 SCTP_ADDR_IS_CONFIRMED)) { 6650 /* assoc gone no un-lock */ 6651 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6652 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6653 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6654 *error = ENOBUFS; 6655 goto out_now; 6656 } 6657 added++; 6658 break; 6659 #endif 6660 default: 6661 break; 6662 } 6663 sa = (struct sockaddr *)((caddr_t)sa + incr); 6664 } 6665 out_now: 6666 return (added); 6667 } 6668 6669 int 6670 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6671 unsigned int totaddr, 6672 unsigned int *num_v4, unsigned int *num_v6, 6673 unsigned int limit) 6674 { 6675 struct sockaddr *sa; 6676 struct sctp_tcb *stcb; 6677 unsigned int incr, at, i; 6678 6679 at = 0; 6680 sa = addr; 6681 *num_v6 = *num_v4 = 0; 6682 /* account and validate addresses */ 6683 if (totaddr == 0) { 6684 return (EINVAL); 6685 } 6686 for (i = 0; i < totaddr; i++) { 6687 if (at + sizeof(struct sockaddr) > limit) { 6688 return (EINVAL); 6689 } 6690 switch (sa->sa_family) { 6691 #ifdef INET 6692 case AF_INET: 6693 incr = (unsigned int)sizeof(struct sockaddr_in); 6694 if (sa->sa_len != incr) { 6695 return (EINVAL); 6696 } 6697 (*num_v4) += 1; 6698 break; 6699 #endif 6700 #ifdef INET6 6701 case AF_INET6: 6702 { 6703 struct sockaddr_in6 *sin6; 6704 6705 incr = (unsigned int)sizeof(struct sockaddr_in6); 6706 if (sa->sa_len != incr) { 6707 return (EINVAL); 6708 } 6709 sin6 = (struct sockaddr_in6 *)sa; 6710 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6711 /* Must be non-mapped for connectx */ 6712 return (EINVAL); 6713 } 6714 (*num_v6) += 1; 6715 break; 6716 } 6717 #endif 6718 default: 6719 return (EINVAL); 6720 } 6721 if ((at + incr) > limit) { 6722 return (EINVAL); 6723 } 6724 SCTP_INP_INCR_REF(inp); 6725 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6726 if (stcb != NULL) { 6727 SCTP_TCB_UNLOCK(stcb); 6728 return (EALREADY); 6729 } else { 6730 
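/* No association exists yet for this address; drop the endpoint reference taken for the lookup and move on to the next address. */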
SCTP_INP_DECR_REF(inp); 6731 } 6732 at += incr; 6733 sa = (struct sockaddr *)((caddr_t)sa + incr); 6734 } 6735 return (0); 6736 } 6737 6738 /* 6739 * sctp_bindx(ADD) for one address. 6740 * assumes all arguments are valid/checked by caller. 6741 */ 6742 void 6743 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6744 struct sockaddr *sa, uint32_t vrf_id, int *error, 6745 void *p) 6746 { 6747 #if defined(INET) && defined(INET6) 6748 struct sockaddr_in sin; 6749 #endif 6750 #ifdef INET6 6751 struct sockaddr_in6 *sin6; 6752 #endif 6753 #ifdef INET 6754 struct sockaddr_in *sinp; 6755 #endif 6756 struct sockaddr *addr_to_use; 6757 struct sctp_inpcb *lep; 6758 uint16_t port; 6759 6760 /* see if we're bound all already! */ 6761 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6762 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6763 *error = EINVAL; 6764 return; 6765 } 6766 switch (sa->sa_family) { 6767 #ifdef INET6 6768 case AF_INET6: 6769 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6770 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6771 *error = EINVAL; 6772 return; 6773 } 6774 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6775 /* can only bind v6 on PF_INET6 sockets */ 6776 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6777 *error = EINVAL; 6778 return; 6779 } 6780 sin6 = (struct sockaddr_in6 *)sa; 6781 port = sin6->sin6_port; 6782 #ifdef INET 6783 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6784 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6785 SCTP_IPV6_V6ONLY(inp)) { 6786 /* can't bind v4-mapped on PF_INET sockets */ 6787 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6788 *error = EINVAL; 6789 return; 6790 } 6791 in6_sin6_2_sin(&sin, sin6); 6792 addr_to_use = (struct sockaddr *)&sin; 6793 } else { 6794 addr_to_use = sa; 6795 } 6796 #else 6797 addr_to_use = sa; 6798 #endif 6799 break; 6800 #endif 6801 #ifdef INET 6802 case AF_INET: 6803 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6804 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6805 *error = EINVAL; 6806 return; 6807 } 6808 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6809 SCTP_IPV6_V6ONLY(inp)) { 6810 /* can't bind v4 on PF_INET sockets */ 6811 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6812 *error = EINVAL; 6813 return; 6814 } 6815 sinp = (struct sockaddr_in *)sa; 6816 port = sinp->sin_port; 6817 addr_to_use = sa; 6818 break; 6819 #endif 6820 default: 6821 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6822 *error = EINVAL; 6823 return; 6824 } 6825 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6826 if (p == NULL) { 6827 /* Can't get proc for Net/Open BSD */ 6828 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6829 *error = EINVAL; 6830 return; 6831 } 6832 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6833 return; 6834 } 6835 /* Validate the incoming port. */ 6836 if ((port != 0) && (port != inp->sctp_lport)) { 6837 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6838 *error = EINVAL; 6839 return; 6840 } 6841 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6842 if (lep == NULL) { 6843 /* add the address */ 6844 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6845 SCTP_ADD_IP_ADDRESS, vrf_id); 6846 } else { 6847 if (lep != inp) { 6848 *error = EADDRINUSE; 6849 } 6850 SCTP_INP_DECR_REF(lep); 6851 } 6852 } 6853 6854 /* 6855 * sctp_bindx(DELETE) for one address. 
6856 * assumes all arguments are valid/checked by caller. 6857 */ 6858 void 6859 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6860 struct sockaddr *sa, uint32_t vrf_id, int *error) 6861 { 6862 struct sockaddr *addr_to_use; 6863 #if defined(INET) && defined(INET6) 6864 struct sockaddr_in6 *sin6; 6865 struct sockaddr_in sin; 6866 #endif 6867 6868 /* see if we're bound all already! */ 6869 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6870 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6871 *error = EINVAL; 6872 return; 6873 } 6874 switch (sa->sa_family) { 6875 #ifdef INET6 6876 case AF_INET6: 6877 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6878 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6879 *error = EINVAL; 6880 return; 6881 } 6882 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6883 /* can only bind v6 on PF_INET6 sockets */ 6884 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6885 *error = EINVAL; 6886 return; 6887 } 6888 #ifdef INET 6889 sin6 = (struct sockaddr_in6 *)sa; 6890 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6891 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6892 SCTP_IPV6_V6ONLY(inp)) { 6893 /* can't bind mapped-v4 on PF_INET sockets */ 6894 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6895 *error = EINVAL; 6896 return; 6897 } 6898 in6_sin6_2_sin(&sin, sin6); 6899 addr_to_use = (struct sockaddr *)&sin; 6900 } else { 6901 addr_to_use = sa; 6902 } 6903 #else 6904 addr_to_use = sa; 6905 #endif 6906 break; 6907 #endif 6908 #ifdef INET 6909 case AF_INET: 6910 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6911 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6912 *error = EINVAL; 6913 return; 6914 } 6915 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6916 SCTP_IPV6_V6ONLY(inp)) { 6917 /* can't bind v4 on PF_INET sockets */ 6918 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6919 *error = EINVAL; 6920 return; 6921 } 6922 addr_to_use = sa; 6923 break; 6924 #endif 6925 default: 6926 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6927 *error = EINVAL; 6928 return; 6929 } 6930 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6931 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6932 vrf_id); 6933 } 6934 6935 /* 6936 * returns the valid local address count for an assoc, taking into account 6937 * all scoping rules 6938 */ 6939 int 6940 sctp_local_addr_count(struct sctp_tcb *stcb) 6941 { 6942 int loopback_scope; 6943 #if defined(INET) 6944 int ipv4_local_scope, ipv4_addr_legal; 6945 #endif 6946 #if defined(INET6) 6947 int local_scope, site_scope, ipv6_addr_legal; 6948 #endif 6949 struct sctp_vrf *vrf; 6950 struct sctp_ifn *sctp_ifn; 6951 struct sctp_ifa *sctp_ifa; 6952 int count = 0; 6953 6954 /* Turn on all the appropriate scopes */ 6955 loopback_scope = stcb->asoc.scope.loopback_scope; 6956 #if defined(INET) 6957 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6958 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6959 #endif 6960 #if defined(INET6) 6961 local_scope = stcb->asoc.scope.local_scope; 6962 site_scope = stcb->asoc.scope.site_scope; 6963 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6964 #endif 6965 SCTP_IPI_ADDR_RLOCK(); 6966 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6967 if (vrf == NULL) { 6968 /* no vrf, no addresses */ 6969 SCTP_IPI_ADDR_RUNLOCK(); 6970 return (0); 6971 } 6972 6973 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6974 /* 6975 * bound all case: go through all ifns on the vrf 6976 */ 6977 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6978 if ((loopback_scope == 0) && 6979 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6980 continue; 6981 } 6982 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6983 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6984 continue; 6985 switch (sctp_ifa->address.sa.sa_family) { 6986 #ifdef INET 6987 case AF_INET: 6988 if (ipv4_addr_legal) { 6989 struct sockaddr_in *sin; 6990 6991 sin = &sctp_ifa->address.sin; 6992 if (sin->sin_addr.s_addr == 0) { 6993 /* 6994 * skip unspecified 6995 * addrs 6996 */ 6997 continue; 6998 } 6999 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 7000 &sin->sin_addr) != 0) { 7001 continue; 7002 } 7003 if ((ipv4_local_scope == 0) && 7004 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 7005 continue; 7006 } 7007 /* count this one */ 7008 count++; 7009 } else { 7010 continue; 7011 } 7012 break; 7013 #endif 7014 #ifdef INET6 7015 case AF_INET6: 7016 if (ipv6_addr_legal) { 7017 struct sockaddr_in6 *sin6; 7018 7019 sin6 = &sctp_ifa->address.sin6; 7020 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 7021 continue; 7022 } 7023 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 7024 &sin6->sin6_addr) != 0) { 7025 continue; 7026 } 7027 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 7028 if (local_scope == 0) 7029 continue; 7030 if (sin6->sin6_scope_id == 0) { 7031 if (sa6_recoverscope(sin6) != 0) 7032 /* 7033 * 7034 * bad 7035 * link 7036 * 7037 * local 7038 * 7039 * address 7040 */ 7041 continue; 7042 } 7043 } 7044 if ((site_scope == 0) && 7045 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7046 continue; 7047 } 7048 /* count this one */ 7049 count++; 7050 } 7051 break; 7052 #endif 7053 default: 7054 /* TSNH */ 7055 break; 7056 } 7057 } 7058 } 7059 } else { 7060 /* 7061 * subset bound case 7062 */ 7063 struct sctp_laddr *laddr; 7064 7065 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7066 sctp_nxt_addr) { 7067 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7068 continue; 7069 } 7070 /* count this one */ 7071 count++; 7072 } 7073 } 7074 SCTP_IPI_ADDR_RUNLOCK(); 7075 return (count); 7076 } 7077 7078 #if defined(SCTP_LOCAL_TRACE_BUF) 7079 7080 void 7081 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7082 { 7083 uint32_t saveindex, newindex; 7084 7085 do { 7086 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7087 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7088 newindex = 1; 7089 } else { 7090 newindex = saveindex + 1; 7091 } 7092 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7093 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7094 saveindex = 0; 7095 } 7096 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7097 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7098 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7099 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7100 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7101 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7102 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7103 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7104 } 7105 7106 #endif 7107 static bool 7108 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7109 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7110 { 7111 struct ip *iph; 7112 #ifdef INET6 7113 struct ip6_hdr *ip6; 7114 #endif 7115 struct mbuf *sp, *last; 7116 struct udphdr *uhdr; 7117 uint16_t port; 7118 7119 if ((m->m_flags & M_PKTHDR) == 0) { 7120 /* Can't handle one that is not a pkt hdr */ 7121 goto out; 7122 } 7123 /* Pull the src port */ 7124 iph = mtod(m, struct ip *); 7125 uhdr = (struct udphdr *)((caddr_t)iph + off); 7126 port = uhdr->uh_sport; 7127 /* 7128 * Split out the mbuf chain. Leave the IP header in m, place the 7129 * rest in the sp. 7130 */ 7131 sp = m_split(m, off, M_NOWAIT); 7132 if (sp == NULL) { 7133 /* Gak, drop packet, we can't do a split */ 7134 goto out; 7135 } 7136 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7137 /* Gak, packet can't have an SCTP header in it - too small */ 7138 m_freem(sp); 7139 goto out; 7140 } 7141 /* Now pull up the UDP header and SCTP header together */ 7142 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7143 if (sp == NULL) { 7144 /* Gak pullup failed */ 7145 goto out; 7146 } 7147 /* Trim out the UDP header */ 7148 m_adj(sp, sizeof(struct udphdr)); 7149 7150 /* Now reconstruct the mbuf chain */ 7151 for (last = m; last->m_next; last = last->m_next); 7152 last->m_next = sp; 7153 m->m_pkthdr.len += sp->m_pkthdr.len; 7154 /* 7155 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7156 * checksum and it was valid. Since CSUM_DATA_VALID == 7157 * CSUM_SCTP_VALID this would imply that the HW also verified the 7158 * SCTP checksum. Therefore, clear the bit. 
7159 */ 7160 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7161 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7162 m->m_pkthdr.len, 7163 if_name(m->m_pkthdr.rcvif), 7164 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7165 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7166 iph = mtod(m, struct ip *); 7167 switch (iph->ip_v) { 7168 #ifdef INET 7169 case IPVERSION: 7170 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7171 sctp_input_with_port(m, off, port); 7172 break; 7173 #endif 7174 #ifdef INET6 7175 case IPV6_VERSION >> 4: 7176 ip6 = mtod(m, struct ip6_hdr *); 7177 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7178 sctp6_input_with_port(&m, &off, port); 7179 break; 7180 #endif 7181 default: 7182 goto out; 7183 break; 7184 } 7185 return (true); 7186 out: 7187 m_freem(m); 7188 7189 return (true); 7190 } 7191 7192 #ifdef INET 7193 static void 7194 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7195 { 7196 struct ip *outer_ip, *inner_ip; 7197 struct sctphdr *sh; 7198 struct icmp *icmp; 7199 struct udphdr *udp; 7200 struct sctp_inpcb *inp; 7201 struct sctp_tcb *stcb; 7202 struct sctp_nets *net; 7203 struct sctp_init_chunk *ch; 7204 struct sockaddr_in src, dst; 7205 uint8_t type, code; 7206 7207 inner_ip = (struct ip *)vip; 7208 icmp = (struct icmp *)((caddr_t)inner_ip - 7209 (sizeof(struct icmp) - sizeof(struct ip))); 7210 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7211 if (ntohs(outer_ip->ip_len) < 7212 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7213 return; 7214 } 7215 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7216 sh = (struct sctphdr *)(udp + 1); 7217 memset(&src, 0, sizeof(struct sockaddr_in)); 7218 src.sin_family = AF_INET; 7219 src.sin_len = sizeof(struct sockaddr_in); 7220 src.sin_port = sh->src_port; 7221 src.sin_addr = inner_ip->ip_src; 7222 memset(&dst, 0, sizeof(struct sockaddr_in)); 7223 dst.sin_family = AF_INET; 7224 dst.sin_len = sizeof(struct sockaddr_in); 7225 dst.sin_port = sh->dest_port; 7226 dst.sin_addr = inner_ip->ip_dst; 7227 /* 7228 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7229 * holds our local endpoint address. Thus we reverse the dst and the 7230 * src in the lookup. 7231 */ 7232 inp = NULL; 7233 net = NULL; 7234 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7235 (struct sockaddr *)&src, 7236 &inp, &net, 1, 7237 SCTP_DEFAULT_VRFID); 7238 if ((stcb != NULL) && 7239 (net != NULL) && 7240 (inp != NULL)) { 7241 /* Check the UDP port numbers */ 7242 if ((udp->uh_dport != net->port) || 7243 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7244 SCTP_TCB_UNLOCK(stcb); 7245 return; 7246 } 7247 /* Check the verification tag */ 7248 if (ntohl(sh->v_tag) != 0) { 7249 /* 7250 * This must be the verification tag used for 7251 * sending out packets. We don't consider packets 7252 * reflecting the verification tag. 7253 */ 7254 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7255 SCTP_TCB_UNLOCK(stcb); 7256 return; 7257 } 7258 } else { 7259 if (ntohs(outer_ip->ip_len) >= 7260 sizeof(struct ip) + 7261 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7262 /* 7263 * In this case we can check if we got an 7264 * INIT chunk and if the initiate tag 7265 * matches. 
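* An INIT is sent with a zero verification tag, so here the embedded initiate tag is the value to compare against our local tag (asoc.my_vtag).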
7266 */ 7267 ch = (struct sctp_init_chunk *)(sh + 1); 7268 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7269 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7270 SCTP_TCB_UNLOCK(stcb); 7271 return; 7272 } 7273 } else { 7274 SCTP_TCB_UNLOCK(stcb); 7275 return; 7276 } 7277 } 7278 type = icmp->icmp_type; 7279 code = icmp->icmp_code; 7280 if ((type == ICMP_UNREACH) && 7281 (code == ICMP_UNREACH_PORT)) { 7282 code = ICMP_UNREACH_PROTOCOL; 7283 } 7284 sctp_notify(inp, stcb, net, type, code, 7285 ntohs(inner_ip->ip_len), 7286 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7287 } else { 7288 if ((stcb == NULL) && (inp != NULL)) { 7289 /* reduce ref-count */ 7290 SCTP_INP_WLOCK(inp); 7291 SCTP_INP_DECR_REF(inp); 7292 SCTP_INP_WUNLOCK(inp); 7293 } 7294 if (stcb) { 7295 SCTP_TCB_UNLOCK(stcb); 7296 } 7297 } 7298 return; 7299 } 7300 #endif 7301 7302 #ifdef INET6 7303 static void 7304 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7305 { 7306 struct ip6ctlparam *ip6cp; 7307 struct sctp_inpcb *inp; 7308 struct sctp_tcb *stcb; 7309 struct sctp_nets *net; 7310 struct sctphdr sh; 7311 struct udphdr udp; 7312 struct sockaddr_in6 src, dst; 7313 uint8_t type, code; 7314 7315 ip6cp = (struct ip6ctlparam *)d; 7316 /* 7317 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7318 */ 7319 if (ip6cp->ip6c_m == NULL) { 7320 return; 7321 } 7322 /* 7323 * Check if we can safely examine the ports and the verification tag 7324 * of the SCTP common header. 7325 */ 7326 if (ip6cp->ip6c_m->m_pkthdr.len < 7327 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7328 return; 7329 } 7330 /* Copy out the UDP header. */ 7331 memset(&udp, 0, sizeof(struct udphdr)); 7332 m_copydata(ip6cp->ip6c_m, 7333 ip6cp->ip6c_off, 7334 sizeof(struct udphdr), 7335 (caddr_t)&udp); 7336 /* Copy out the port numbers and the verification tag. */ 7337 memset(&sh, 0, sizeof(struct sctphdr)); 7338 m_copydata(ip6cp->ip6c_m, 7339 ip6cp->ip6c_off + sizeof(struct udphdr), 7340 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7341 (caddr_t)&sh); 7342 memset(&src, 0, sizeof(struct sockaddr_in6)); 7343 src.sin6_family = AF_INET6; 7344 src.sin6_len = sizeof(struct sockaddr_in6); 7345 src.sin6_port = sh.src_port; 7346 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7347 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7348 return; 7349 } 7350 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7351 dst.sin6_family = AF_INET6; 7352 dst.sin6_len = sizeof(struct sockaddr_in6); 7353 dst.sin6_port = sh.dest_port; 7354 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7355 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7356 return; 7357 } 7358 inp = NULL; 7359 net = NULL; 7360 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7361 (struct sockaddr *)&src, 7362 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7363 if ((stcb != NULL) && 7364 (net != NULL) && 7365 (inp != NULL)) { 7366 /* Check the UDP port numbers */ 7367 if ((udp.uh_dport != net->port) || 7368 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7369 SCTP_TCB_UNLOCK(stcb); 7370 return; 7371 } 7372 /* Check the verification tag */ 7373 if (ntohl(sh.v_tag) != 0) { 7374 /* 7375 * This must be the verification tag used for 7376 * sending out packets. We don't consider packets 7377 * reflecting the verification tag. 
7378 */ 7379 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7380 SCTP_TCB_UNLOCK(stcb); 7381 return; 7382 } 7383 } else { 7384 if (ip6cp->ip6c_m->m_pkthdr.len >= 7385 ip6cp->ip6c_off + sizeof(struct udphdr) + 7386 sizeof(struct sctphdr) + 7387 sizeof(struct sctp_chunkhdr) + 7388 offsetof(struct sctp_init, a_rwnd)) { 7389 /* 7390 * In this case we can check if we got an 7391 * INIT chunk and if the initiate tag 7392 * matches. 7393 */ 7394 uint32_t initiate_tag; 7395 uint8_t chunk_type; 7396 7397 m_copydata(ip6cp->ip6c_m, 7398 ip6cp->ip6c_off + 7399 sizeof(struct udphdr) + 7400 sizeof(struct sctphdr), 7401 sizeof(uint8_t), 7402 (caddr_t)&chunk_type); 7403 m_copydata(ip6cp->ip6c_m, 7404 ip6cp->ip6c_off + 7405 sizeof(struct udphdr) + 7406 sizeof(struct sctphdr) + 7407 sizeof(struct sctp_chunkhdr), 7408 sizeof(uint32_t), 7409 (caddr_t)&initiate_tag); 7410 if ((chunk_type != SCTP_INITIATION) || 7411 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7412 SCTP_TCB_UNLOCK(stcb); 7413 return; 7414 } 7415 } else { 7416 SCTP_TCB_UNLOCK(stcb); 7417 return; 7418 } 7419 } 7420 type = ip6cp->ip6c_icmp6->icmp6_type; 7421 code = ip6cp->ip6c_icmp6->icmp6_code; 7422 if ((type == ICMP6_DST_UNREACH) && 7423 (code == ICMP6_DST_UNREACH_NOPORT)) { 7424 type = ICMP6_PARAM_PROB; 7425 code = ICMP6_PARAMPROB_NEXTHEADER; 7426 } 7427 sctp6_notify(inp, stcb, net, type, code, 7428 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7429 } else { 7430 if ((stcb == NULL) && (inp != NULL)) { 7431 /* reduce inp's ref-count */ 7432 SCTP_INP_WLOCK(inp); 7433 SCTP_INP_DECR_REF(inp); 7434 SCTP_INP_WUNLOCK(inp); 7435 } 7436 if (stcb) { 7437 SCTP_TCB_UNLOCK(stcb); 7438 } 7439 } 7440 } 7441 #endif 7442 7443 void 7444 sctp_over_udp_stop(void) 7445 { 7446 /* 7447 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7448 * for writing! 7449 */ 7450 #ifdef INET 7451 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7452 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7453 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7454 } 7455 #endif 7456 #ifdef INET6 7457 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7458 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7459 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7460 } 7461 #endif 7462 } 7463 7464 int 7465 sctp_over_udp_start(void) 7466 { 7467 uint16_t port; 7468 int ret; 7469 #ifdef INET 7470 struct sockaddr_in sin; 7471 #endif 7472 #ifdef INET6 7473 struct sockaddr_in6 sin6; 7474 #endif 7475 /* 7476 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7477 * for writing! 7478 */ 7479 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7480 if (ntohs(port) == 0) { 7481 /* Must have a port set */ 7482 return (EINVAL); 7483 } 7484 #ifdef INET 7485 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7486 /* Already running -- must stop first */ 7487 return (EALREADY); 7488 } 7489 #endif 7490 #ifdef INET6 7491 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7492 /* Already running -- must stop first */ 7493 return (EALREADY); 7494 } 7495 #endif 7496 #ifdef INET 7497 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7498 SOCK_DGRAM, IPPROTO_UDP, 7499 curthread->td_ucred, curthread))) { 7500 sctp_over_udp_stop(); 7501 return (ret); 7502 } 7503 /* Call the special UDP hook. */ 7504 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7505 sctp_recv_udp_tunneled_packet, 7506 sctp_recv_icmp_tunneled_packet, 7507 NULL))) { 7508 sctp_over_udp_stop(); 7509 return (ret); 7510 } 7511 /* Ok, we have a socket, bind it to the port. 
void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}

int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}

/* Record the path MTU for this peer address in the TCP host cache. */
void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

/* Look up a cached path MTU for this peer address; returns 0 if unknown. */
uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}
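
/*
 * sctp_set_state() replaces the primary state of an association while
 * preserving its substate flags; sctp_add_substate() below only ORs
 * additional substate flags into the existing state.  With KDTRACE_HOOKS
 * configured, both fire the state-change DTrace probe for transitions of
 * interest.
 */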
void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	    (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
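
/*
 * The SCTP_PROBE6(state__change, ...) calls above back the DTrace
 * sctp:::state-change probe (see dtrace_sctp(4)), so association state
 * transitions can be observed from user space, e.g. with an illustrative
 * one-liner such as "dtrace -n sctp:::state-change"; the probe arguments
 * carry the stcb and the previous state.
 */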