1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * a) Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 14 * b) Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the distribution. 17 * 18 * c) Neither the name of Cisco Systems, Inc. nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include <netinet/sctp_os.h> 39 #include <netinet/sctp_pcb.h> 40 #include <netinet/sctputil.h> 41 #include <netinet/sctp_var.h> 42 #include <netinet/sctp_sysctl.h> 43 #ifdef INET6 44 #include <netinet6/sctp6_var.h> 45 #endif 46 #include <netinet/sctp_header.h> 47 #include <netinet/sctp_output.h> 48 #include <netinet/sctp_uio.h> 49 #include <netinet/sctp_timer.h> 50 #include <netinet/sctp_indata.h> 51 #include <netinet/sctp_auth.h> 52 #include <netinet/sctp_asconf.h> 53 #include <netinet/sctp_bsd_addr.h> 54 #include <netinet/sctp_kdtrace.h> 55 #if defined(INET6) || defined(INET) 56 #include <netinet/tcp_var.h> 57 #endif 58 #include <netinet/udp.h> 59 #include <netinet/udp_var.h> 60 #include <sys/proc.h> 61 #ifdef INET6 62 #include <netinet/icmp6.h> 63 #endif 64 65 #ifndef KTR_SCTP 66 #define KTR_SCTP KTR_SUBSYS 67 #endif 68 69 extern const struct sctp_cc_functions sctp_cc_functions[]; 70 extern const struct sctp_ss_functions sctp_ss_functions[]; 71 72 void 73 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr) 74 { 75 #if defined(SCTP_LOCAL_TRACE_BUF) 76 struct sctp_cwnd_log sctp_clog; 77 78 sctp_clog.x.sb.stcb = stcb; 79 sctp_clog.x.sb.so_sbcc = sb->sb_cc; 80 if (stcb) 81 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc; 82 else 83 sctp_clog.x.sb.stcb_sbcc = 0; 84 sctp_clog.x.sb.incr = incr; 85 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 86 SCTP_LOG_EVENT_SB, 87 from, 88 sctp_clog.x.misc.log1, 89 sctp_clog.x.misc.log2, 90 sctp_clog.x.misc.log3, 91 sctp_clog.x.misc.log4); 92 #endif 93 } 94 95 void 96 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc) 97 { 98 #if defined(SCTP_LOCAL_TRACE_BUF) 99 struct sctp_cwnd_log sctp_clog; 100 101 sctp_clog.x.close.inp = (void *)inp; 102 sctp_clog.x.close.sctp_flags = inp->sctp_flags; 103 if (stcb) { 104 sctp_clog.x.close.stcb = (void *)stcb; 105 sctp_clog.x.close.state = (uint16_t)stcb->asoc.state; 106 } else { 107 sctp_clog.x.close.stcb = 0; 108 sctp_clog.x.close.state = 0; 109 } 110 sctp_clog.x.close.loc = loc; 111 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 112 SCTP_LOG_EVENT_CLOSE, 113 0, 114 sctp_clog.x.misc.log1, 115 sctp_clog.x.misc.log2, 116 sctp_clog.x.misc.log3, 117 sctp_clog.x.misc.log4); 118 #endif 119 } 120 121 void 122 rto_logging(struct sctp_nets *net, int from) 123 { 124 #if defined(SCTP_LOCAL_TRACE_BUF) 125 struct sctp_cwnd_log sctp_clog; 126 127 memset(&sctp_clog, 0, sizeof(sctp_clog)); 128 sctp_clog.x.rto.net = (void *)net; 129 sctp_clog.x.rto.rtt = net->rtt / 1000; 130 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 131 SCTP_LOG_EVENT_RTT, 132 from, 133 sctp_clog.x.misc.log1, 134 sctp_clog.x.misc.log2, 135 sctp_clog.x.misc.log3, 136 sctp_clog.x.misc.log4); 137 #endif 138 } 139 140 void 141 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from) 142 { 143 #if defined(SCTP_LOCAL_TRACE_BUF) 144 struct sctp_cwnd_log sctp_clog; 145 146 sctp_clog.x.strlog.stcb = stcb; 147 sctp_clog.x.strlog.n_tsn = tsn; 148 sctp_clog.x.strlog.n_sseq = sseq; 149 sctp_clog.x.strlog.e_tsn = 0; 150 sctp_clog.x.strlog.e_sseq = 0; 151 sctp_clog.x.strlog.strm = stream; 152 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 153 SCTP_LOG_EVENT_STRM, 154 from, 155 sctp_clog.x.misc.log1, 156 sctp_clog.x.misc.log2, 157 sctp_clog.x.misc.log3, 158 sctp_clog.x.misc.log4); 159 #endif 160 } 161 162 void 163 sctp_log_nagle_event(struct sctp_tcb *stcb, int action) 164 { 165 #if defined(SCTP_LOCAL_TRACE_BUF) 166 struct 
sctp_cwnd_log sctp_clog; 167 168 sctp_clog.x.nagle.stcb = (void *)stcb; 169 sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight; 170 sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size; 171 sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue; 172 sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count; 173 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 174 SCTP_LOG_EVENT_NAGLE, 175 action, 176 sctp_clog.x.misc.log1, 177 sctp_clog.x.misc.log2, 178 sctp_clog.x.misc.log3, 179 sctp_clog.x.misc.log4); 180 #endif 181 } 182 183 void 184 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from) 185 { 186 #if defined(SCTP_LOCAL_TRACE_BUF) 187 struct sctp_cwnd_log sctp_clog; 188 189 sctp_clog.x.sack.cumack = cumack; 190 sctp_clog.x.sack.oldcumack = old_cumack; 191 sctp_clog.x.sack.tsn = tsn; 192 sctp_clog.x.sack.numGaps = gaps; 193 sctp_clog.x.sack.numDups = dups; 194 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 195 SCTP_LOG_EVENT_SACK, 196 from, 197 sctp_clog.x.misc.log1, 198 sctp_clog.x.misc.log2, 199 sctp_clog.x.misc.log3, 200 sctp_clog.x.misc.log4); 201 #endif 202 } 203 204 void 205 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from) 206 { 207 #if defined(SCTP_LOCAL_TRACE_BUF) 208 struct sctp_cwnd_log sctp_clog; 209 210 memset(&sctp_clog, 0, sizeof(sctp_clog)); 211 sctp_clog.x.map.base = map; 212 sctp_clog.x.map.cum = cum; 213 sctp_clog.x.map.high = high; 214 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 215 SCTP_LOG_EVENT_MAP, 216 from, 217 sctp_clog.x.misc.log1, 218 sctp_clog.x.misc.log2, 219 sctp_clog.x.misc.log3, 220 sctp_clog.x.misc.log4); 221 #endif 222 } 223 224 void 225 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from) 226 { 227 #if defined(SCTP_LOCAL_TRACE_BUF) 228 struct sctp_cwnd_log sctp_clog; 229 230 memset(&sctp_clog, 0, sizeof(sctp_clog)); 231 sctp_clog.x.fr.largest_tsn = biggest_tsn; 232 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn; 233 sctp_clog.x.fr.tsn = tsn; 234 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 235 SCTP_LOG_EVENT_FR, 236 from, 237 sctp_clog.x.misc.log1, 238 sctp_clog.x.misc.log2, 239 sctp_clog.x.misc.log3, 240 sctp_clog.x.misc.log4); 241 #endif 242 } 243 244 #ifdef SCTP_MBUF_LOGGING 245 void 246 sctp_log_mb(struct mbuf *m, int from) 247 { 248 #if defined(SCTP_LOCAL_TRACE_BUF) 249 struct sctp_cwnd_log sctp_clog; 250 251 sctp_clog.x.mb.mp = m; 252 sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m)); 253 sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m)); 254 sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0); 255 if (SCTP_BUF_IS_EXTENDED(m)) { 256 sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m); 257 sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m)); 258 } else { 259 sctp_clog.x.mb.ext = 0; 260 sctp_clog.x.mb.refcnt = 0; 261 } 262 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 263 SCTP_LOG_EVENT_MBUF, 264 from, 265 sctp_clog.x.misc.log1, 266 sctp_clog.x.misc.log2, 267 sctp_clog.x.misc.log3, 268 sctp_clog.x.misc.log4); 269 #endif 270 } 271 272 void 273 sctp_log_mbc(struct mbuf *m, int from) 274 { 275 struct mbuf *mat; 276 277 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) { 278 sctp_log_mb(mat, from); 279 } 280 } 281 #endif 282 283 void 284 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from) 285 { 286 #if defined(SCTP_LOCAL_TRACE_BUF) 287 struct sctp_cwnd_log sctp_clog; 288 289 if (control == NULL) { 290 SCTP_PRINTF("Gak log of NULL?\n"); 291 return; 292 } 293 
sctp_clog.x.strlog.stcb = control->stcb; 294 sctp_clog.x.strlog.n_tsn = control->sinfo_tsn; 295 sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid; 296 sctp_clog.x.strlog.strm = control->sinfo_stream; 297 if (poschk != NULL) { 298 sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn; 299 sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid; 300 } else { 301 sctp_clog.x.strlog.e_tsn = 0; 302 sctp_clog.x.strlog.e_sseq = 0; 303 } 304 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 305 SCTP_LOG_EVENT_STRM, 306 from, 307 sctp_clog.x.misc.log1, 308 sctp_clog.x.misc.log2, 309 sctp_clog.x.misc.log3, 310 sctp_clog.x.misc.log4); 311 #endif 312 } 313 314 void 315 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from) 316 { 317 #if defined(SCTP_LOCAL_TRACE_BUF) 318 struct sctp_cwnd_log sctp_clog; 319 320 sctp_clog.x.cwnd.net = net; 321 if (stcb->asoc.send_queue_cnt > 255) 322 sctp_clog.x.cwnd.cnt_in_send = 255; 323 else 324 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 325 if (stcb->asoc.stream_queue_cnt > 255) 326 sctp_clog.x.cwnd.cnt_in_str = 255; 327 else 328 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 329 330 if (net) { 331 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd; 332 sctp_clog.x.cwnd.inflight = net->flight_size; 333 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack; 334 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack; 335 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack; 336 } 337 if (SCTP_CWNDLOG_PRESEND == from) { 338 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd; 339 } 340 sctp_clog.x.cwnd.cwnd_augment = augment; 341 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 342 SCTP_LOG_EVENT_CWND, 343 from, 344 sctp_clog.x.misc.log1, 345 sctp_clog.x.misc.log2, 346 sctp_clog.x.misc.log3, 347 sctp_clog.x.misc.log4); 348 #endif 349 } 350 351 void 352 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from) 353 { 354 #if defined(SCTP_LOCAL_TRACE_BUF) 355 struct sctp_cwnd_log sctp_clog; 356 357 memset(&sctp_clog, 0, sizeof(sctp_clog)); 358 if (inp) { 359 sctp_clog.x.lock.sock = (void *)inp->sctp_socket; 360 361 } else { 362 sctp_clog.x.lock.sock = (void *)NULL; 363 } 364 sctp_clog.x.lock.inp = (void *)inp; 365 if (stcb) { 366 sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx); 367 } else { 368 sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN; 369 } 370 if (inp) { 371 sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx); 372 sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx); 373 } else { 374 sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN; 375 sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN; 376 } 377 sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx)); 378 if (inp && (inp->sctp_socket)) { 379 sctp_clog.x.lock.sock_lock = mtx_owned(SOCK_MTX(inp->sctp_socket)); 380 sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_rcv)); 381 sctp_clog.x.lock.socksndbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_snd)); 382 } else { 383 sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN; 384 sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN; 385 sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN; 386 } 387 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 388 SCTP_LOG_LOCK_EVENT, 389 from, 390 sctp_clog.x.misc.log1, 391 sctp_clog.x.misc.log2, 392 sctp_clog.x.misc.log3, 393 sctp_clog.x.misc.log4); 394 #endif 395 } 396 397 void 398 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from) 399 { 400 #if 
defined(SCTP_LOCAL_TRACE_BUF) 401 struct sctp_cwnd_log sctp_clog; 402 403 memset(&sctp_clog, 0, sizeof(sctp_clog)); 404 sctp_clog.x.cwnd.net = net; 405 sctp_clog.x.cwnd.cwnd_new_value = error; 406 sctp_clog.x.cwnd.inflight = net->flight_size; 407 sctp_clog.x.cwnd.cwnd_augment = burst; 408 if (stcb->asoc.send_queue_cnt > 255) 409 sctp_clog.x.cwnd.cnt_in_send = 255; 410 else 411 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 412 if (stcb->asoc.stream_queue_cnt > 255) 413 sctp_clog.x.cwnd.cnt_in_str = 255; 414 else 415 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 416 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 417 SCTP_LOG_EVENT_MAXBURST, 418 from, 419 sctp_clog.x.misc.log1, 420 sctp_clog.x.misc.log2, 421 sctp_clog.x.misc.log3, 422 sctp_clog.x.misc.log4); 423 #endif 424 } 425 426 void 427 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead) 428 { 429 #if defined(SCTP_LOCAL_TRACE_BUF) 430 struct sctp_cwnd_log sctp_clog; 431 432 sctp_clog.x.rwnd.rwnd = peers_rwnd; 433 sctp_clog.x.rwnd.send_size = snd_size; 434 sctp_clog.x.rwnd.overhead = overhead; 435 sctp_clog.x.rwnd.new_rwnd = 0; 436 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 437 SCTP_LOG_EVENT_RWND, 438 from, 439 sctp_clog.x.misc.log1, 440 sctp_clog.x.misc.log2, 441 sctp_clog.x.misc.log3, 442 sctp_clog.x.misc.log4); 443 #endif 444 } 445 446 void 447 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval) 448 { 449 #if defined(SCTP_LOCAL_TRACE_BUF) 450 struct sctp_cwnd_log sctp_clog; 451 452 sctp_clog.x.rwnd.rwnd = peers_rwnd; 453 sctp_clog.x.rwnd.send_size = flight_size; 454 sctp_clog.x.rwnd.overhead = overhead; 455 sctp_clog.x.rwnd.new_rwnd = a_rwndval; 456 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 457 SCTP_LOG_EVENT_RWND, 458 from, 459 sctp_clog.x.misc.log1, 460 sctp_clog.x.misc.log2, 461 sctp_clog.x.misc.log3, 462 sctp_clog.x.misc.log4); 463 #endif 464 } 465 466 #ifdef SCTP_MBCNT_LOGGING 467 static void 468 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt) 469 { 470 #if defined(SCTP_LOCAL_TRACE_BUF) 471 struct sctp_cwnd_log sctp_clog; 472 473 sctp_clog.x.mbcnt.total_queue_size = total_oq; 474 sctp_clog.x.mbcnt.size_change = book; 475 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q; 476 sctp_clog.x.mbcnt.mbcnt_change = mbcnt; 477 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 478 SCTP_LOG_EVENT_MBCNT, 479 from, 480 sctp_clog.x.misc.log1, 481 sctp_clog.x.misc.log2, 482 sctp_clog.x.misc.log3, 483 sctp_clog.x.misc.log4); 484 #endif 485 } 486 #endif 487 488 void 489 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d) 490 { 491 #if defined(SCTP_LOCAL_TRACE_BUF) 492 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 493 SCTP_LOG_MISC_EVENT, 494 from, 495 a, b, c, d); 496 #endif 497 } 498 499 void 500 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from) 501 { 502 #if defined(SCTP_LOCAL_TRACE_BUF) 503 struct sctp_cwnd_log sctp_clog; 504 505 sctp_clog.x.wake.stcb = (void *)stcb; 506 sctp_clog.x.wake.wake_cnt = wake_cnt; 507 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count; 508 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt; 509 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt; 510 511 if (stcb->asoc.stream_queue_cnt < 0xff) 512 sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt; 513 else 514 sctp_clog.x.wake.stream_qcnt = 0xff; 515 516 if (stcb->asoc.chunks_on_out_queue < 0xff) 517 
sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue; 518 else 519 sctp_clog.x.wake.chunks_on_oque = 0xff; 520 521 sctp_clog.x.wake.sctpflags = 0; 522 /* set in the defered mode stuff */ 523 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) 524 sctp_clog.x.wake.sctpflags |= 1; 525 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) 526 sctp_clog.x.wake.sctpflags |= 2; 527 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) 528 sctp_clog.x.wake.sctpflags |= 4; 529 /* what about the sb */ 530 if (stcb->sctp_socket) { 531 struct socket *so = stcb->sctp_socket; 532 533 sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff)); 534 } else { 535 sctp_clog.x.wake.sbflags = 0xff; 536 } 537 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 538 SCTP_LOG_EVENT_WAKE, 539 from, 540 sctp_clog.x.misc.log1, 541 sctp_clog.x.misc.log2, 542 sctp_clog.x.misc.log3, 543 sctp_clog.x.misc.log4); 544 #endif 545 } 546 547 void 548 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen) 549 { 550 #if defined(SCTP_LOCAL_TRACE_BUF) 551 struct sctp_cwnd_log sctp_clog; 552 553 sctp_clog.x.blk.onsb = asoc->total_output_queue_size; 554 sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt); 555 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd; 556 sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt; 557 sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue; 558 sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024); 559 sctp_clog.x.blk.sndlen = (uint32_t)sendlen; 560 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 561 SCTP_LOG_EVENT_BLOCK, 562 from, 563 sctp_clog.x.misc.log1, 564 sctp_clog.x.misc.log2, 565 sctp_clog.x.misc.log3, 566 sctp_clog.x.misc.log4); 567 #endif 568 } 569 570 int 571 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED) 572 { 573 /* May need to fix this if ktrdump does not work */ 574 return (0); 575 } 576 577 #ifdef SCTP_AUDITING_ENABLED 578 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2]; 579 static int sctp_audit_indx = 0; 580 581 static 582 void 583 sctp_print_audit_report(void) 584 { 585 int i; 586 int cnt; 587 588 cnt = 0; 589 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) { 590 if ((sctp_audit_data[i][0] == 0xe0) && 591 (sctp_audit_data[i][1] == 0x01)) { 592 cnt = 0; 593 SCTP_PRINTF("\n"); 594 } else if (sctp_audit_data[i][0] == 0xf0) { 595 cnt = 0; 596 SCTP_PRINTF("\n"); 597 } else if ((sctp_audit_data[i][0] == 0xc0) && 598 (sctp_audit_data[i][1] == 0x01)) { 599 SCTP_PRINTF("\n"); 600 cnt = 0; 601 } 602 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0], 603 (uint32_t)sctp_audit_data[i][1]); 604 cnt++; 605 if ((cnt % 14) == 0) 606 SCTP_PRINTF("\n"); 607 } 608 for (i = 0; i < sctp_audit_indx; i++) { 609 if ((sctp_audit_data[i][0] == 0xe0) && 610 (sctp_audit_data[i][1] == 0x01)) { 611 cnt = 0; 612 SCTP_PRINTF("\n"); 613 } else if (sctp_audit_data[i][0] == 0xf0) { 614 cnt = 0; 615 SCTP_PRINTF("\n"); 616 } else if ((sctp_audit_data[i][0] == 0xc0) && 617 (sctp_audit_data[i][1] == 0x01)) { 618 SCTP_PRINTF("\n"); 619 cnt = 0; 620 } 621 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0], 622 (uint32_t)sctp_audit_data[i][1]); 623 cnt++; 624 if ((cnt % 14) == 0) 625 SCTP_PRINTF("\n"); 626 } 627 SCTP_PRINTF("\n"); 628 } 629 630 void 631 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 632 struct sctp_nets *net) 633 { 634 int resend_cnt, tot_out, rep, tot_book_cnt; 635 struct sctp_nets *lnet; 636 struct 
sctp_tmit_chunk *chk; 637 638 sctp_audit_data[sctp_audit_indx][0] = 0xAA; 639 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from; 640 sctp_audit_indx++; 641 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 642 sctp_audit_indx = 0; 643 } 644 if (inp == NULL) { 645 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 646 sctp_audit_data[sctp_audit_indx][1] = 0x01; 647 sctp_audit_indx++; 648 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 649 sctp_audit_indx = 0; 650 } 651 return; 652 } 653 if (stcb == NULL) { 654 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 655 sctp_audit_data[sctp_audit_indx][1] = 0x02; 656 sctp_audit_indx++; 657 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 658 sctp_audit_indx = 0; 659 } 660 return; 661 } 662 sctp_audit_data[sctp_audit_indx][0] = 0xA1; 663 sctp_audit_data[sctp_audit_indx][1] = 664 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 665 sctp_audit_indx++; 666 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 667 sctp_audit_indx = 0; 668 } 669 rep = 0; 670 tot_book_cnt = 0; 671 resend_cnt = tot_out = 0; 672 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 673 if (chk->sent == SCTP_DATAGRAM_RESEND) { 674 resend_cnt++; 675 } else if (chk->sent < SCTP_DATAGRAM_RESEND) { 676 tot_out += chk->book_size; 677 tot_book_cnt++; 678 } 679 } 680 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) { 681 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 682 sctp_audit_data[sctp_audit_indx][1] = 0xA1; 683 sctp_audit_indx++; 684 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 685 sctp_audit_indx = 0; 686 } 687 SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n", 688 resend_cnt, stcb->asoc.sent_queue_retran_cnt); 689 rep = 1; 690 stcb->asoc.sent_queue_retran_cnt = resend_cnt; 691 sctp_audit_data[sctp_audit_indx][0] = 0xA2; 692 sctp_audit_data[sctp_audit_indx][1] = 693 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 694 sctp_audit_indx++; 695 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 696 sctp_audit_indx = 0; 697 } 698 } 699 if (tot_out != stcb->asoc.total_flight) { 700 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 701 sctp_audit_data[sctp_audit_indx][1] = 0xA2; 702 sctp_audit_indx++; 703 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 704 sctp_audit_indx = 0; 705 } 706 rep = 1; 707 SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out, 708 (int)stcb->asoc.total_flight); 709 stcb->asoc.total_flight = tot_out; 710 } 711 if (tot_book_cnt != stcb->asoc.total_flight_count) { 712 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 713 sctp_audit_data[sctp_audit_indx][1] = 0xA5; 714 sctp_audit_indx++; 715 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 716 sctp_audit_indx = 0; 717 } 718 rep = 1; 719 SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt); 720 721 stcb->asoc.total_flight_count = tot_book_cnt; 722 } 723 tot_out = 0; 724 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 725 tot_out += lnet->flight_size; 726 } 727 if (tot_out != stcb->asoc.total_flight) { 728 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 729 sctp_audit_data[sctp_audit_indx][1] = 0xA3; 730 sctp_audit_indx++; 731 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 732 sctp_audit_indx = 0; 733 } 734 rep = 1; 735 SCTP_PRINTF("real flight:%d net total was %d\n", 736 stcb->asoc.total_flight, tot_out); 737 /* now corrective action */ 738 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 739 tot_out = 0; 740 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 741 if ((chk->whoTo == lnet) && 742 (chk->sent < SCTP_DATAGRAM_RESEND)) { 743 tot_out += chk->book_size; 744 } 745 } 746 if (lnet->flight_size != tot_out) { 747 SCTP_PRINTF("net:%p flight was %d corrected to %d\n", 748 (void *)lnet, 
lnet->flight_size, 749 tot_out); 750 lnet->flight_size = tot_out; 751 } 752 } 753 } 754 if (rep) { 755 sctp_print_audit_report(); 756 } 757 } 758 759 void 760 sctp_audit_log(uint8_t ev, uint8_t fd) 761 { 762 763 sctp_audit_data[sctp_audit_indx][0] = ev; 764 sctp_audit_data[sctp_audit_indx][1] = fd; 765 sctp_audit_indx++; 766 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 767 sctp_audit_indx = 0; 768 } 769 } 770 771 #endif 772 773 /* 774 * The conversion from time to ticks and vice versa is done by rounding 775 * upwards. This way we can test in the code the time to be positive and 776 * know that this corresponds to a positive number of ticks. 777 */ 778 779 uint32_t 780 sctp_msecs_to_ticks(uint32_t msecs) 781 { 782 uint64_t temp; 783 uint32_t ticks; 784 785 if (hz == 1000) { 786 ticks = msecs; 787 } else { 788 temp = (((uint64_t)msecs * hz) + 999) / 1000; 789 if (temp > UINT32_MAX) { 790 ticks = UINT32_MAX; 791 } else { 792 ticks = (uint32_t)temp; 793 } 794 } 795 return (ticks); 796 } 797 798 uint32_t 799 sctp_ticks_to_msecs(uint32_t ticks) 800 { 801 uint64_t temp; 802 uint32_t msecs; 803 804 if (hz == 1000) { 805 msecs = ticks; 806 } else { 807 temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz; 808 if (temp > UINT32_MAX) { 809 msecs = UINT32_MAX; 810 } else { 811 msecs = (uint32_t)temp; 812 } 813 } 814 return (msecs); 815 } 816 817 uint32_t 818 sctp_secs_to_ticks(uint32_t secs) 819 { 820 uint64_t temp; 821 uint32_t ticks; 822 823 temp = (uint64_t)secs * hz; 824 if (temp > UINT32_MAX) { 825 ticks = UINT32_MAX; 826 } else { 827 ticks = (uint32_t)temp; 828 } 829 return (ticks); 830 } 831 832 uint32_t 833 sctp_ticks_to_secs(uint32_t ticks) 834 { 835 uint64_t temp; 836 uint32_t secs; 837 838 temp = ((uint64_t)ticks + (hz - 1)) / hz; 839 if (temp > UINT32_MAX) { 840 secs = UINT32_MAX; 841 } else { 842 secs = (uint32_t)temp; 843 } 844 return (secs); 845 } 846 847 /* 848 * sctp_stop_timers_for_shutdown() should be called 849 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT 850 * state to make sure that all timers are stopped. 
851 */ 852 void 853 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb) 854 { 855 struct sctp_inpcb *inp; 856 struct sctp_nets *net; 857 858 inp = stcb->sctp_ep; 859 860 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL, 861 SCTP_FROM_SCTPUTIL + SCTP_LOC_12); 862 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL, 863 SCTP_FROM_SCTPUTIL + SCTP_LOC_13); 864 sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL, 865 SCTP_FROM_SCTPUTIL + SCTP_LOC_14); 866 sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL, 867 SCTP_FROM_SCTPUTIL + SCTP_LOC_15); 868 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 869 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 870 SCTP_FROM_SCTPUTIL + SCTP_LOC_16); 871 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 872 SCTP_FROM_SCTPUTIL + SCTP_LOC_17); 873 } 874 } 875 876 void 877 sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer) 878 { 879 struct sctp_inpcb *inp; 880 struct sctp_nets *net; 881 882 inp = stcb->sctp_ep; 883 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL, 884 SCTP_FROM_SCTPUTIL + SCTP_LOC_18); 885 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL, 886 SCTP_FROM_SCTPUTIL + SCTP_LOC_19); 887 if (stop_assoc_kill_timer) { 888 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 889 SCTP_FROM_SCTPUTIL + SCTP_LOC_20); 890 } 891 sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL, 892 SCTP_FROM_SCTPUTIL + SCTP_LOC_21); 893 sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL, 894 SCTP_FROM_SCTPUTIL + SCTP_LOC_22); 895 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL, 896 SCTP_FROM_SCTPUTIL + SCTP_LOC_23); 897 /* Mobility adaptation */ 898 sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL, 899 SCTP_FROM_SCTPUTIL + SCTP_LOC_24); 900 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 901 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net, 902 SCTP_FROM_SCTPUTIL + SCTP_LOC_25); 903 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, 904 SCTP_FROM_SCTPUTIL + SCTP_LOC_26); 905 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net, 906 SCTP_FROM_SCTPUTIL + SCTP_LOC_27); 907 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net, 908 SCTP_FROM_SCTPUTIL + SCTP_LOC_28); 909 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net, 910 SCTP_FROM_SCTPUTIL + SCTP_LOC_29); 911 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, 912 SCTP_FROM_SCTPUTIL + SCTP_LOC_30); 913 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 914 SCTP_FROM_SCTPUTIL + SCTP_LOC_31); 915 } 916 } 917 918 /* 919 * A list of sizes based on typical mtu's, used only if next hop size not 920 * returned. These values MUST be multiples of 4 and MUST be ordered. 921 */ 922 static uint32_t sctp_mtu_sizes[] = { 923 68, 924 296, 925 508, 926 512, 927 544, 928 576, 929 1004, 930 1492, 931 1500, 932 1536, 933 2000, 934 2048, 935 4352, 936 4464, 937 8168, 938 17912, 939 32000, 940 65532 941 }; 942 943 /* 944 * Return the largest MTU in sctp_mtu_sizes smaller than val. 945 * If val is smaller than the minimum, just return the largest 946 * multiple of 4 smaller or equal to val. 947 * Ensure that the result is a multiple of 4. 
948 */ 949 uint32_t 950 sctp_get_prev_mtu(uint32_t val) 951 { 952 uint32_t i; 953 954 val &= 0xfffffffc; 955 if (val <= sctp_mtu_sizes[0]) { 956 return (val); 957 } 958 for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) { 959 if (val <= sctp_mtu_sizes[i]) { 960 break; 961 } 962 } 963 KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0, 964 ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1)); 965 return (sctp_mtu_sizes[i - 1]); 966 } 967 968 /* 969 * Return the smallest MTU in sctp_mtu_sizes larger than val. 970 * If val is larger than the maximum, just return the largest multiple of 4 smaller 971 * or equal to val. 972 * Ensure that the result is a multiple of 4. 973 */ 974 uint32_t 975 sctp_get_next_mtu(uint32_t val) 976 { 977 /* select another MTU that is just bigger than this one */ 978 uint32_t i; 979 980 val &= 0xfffffffc; 981 for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) { 982 if (val < sctp_mtu_sizes[i]) { 983 KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0, 984 ("sctp_mtu_sizes[%u] not a multiple of 4", i)); 985 return (sctp_mtu_sizes[i]); 986 } 987 } 988 return (val); 989 } 990 991 void 992 sctp_fill_random_store(struct sctp_pcb *m) 993 { 994 /* 995 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and 996 * our counter. The result becomes our good random numbers and we 997 * then setup to give these out. Note that we do no locking to 998 * protect this. This is ok, since if competing folks call this we 999 * will get more gobbled gook in the random store which is what we 1000 * want. There is a danger that two guys will use the same random 1001 * numbers, but thats ok too since that is random as well :-> 1002 */ 1003 m->store_at = 0; 1004 (void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers, 1005 sizeof(m->random_numbers), (uint8_t *)&m->random_counter, 1006 sizeof(m->random_counter), (uint8_t *)m->random_store); 1007 m->random_counter++; 1008 } 1009 1010 uint32_t 1011 sctp_select_initial_TSN(struct sctp_pcb *inp) 1012 { 1013 /* 1014 * A true implementation should use random selection process to get 1015 * the initial stream sequence number, using RFC1750 as a good 1016 * guideline 1017 */ 1018 uint32_t x, *xp; 1019 uint8_t *p; 1020 int store_at, new_store; 1021 1022 if (inp->initial_sequence_debug != 0) { 1023 uint32_t ret; 1024 1025 ret = inp->initial_sequence_debug; 1026 inp->initial_sequence_debug++; 1027 return (ret); 1028 } 1029 retry: 1030 store_at = inp->store_at; 1031 new_store = store_at + sizeof(uint32_t); 1032 if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) { 1033 new_store = 0; 1034 } 1035 if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) { 1036 goto retry; 1037 } 1038 if (new_store == 0) { 1039 /* Refill the random store */ 1040 sctp_fill_random_store(inp); 1041 } 1042 p = &inp->random_store[store_at]; 1043 xp = (uint32_t *)p; 1044 x = *xp; 1045 return (x); 1046 } 1047 1048 uint32_t 1049 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check) 1050 { 1051 uint32_t x; 1052 struct timeval now; 1053 1054 if (check) { 1055 (void)SCTP_GETTIME_TIMEVAL(&now); 1056 } 1057 for (;;) { 1058 x = sctp_select_initial_TSN(&inp->sctp_ep); 1059 if (x == 0) { 1060 /* we never use 0 */ 1061 continue; 1062 } 1063 if (!check || sctp_is_vtag_good(x, lport, rport, &now)) { 1064 break; 1065 } 1066 } 1067 return (x); 1068 } 1069 1070 int32_t 1071 sctp_map_assoc_state(int kernel_state) 1072 { 1073 int32_t user_state; 1074 1075 if (kernel_state & SCTP_STATE_WAS_ABORTED) { 1076 user_state = SCTP_CLOSED; 1077 } 
else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) { 1078 user_state = SCTP_SHUTDOWN_PENDING; 1079 } else { 1080 switch (kernel_state & SCTP_STATE_MASK) { 1081 case SCTP_STATE_EMPTY: 1082 user_state = SCTP_CLOSED; 1083 break; 1084 case SCTP_STATE_INUSE: 1085 user_state = SCTP_CLOSED; 1086 break; 1087 case SCTP_STATE_COOKIE_WAIT: 1088 user_state = SCTP_COOKIE_WAIT; 1089 break; 1090 case SCTP_STATE_COOKIE_ECHOED: 1091 user_state = SCTP_COOKIE_ECHOED; 1092 break; 1093 case SCTP_STATE_OPEN: 1094 user_state = SCTP_ESTABLISHED; 1095 break; 1096 case SCTP_STATE_SHUTDOWN_SENT: 1097 user_state = SCTP_SHUTDOWN_SENT; 1098 break; 1099 case SCTP_STATE_SHUTDOWN_RECEIVED: 1100 user_state = SCTP_SHUTDOWN_RECEIVED; 1101 break; 1102 case SCTP_STATE_SHUTDOWN_ACK_SENT: 1103 user_state = SCTP_SHUTDOWN_ACK_SENT; 1104 break; 1105 default: 1106 user_state = SCTP_CLOSED; 1107 break; 1108 } 1109 } 1110 return (user_state); 1111 } 1112 1113 int 1114 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1115 uint32_t override_tag, uint32_t initial_tsn, uint32_t vrf_id, 1116 uint16_t o_strms) 1117 { 1118 struct sctp_association *asoc; 1119 1120 /* 1121 * Anything set to zero is taken care of by the allocation routine's 1122 * bzero 1123 */ 1124 1125 /* 1126 * Up front select what scoping to apply on addresses I tell my peer 1127 * Not sure what to do with these right now, we will need to come up 1128 * with a way to set them. We may need to pass them through from the 1129 * caller in the sctp_aloc_assoc() function. 1130 */ 1131 int i; 1132 #if defined(SCTP_DETAILED_STR_STATS) 1133 int j; 1134 #endif 1135 1136 asoc = &stcb->asoc; 1137 /* init all variables to a known value. */ 1138 SCTP_SET_STATE(stcb, SCTP_STATE_INUSE); 1139 asoc->max_burst = inp->sctp_ep.max_burst; 1140 asoc->fr_max_burst = inp->sctp_ep.fr_max_burst; 1141 asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 1142 asoc->cookie_life = inp->sctp_ep.def_cookie_life; 1143 asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off; 1144 asoc->ecn_supported = inp->ecn_supported; 1145 asoc->prsctp_supported = inp->prsctp_supported; 1146 asoc->auth_supported = inp->auth_supported; 1147 asoc->asconf_supported = inp->asconf_supported; 1148 asoc->reconfig_supported = inp->reconfig_supported; 1149 asoc->nrsack_supported = inp->nrsack_supported; 1150 asoc->pktdrop_supported = inp->pktdrop_supported; 1151 asoc->idata_supported = inp->idata_supported; 1152 asoc->sctp_cmt_pf = (uint8_t)0; 1153 asoc->sctp_frag_point = inp->sctp_frag_point; 1154 asoc->sctp_features = inp->sctp_features; 1155 asoc->default_dscp = inp->sctp_ep.default_dscp; 1156 asoc->max_cwnd = inp->max_cwnd; 1157 #ifdef INET6 1158 if (inp->sctp_ep.default_flowlabel) { 1159 asoc->default_flowlabel = inp->sctp_ep.default_flowlabel; 1160 } else { 1161 if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) { 1162 asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep); 1163 asoc->default_flowlabel &= 0x000fffff; 1164 asoc->default_flowlabel |= 0x80000000; 1165 } else { 1166 asoc->default_flowlabel = 0; 1167 } 1168 } 1169 #endif 1170 asoc->sb_send_resv = 0; 1171 if (override_tag) { 1172 asoc->my_vtag = override_tag; 1173 } else { 1174 asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1); 1175 } 1176 /* Get the nonce tags */ 1177 asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 1178 asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 1179 asoc->vrf_id = 
vrf_id; 1180 1181 #ifdef SCTP_ASOCLOG_OF_TSNS 1182 asoc->tsn_in_at = 0; 1183 asoc->tsn_out_at = 0; 1184 asoc->tsn_in_wrapped = 0; 1185 asoc->tsn_out_wrapped = 0; 1186 asoc->cumack_log_at = 0; 1187 asoc->cumack_log_atsnt = 0; 1188 #endif 1189 #ifdef SCTP_FS_SPEC_LOG 1190 asoc->fs_index = 0; 1191 #endif 1192 asoc->refcnt = 0; 1193 asoc->assoc_up_sent = 0; 1194 if (override_tag) { 1195 asoc->init_seq_number = initial_tsn; 1196 } else { 1197 asoc->init_seq_number = sctp_select_initial_TSN(&inp->sctp_ep); 1198 } 1199 asoc->asconf_seq_out = asoc->init_seq_number; 1200 asoc->str_reset_seq_out = asoc->init_seq_number; 1201 asoc->sending_seq = asoc->init_seq_number; 1202 asoc->asconf_seq_out_acked = asoc->init_seq_number - 1; 1203 /* we are optimistic here */ 1204 asoc->peer_supports_nat = 0; 1205 asoc->sent_queue_retran_cnt = 0; 1206 1207 /* for CMT */ 1208 asoc->last_net_cmt_send_started = NULL; 1209 1210 asoc->last_acked_seq = asoc->init_seq_number - 1; 1211 asoc->advanced_peer_ack_point = asoc->init_seq_number - 1; 1212 asoc->asconf_seq_in = asoc->init_seq_number - 1; 1213 1214 /* here we are different, we hold the next one we expect */ 1215 asoc->str_reset_seq_in = asoc->init_seq_number; 1216 1217 asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max; 1218 asoc->initial_rto = inp->sctp_ep.initial_rto; 1219 1220 asoc->default_mtu = inp->sctp_ep.default_mtu; 1221 asoc->max_init_times = inp->sctp_ep.max_init_times; 1222 asoc->max_send_times = inp->sctp_ep.max_send_times; 1223 asoc->def_net_failure = inp->sctp_ep.def_net_failure; 1224 asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold; 1225 asoc->free_chunk_cnt = 0; 1226 1227 asoc->iam_blocking = 0; 1228 asoc->context = inp->sctp_context; 1229 asoc->local_strreset_support = inp->local_strreset_support; 1230 asoc->def_send = inp->def_send; 1231 asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 1232 asoc->sack_freq = inp->sctp_ep.sctp_sack_freq; 1233 asoc->pr_sctp_cnt = 0; 1234 asoc->total_output_queue_size = 0; 1235 1236 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1237 asoc->scope.ipv6_addr_legal = 1; 1238 if (SCTP_IPV6_V6ONLY(inp) == 0) { 1239 asoc->scope.ipv4_addr_legal = 1; 1240 } else { 1241 asoc->scope.ipv4_addr_legal = 0; 1242 } 1243 } else { 1244 asoc->scope.ipv6_addr_legal = 0; 1245 asoc->scope.ipv4_addr_legal = 1; 1246 } 1247 1248 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND); 1249 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket); 1250 1251 asoc->smallest_mtu = 0; 1252 asoc->minrto = inp->sctp_ep.sctp_minrto; 1253 asoc->maxrto = inp->sctp_ep.sctp_maxrto; 1254 1255 asoc->stream_locked_on = 0; 1256 asoc->ecn_echo_cnt_onq = 0; 1257 asoc->stream_locked = 0; 1258 1259 asoc->send_sack = 1; 1260 1261 LIST_INIT(&asoc->sctp_restricted_addrs); 1262 1263 TAILQ_INIT(&asoc->nets); 1264 TAILQ_INIT(&asoc->pending_reply_queue); 1265 TAILQ_INIT(&asoc->asconf_ack_sent); 1266 /* Setup to fill the hb random cache at first HB */ 1267 asoc->hb_random_idx = 4; 1268 1269 asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time; 1270 1271 stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module; 1272 stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module]; 1273 1274 stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module; 1275 stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module]; 1276 1277 /* 1278 * Now the stream parameters, here we allocate space for all streams 1279 * that 
we request by default. 1280 */ 1281 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams = 1282 o_strms; 1283 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *, 1284 asoc->streamoutcnt * sizeof(struct sctp_stream_out), 1285 SCTP_M_STRMO); 1286 if (asoc->strmout == NULL) { 1287 /* big trouble no memory */ 1288 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1289 return (ENOMEM); 1290 } 1291 SCTP_TCB_LOCK(stcb); 1292 for (i = 0; i < asoc->streamoutcnt; i++) { 1293 /* 1294 * inbound side must be set to 0xffff, also NOTE when we get 1295 * the INIT-ACK back (for INIT sender) we MUST reduce the 1296 * count (streamoutcnt) but first check if we sent to any of 1297 * the upper streams that were dropped (if some were). Those 1298 * that were dropped must be notified to the upper layer as 1299 * failed to send. 1300 */ 1301 TAILQ_INIT(&asoc->strmout[i].outqueue); 1302 asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL); 1303 asoc->strmout[i].chunks_on_queues = 0; 1304 #if defined(SCTP_DETAILED_STR_STATS) 1305 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) { 1306 asoc->strmout[i].abandoned_sent[j] = 0; 1307 asoc->strmout[i].abandoned_unsent[j] = 0; 1308 } 1309 #else 1310 asoc->strmout[i].abandoned_sent[0] = 0; 1311 asoc->strmout[i].abandoned_unsent[0] = 0; 1312 #endif 1313 asoc->strmout[i].next_mid_ordered = 0; 1314 asoc->strmout[i].next_mid_unordered = 0; 1315 asoc->strmout[i].sid = i; 1316 asoc->strmout[i].last_msg_incomplete = 0; 1317 asoc->strmout[i].state = SCTP_STREAM_OPENING; 1318 } 1319 asoc->ss_functions.sctp_ss_init(stcb, asoc); 1320 SCTP_TCB_UNLOCK(stcb); 1321 1322 /* Now the mapping array */ 1323 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY; 1324 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size, 1325 SCTP_M_MAP); 1326 if (asoc->mapping_array == NULL) { 1327 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1328 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1329 return (ENOMEM); 1330 } 1331 memset(asoc->mapping_array, 0, asoc->mapping_array_size); 1332 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size, 1333 SCTP_M_MAP); 1334 if (asoc->nr_mapping_array == NULL) { 1335 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1336 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1337 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1338 return (ENOMEM); 1339 } 1340 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size); 1341 1342 /* Now the init of the other outqueues */ 1343 TAILQ_INIT(&asoc->free_chunks); 1344 TAILQ_INIT(&asoc->control_send_queue); 1345 TAILQ_INIT(&asoc->asconf_send_queue); 1346 TAILQ_INIT(&asoc->send_queue); 1347 TAILQ_INIT(&asoc->sent_queue); 1348 TAILQ_INIT(&asoc->resetHead); 1349 asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome; 1350 TAILQ_INIT(&asoc->asconf_queue); 1351 /* authentication fields */ 1352 asoc->authinfo.random = NULL; 1353 asoc->authinfo.active_keyid = 0; 1354 asoc->authinfo.assoc_key = NULL; 1355 asoc->authinfo.assoc_keyid = 0; 1356 asoc->authinfo.recv_key = NULL; 1357 asoc->authinfo.recv_keyid = 0; 1358 LIST_INIT(&asoc->shared_keys); 1359 asoc->marked_retrans = 0; 1360 asoc->port = inp->sctp_ep.port; 1361 asoc->timoinit = 0; 1362 asoc->timodata = 0; 1363 asoc->timosack = 0; 1364 asoc->timoshutdown = 0; 1365 asoc->timoheartbeat = 0; 1366 asoc->timocookie = 0; 1367 asoc->timoshutdownack = 0; 1368 (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time); 1369 asoc->discontinuity_time = asoc->start_time; 1370 for (i = 0; i < 
SCTP_PR_SCTP_MAX + 1; i++) { 1371 asoc->abandoned_unsent[i] = 0; 1372 asoc->abandoned_sent[i] = 0; 1373 } 1374 /* 1375 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and 1376 * freed later when the association is freed. 1377 */ 1378 return (0); 1379 } 1380 1381 void 1382 sctp_print_mapping_array(struct sctp_association *asoc) 1383 { 1384 unsigned int i, limit; 1385 1386 SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n", 1387 asoc->mapping_array_size, 1388 asoc->mapping_array_base_tsn, 1389 asoc->cumulative_tsn, 1390 asoc->highest_tsn_inside_map, 1391 asoc->highest_tsn_inside_nr_map); 1392 for (limit = asoc->mapping_array_size; limit > 1; limit--) { 1393 if (asoc->mapping_array[limit - 1] != 0) { 1394 break; 1395 } 1396 } 1397 SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit); 1398 for (i = 0; i < limit; i++) { 1399 SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n'); 1400 } 1401 if (limit % 16) 1402 SCTP_PRINTF("\n"); 1403 for (limit = asoc->mapping_array_size; limit > 1; limit--) { 1404 if (asoc->nr_mapping_array[limit - 1]) { 1405 break; 1406 } 1407 } 1408 SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit); 1409 for (i = 0; i < limit; i++) { 1410 SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n'); 1411 } 1412 if (limit % 16) 1413 SCTP_PRINTF("\n"); 1414 } 1415 1416 int 1417 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed) 1418 { 1419 /* mapping array needs to grow */ 1420 uint8_t *new_array1, *new_array2; 1421 uint32_t new_size; 1422 1423 new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR); 1424 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP); 1425 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP); 1426 if ((new_array1 == NULL) || (new_array2 == NULL)) { 1427 /* can't get more, forget it */ 1428 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size); 1429 if (new_array1) { 1430 SCTP_FREE(new_array1, SCTP_M_MAP); 1431 } 1432 if (new_array2) { 1433 SCTP_FREE(new_array2, SCTP_M_MAP); 1434 } 1435 return (-1); 1436 } 1437 memset(new_array1, 0, new_size); 1438 memset(new_array2, 0, new_size); 1439 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size); 1440 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size); 1441 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1442 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP); 1443 asoc->mapping_array = new_array1; 1444 asoc->nr_mapping_array = new_array2; 1445 asoc->mapping_array_size = new_size; 1446 return (0); 1447 } 1448 1449 static void 1450 sctp_iterator_work(struct sctp_iterator *it) 1451 { 1452 struct epoch_tracker et; 1453 struct sctp_inpcb *tinp; 1454 int iteration_count = 0; 1455 int inp_skip = 0; 1456 int first_in = 1; 1457 1458 NET_EPOCH_ENTER(et); 1459 SCTP_INP_INFO_RLOCK(); 1460 SCTP_ITERATOR_LOCK(); 1461 sctp_it_ctl.cur_it = it; 1462 if (it->inp) { 1463 SCTP_INP_RLOCK(it->inp); 1464 SCTP_INP_DECR_REF(it->inp); 1465 } 1466 if (it->inp == NULL) { 1467 /* iterator is complete */ 1468 done_with_iterator: 1469 sctp_it_ctl.cur_it = NULL; 1470 SCTP_ITERATOR_UNLOCK(); 1471 SCTP_INP_INFO_RUNLOCK(); 1472 if (it->function_atend != NULL) { 1473 (*it->function_atend) (it->pointer, it->val); 1474 } 1475 SCTP_FREE(it, SCTP_M_ITER); 1476 NET_EPOCH_EXIT(et); 1477 return; 1478 } 1479 select_a_new_ep: 1480 if (first_in) { 
1481 first_in = 0; 1482 } else { 1483 SCTP_INP_RLOCK(it->inp); 1484 } 1485 while (((it->pcb_flags) && 1486 ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) || 1487 ((it->pcb_features) && 1488 ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) { 1489 /* endpoint flags or features don't match, so keep looking */ 1490 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1491 SCTP_INP_RUNLOCK(it->inp); 1492 goto done_with_iterator; 1493 } 1494 tinp = it->inp; 1495 it->inp = LIST_NEXT(it->inp, sctp_list); 1496 it->stcb = NULL; 1497 SCTP_INP_RUNLOCK(tinp); 1498 if (it->inp == NULL) { 1499 goto done_with_iterator; 1500 } 1501 SCTP_INP_RLOCK(it->inp); 1502 } 1503 /* now go through each assoc which is in the desired state */ 1504 if (it->done_current_ep == 0) { 1505 if (it->function_inp != NULL) 1506 inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val); 1507 it->done_current_ep = 1; 1508 } 1509 if (it->stcb == NULL) { 1510 /* run the per instance function */ 1511 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list); 1512 } 1513 if ((inp_skip) || it->stcb == NULL) { 1514 if (it->function_inp_end != NULL) { 1515 inp_skip = (*it->function_inp_end) (it->inp, 1516 it->pointer, 1517 it->val); 1518 } 1519 SCTP_INP_RUNLOCK(it->inp); 1520 goto no_stcb; 1521 } 1522 while (it->stcb != NULL) { 1523 SCTP_TCB_LOCK(it->stcb); 1524 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) { 1525 /* not in the right state... keep looking */ 1526 SCTP_TCB_UNLOCK(it->stcb); 1527 goto next_assoc; 1528 } 1529 /* see if we have limited out the iterator loop */ 1530 iteration_count++; 1531 if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) { 1532 /* Pause to let others grab the lock */ 1533 atomic_add_int(&it->stcb->asoc.refcnt, 1); 1534 SCTP_TCB_UNLOCK(it->stcb); 1535 SCTP_INP_INCR_REF(it->inp); 1536 SCTP_INP_RUNLOCK(it->inp); 1537 SCTP_ITERATOR_UNLOCK(); 1538 SCTP_INP_INFO_RUNLOCK(); 1539 SCTP_INP_INFO_RLOCK(); 1540 SCTP_ITERATOR_LOCK(); 1541 if (sctp_it_ctl.iterator_flags) { 1542 /* We won't be staying here */ 1543 SCTP_INP_DECR_REF(it->inp); 1544 atomic_subtract_int(&it->stcb->asoc.refcnt, 1); 1545 if (sctp_it_ctl.iterator_flags & 1546 SCTP_ITERATOR_STOP_CUR_IT) { 1547 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT; 1548 goto done_with_iterator; 1549 } 1550 if (sctp_it_ctl.iterator_flags & 1551 SCTP_ITERATOR_STOP_CUR_INP) { 1552 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP; 1553 goto no_stcb; 1554 } 1555 /* If we reach here huh? 
*/ 1556 SCTP_PRINTF("Unknown it ctl flag %x\n", 1557 sctp_it_ctl.iterator_flags); 1558 sctp_it_ctl.iterator_flags = 0; 1559 } 1560 SCTP_INP_RLOCK(it->inp); 1561 SCTP_INP_DECR_REF(it->inp); 1562 SCTP_TCB_LOCK(it->stcb); 1563 atomic_subtract_int(&it->stcb->asoc.refcnt, 1); 1564 iteration_count = 0; 1565 } 1566 KASSERT(it->inp == it->stcb->sctp_ep, 1567 ("%s: stcb %p does not belong to inp %p, but inp %p", 1568 __func__, it->stcb, it->inp, it->stcb->sctp_ep)); 1569 SCTP_INP_RLOCK_ASSERT(it->inp); 1570 SCTP_TCB_LOCK_ASSERT(it->stcb); 1571 1572 /* run function on this one */ 1573 (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val); 1574 SCTP_INP_RLOCK_ASSERT(it->inp); 1575 SCTP_TCB_LOCK_ASSERT(it->stcb); 1576 1577 /* 1578 * we lie here, it really needs to have its own type but 1579 * first I must verify that this won't effect things :-0 1580 */ 1581 if (it->no_chunk_output == 0) { 1582 sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1583 SCTP_INP_RLOCK_ASSERT(it->inp); 1584 SCTP_TCB_LOCK_ASSERT(it->stcb); 1585 } 1586 1587 SCTP_TCB_UNLOCK(it->stcb); 1588 next_assoc: 1589 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist); 1590 if (it->stcb == NULL) { 1591 /* Run last function */ 1592 if (it->function_inp_end != NULL) { 1593 inp_skip = (*it->function_inp_end) (it->inp, 1594 it->pointer, 1595 it->val); 1596 } 1597 } 1598 } 1599 SCTP_INP_RUNLOCK(it->inp); 1600 no_stcb: 1601 /* done with all assocs on this endpoint, move on to next endpoint */ 1602 it->done_current_ep = 0; 1603 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1604 it->inp = NULL; 1605 } else { 1606 it->inp = LIST_NEXT(it->inp, sctp_list); 1607 } 1608 it->stcb = NULL; 1609 if (it->inp == NULL) { 1610 goto done_with_iterator; 1611 } 1612 goto select_a_new_ep; 1613 } 1614 1615 void 1616 sctp_iterator_worker(void) 1617 { 1618 struct sctp_iterator *it; 1619 1620 /* This function is called with the WQ lock in place */ 1621 sctp_it_ctl.iterator_running = 1; 1622 while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) { 1623 /* now lets work on this one */ 1624 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr); 1625 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1626 CURVNET_SET(it->vn); 1627 sctp_iterator_work(it); 1628 CURVNET_RESTORE(); 1629 SCTP_IPI_ITERATOR_WQ_LOCK(); 1630 /* sa_ignore FREED_MEMORY */ 1631 } 1632 sctp_it_ctl.iterator_running = 0; 1633 return; 1634 } 1635 1636 static void 1637 sctp_handle_addr_wq(void) 1638 { 1639 /* deal with the ADDR wq from the rtsock calls */ 1640 struct sctp_laddr *wi, *nwi; 1641 struct sctp_asconf_iterator *asc; 1642 1643 SCTP_MALLOC(asc, struct sctp_asconf_iterator *, 1644 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT); 1645 if (asc == NULL) { 1646 /* Try later, no memory */ 1647 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 1648 (struct sctp_inpcb *)NULL, 1649 (struct sctp_tcb *)NULL, 1650 (struct sctp_nets *)NULL); 1651 return; 1652 } 1653 LIST_INIT(&asc->list_of_work); 1654 asc->cnt = 0; 1655 1656 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) { 1657 LIST_REMOVE(wi, sctp_nxt_addr); 1658 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr); 1659 asc->cnt++; 1660 } 1661 1662 if (asc->cnt == 0) { 1663 SCTP_FREE(asc, SCTP_M_ASC_IT); 1664 } else { 1665 int ret; 1666 1667 ret = sctp_initiate_iterator(sctp_asconf_iterator_ep, 1668 sctp_asconf_iterator_stcb, 1669 NULL, /* No ep end for boundall */ 1670 SCTP_PCB_FLAGS_BOUNDALL, 1671 SCTP_PCB_ANY_FEATURES, 1672 SCTP_ASOC_ANY_STATE, 1673 (void *)asc, 0, 1674 sctp_asconf_iterator_end, 
NULL, 0); 1675 if (ret) { 1676 SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n"); 1677 /* 1678 * Freeing if we are stopping or put back on the 1679 * addr_wq. 1680 */ 1681 if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) { 1682 sctp_asconf_iterator_end(asc, 0); 1683 } else { 1684 LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) { 1685 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 1686 } 1687 SCTP_FREE(asc, SCTP_M_ASC_IT); 1688 } 1689 } 1690 } 1691 } 1692 1693 /*- 1694 * The following table shows which pointers for the inp, stcb, or net are 1695 * stored for each timer after it was started. 1696 * 1697 *|Name |Timer |inp |stcb|net | 1698 *|-----------------------------|-----------------------------|----|----|----| 1699 *|SCTP_TIMER_TYPE_SEND |net->rxt_timer |Yes |Yes |Yes | 1700 *|SCTP_TIMER_TYPE_INIT |net->rxt_timer |Yes |Yes |Yes | 1701 *|SCTP_TIMER_TYPE_RECV |stcb->asoc.dack_timer |Yes |Yes |No | 1702 *|SCTP_TIMER_TYPE_SHUTDOWN |net->rxt_timer |Yes |Yes |Yes | 1703 *|SCTP_TIMER_TYPE_HEARTBEAT |net->hb_timer |Yes |Yes |Yes | 1704 *|SCTP_TIMER_TYPE_COOKIE |net->rxt_timer |Yes |Yes |Yes | 1705 *|SCTP_TIMER_TYPE_NEWCOOKIE |inp->sctp_ep.signature_change|Yes |No |No | 1706 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer |Yes |Yes |Yes | 1707 *|SCTP_TIMER_TYPE_SHUTDOWNACK |net->rxt_timer |Yes |Yes |Yes | 1708 *|SCTP_TIMER_TYPE_ASCONF |stcb->asoc.asconf_timer |Yes |Yes |Yes | 1709 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer |Yes |Yes |No | 1710 *|SCTP_TIMER_TYPE_AUTOCLOSE |stcb->asoc.autoclose_timer |Yes |Yes |No | 1711 *|SCTP_TIMER_TYPE_STRRESET |stcb->asoc.strreset_timer |Yes |Yes |No | 1712 *|SCTP_TIMER_TYPE_INPKILL |inp->sctp_ep.signature_change|Yes |No |No | 1713 *|SCTP_TIMER_TYPE_ASOCKILL |stcb->asoc.strreset_timer |Yes |Yes |No | 1714 *|SCTP_TIMER_TYPE_ADDR_WQ |SCTP_BASE_INFO(addr_wq_timer)|No |No |No | 1715 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No | 1716 */ 1717 1718 void 1719 sctp_timeout_handler(void *t) 1720 { 1721 struct epoch_tracker et; 1722 struct timeval tv; 1723 struct sctp_inpcb *inp; 1724 struct sctp_tcb *stcb; 1725 struct sctp_nets *net; 1726 struct sctp_timer *tmr; 1727 struct mbuf *op_err; 1728 int type; 1729 int i, secret; 1730 bool did_output, released_asoc_reference; 1731 1732 /* 1733 * If inp, stcb or net are not NULL, then references to these were 1734 * added when the timer was started, and must be released before 1735 * this function returns. 1736 */ 1737 tmr = (struct sctp_timer *)t; 1738 inp = (struct sctp_inpcb *)tmr->ep; 1739 stcb = (struct sctp_tcb *)tmr->tcb; 1740 net = (struct sctp_nets *)tmr->net; 1741 CURVNET_SET((struct vnet *)tmr->vnet); 1742 NET_EPOCH_ENTER(et); 1743 released_asoc_reference = false; 1744 1745 #ifdef SCTP_AUDITING_ENABLED 1746 sctp_audit_log(0xF0, (uint8_t)tmr->type); 1747 sctp_auditing(3, inp, stcb, net); 1748 #endif 1749 1750 /* sanity checks... 
*/ 1751 KASSERT(tmr->self == NULL || tmr->self == tmr, 1752 ("sctp_timeout_handler: tmr->self corrupted")); 1753 KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type), 1754 ("sctp_timeout_handler: invalid timer type %d", tmr->type)); 1755 type = tmr->type; 1756 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 1757 ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p", 1758 type, stcb, stcb->sctp_ep)); 1759 tmr->stopped_from = 0xa001; 1760 if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) { 1761 SCTPDBG(SCTP_DEBUG_TIMER2, 1762 "Timer type %d handler exiting due to CLOSED association.\n", 1763 type); 1764 goto out_decr; 1765 } 1766 tmr->stopped_from = 0xa002; 1767 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type); 1768 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1769 SCTPDBG(SCTP_DEBUG_TIMER2, 1770 "Timer type %d handler exiting due to not being active.\n", 1771 type); 1772 goto out_decr; 1773 } 1774 1775 tmr->stopped_from = 0xa003; 1776 if (stcb) { 1777 SCTP_TCB_LOCK(stcb); 1778 /* 1779 * Release reference so that association can be freed if 1780 * necessary below. This is safe now that we have acquired 1781 * the lock. 1782 */ 1783 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1784 released_asoc_reference = true; 1785 if ((type != SCTP_TIMER_TYPE_ASOCKILL) && 1786 ((stcb->asoc.state == SCTP_STATE_EMPTY) || 1787 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) { 1788 SCTPDBG(SCTP_DEBUG_TIMER2, 1789 "Timer type %d handler exiting due to CLOSED association.\n", 1790 type); 1791 goto out; 1792 } 1793 } else if (inp != NULL) { 1794 SCTP_INP_WLOCK(inp); 1795 } else { 1796 SCTP_WQ_ADDR_LOCK(); 1797 } 1798 1799 /* Record in stopped_from which timeout occurred. */ 1800 tmr->stopped_from = type; 1801 /* mark as being serviced now */ 1802 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 1803 /* 1804 * Callout has been rescheduled. 1805 */ 1806 goto out; 1807 } 1808 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1809 /* 1810 * Not active, so no action. 1811 */ 1812 goto out; 1813 } 1814 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer); 1815 1816 /* call the handler for the appropriate timer type */ 1817 switch (type) { 1818 case SCTP_TIMER_TYPE_SEND: 1819 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1820 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1821 type, inp, stcb, net)); 1822 SCTP_STAT_INCR(sctps_timodata); 1823 stcb->asoc.timodata++; 1824 stcb->asoc.num_send_timers_up--; 1825 if (stcb->asoc.num_send_timers_up < 0) { 1826 stcb->asoc.num_send_timers_up = 0; 1827 } 1828 SCTP_TCB_LOCK_ASSERT(stcb); 1829 if (sctp_t3rxt_timer(inp, stcb, net)) { 1830 /* no need to unlock on tcb its gone */ 1831 1832 goto out_decr; 1833 } 1834 SCTP_TCB_LOCK_ASSERT(stcb); 1835 #ifdef SCTP_AUDITING_ENABLED 1836 sctp_auditing(4, inp, stcb, net); 1837 #endif 1838 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1839 did_output = true; 1840 if ((stcb->asoc.num_send_timers_up == 0) && 1841 (stcb->asoc.sent_queue_cnt > 0)) { 1842 struct sctp_tmit_chunk *chk; 1843 1844 /* 1845 * Safeguard. If there on some on the sent queue 1846 * somewhere but no timers running something is 1847 * wrong... so we start a timer on the first chunk 1848 * on the send queue on whatever net it is sent to. 
1849 */ 1850 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1851 if (chk->whoTo != NULL) { 1852 break; 1853 } 1854 } 1855 if (chk != NULL) { 1856 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1857 } 1858 } 1859 break; 1860 case SCTP_TIMER_TYPE_INIT: 1861 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1862 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1863 type, inp, stcb, net)); 1864 SCTP_STAT_INCR(sctps_timoinit); 1865 stcb->asoc.timoinit++; 1866 if (sctp_t1init_timer(inp, stcb, net)) { 1867 /* no need to unlock on tcb its gone */ 1868 goto out_decr; 1869 } 1870 did_output = false; 1871 break; 1872 case SCTP_TIMER_TYPE_RECV: 1873 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1874 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1875 type, inp, stcb, net)); 1876 SCTP_STAT_INCR(sctps_timosack); 1877 stcb->asoc.timosack++; 1878 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1879 #ifdef SCTP_AUDITING_ENABLED 1880 sctp_auditing(4, inp, stcb, NULL); 1881 #endif 1882 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1883 did_output = true; 1884 break; 1885 case SCTP_TIMER_TYPE_SHUTDOWN: 1886 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1887 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1888 type, inp, stcb, net)); 1889 SCTP_STAT_INCR(sctps_timoshutdown); 1890 stcb->asoc.timoshutdown++; 1891 if (sctp_shutdown_timer(inp, stcb, net)) { 1892 /* no need to unlock on tcb its gone */ 1893 goto out_decr; 1894 } 1895 #ifdef SCTP_AUDITING_ENABLED 1896 sctp_auditing(4, inp, stcb, net); 1897 #endif 1898 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1899 did_output = true; 1900 break; 1901 case SCTP_TIMER_TYPE_HEARTBEAT: 1902 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1903 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1904 type, inp, stcb, net)); 1905 SCTP_STAT_INCR(sctps_timoheartbeat); 1906 stcb->asoc.timoheartbeat++; 1907 if (sctp_heartbeat_timer(inp, stcb, net)) { 1908 /* no need to unlock on tcb its gone */ 1909 goto out_decr; 1910 } 1911 #ifdef SCTP_AUDITING_ENABLED 1912 sctp_auditing(4, inp, stcb, net); 1913 #endif 1914 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1915 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1916 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1917 did_output = true; 1918 } else { 1919 did_output = false; 1920 } 1921 break; 1922 case SCTP_TIMER_TYPE_COOKIE: 1923 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1924 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1925 type, inp, stcb, net)); 1926 SCTP_STAT_INCR(sctps_timocookie); 1927 stcb->asoc.timocookie++; 1928 if (sctp_cookie_timer(inp, stcb, net)) { 1929 /* no need to unlock on tcb its gone */ 1930 goto out_decr; 1931 } 1932 #ifdef SCTP_AUDITING_ENABLED 1933 sctp_auditing(4, inp, stcb, net); 1934 #endif 1935 /* 1936 * We consider T3 and Cookie timer pretty much the same with 1937 * respect to where from in chunk_output. 
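 * (Both pass SCTP_OUTPUT_FROM_T3 to sctp_chunk_output() below.)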
1938 */ 1939 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1940 did_output = true; 1941 break; 1942 case SCTP_TIMER_TYPE_NEWCOOKIE: 1943 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1944 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1945 type, inp, stcb, net)); 1946 SCTP_STAT_INCR(sctps_timosecret); 1947 (void)SCTP_GETTIME_TIMEVAL(&tv); 1948 inp->sctp_ep.time_of_secret_change = (unsigned int)tv.tv_sec; 1949 inp->sctp_ep.last_secret_number = 1950 inp->sctp_ep.current_secret_number; 1951 inp->sctp_ep.current_secret_number++; 1952 if (inp->sctp_ep.current_secret_number >= 1953 SCTP_HOW_MANY_SECRETS) { 1954 inp->sctp_ep.current_secret_number = 0; 1955 } 1956 secret = (int)inp->sctp_ep.current_secret_number; 1957 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1958 inp->sctp_ep.secret_key[secret][i] = 1959 sctp_select_initial_TSN(&inp->sctp_ep); 1960 } 1961 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1962 did_output = false; 1963 break; 1964 case SCTP_TIMER_TYPE_PATHMTURAISE: 1965 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1966 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1967 type, inp, stcb, net)); 1968 SCTP_STAT_INCR(sctps_timopathmtu); 1969 sctp_pathmtu_timer(inp, stcb, net); 1970 did_output = false; 1971 break; 1972 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1973 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1974 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1975 type, inp, stcb, net)); 1976 if (sctp_shutdownack_timer(inp, stcb, net)) { 1977 /* no need to unlock on tcb its gone */ 1978 goto out_decr; 1979 } 1980 SCTP_STAT_INCR(sctps_timoshutdownack); 1981 stcb->asoc.timoshutdownack++; 1982 #ifdef SCTP_AUDITING_ENABLED 1983 sctp_auditing(4, inp, stcb, net); 1984 #endif 1985 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1986 did_output = true; 1987 break; 1988 case SCTP_TIMER_TYPE_ASCONF: 1989 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1990 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1991 type, inp, stcb, net)); 1992 SCTP_STAT_INCR(sctps_timoasconf); 1993 if (sctp_asconf_timer(inp, stcb, net)) { 1994 /* no need to unlock on tcb its gone */ 1995 goto out_decr; 1996 } 1997 #ifdef SCTP_AUDITING_ENABLED 1998 sctp_auditing(4, inp, stcb, net); 1999 #endif 2000 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 2001 did_output = true; 2002 break; 2003 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2004 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2005 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2006 type, inp, stcb, net)); 2007 SCTP_STAT_INCR(sctps_timoshutdownguard); 2008 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 2009 "Shutdown guard timer expired"); 2010 sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED); 2011 /* no need to unlock on tcb its gone */ 2012 goto out_decr; 2013 case SCTP_TIMER_TYPE_AUTOCLOSE: 2014 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2015 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2016 type, inp, stcb, net)); 2017 SCTP_STAT_INCR(sctps_timoautoclose); 2018 sctp_autoclose_timer(inp, stcb); 2019 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2020 did_output = true; 2021 break; 2022 case SCTP_TIMER_TYPE_STRRESET: 2023 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2024 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2025 type, inp, stcb, net)); 2026 SCTP_STAT_INCR(sctps_timostrmrst); 2027 if 
(sctp_strreset_timer(inp, stcb)) { 2028 /* no need to unlock on tcb its gone */ 2029 goto out_decr; 2030 } 2031 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2032 did_output = true; 2033 break; 2034 case SCTP_TIMER_TYPE_INPKILL: 2035 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2036 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2037 type, inp, stcb, net)); 2038 SCTP_STAT_INCR(sctps_timoinpkill); 2039 /* 2040 * special case, take away our increment since WE are the 2041 * killer 2042 */ 2043 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2044 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2045 SCTP_INP_DECR_REF(inp); 2046 SCTP_INP_WUNLOCK(inp); 2047 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2048 SCTP_CALLED_FROM_INPKILL_TIMER); 2049 inp = NULL; 2050 goto out_decr; 2051 case SCTP_TIMER_TYPE_ASOCKILL: 2052 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2053 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2054 type, inp, stcb, net)); 2055 SCTP_STAT_INCR(sctps_timoassockill); 2056 /* Can we free it yet? */ 2057 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2058 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2059 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2060 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2061 /* 2062 * free asoc, always unlocks (or destroy's) so prevent 2063 * duplicate unlock or unlock of a free mtx :-0 2064 */ 2065 stcb = NULL; 2066 goto out_decr; 2067 case SCTP_TIMER_TYPE_ADDR_WQ: 2068 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2069 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2070 type, inp, stcb, net)); 2071 sctp_handle_addr_wq(); 2072 did_output = true; 2073 break; 2074 case SCTP_TIMER_TYPE_PRIM_DELETED: 2075 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2076 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2077 type, inp, stcb, net)); 2078 SCTP_STAT_INCR(sctps_timodelprim); 2079 sctp_delete_prim_timer(inp, stcb); 2080 did_output = false; 2081 break; 2082 default: 2083 #ifdef INVARIANTS 2084 panic("Unknown timer type %d", type); 2085 #else 2086 goto out; 2087 #endif 2088 } 2089 #ifdef SCTP_AUDITING_ENABLED 2090 sctp_audit_log(0xF1, (uint8_t)type); 2091 if (inp != NULL) 2092 sctp_auditing(5, inp, stcb, net); 2093 #endif 2094 if (did_output && (stcb != NULL)) { 2095 /* 2096 * Now we need to clean up the control chunk chain if an 2097 * ECNE is on it. It must be marked as UNSENT again so next 2098 * call will continue to send it until such time that we get 2099 * a CWR, to remove it. It is, however, less likely that we 2100 * will find a ecn echo on the chain though. 2101 */ 2102 sctp_fix_ecn_echo(&stcb->asoc); 2103 } 2104 out: 2105 if (stcb != NULL) { 2106 SCTP_TCB_UNLOCK(stcb); 2107 } else if (inp != NULL) { 2108 SCTP_INP_WUNLOCK(inp); 2109 } else { 2110 SCTP_WQ_ADDR_UNLOCK(); 2111 } 2112 2113 out_decr: 2114 /* These reference counts were incremented in sctp_timer_start(). */ 2115 if (inp != NULL) { 2116 SCTP_INP_DECR_REF(inp); 2117 } 2118 if ((stcb != NULL) && !released_asoc_reference) { 2119 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2120 } 2121 if (net != NULL) { 2122 sctp_free_remote_addr(net); 2123 } 2124 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2125 CURVNET_RESTORE(); 2126 NET_EPOCH_EXIT(et); 2127 } 2128 2129 /*- 2130 * The following table shows which parameters must be provided 2131 * when calling sctp_timer_start(). For parameters not being 2132 * provided, NULL must be used. 
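 * For example (illustrative): a retransmission timer for a particular
 * path is started as
 *   sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
 * while the address work-queue timer takes no pointers at all:
 *   sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, NULL, NULL, NULL);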
2133 * 2134 * |Name |inp |stcb|net | 2135 * |-----------------------------|----|----|----| 2136 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2137 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2138 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2139 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2140 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2141 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2142 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2143 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2144 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2145 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2146 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2147 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2148 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2149 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2150 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2151 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2152 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2153 * 2154 */ 2155 2156 void 2157 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2158 struct sctp_nets *net) 2159 { 2160 struct sctp_timer *tmr; 2161 uint32_t to_ticks; 2162 uint32_t rndval, jitter; 2163 2164 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2165 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2166 t_type, stcb, stcb->sctp_ep)); 2167 tmr = NULL; 2168 if (stcb != NULL) { 2169 SCTP_TCB_LOCK_ASSERT(stcb); 2170 } else if (inp != NULL) { 2171 SCTP_INP_WLOCK_ASSERT(inp); 2172 } else { 2173 SCTP_WQ_ADDR_LOCK_ASSERT(); 2174 } 2175 if (stcb != NULL) { 2176 /* 2177 * Don't restart timer on association that's about to be 2178 * killed. 2179 */ 2180 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2181 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2182 SCTPDBG(SCTP_DEBUG_TIMER2, 2183 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2184 t_type, inp, stcb, net); 2185 return; 2186 } 2187 /* Don't restart timer on net that's been removed. */ 2188 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2189 SCTPDBG(SCTP_DEBUG_TIMER2, 2190 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2191 t_type, inp, stcb, net); 2192 return; 2193 } 2194 } 2195 switch (t_type) { 2196 case SCTP_TIMER_TYPE_SEND: 2197 /* Here we use the RTO timer. */ 2198 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2199 #ifdef INVARIANTS 2200 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2201 t_type, inp, stcb, net); 2202 #else 2203 return; 2204 #endif 2205 } 2206 tmr = &net->rxt_timer; 2207 if (net->RTO == 0) { 2208 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2209 } else { 2210 to_ticks = sctp_msecs_to_ticks(net->RTO); 2211 } 2212 break; 2213 case SCTP_TIMER_TYPE_INIT: 2214 /* 2215 * Here we use the INIT timer default usually about 1 2216 * second. 2217 */ 2218 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2219 #ifdef INVARIANTS 2220 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2221 t_type, inp, stcb, net); 2222 #else 2223 return; 2224 #endif 2225 } 2226 tmr = &net->rxt_timer; 2227 if (net->RTO == 0) { 2228 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2229 } else { 2230 to_ticks = sctp_msecs_to_ticks(net->RTO); 2231 } 2232 break; 2233 case SCTP_TIMER_TYPE_RECV: 2234 /* 2235 * Here we use the Delayed-Ack timer value from the inp, 2236 * usually about 200ms. 
2237 */ 2238 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2239 #ifdef INVARIANTS 2240 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2241 t_type, inp, stcb, net); 2242 #else 2243 return; 2244 #endif 2245 } 2246 tmr = &stcb->asoc.dack_timer; 2247 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2248 break; 2249 case SCTP_TIMER_TYPE_SHUTDOWN: 2250 /* Here we use the RTO of the destination. */ 2251 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2252 #ifdef INVARIANTS 2253 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2254 t_type, inp, stcb, net); 2255 #else 2256 return; 2257 #endif 2258 } 2259 tmr = &net->rxt_timer; 2260 if (net->RTO == 0) { 2261 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2262 } else { 2263 to_ticks = sctp_msecs_to_ticks(net->RTO); 2264 } 2265 break; 2266 case SCTP_TIMER_TYPE_HEARTBEAT: 2267 /* 2268 * The net is used here so that we can add in the RTO. Even 2269 * though we use a different timer. We also add the HB timer 2270 * PLUS a random jitter. 2271 */ 2272 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2273 #ifdef INVARIANTS 2274 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2275 t_type, inp, stcb, net); 2276 #else 2277 return; 2278 #endif 2279 } 2280 if ((net->dest_state & SCTP_ADDR_NOHB) && 2281 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 2282 SCTPDBG(SCTP_DEBUG_TIMER2, 2283 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2284 t_type, inp, stcb, net); 2285 return; 2286 } 2287 tmr = &net->hb_timer; 2288 if (net->RTO == 0) { 2289 to_ticks = stcb->asoc.initial_rto; 2290 } else { 2291 to_ticks = net->RTO; 2292 } 2293 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2294 jitter = rndval % to_ticks; 2295 if (to_ticks > 1) { 2296 to_ticks >>= 1; 2297 } 2298 if (jitter < (UINT32_MAX - to_ticks)) { 2299 to_ticks += jitter; 2300 } else { 2301 to_ticks = UINT32_MAX; 2302 } 2303 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2304 !(net->dest_state & SCTP_ADDR_PF)) { 2305 if (net->heart_beat_delay < (UINT32_MAX - to_ticks)) { 2306 to_ticks += net->heart_beat_delay; 2307 } else { 2308 to_ticks = UINT32_MAX; 2309 } 2310 } 2311 /* 2312 * Now we must convert the to_ticks that are now in ms to 2313 * ticks. 2314 */ 2315 to_ticks = sctp_msecs_to_ticks(to_ticks); 2316 break; 2317 case SCTP_TIMER_TYPE_COOKIE: 2318 /* 2319 * Here we can use the RTO timer from the network since one 2320 * RTT was complete. If a retransmission happened then we 2321 * will be using the RTO initial value. 2322 */ 2323 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2324 #ifdef INVARIANTS 2325 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2326 t_type, inp, stcb, net); 2327 #else 2328 return; 2329 #endif 2330 } 2331 tmr = &net->rxt_timer; 2332 if (net->RTO == 0) { 2333 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2334 } else { 2335 to_ticks = sctp_msecs_to_ticks(net->RTO); 2336 } 2337 break; 2338 case SCTP_TIMER_TYPE_NEWCOOKIE: 2339 /* 2340 * Nothing needed but the endpoint here usually about 60 2341 * minutes. 
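 * (The endpoint's cookie secret is rotated when this timer fires, see
 * the SCTP_TIMER_TYPE_NEWCOOKIE case in sctp_timeout_handler() above.)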
2342 */ 2343 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2344 #ifdef INVARIANTS 2345 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2346 t_type, inp, stcb, net); 2347 #else 2348 return; 2349 #endif 2350 } 2351 tmr = &inp->sctp_ep.signature_change; 2352 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2353 break; 2354 case SCTP_TIMER_TYPE_PATHMTURAISE: 2355 /* 2356 * Here we use the value found in the EP for PMTUD, usually 2357 * about 10 minutes. 2358 */ 2359 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2360 #ifdef INVARIANTS 2361 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2362 t_type, inp, stcb, net); 2363 #else 2364 return; 2365 #endif 2366 } 2367 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2368 SCTPDBG(SCTP_DEBUG_TIMER2, 2369 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2370 t_type, inp, stcb, net); 2371 return; 2372 } 2373 tmr = &net->pmtu_timer; 2374 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2375 break; 2376 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2377 /* Here we use the RTO of the destination. */ 2378 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2379 #ifdef INVARIANTS 2380 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2381 t_type, inp, stcb, net); 2382 #else 2383 return; 2384 #endif 2385 } 2386 tmr = &net->rxt_timer; 2387 if (net->RTO == 0) { 2388 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2389 } else { 2390 to_ticks = sctp_msecs_to_ticks(net->RTO); 2391 } 2392 break; 2393 case SCTP_TIMER_TYPE_ASCONF: 2394 /* 2395 * Here the timer comes from the stcb but its value is from 2396 * the net's RTO. 2397 */ 2398 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2399 #ifdef INVARIANTS 2400 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2401 t_type, inp, stcb, net); 2402 #else 2403 return; 2404 #endif 2405 } 2406 tmr = &stcb->asoc.asconf_timer; 2407 if (net->RTO == 0) { 2408 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2409 } else { 2410 to_ticks = sctp_msecs_to_ticks(net->RTO); 2411 } 2412 break; 2413 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2414 /* 2415 * Here we use the endpoints shutdown guard timer usually 2416 * about 3 minutes. 2417 */ 2418 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2419 #ifdef INVARIANTS 2420 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2421 t_type, inp, stcb, net); 2422 #else 2423 return; 2424 #endif 2425 } 2426 tmr = &stcb->asoc.shut_guard_timer; 2427 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2428 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2429 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2430 } else { 2431 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2432 } 2433 } else { 2434 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2435 } 2436 break; 2437 case SCTP_TIMER_TYPE_AUTOCLOSE: 2438 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2439 #ifdef INVARIANTS 2440 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2441 t_type, inp, stcb, net); 2442 #else 2443 return; 2444 #endif 2445 } 2446 tmr = &stcb->asoc.autoclose_timer; 2447 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2448 break; 2449 case SCTP_TIMER_TYPE_STRRESET: 2450 /* 2451 * Here the timer comes from the stcb but its value is from 2452 * the net's RTO. 
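 * Note that although a net is required here to pick the RTO value,
 * tmr->net is deliberately left NULL for this timer type when the
 * callout is armed below.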
2453 */ 2454 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2455 #ifdef INVARIANTS 2456 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2457 t_type, inp, stcb, net); 2458 #else 2459 return; 2460 #endif 2461 } 2462 tmr = &stcb->asoc.strreset_timer; 2463 if (net->RTO == 0) { 2464 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2465 } else { 2466 to_ticks = sctp_msecs_to_ticks(net->RTO); 2467 } 2468 break; 2469 case SCTP_TIMER_TYPE_INPKILL: 2470 /* 2471 * The inp is setup to die. We re-use the signature_change 2472 * timer since that has stopped and we are in the GONE 2473 * state. 2474 */ 2475 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2476 #ifdef INVARIANTS 2477 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2478 t_type, inp, stcb, net); 2479 #else 2480 return; 2481 #endif 2482 } 2483 tmr = &inp->sctp_ep.signature_change; 2484 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2485 break; 2486 case SCTP_TIMER_TYPE_ASOCKILL: 2487 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2488 #ifdef INVARIANTS 2489 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2490 t_type, inp, stcb, net); 2491 #else 2492 return; 2493 #endif 2494 } 2495 tmr = &stcb->asoc.strreset_timer; 2496 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2497 break; 2498 case SCTP_TIMER_TYPE_ADDR_WQ: 2499 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2500 #ifdef INVARIANTS 2501 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2502 t_type, inp, stcb, net); 2503 #else 2504 return; 2505 #endif 2506 } 2507 /* Only 1 tick away :-) */ 2508 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2509 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2510 break; 2511 case SCTP_TIMER_TYPE_PRIM_DELETED: 2512 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2513 #ifdef INVARIANTS 2514 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2515 t_type, inp, stcb, net); 2516 #else 2517 return; 2518 #endif 2519 } 2520 tmr = &stcb->asoc.delete_prim_timer; 2521 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2522 break; 2523 default: 2524 #ifdef INVARIANTS 2525 panic("Unknown timer type %d", t_type); 2526 #else 2527 return; 2528 #endif 2529 } 2530 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2531 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2532 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2533 /* 2534 * We do NOT allow you to have it already running. If it is, 2535 * we leave the current one up unchanged. 2536 */ 2537 SCTPDBG(SCTP_DEBUG_TIMER2, 2538 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2539 t_type, inp, stcb, net); 2540 return; 2541 } 2542 /* At this point we can proceed. */ 2543 if (t_type == SCTP_TIMER_TYPE_SEND) { 2544 stcb->asoc.num_send_timers_up++; 2545 } 2546 tmr->stopped_from = 0; 2547 tmr->type = t_type; 2548 tmr->ep = (void *)inp; 2549 tmr->tcb = (void *)stcb; 2550 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2551 tmr->net = NULL; 2552 } else { 2553 tmr->net = (void *)net; 2554 } 2555 tmr->self = (void *)tmr; 2556 tmr->vnet = (void *)curvnet; 2557 tmr->ticks = sctp_get_tick_count(); 2558 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2559 SCTPDBG(SCTP_DEBUG_TIMER2, 2560 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2561 t_type, to_ticks, inp, stcb, net); 2562 /* 2563 * If this is a newly scheduled callout, as opposed to a 2564 * rescheduled one, increment relevant reference counts. 
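 * The matching decrements happen either in sctp_timeout_handler() once
 * the timeout fires, or in sctp_timer_stop() if the callout is cancelled
 * before it runs.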
2565 */ 2566 if (tmr->ep != NULL) { 2567 SCTP_INP_INCR_REF(inp); 2568 } 2569 if (tmr->tcb != NULL) { 2570 atomic_add_int(&stcb->asoc.refcnt, 1); 2571 } 2572 if (tmr->net != NULL) { 2573 atomic_add_int(&net->ref_count, 1); 2574 } 2575 } else { 2576 /* 2577 * This should not happen, since we checked for pending 2578 * above. 2579 */ 2580 SCTPDBG(SCTP_DEBUG_TIMER2, 2581 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2582 t_type, to_ticks, inp, stcb, net); 2583 } 2584 return; 2585 } 2586 2587 /*- 2588 * The following table shows which parameters must be provided 2589 * when calling sctp_timer_stop(). For parameters not being 2590 * provided, NULL must be used. 2591 * 2592 * |Name |inp |stcb|net | 2593 * |-----------------------------|----|----|----| 2594 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2595 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2596 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2597 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2598 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2599 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2600 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2601 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2602 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2603 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2604 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2605 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2606 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2607 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2608 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2609 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2610 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2611 * 2612 */ 2613 2614 void 2615 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2616 struct sctp_nets *net, uint32_t from) 2617 { 2618 struct sctp_timer *tmr; 2619 2620 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2621 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2622 t_type, stcb, stcb->sctp_ep)); 2623 if (stcb != NULL) { 2624 SCTP_TCB_LOCK_ASSERT(stcb); 2625 } else if (inp != NULL) { 2626 SCTP_INP_WLOCK_ASSERT(inp); 2627 } else { 2628 SCTP_WQ_ADDR_LOCK_ASSERT(); 2629 } 2630 tmr = NULL; 2631 switch (t_type) { 2632 case SCTP_TIMER_TYPE_SEND: 2633 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2634 #ifdef INVARIANTS 2635 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2636 t_type, inp, stcb, net); 2637 #else 2638 return; 2639 #endif 2640 } 2641 tmr = &net->rxt_timer; 2642 break; 2643 case SCTP_TIMER_TYPE_INIT: 2644 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2645 #ifdef INVARIANTS 2646 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2647 t_type, inp, stcb, net); 2648 #else 2649 return; 2650 #endif 2651 } 2652 tmr = &net->rxt_timer; 2653 break; 2654 case SCTP_TIMER_TYPE_RECV: 2655 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2656 #ifdef INVARIANTS 2657 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2658 t_type, inp, stcb, net); 2659 #else 2660 return; 2661 #endif 2662 } 2663 tmr = &stcb->asoc.dack_timer; 2664 break; 2665 case SCTP_TIMER_TYPE_SHUTDOWN: 2666 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2667 #ifdef INVARIANTS 2668 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2669 t_type, inp, stcb, net); 2670 #else 2671 return; 2672 #endif 2673 } 2674 tmr = &net->rxt_timer; 2675 break; 2676 case SCTP_TIMER_TYPE_HEARTBEAT: 2677 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2678 #ifdef INVARIANTS 2679 panic("sctp_timer_stop of type %d: inp = 
%p, stcb = %p, net = %p", 2680 t_type, inp, stcb, net); 2681 #else 2682 return; 2683 #endif 2684 } 2685 tmr = &net->hb_timer; 2686 break; 2687 case SCTP_TIMER_TYPE_COOKIE: 2688 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2689 #ifdef INVARIANTS 2690 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2691 t_type, inp, stcb, net); 2692 #else 2693 return; 2694 #endif 2695 } 2696 tmr = &net->rxt_timer; 2697 break; 2698 case SCTP_TIMER_TYPE_NEWCOOKIE: 2699 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2700 #ifdef INVARIANTS 2701 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2702 t_type, inp, stcb, net); 2703 #else 2704 return; 2705 #endif 2706 } 2707 tmr = &inp->sctp_ep.signature_change; 2708 break; 2709 case SCTP_TIMER_TYPE_PATHMTURAISE: 2710 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2711 #ifdef INVARIANTS 2712 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2713 t_type, inp, stcb, net); 2714 #else 2715 return; 2716 #endif 2717 } 2718 tmr = &net->pmtu_timer; 2719 break; 2720 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2721 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2722 #ifdef INVARIANTS 2723 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2724 t_type, inp, stcb, net); 2725 #else 2726 return; 2727 #endif 2728 } 2729 tmr = &net->rxt_timer; 2730 break; 2731 case SCTP_TIMER_TYPE_ASCONF: 2732 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2733 #ifdef INVARIANTS 2734 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2735 t_type, inp, stcb, net); 2736 #else 2737 return; 2738 #endif 2739 } 2740 tmr = &stcb->asoc.asconf_timer; 2741 break; 2742 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2743 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2744 #ifdef INVARIANTS 2745 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2746 t_type, inp, stcb, net); 2747 #else 2748 return; 2749 #endif 2750 } 2751 tmr = &stcb->asoc.shut_guard_timer; 2752 break; 2753 case SCTP_TIMER_TYPE_AUTOCLOSE: 2754 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2755 #ifdef INVARIANTS 2756 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2757 t_type, inp, stcb, net); 2758 #else 2759 return; 2760 #endif 2761 } 2762 tmr = &stcb->asoc.autoclose_timer; 2763 break; 2764 case SCTP_TIMER_TYPE_STRRESET: 2765 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2766 #ifdef INVARIANTS 2767 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2768 t_type, inp, stcb, net); 2769 #else 2770 return; 2771 #endif 2772 } 2773 tmr = &stcb->asoc.strreset_timer; 2774 break; 2775 case SCTP_TIMER_TYPE_INPKILL: 2776 /* 2777 * The inp is setup to die. We re-use the signature_change 2778 * timer since that has stopped and we are in the GONE 2779 * state. 
2780 */ 2781 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2782 #ifdef INVARIANTS 2783 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2784 t_type, inp, stcb, net); 2785 #else 2786 return; 2787 #endif 2788 } 2789 tmr = &inp->sctp_ep.signature_change; 2790 break; 2791 case SCTP_TIMER_TYPE_ASOCKILL: 2792 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2793 #ifdef INVARIANTS 2794 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2795 t_type, inp, stcb, net); 2796 #else 2797 return; 2798 #endif 2799 } 2800 tmr = &stcb->asoc.strreset_timer; 2801 break; 2802 case SCTP_TIMER_TYPE_ADDR_WQ: 2803 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2804 #ifdef INVARIANTS 2805 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2806 t_type, inp, stcb, net); 2807 #else 2808 return; 2809 #endif 2810 } 2811 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2812 break; 2813 case SCTP_TIMER_TYPE_PRIM_DELETED: 2814 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2815 #ifdef INVARIANTS 2816 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2817 t_type, inp, stcb, net); 2818 #else 2819 return; 2820 #endif 2821 } 2822 tmr = &stcb->asoc.delete_prim_timer; 2823 break; 2824 default: 2825 #ifdef INVARIANTS 2826 panic("Unknown timer type %d", t_type); 2827 #else 2828 return; 2829 #endif 2830 } 2831 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2832 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2833 (tmr->type != t_type)) { 2834 /* 2835 * Ok we have a timer that is under joint use. Cookie timer 2836 * per chance with the SEND timer. We therefore are NOT 2837 * running the timer that the caller wants stopped. So just 2838 * return. 2839 */ 2840 SCTPDBG(SCTP_DEBUG_TIMER2, 2841 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2842 t_type, inp, stcb, net); 2843 return; 2844 } 2845 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2846 stcb->asoc.num_send_timers_up--; 2847 if (stcb->asoc.num_send_timers_up < 0) { 2848 stcb->asoc.num_send_timers_up = 0; 2849 } 2850 } 2851 tmr->self = NULL; 2852 tmr->stopped_from = from; 2853 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2854 KASSERT(tmr->ep == inp, 2855 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2856 t_type, inp, tmr->ep)); 2857 KASSERT(tmr->tcb == stcb, 2858 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2859 t_type, stcb, tmr->tcb)); 2860 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2861 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2862 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2863 t_type, net, tmr->net)); 2864 SCTPDBG(SCTP_DEBUG_TIMER2, 2865 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2866 t_type, inp, stcb, net); 2867 /* 2868 * If the timer was actually stopped, decrement reference 2869 * counts that were incremented in sctp_timer_start(). 2870 */ 2871 if (tmr->ep != NULL) { 2872 tmr->ep = NULL; 2873 SCTP_INP_DECR_REF(inp); 2874 } 2875 if (tmr->tcb != NULL) { 2876 tmr->tcb = NULL; 2877 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2878 } 2879 if (tmr->net != NULL) { 2880 struct sctp_nets *tmr_net; 2881 2882 /* 2883 * Can't use net, since it doesn't work for 2884 * SCTP_TIMER_TYPE_ASCONF. 
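 * (For SCTP_TIMER_TYPE_ASCONF the caller passes net == NULL while
 * tmr->net still holds the address the timer was started on, see the
 * KASSERT above.)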
2885 */ 2886 tmr_net = tmr->net; 2887 tmr->net = NULL; 2888 sctp_free_remote_addr(tmr_net); 2889 } 2890 } else { 2891 SCTPDBG(SCTP_DEBUG_TIMER2, 2892 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2893 t_type, inp, stcb, net); 2894 } 2895 return; 2896 } 2897 2898 uint32_t 2899 sctp_calculate_len(struct mbuf *m) 2900 { 2901 struct mbuf *at; 2902 uint32_t tlen; 2903 2904 tlen = 0; 2905 for (at = m; at != NULL; at = SCTP_BUF_NEXT(at)) { 2906 tlen += SCTP_BUF_LEN(at); 2907 } 2908 return (tlen); 2909 } 2910 2911 /* 2912 * Given an association and starting time of the current RTT period, update 2913 * RTO in number of msecs. net should point to the current network. 2914 * Return 1, if an RTO update was performed, return 0 if no update was 2915 * performed due to invalid starting point. 2916 */ 2917 2918 int 2919 sctp_calculate_rto(struct sctp_tcb *stcb, 2920 struct sctp_association *asoc, 2921 struct sctp_nets *net, 2922 struct timeval *old, 2923 int rtt_from_sack) 2924 { 2925 struct timeval now; 2926 uint64_t rtt_us; /* RTT in us */ 2927 int32_t rtt; /* RTT in ms */ 2928 uint32_t new_rto; 2929 int first_measure = 0; 2930 2931 /************************/ 2932 /* 1. calculate new RTT */ 2933 /************************/ 2934 /* get the current time */ 2935 if (stcb->asoc.use_precise_time) { 2936 (void)SCTP_GETPTIME_TIMEVAL(&now); 2937 } else { 2938 (void)SCTP_GETTIME_TIMEVAL(&now); 2939 } 2940 if ((old->tv_sec > now.tv_sec) || 2941 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2942 /* The starting point is in the future. */ 2943 return (0); 2944 } 2945 timevalsub(&now, old); 2946 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2947 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2948 /* The RTT is larger than a sane value. */ 2949 return (0); 2950 } 2951 /* store the current RTT in us */ 2952 net->rtt = rtt_us; 2953 /* compute rtt in ms */ 2954 rtt = (int32_t)(net->rtt / 1000); 2955 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2956 /* 2957 * Tell the CC module that a new update has just occurred 2958 * from a sack 2959 */ 2960 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2961 } 2962 /* 2963 * Do we need to determine the lan? We do this only on sacks i.e. 2964 * RTT being determined from data not non-data (HB/INIT->INITACK). 2965 */ 2966 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2967 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2968 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2969 net->lan_type = SCTP_LAN_INTERNET; 2970 } else { 2971 net->lan_type = SCTP_LAN_LOCAL; 2972 } 2973 } 2974 2975 /***************************/ 2976 /* 2. update RTTVAR & SRTT */ 2977 /***************************/ 2978 /*- 2979 * Compute the scaled average lastsa and the 2980 * scaled variance lastsv as described in van Jacobson 2981 * Paper "Congestion Avoidance and Control", Annex A. 
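 * Assuming the usual shift values (SCTP_RTT_SHIFT = 3 and
 * SCTP_RTT_VAR_SHIFT = 2, i.e. alpha = 1/8 and beta = 1/4), the update
 * below amounts to
 *   SRTT   <- SRTT   + (RTT - SRTT) / 8
 *   RTTVAR <- RTTVAR + (|RTT - SRTT| - RTTVAR) / 4
 * and the RTO computed further down is SRTT + 4 * RTTVAR, as in
 * RFC 4960, Section 6.3.1.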
2982 * 2983 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2984 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2985 */ 2986 if (net->RTO_measured) { 2987 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2988 net->lastsa += rtt; 2989 if (rtt < 0) { 2990 rtt = -rtt; 2991 } 2992 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 2993 net->lastsv += rtt; 2994 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2995 rto_logging(net, SCTP_LOG_RTTVAR); 2996 } 2997 } else { 2998 /* First RTO measurement */ 2999 net->RTO_measured = 1; 3000 first_measure = 1; 3001 net->lastsa = rtt << SCTP_RTT_SHIFT; 3002 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3003 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3004 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3005 } 3006 } 3007 if (net->lastsv == 0) { 3008 net->lastsv = SCTP_CLOCK_GRANULARITY; 3009 } 3010 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3011 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3012 (stcb->asoc.sat_network_lockout == 0)) { 3013 stcb->asoc.sat_network = 1; 3014 } else if ((!first_measure) && stcb->asoc.sat_network) { 3015 stcb->asoc.sat_network = 0; 3016 stcb->asoc.sat_network_lockout = 1; 3017 } 3018 /* bound it, per C6/C7 in Section 5.3.1 */ 3019 if (new_rto < stcb->asoc.minrto) { 3020 new_rto = stcb->asoc.minrto; 3021 } 3022 if (new_rto > stcb->asoc.maxrto) { 3023 new_rto = stcb->asoc.maxrto; 3024 } 3025 net->RTO = new_rto; 3026 return (1); 3027 } 3028 3029 /* 3030 * return a pointer to a contiguous piece of data from the given mbuf chain 3031 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3032 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 3033 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 3034 */ 3035 caddr_t 3036 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3037 { 3038 uint32_t count; 3039 uint8_t *ptr; 3040 3041 ptr = in_ptr; 3042 if ((off < 0) || (len <= 0)) 3043 return (NULL); 3044 3045 /* find the desired start location */ 3046 while ((m != NULL) && (off > 0)) { 3047 if (off < SCTP_BUF_LEN(m)) 3048 break; 3049 off -= SCTP_BUF_LEN(m); 3050 m = SCTP_BUF_NEXT(m); 3051 } 3052 if (m == NULL) 3053 return (NULL); 3054 3055 /* is the current mbuf large enough (eg. contiguous)? */ 3056 if ((SCTP_BUF_LEN(m) - off) >= len) { 3057 return (mtod(m, caddr_t)+off); 3058 } else { 3059 /* else, it spans more than one mbuf, so save a temp copy... */ 3060 while ((m != NULL) && (len > 0)) { 3061 count = min(SCTP_BUF_LEN(m) - off, len); 3062 memcpy(ptr, mtod(m, caddr_t)+off, count); 3063 len -= count; 3064 ptr += count; 3065 off = 0; 3066 m = SCTP_BUF_NEXT(m); 3067 } 3068 if ((m == NULL) && (len > 0)) 3069 return (NULL); 3070 else 3071 return ((caddr_t)in_ptr); 3072 } 3073 } 3074 3075 struct sctp_paramhdr * 3076 sctp_get_next_param(struct mbuf *m, 3077 int offset, 3078 struct sctp_paramhdr *pull, 3079 int pull_limit) 3080 { 3081 /* This just provides a typed signature to Peter's Pull routine */ 3082 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3083 (uint8_t *)pull)); 3084 } 3085 3086 struct mbuf * 3087 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3088 { 3089 struct mbuf *m_last; 3090 caddr_t dp; 3091 3092 if (padlen > 3) { 3093 return (NULL); 3094 } 3095 if (padlen <= M_TRAILINGSPACE(m)) { 3096 /* 3097 * The easy way. 
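 * (The passed mbuf still has room for the pad bytes in its trailing
 * space, so they are simply appended in place.)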
We hope the majority of the time we hit 3098 * here :) 3099 */ 3100 m_last = m; 3101 } else { 3102 /* Hard way: we must grow the mbuf chain */ 3103 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3104 if (m_last == NULL) { 3105 return (NULL); 3106 } 3107 SCTP_BUF_LEN(m_last) = 0; 3108 SCTP_BUF_NEXT(m_last) = NULL; 3109 SCTP_BUF_NEXT(m) = m_last; 3110 } 3111 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3112 SCTP_BUF_LEN(m_last) += padlen; 3113 memset(dp, 0, padlen); 3114 return (m_last); 3115 } 3116 3117 struct mbuf * 3118 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3119 { 3120 /* find the last mbuf in the chain and pad it */ 3121 struct mbuf *m_at; 3122 3123 if (last_mbuf != NULL) { 3124 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3125 } else { 3126 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3127 if (SCTP_BUF_NEXT(m_at) == NULL) { 3128 return (sctp_add_pad_tombuf(m_at, padval)); 3129 } 3130 } 3131 } 3132 return (NULL); 3133 } 3134 3135 static void 3136 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3137 uint16_t error, struct sctp_abort_chunk *abort, 3138 bool from_peer, bool timedout, int so_locked) 3139 { 3140 struct mbuf *m_notify; 3141 struct sctp_assoc_change *sac; 3142 struct sctp_queued_to_read *control; 3143 unsigned int notif_len; 3144 uint16_t abort_len; 3145 unsigned int i; 3146 3147 KASSERT(abort == NULL || from_peer, 3148 ("sctp_notify_assoc_change: ABORT chunk provided for local termination")); 3149 KASSERT(!from_peer || !timedout, 3150 ("sctp_notify_assoc_change: timeouts can only be local")); 3151 if (stcb == NULL) { 3152 return; 3153 } 3154 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3155 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3156 if (abort != NULL) { 3157 abort_len = ntohs(abort->ch.chunk_length); 3158 /* 3159 * Only SCTP_CHUNK_BUFFER_SIZE bytes are guaranteed to be 3160 * contiguous. 3161 */ 3162 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3163 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3164 } 3165 } else { 3166 abort_len = 0; 3167 } 3168 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3169 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3170 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3171 notif_len += abort_len; 3172 } 3173 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3174 if (m_notify == NULL) { 3175 /* Retry with a smaller value.
*/ 3176 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3177 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3178 if (m_notify == NULL) { 3179 goto set_error; 3180 } 3181 } 3182 SCTP_BUF_NEXT(m_notify) = NULL; 3183 sac = mtod(m_notify, struct sctp_assoc_change *); 3184 memset(sac, 0, notif_len); 3185 sac->sac_type = SCTP_ASSOC_CHANGE; 3186 sac->sac_flags = 0; 3187 sac->sac_length = sizeof(struct sctp_assoc_change); 3188 sac->sac_state = state; 3189 sac->sac_error = error; 3190 if (state == SCTP_CANT_STR_ASSOC) { 3191 sac->sac_outbound_streams = 0; 3192 sac->sac_inbound_streams = 0; 3193 } else { 3194 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3195 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3196 } 3197 sac->sac_assoc_id = sctp_get_associd(stcb); 3198 if (notif_len > sizeof(struct sctp_assoc_change)) { 3199 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3200 i = 0; 3201 if (stcb->asoc.prsctp_supported == 1) { 3202 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3203 } 3204 if (stcb->asoc.auth_supported == 1) { 3205 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3206 } 3207 if (stcb->asoc.asconf_supported == 1) { 3208 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3209 } 3210 if (stcb->asoc.idata_supported == 1) { 3211 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3212 } 3213 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3214 if (stcb->asoc.reconfig_supported == 1) { 3215 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3216 } 3217 sac->sac_length += i; 3218 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3219 memcpy(sac->sac_info, abort, abort_len); 3220 sac->sac_length += abort_len; 3221 } 3222 } 3223 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3224 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3225 0, 0, stcb->asoc.context, 0, 0, 0, 3226 m_notify); 3227 if (control != NULL) { 3228 control->length = SCTP_BUF_LEN(m_notify); 3229 control->spec_flags = M_NOTIFICATION; 3230 /* not that we need this */ 3231 control->tail_mbuf = m_notify; 3232 sctp_add_to_readq(stcb->sctp_ep, stcb, 3233 control, 3234 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3235 so_locked); 3236 } else { 3237 sctp_m_freem(m_notify); 3238 } 3239 } 3240 /* 3241 * For 1-to-1 style sockets, we send up an error when an ABORT 3242 * comes in.
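 * (The errno reported below depends on how the association ended:
 * ECONNREFUSED for a setup that never completed, ECONNRESET for a peer
 * ABORT, ETIMEDOUT or ECONNABORTED for a local termination.)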
3243 */ 3244 set_error: 3245 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3246 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3247 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3248 SOCK_LOCK(stcb->sctp_socket); 3249 if (from_peer) { 3250 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3251 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3252 stcb->sctp_socket->so_error = ECONNREFUSED; 3253 } else { 3254 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3255 stcb->sctp_socket->so_error = ECONNRESET; 3256 } 3257 } else { 3258 if (timedout) { 3259 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3260 stcb->sctp_socket->so_error = ETIMEDOUT; 3261 } else { 3262 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3263 stcb->sctp_socket->so_error = ECONNABORTED; 3264 } 3265 } 3266 SOCK_UNLOCK(stcb->sctp_socket); 3267 } 3268 /* Wake ANY sleepers */ 3269 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3270 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3271 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3272 socantrcvmore(stcb->sctp_socket); 3273 } 3274 sorwakeup(stcb->sctp_socket); 3275 sowwakeup(stcb->sctp_socket); 3276 } 3277 3278 static void 3279 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3280 struct sockaddr *sa, uint32_t error, int so_locked) 3281 { 3282 struct mbuf *m_notify; 3283 struct sctp_paddr_change *spc; 3284 struct sctp_queued_to_read *control; 3285 3286 if ((stcb == NULL) || 3287 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3288 /* event not enabled */ 3289 return; 3290 } 3291 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3292 if (m_notify == NULL) 3293 return; 3294 SCTP_BUF_LEN(m_notify) = 0; 3295 spc = mtod(m_notify, struct sctp_paddr_change *); 3296 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3297 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3298 spc->spc_flags = 0; 3299 spc->spc_length = sizeof(struct sctp_paddr_change); 3300 switch (sa->sa_family) { 3301 #ifdef INET 3302 case AF_INET: 3303 #ifdef INET6 3304 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3305 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3306 (struct sockaddr_in6 *)&spc->spc_aaddr); 3307 } else { 3308 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3309 } 3310 #else 3311 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3312 #endif 3313 break; 3314 #endif 3315 #ifdef INET6 3316 case AF_INET6: 3317 { 3318 struct sockaddr_in6 *sin6; 3319 3320 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3321 3322 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3323 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3324 if (sin6->sin6_scope_id == 0) { 3325 /* recover scope_id for user */ 3326 (void)sa6_recoverscope(sin6); 3327 } else { 3328 /* clear embedded scope_id for user */ 3329 in6_clearscope(&sin6->sin6_addr); 3330 } 3331 } 3332 break; 3333 } 3334 #endif 3335 default: 3336 /* TSNH */ 3337 break; 3338 } 3339 spc->spc_state = state; 3340 spc->spc_error = error; 3341 spc->spc_assoc_id = sctp_get_associd(stcb); 3342 3343 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3344 SCTP_BUF_NEXT(m_notify) = NULL; 3345 3346 /* append to socket */ 3347 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3348 0, 0, stcb->asoc.context, 0, 0, 0, 3349 m_notify); 3350 if 
(control == NULL) { 3351 /* no memory */ 3352 sctp_m_freem(m_notify); 3353 return; 3354 } 3355 control->length = SCTP_BUF_LEN(m_notify); 3356 control->spec_flags = M_NOTIFICATION; 3357 /* not that we need this */ 3358 control->tail_mbuf = m_notify; 3359 sctp_add_to_readq(stcb->sctp_ep, stcb, 3360 control, 3361 &stcb->sctp_socket->so_rcv, 1, 3362 SCTP_READ_LOCK_NOT_HELD, 3363 so_locked); 3364 } 3365 3366 static void 3367 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3368 struct sctp_tmit_chunk *chk, int so_locked) 3369 { 3370 struct mbuf *m_notify; 3371 struct sctp_send_failed *ssf; 3372 struct sctp_send_failed_event *ssfe; 3373 struct sctp_queued_to_read *control; 3374 struct sctp_chunkhdr *chkhdr; 3375 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3376 3377 if ((stcb == NULL) || 3378 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3379 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3380 /* event not enabled */ 3381 return; 3382 } 3383 3384 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3385 notifhdr_len = sizeof(struct sctp_send_failed_event); 3386 } else { 3387 notifhdr_len = sizeof(struct sctp_send_failed); 3388 } 3389 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3390 if (m_notify == NULL) 3391 /* no space left */ 3392 return; 3393 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3394 if (stcb->asoc.idata_supported) { 3395 chkhdr_len = sizeof(struct sctp_idata_chunk); 3396 } else { 3397 chkhdr_len = sizeof(struct sctp_data_chunk); 3398 } 3399 /* Use some defaults in case we can't access the chunk header */ 3400 if (chk->send_size >= chkhdr_len) { 3401 payload_len = chk->send_size - chkhdr_len; 3402 } else { 3403 payload_len = 0; 3404 } 3405 padding_len = 0; 3406 if (chk->data != NULL) { 3407 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3408 if (chkhdr != NULL) { 3409 chk_len = ntohs(chkhdr->chunk_length); 3410 if ((chk_len >= chkhdr_len) && 3411 (chk->send_size >= chk_len) && 3412 (chk->send_size - chk_len < 4)) { 3413 padding_len = chk->send_size - chk_len; 3414 payload_len = chk->send_size - chkhdr_len - padding_len; 3415 } 3416 } 3417 } 3418 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3419 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3420 memset(ssfe, 0, notifhdr_len); 3421 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3422 if (sent) { 3423 ssfe->ssfe_flags = SCTP_DATA_SENT; 3424 } else { 3425 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3426 } 3427 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3428 ssfe->ssfe_error = error; 3429 /* not exactly what the user sent in, but should be close :) */ 3430 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3431 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3432 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3433 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3434 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3435 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3436 } else { 3437 ssf = mtod(m_notify, struct sctp_send_failed *); 3438 memset(ssf, 0, notifhdr_len); 3439 ssf->ssf_type = SCTP_SEND_FAILED; 3440 if (sent) { 3441 ssf->ssf_flags = SCTP_DATA_SENT; 3442 } else { 3443 ssf->ssf_flags = SCTP_DATA_UNSENT; 3444 } 3445 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3446 ssf->ssf_error = error; 3447 /* not exactly what the user sent in, but should be close :) */ 3448 
ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3449 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3450 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3451 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3452 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3453 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3454 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3455 } 3456 if (chk->data != NULL) { 3457 /* Trim off the sctp chunk header (it should be there) */ 3458 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3459 m_adj(chk->data, chkhdr_len); 3460 m_adj(chk->data, -padding_len); 3461 sctp_mbuf_crush(chk->data); 3462 chk->send_size -= (chkhdr_len + padding_len); 3463 } 3464 } 3465 SCTP_BUF_NEXT(m_notify) = chk->data; 3466 /* Steal off the mbuf */ 3467 chk->data = NULL; 3468 /* 3469 * For this case, we check the actual socket buffer, since the assoc 3470 * is going away we don't want to overfill the socket buffer for a 3471 * non-reader 3472 */ 3473 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3474 sctp_m_freem(m_notify); 3475 return; 3476 } 3477 /* append to socket */ 3478 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3479 0, 0, stcb->asoc.context, 0, 0, 0, 3480 m_notify); 3481 if (control == NULL) { 3482 /* no memory */ 3483 sctp_m_freem(m_notify); 3484 return; 3485 } 3486 control->length = SCTP_BUF_LEN(m_notify); 3487 control->spec_flags = M_NOTIFICATION; 3488 /* not that we need this */ 3489 control->tail_mbuf = m_notify; 3490 sctp_add_to_readq(stcb->sctp_ep, stcb, 3491 control, 3492 &stcb->sctp_socket->so_rcv, 1, 3493 SCTP_READ_LOCK_NOT_HELD, 3494 so_locked); 3495 } 3496 3497 static void 3498 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3499 struct sctp_stream_queue_pending *sp, int so_locked) 3500 { 3501 struct mbuf *m_notify; 3502 struct sctp_send_failed *ssf; 3503 struct sctp_send_failed_event *ssfe; 3504 struct sctp_queued_to_read *control; 3505 int notifhdr_len; 3506 3507 if ((stcb == NULL) || 3508 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3509 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3510 /* event not enabled */ 3511 return; 3512 } 3513 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3514 notifhdr_len = sizeof(struct sctp_send_failed_event); 3515 } else { 3516 notifhdr_len = sizeof(struct sctp_send_failed); 3517 } 3518 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3519 if (m_notify == NULL) { 3520 /* no space left */ 3521 return; 3522 } 3523 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3524 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3525 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3526 memset(ssfe, 0, notifhdr_len); 3527 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3528 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3529 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3530 ssfe->ssfe_error = error; 3531 /* not exactly what the user sent in, but should be close :) */ 3532 ssfe->ssfe_info.snd_sid = sp->sid; 3533 if (sp->some_taken) { 3534 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3535 } else { 3536 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3537 } 3538 ssfe->ssfe_info.snd_ppid = sp->ppid; 3539 ssfe->ssfe_info.snd_context = sp->context; 3540 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3541 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3542 } else { 
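/*
 * Fall back to the older sctp_send_failed layout for applications that
 * enabled the original SCTP_SEND_FAILED notification instead of the
 * newer SCTP_SEND_FAILED_EVENT.
 */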
3543 ssf = mtod(m_notify, struct sctp_send_failed *); 3544 memset(ssf, 0, notifhdr_len); 3545 ssf->ssf_type = SCTP_SEND_FAILED; 3546 ssf->ssf_flags = SCTP_DATA_UNSENT; 3547 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3548 ssf->ssf_error = error; 3549 /* not exactly what the user sent in, but should be close :) */ 3550 ssf->ssf_info.sinfo_stream = sp->sid; 3551 ssf->ssf_info.sinfo_ssn = 0; 3552 if (sp->some_taken) { 3553 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3554 } else { 3555 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3556 } 3557 ssf->ssf_info.sinfo_ppid = sp->ppid; 3558 ssf->ssf_info.sinfo_context = sp->context; 3559 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3560 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3561 } 3562 SCTP_BUF_NEXT(m_notify) = sp->data; 3563 3564 /* Steal off the mbuf */ 3565 sp->data = NULL; 3566 /* 3567 * For this case, we check the actual socket buffer, since the assoc 3568 * is going away we don't want to overfill the socket buffer for a 3569 * non-reader 3570 */ 3571 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3572 sctp_m_freem(m_notify); 3573 return; 3574 } 3575 /* append to socket */ 3576 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3577 0, 0, stcb->asoc.context, 0, 0, 0, 3578 m_notify); 3579 if (control == NULL) { 3580 /* no memory */ 3581 sctp_m_freem(m_notify); 3582 return; 3583 } 3584 control->length = SCTP_BUF_LEN(m_notify); 3585 control->spec_flags = M_NOTIFICATION; 3586 /* not that we need this */ 3587 control->tail_mbuf = m_notify; 3588 sctp_add_to_readq(stcb->sctp_ep, stcb, 3589 control, 3590 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3591 } 3592 3593 static void 3594 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3595 { 3596 struct mbuf *m_notify; 3597 struct sctp_adaptation_event *sai; 3598 struct sctp_queued_to_read *control; 3599 3600 if ((stcb == NULL) || 3601 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3602 /* event not enabled */ 3603 return; 3604 } 3605 3606 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3607 if (m_notify == NULL) 3608 /* no space left */ 3609 return; 3610 SCTP_BUF_LEN(m_notify) = 0; 3611 sai = mtod(m_notify, struct sctp_adaptation_event *); 3612 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3613 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3614 sai->sai_flags = 0; 3615 sai->sai_length = sizeof(struct sctp_adaptation_event); 3616 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3617 sai->sai_assoc_id = sctp_get_associd(stcb); 3618 3619 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3620 SCTP_BUF_NEXT(m_notify) = NULL; 3621 3622 /* append to socket */ 3623 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3624 0, 0, stcb->asoc.context, 0, 0, 0, 3625 m_notify); 3626 if (control == NULL) { 3627 /* no memory */ 3628 sctp_m_freem(m_notify); 3629 return; 3630 } 3631 control->length = SCTP_BUF_LEN(m_notify); 3632 control->spec_flags = M_NOTIFICATION; 3633 /* not that we need this */ 3634 control->tail_mbuf = m_notify; 3635 sctp_add_to_readq(stcb->sctp_ep, stcb, 3636 control, 3637 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3638 } 3639 3640 /* This always must be called with the read-queue LOCKED in the INP */ 3641 static void 3642 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3643 uint32_t val, int 
so_locked) 3644 { 3645 struct mbuf *m_notify; 3646 struct sctp_pdapi_event *pdapi; 3647 struct sctp_queued_to_read *control; 3648 struct sockbuf *sb; 3649 3650 if ((stcb == NULL) || 3651 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3652 /* event not enabled */ 3653 return; 3654 } 3655 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3656 return; 3657 } 3658 3659 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3660 if (m_notify == NULL) 3661 /* no space left */ 3662 return; 3663 SCTP_BUF_LEN(m_notify) = 0; 3664 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3665 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3666 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3667 pdapi->pdapi_flags = 0; 3668 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3669 pdapi->pdapi_indication = error; 3670 pdapi->pdapi_stream = (val >> 16); 3671 pdapi->pdapi_seq = (val & 0x0000ffff); 3672 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3673 3674 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3675 SCTP_BUF_NEXT(m_notify) = NULL; 3676 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3677 0, 0, stcb->asoc.context, 0, 0, 0, 3678 m_notify); 3679 if (control == NULL) { 3680 /* no memory */ 3681 sctp_m_freem(m_notify); 3682 return; 3683 } 3684 control->length = SCTP_BUF_LEN(m_notify); 3685 control->spec_flags = M_NOTIFICATION; 3686 /* not that we need this */ 3687 control->tail_mbuf = m_notify; 3688 sb = &stcb->sctp_socket->so_rcv; 3689 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3690 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3691 } 3692 sctp_sballoc(stcb, sb, m_notify); 3693 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3694 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3695 } 3696 control->end_added = 1; 3697 if (stcb->asoc.control_pdapi) 3698 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3699 else { 3700 /* we really should not see this case */ 3701 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3702 } 3703 if (stcb->sctp_ep && stcb->sctp_socket) { 3704 /* This should always be the case */ 3705 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3706 } 3707 } 3708 3709 static void 3710 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3711 { 3712 struct mbuf *m_notify; 3713 struct sctp_shutdown_event *sse; 3714 struct sctp_queued_to_read *control; 3715 3716 /* 3717 * For TCP model AND UDP connected sockets we will send an error up 3718 * when an SHUTDOWN completes 3719 */ 3720 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3721 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3722 /* mark socket closed for read/write and wakeup! 
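(only the send side is marked here, via socantsendmore())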
*/ 3723 socantsendmore(stcb->sctp_socket); 3724 } 3725 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3726 /* event not enabled */ 3727 return; 3728 } 3729 3730 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3731 if (m_notify == NULL) 3732 /* no space left */ 3733 return; 3734 sse = mtod(m_notify, struct sctp_shutdown_event *); 3735 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3736 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3737 sse->sse_flags = 0; 3738 sse->sse_length = sizeof(struct sctp_shutdown_event); 3739 sse->sse_assoc_id = sctp_get_associd(stcb); 3740 3741 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3742 SCTP_BUF_NEXT(m_notify) = NULL; 3743 3744 /* append to socket */ 3745 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3746 0, 0, stcb->asoc.context, 0, 0, 0, 3747 m_notify); 3748 if (control == NULL) { 3749 /* no memory */ 3750 sctp_m_freem(m_notify); 3751 return; 3752 } 3753 control->length = SCTP_BUF_LEN(m_notify); 3754 control->spec_flags = M_NOTIFICATION; 3755 /* not that we need this */ 3756 control->tail_mbuf = m_notify; 3757 sctp_add_to_readq(stcb->sctp_ep, stcb, 3758 control, 3759 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3760 } 3761 3762 static void 3763 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3764 int so_locked) 3765 { 3766 struct mbuf *m_notify; 3767 struct sctp_sender_dry_event *event; 3768 struct sctp_queued_to_read *control; 3769 3770 if ((stcb == NULL) || 3771 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3772 /* event not enabled */ 3773 return; 3774 } 3775 3776 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3777 if (m_notify == NULL) { 3778 /* no space left */ 3779 return; 3780 } 3781 SCTP_BUF_LEN(m_notify) = 0; 3782 event = mtod(m_notify, struct sctp_sender_dry_event *); 3783 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3784 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3785 event->sender_dry_flags = 0; 3786 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3787 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3788 3789 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3790 SCTP_BUF_NEXT(m_notify) = NULL; 3791 3792 /* append to socket */ 3793 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3794 0, 0, stcb->asoc.context, 0, 0, 0, 3795 m_notify); 3796 if (control == NULL) { 3797 /* no memory */ 3798 sctp_m_freem(m_notify); 3799 return; 3800 } 3801 control->length = SCTP_BUF_LEN(m_notify); 3802 control->spec_flags = M_NOTIFICATION; 3803 /* not that we need this */ 3804 control->tail_mbuf = m_notify; 3805 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3806 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3807 } 3808 3809 void 3810 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3811 { 3812 struct mbuf *m_notify; 3813 struct sctp_queued_to_read *control; 3814 struct sctp_stream_change_event *stradd; 3815 3816 if ((stcb == NULL) || 3817 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3818 /* event not enabled */ 3819 return; 3820 } 3821 if ((stcb->asoc.peer_req_out) && flag) { 3822 /* Peer made the request, don't tell the local user */ 3823 stcb->asoc.peer_req_out = 0; 3824 return; 3825 } 3826 stcb->asoc.peer_req_out = 0; 3827 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3828 if (m_notify == NULL) 3829 /* no space left */ 3830 return; 3831 SCTP_BUF_LEN(m_notify) = 0; 3832 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3833 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3834 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3835 stradd->strchange_flags = flag; 3836 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3837 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3838 stradd->strchange_instrms = numberin; 3839 stradd->strchange_outstrms = numberout; 3840 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3841 SCTP_BUF_NEXT(m_notify) = NULL; 3842 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3843 /* no space */ 3844 sctp_m_freem(m_notify); 3845 return; 3846 } 3847 /* append to socket */ 3848 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3849 0, 0, stcb->asoc.context, 0, 0, 0, 3850 m_notify); 3851 if (control == NULL) { 3852 /* no memory */ 3853 sctp_m_freem(m_notify); 3854 return; 3855 } 3856 control->length = SCTP_BUF_LEN(m_notify); 3857 control->spec_flags = M_NOTIFICATION; 3858 /* not that we need this */ 3859 control->tail_mbuf = m_notify; 3860 sctp_add_to_readq(stcb->sctp_ep, stcb, 3861 control, 3862 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3863 } 3864 3865 void 3866 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3867 { 3868 struct mbuf *m_notify; 3869 struct sctp_queued_to_read *control; 3870 struct sctp_assoc_reset_event *strasoc; 3871 3872 if ((stcb == NULL) || 3873 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3874 /* event not enabled */ 3875 return; 3876 } 3877 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3878 if (m_notify == NULL) 3879 /* no space left */ 3880 return; 3881 SCTP_BUF_LEN(m_notify) = 0; 3882 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3883 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3884 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3885 strasoc->assocreset_flags = flag; 3886 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3887 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3888 strasoc->assocreset_local_tsn = sending_tsn; 3889 strasoc->assocreset_remote_tsn = recv_tsn; 3890 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3891 SCTP_BUF_NEXT(m_notify) = NULL; 3892 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3893 /* no space */ 3894 sctp_m_freem(m_notify); 3895 return; 3896 } 3897 /* append to socket */ 3898 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3899 0, 0, stcb->asoc.context, 0, 0, 0, 3900 m_notify); 3901 if (control == NULL) { 3902 /* no memory */ 3903 sctp_m_freem(m_notify); 3904 return; 3905 } 3906 control->length = SCTP_BUF_LEN(m_notify); 3907 control->spec_flags = M_NOTIFICATION; 3908 /* not that we need this */ 3909 control->tail_mbuf = m_notify; 3910 sctp_add_to_readq(stcb->sctp_ep, stcb, 3911 control, 3912 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3913 } 3914 3915 static void 3916 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3917 int number_entries, uint16_t *list, int flag) 3918 { 3919 struct mbuf *m_notify; 3920 struct sctp_queued_to_read 
*control; 3921 struct sctp_stream_reset_event *strreset; 3922 int len; 3923 3924 if ((stcb == NULL) || 3925 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3926 /* event not enabled */ 3927 return; 3928 } 3929 3930 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3931 if (m_notify == NULL) 3932 /* no space left */ 3933 return; 3934 SCTP_BUF_LEN(m_notify) = 0; 3935 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3936 if (len > M_TRAILINGSPACE(m_notify)) { 3937 /* never enough room */ 3938 sctp_m_freem(m_notify); 3939 return; 3940 } 3941 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3942 memset(strreset, 0, len); 3943 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3944 strreset->strreset_flags = flag; 3945 strreset->strreset_length = len; 3946 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3947 if (number_entries) { 3948 int i; 3949 3950 for (i = 0; i < number_entries; i++) { 3951 strreset->strreset_stream_list[i] = ntohs(list[i]); 3952 } 3953 } 3954 SCTP_BUF_LEN(m_notify) = len; 3955 SCTP_BUF_NEXT(m_notify) = NULL; 3956 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3957 /* no space */ 3958 sctp_m_freem(m_notify); 3959 return; 3960 } 3961 /* append to socket */ 3962 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3963 0, 0, stcb->asoc.context, 0, 0, 0, 3964 m_notify); 3965 if (control == NULL) { 3966 /* no memory */ 3967 sctp_m_freem(m_notify); 3968 return; 3969 } 3970 control->length = SCTP_BUF_LEN(m_notify); 3971 control->spec_flags = M_NOTIFICATION; 3972 /* not that we need this */ 3973 control->tail_mbuf = m_notify; 3974 sctp_add_to_readq(stcb->sctp_ep, stcb, 3975 control, 3976 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3977 } 3978 3979 static void 3980 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3981 { 3982 struct mbuf *m_notify; 3983 struct sctp_remote_error *sre; 3984 struct sctp_queued_to_read *control; 3985 unsigned int notif_len; 3986 uint16_t chunk_len; 3987 3988 if ((stcb == NULL) || 3989 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3990 return; 3991 } 3992 if (chunk != NULL) { 3993 chunk_len = ntohs(chunk->ch.chunk_length); 3994 /* 3995 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3996 * contiguous. 3997 */ 3998 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 3999 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4000 } 4001 } else { 4002 chunk_len = 0; 4003 } 4004 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4005 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4006 if (m_notify == NULL) { 4007 /* Retry with smaller value. 
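Drop the copied chunk data and allocate only enough for the notification header itself.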
*/ 4008 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4009 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4010 if (m_notify == NULL) { 4011 return; 4012 } 4013 } 4014 SCTP_BUF_NEXT(m_notify) = NULL; 4015 sre = mtod(m_notify, struct sctp_remote_error *); 4016 memset(sre, 0, notif_len); 4017 sre->sre_type = SCTP_REMOTE_ERROR; 4018 sre->sre_flags = 0; 4019 sre->sre_length = sizeof(struct sctp_remote_error); 4020 sre->sre_error = error; 4021 sre->sre_assoc_id = sctp_get_associd(stcb); 4022 if (notif_len > sizeof(struct sctp_remote_error)) { 4023 memcpy(sre->sre_data, chunk, chunk_len); 4024 sre->sre_length += chunk_len; 4025 } 4026 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4027 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4028 0, 0, stcb->asoc.context, 0, 0, 0, 4029 m_notify); 4030 if (control != NULL) { 4031 control->length = SCTP_BUF_LEN(m_notify); 4032 control->spec_flags = M_NOTIFICATION; 4033 /* not that we need this */ 4034 control->tail_mbuf = m_notify; 4035 sctp_add_to_readq(stcb->sctp_ep, stcb, 4036 control, 4037 &stcb->sctp_socket->so_rcv, 1, 4038 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4039 } else { 4040 sctp_m_freem(m_notify); 4041 } 4042 } 4043 4044 void 4045 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4046 uint32_t error, void *data, int so_locked) 4047 { 4048 if ((stcb == NULL) || 4049 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4050 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4051 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4052 /* If the socket is gone we are out of here */ 4053 return; 4054 } 4055 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4056 return; 4057 } 4058 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4059 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4060 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4061 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4062 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4063 /* Don't report these in front states */ 4064 return; 4065 } 4066 } 4067 switch (notification) { 4068 case SCTP_NOTIFY_ASSOC_UP: 4069 if (stcb->asoc.assoc_up_sent == 0) { 4070 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, false, false, so_locked); 4071 stcb->asoc.assoc_up_sent = 1; 4072 } 4073 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4074 sctp_notify_adaptation_layer(stcb); 4075 } 4076 if (stcb->asoc.auth_supported == 0) { 4077 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4078 NULL, so_locked); 4079 } 4080 break; 4081 case SCTP_NOTIFY_ASSOC_DOWN: 4082 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, false, false, so_locked); 4083 break; 4084 case SCTP_NOTIFY_INTERFACE_DOWN: 4085 { 4086 struct sctp_nets *net; 4087 4088 net = (struct sctp_nets *)data; 4089 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4090 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4091 break; 4092 } 4093 case SCTP_NOTIFY_INTERFACE_UP: 4094 { 4095 struct sctp_nets *net; 4096 4097 net = (struct sctp_nets *)data; 4098 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4099 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4100 break; 4101 } 4102 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4103 { 4104 struct sctp_nets *net; 4105 4106 net = (struct sctp_nets *)data; 4107 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4108 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4109 break; 4110 } 4111 case 
SCTP_NOTIFY_SPECIAL_SP_FAIL: 4112 sctp_notify_send_failed2(stcb, error, 4113 (struct sctp_stream_queue_pending *)data, so_locked); 4114 break; 4115 case SCTP_NOTIFY_SENT_DG_FAIL: 4116 sctp_notify_send_failed(stcb, 1, error, 4117 (struct sctp_tmit_chunk *)data, so_locked); 4118 break; 4119 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4120 sctp_notify_send_failed(stcb, 0, error, 4121 (struct sctp_tmit_chunk *)data, so_locked); 4122 break; 4123 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4124 { 4125 uint32_t val; 4126 4127 val = *((uint32_t *)data); 4128 4129 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4130 break; 4131 } 4132 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4133 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4134 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4135 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, false, so_locked); 4136 } else { 4137 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, false, so_locked); 4138 } 4139 break; 4140 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4141 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4142 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4143 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, true, false, so_locked); 4144 } else { 4145 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, true, false, so_locked); 4146 } 4147 break; 4148 case SCTP_NOTIFY_ASSOC_TIMEDOUT: 4149 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4150 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4151 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, true, so_locked); 4152 } else { 4153 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, true, so_locked); 4154 } 4155 break; 4156 case SCTP_NOTIFY_ASSOC_RESTART: 4157 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, false, false, so_locked); 4158 if (stcb->asoc.auth_supported == 0) { 4159 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4160 NULL, so_locked); 4161 } 4162 break; 4163 case SCTP_NOTIFY_STR_RESET_SEND: 4164 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4165 break; 4166 case SCTP_NOTIFY_STR_RESET_RECV: 4167 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4168 break; 4169 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4170 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4171 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4172 break; 4173 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4174 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4175 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4176 break; 4177 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4178 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4179 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4180 break; 4181 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4182 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4183 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4184 break; 4185 case SCTP_NOTIFY_ASCONF_ADD_IP: 4186 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4187 error, so_locked); 4188 break; 4189 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4190 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4191 error, so_locked); 4192 break; 4193 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4194 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4195 error, so_locked); 4196 break; 4197 case SCTP_NOTIFY_PEER_SHUTDOWN: 4198 
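/* The peer initiated a shutdown; deliver an SCTP_SHUTDOWN_EVENT if the application enabled it (for TCP-model sockets this also marks the socket unable to send). */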
sctp_notify_shutdown_event(stcb); 4199 break; 4200 case SCTP_NOTIFY_AUTH_NEW_KEY: 4201 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4202 (uint16_t)(uintptr_t)data, 4203 so_locked); 4204 break; 4205 case SCTP_NOTIFY_AUTH_FREE_KEY: 4206 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4207 (uint16_t)(uintptr_t)data, 4208 so_locked); 4209 break; 4210 case SCTP_NOTIFY_NO_PEER_AUTH: 4211 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4212 (uint16_t)(uintptr_t)data, 4213 so_locked); 4214 break; 4215 case SCTP_NOTIFY_SENDER_DRY: 4216 sctp_notify_sender_dry_event(stcb, so_locked); 4217 break; 4218 case SCTP_NOTIFY_REMOTE_ERROR: 4219 sctp_notify_remote_error(stcb, error, data); 4220 break; 4221 default: 4222 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4223 __func__, notification, notification); 4224 break; 4225 } /* end switch */ 4226 } 4227 4228 void 4229 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4230 { 4231 struct sctp_association *asoc; 4232 struct sctp_stream_out *outs; 4233 struct sctp_tmit_chunk *chk, *nchk; 4234 struct sctp_stream_queue_pending *sp, *nsp; 4235 int i; 4236 4237 if (stcb == NULL) { 4238 return; 4239 } 4240 asoc = &stcb->asoc; 4241 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4242 /* already being freed */ 4243 return; 4244 } 4245 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4246 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4247 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4248 return; 4249 } 4250 /* now through all the gunk freeing chunks */ 4251 /* sent queue SHOULD be empty */ 4252 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4253 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4254 asoc->sent_queue_cnt--; 4255 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4256 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4257 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4258 #ifdef INVARIANTS 4259 } else { 4260 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4261 #endif 4262 } 4263 } 4264 if (chk->data != NULL) { 4265 sctp_free_bufspace(stcb, asoc, chk, 1); 4266 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4267 error, chk, so_locked); 4268 if (chk->data) { 4269 sctp_m_freem(chk->data); 4270 chk->data = NULL; 4271 } 4272 } 4273 sctp_free_a_chunk(stcb, chk, so_locked); 4274 /* sa_ignore FREED_MEMORY */ 4275 } 4276 /* pending send queue SHOULD be empty */ 4277 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4278 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4279 asoc->send_queue_cnt--; 4280 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4281 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4282 #ifdef INVARIANTS 4283 } else { 4284 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4285 #endif 4286 } 4287 if (chk->data != NULL) { 4288 sctp_free_bufspace(stcb, asoc, chk, 1); 4289 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4290 error, chk, so_locked); 4291 if (chk->data) { 4292 sctp_m_freem(chk->data); 4293 chk->data = NULL; 4294 } 4295 } 4296 sctp_free_a_chunk(stcb, chk, so_locked); 4297 /* sa_ignore FREED_MEMORY */ 4298 } 4299 for (i = 0; i < asoc->streamoutcnt; i++) { 4300 /* For each stream */ 4301 outs = &asoc->strmout[i]; 4302 /* clean up any sends there */ 4303 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4304 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4305 TAILQ_REMOVE(&outs->outqueue, sp, next); 4306 
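/* Let the stream scheduler forget this pending send before its buffer space is released. */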
stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp); 4307 sctp_free_spbufspace(stcb, asoc, sp); 4308 if (sp->data) { 4309 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4310 error, (void *)sp, so_locked); 4311 if (sp->data) { 4312 sctp_m_freem(sp->data); 4313 sp->data = NULL; 4314 sp->tail_mbuf = NULL; 4315 sp->length = 0; 4316 } 4317 } 4318 if (sp->net) { 4319 sctp_free_remote_addr(sp->net); 4320 sp->net = NULL; 4321 } 4322 /* Free the chunk */ 4323 sctp_free_a_strmoq(stcb, sp, so_locked); 4324 /* sa_ignore FREED_MEMORY */ 4325 } 4326 } 4327 } 4328 4329 void 4330 sctp_abort_notification(struct sctp_tcb *stcb, bool from_peer, bool timeout, 4331 uint16_t error, struct sctp_abort_chunk *abort, 4332 int so_locked) 4333 { 4334 if (stcb == NULL) { 4335 return; 4336 } 4337 SCTP_TCB_LOCK_ASSERT(stcb); 4338 4339 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4340 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4341 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4342 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 4343 } 4344 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4345 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4346 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4347 return; 4348 } 4349 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4350 /* Tell them we lost the asoc */ 4351 sctp_report_all_outbound(stcb, error, so_locked); 4352 if (from_peer) { 4353 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4354 } else { 4355 if (timeout) { 4356 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_TIMEDOUT, stcb, error, abort, so_locked); 4357 } else { 4358 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4359 } 4360 } 4361 } 4362 4363 void 4364 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4365 struct mbuf *m, int iphlen, 4366 struct sockaddr *src, struct sockaddr *dst, 4367 struct sctphdr *sh, struct mbuf *op_err, 4368 uint8_t mflowtype, uint32_t mflowid, 4369 uint32_t vrf_id, uint16_t port) 4370 { 4371 struct sctp_gen_error_cause *cause; 4372 uint32_t vtag; 4373 uint16_t cause_code; 4374 4375 if (stcb != NULL) { 4376 vtag = stcb->asoc.peer_vtag; 4377 vrf_id = stcb->asoc.vrf_id; 4378 if (op_err != NULL) { 4379 /* Read the cause code from the error cause. 
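The code is carried in network byte order, hence the ntohs() below.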
*/ 4380 cause = mtod(op_err, struct sctp_gen_error_cause *); 4381 cause_code = ntohs(cause->code); 4382 } else { 4383 cause_code = 0; 4384 } 4385 } else { 4386 vtag = 0; 4387 } 4388 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4389 mflowtype, mflowid, inp->fibnum, 4390 vrf_id, port); 4391 if (stcb != NULL) { 4392 /* We have a TCB to abort, send notification too */ 4393 sctp_abort_notification(stcb, false, false, cause_code, NULL, SCTP_SO_NOT_LOCKED); 4394 /* Ok, now lets free it */ 4395 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4396 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4397 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4398 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4399 } 4400 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4401 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4402 } 4403 } 4404 #ifdef SCTP_ASOCLOG_OF_TSNS 4405 void 4406 sctp_print_out_track_log(struct sctp_tcb *stcb) 4407 { 4408 #ifdef NOSIY_PRINTS 4409 int i; 4410 4411 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4412 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4413 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4414 SCTP_PRINTF("None rcvd\n"); 4415 goto none_in; 4416 } 4417 if (stcb->asoc.tsn_in_wrapped) { 4418 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4419 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4420 stcb->asoc.in_tsnlog[i].tsn, 4421 stcb->asoc.in_tsnlog[i].strm, 4422 stcb->asoc.in_tsnlog[i].seq, 4423 stcb->asoc.in_tsnlog[i].flgs, 4424 stcb->asoc.in_tsnlog[i].sz); 4425 } 4426 } 4427 if (stcb->asoc.tsn_in_at) { 4428 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4429 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4430 stcb->asoc.in_tsnlog[i].tsn, 4431 stcb->asoc.in_tsnlog[i].strm, 4432 stcb->asoc.in_tsnlog[i].seq, 4433 stcb->asoc.in_tsnlog[i].flgs, 4434 stcb->asoc.in_tsnlog[i].sz); 4435 } 4436 } 4437 none_in: 4438 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4439 if ((stcb->asoc.tsn_out_at == 0) && 4440 (stcb->asoc.tsn_out_wrapped == 0)) { 4441 SCTP_PRINTF("None sent\n"); 4442 } 4443 if (stcb->asoc.tsn_out_wrapped) { 4444 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4445 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4446 stcb->asoc.out_tsnlog[i].tsn, 4447 stcb->asoc.out_tsnlog[i].strm, 4448 stcb->asoc.out_tsnlog[i].seq, 4449 stcb->asoc.out_tsnlog[i].flgs, 4450 stcb->asoc.out_tsnlog[i].sz); 4451 } 4452 } 4453 if (stcb->asoc.tsn_out_at) { 4454 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4455 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4456 stcb->asoc.out_tsnlog[i].tsn, 4457 stcb->asoc.out_tsnlog[i].strm, 4458 stcb->asoc.out_tsnlog[i].seq, 4459 stcb->asoc.out_tsnlog[i].flgs, 4460 stcb->asoc.out_tsnlog[i].sz); 4461 } 4462 } 4463 #endif 4464 } 4465 #endif 4466 4467 void 4468 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4469 struct mbuf *op_err, bool timedout, int so_locked) 4470 { 4471 struct sctp_gen_error_cause *cause; 4472 uint16_t cause_code; 4473 4474 if (stcb == NULL) { 4475 /* Got to have a TCB */ 4476 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4477 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4478 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4479 SCTP_CALLED_DIRECTLY_NOCMPSET); 4480 } 4481 } 4482 return; 4483 } 4484 if (op_err != NULL) { 4485 /* Read the cause code from the error cause. 
*/ 4486 cause = mtod(op_err, struct sctp_gen_error_cause *); 4487 cause_code = ntohs(cause->code); 4488 } else { 4489 cause_code = 0; 4490 } 4491 /* notify the peer */ 4492 sctp_send_abort_tcb(stcb, op_err, so_locked); 4493 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4494 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4495 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4496 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4497 } 4498 /* notify the ulp */ 4499 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4500 sctp_abort_notification(stcb, false, timedout, cause_code, NULL, so_locked); 4501 } 4502 /* now free the asoc */ 4503 #ifdef SCTP_ASOCLOG_OF_TSNS 4504 sctp_print_out_track_log(stcb); 4505 #endif 4506 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4507 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4508 } 4509 4510 void 4511 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4512 struct sockaddr *src, struct sockaddr *dst, 4513 struct sctphdr *sh, struct sctp_inpcb *inp, 4514 struct mbuf *cause, 4515 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4516 uint32_t vrf_id, uint16_t port) 4517 { 4518 struct sctp_chunkhdr *ch, chunk_buf; 4519 unsigned int chk_length; 4520 int contains_init_chunk; 4521 4522 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4523 /* Generate a TO address for future reference */ 4524 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4525 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4526 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4527 SCTP_CALLED_DIRECTLY_NOCMPSET); 4528 } 4529 } 4530 contains_init_chunk = 0; 4531 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4532 sizeof(*ch), (uint8_t *)&chunk_buf); 4533 while (ch != NULL) { 4534 chk_length = ntohs(ch->chunk_length); 4535 if (chk_length < sizeof(*ch)) { 4536 /* break to abort land */ 4537 break; 4538 } 4539 switch (ch->chunk_type) { 4540 case SCTP_INIT: 4541 contains_init_chunk = 1; 4542 break; 4543 case SCTP_PACKET_DROPPED: 4544 /* we don't respond to pkt-dropped */ 4545 return; 4546 case SCTP_ABORT_ASSOCIATION: 4547 /* we don't respond with an ABORT to an ABORT */ 4548 return; 4549 case SCTP_SHUTDOWN_COMPLETE: 4550 /* 4551 * we ignore it since we are not waiting for it and 4552 * peer is gone 4553 */ 4554 return; 4555 case SCTP_SHUTDOWN_ACK: 4556 sctp_send_shutdown_complete2(src, dst, sh, 4557 mflowtype, mflowid, fibnum, 4558 vrf_id, port); 4559 return; 4560 default: 4561 break; 4562 } 4563 offset += SCTP_SIZE32(chk_length); 4564 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4565 sizeof(*ch), (uint8_t *)&chunk_buf); 4566 } 4567 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4568 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4569 (contains_init_chunk == 0))) { 4570 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4571 mflowtype, mflowid, fibnum, 4572 vrf_id, port); 4573 } 4574 } 4575 4576 /* 4577 * check the inbound datagram to make sure there is not an abort inside it, 4578 * if there is return 1, else return 0. 
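* As a side effect, *vtag is updated from any INIT or INIT-ACK chunk found, so the caller can use the peer's initiate tag when replying.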
4579 */ 4580 int 4581 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtag) 4582 { 4583 struct sctp_chunkhdr *ch; 4584 struct sctp_init_chunk *init_chk, chunk_buf; 4585 int offset; 4586 unsigned int chk_length; 4587 4588 offset = iphlen + sizeof(struct sctphdr); 4589 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4590 (uint8_t *)&chunk_buf); 4591 while (ch != NULL) { 4592 chk_length = ntohs(ch->chunk_length); 4593 if (chk_length < sizeof(*ch)) { 4594 /* packet is probably corrupt */ 4595 break; 4596 } 4597 /* we seem to be ok, is it an abort? */ 4598 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4599 /* yep, tell them */ 4600 return (1); 4601 } 4602 if ((ch->chunk_type == SCTP_INITIATION) || 4603 (ch->chunk_type == SCTP_INITIATION_ACK)) { 4604 /* need to update the Vtag */ 4605 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4606 offset, sizeof(struct sctp_init_chunk), (uint8_t *)&chunk_buf); 4607 if (init_chk != NULL) { 4608 *vtag = ntohl(init_chk->init.initiate_tag); 4609 } 4610 } 4611 /* Nope, move to the next chunk */ 4612 offset += SCTP_SIZE32(chk_length); 4613 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4614 sizeof(*ch), (uint8_t *)&chunk_buf); 4615 } 4616 return (0); 4617 } 4618 4619 /* 4620 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4621 * set (i.e. it's 0) so, create this function to compare link local scopes 4622 */ 4623 #ifdef INET6 4624 uint32_t 4625 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4626 { 4627 struct sockaddr_in6 a, b; 4628 4629 /* save copies */ 4630 a = *addr1; 4631 b = *addr2; 4632 4633 if (a.sin6_scope_id == 0) 4634 if (sa6_recoverscope(&a)) { 4635 /* can't get scope, so can't match */ 4636 return (0); 4637 } 4638 if (b.sin6_scope_id == 0) 4639 if (sa6_recoverscope(&b)) { 4640 /* can't get scope, so can't match */ 4641 return (0); 4642 } 4643 if (a.sin6_scope_id != b.sin6_scope_id) 4644 return (0); 4645 4646 return (1); 4647 } 4648 4649 /* 4650 * returns a sockaddr_in6 with embedded scope recovered and removed 4651 */ 4652 struct sockaddr_in6 * 4653 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4654 { 4655 /* check and strip embedded scope junk */ 4656 if (addr->sin6_family == AF_INET6) { 4657 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4658 if (addr->sin6_scope_id == 0) { 4659 *store = *addr; 4660 if (!sa6_recoverscope(store)) { 4661 /* use the recovered scope */ 4662 addr = store; 4663 } 4664 } else { 4665 /* else, return the original "to" addr */ 4666 in6_clearscope(&addr->sin6_addr); 4667 } 4668 } 4669 } 4670 return (addr); 4671 } 4672 #endif 4673 4674 /* 4675 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4676 * if same, 0 if not 4677 */ 4678 int 4679 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4680 { 4681 4682 /* must be valid */ 4683 if (sa1 == NULL || sa2 == NULL) 4684 return (0); 4685 4686 /* must be the same family */ 4687 if (sa1->sa_family != sa2->sa_family) 4688 return (0); 4689 4690 switch (sa1->sa_family) { 4691 #ifdef INET6 4692 case AF_INET6: 4693 { 4694 /* IPv6 addresses */ 4695 struct sockaddr_in6 *sin6_1, *sin6_2; 4696 4697 sin6_1 = (struct sockaddr_in6 *)sa1; 4698 sin6_2 = (struct sockaddr_in6 *)sa2; 4699 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4700 sin6_2)); 4701 } 4702 #endif 4703 #ifdef INET 4704 case AF_INET: 4705 { 4706 /* IPv4 addresses */ 4707 struct sockaddr_in *sin_1, *sin_2; 4708 4709 sin_1 = (struct sockaddr_in *)sa1; 4710 sin_2 = (struct sockaddr_in *)sa2; 4711 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4712 } 4713 #endif 4714 default: 4715 /* we don't do these... */ 4716 return (0); 4717 } 4718 } 4719 4720 void 4721 sctp_print_address(struct sockaddr *sa) 4722 { 4723 #ifdef INET6 4724 char ip6buf[INET6_ADDRSTRLEN]; 4725 #endif 4726 4727 switch (sa->sa_family) { 4728 #ifdef INET6 4729 case AF_INET6: 4730 { 4731 struct sockaddr_in6 *sin6; 4732 4733 sin6 = (struct sockaddr_in6 *)sa; 4734 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4735 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4736 ntohs(sin6->sin6_port), 4737 sin6->sin6_scope_id); 4738 break; 4739 } 4740 #endif 4741 #ifdef INET 4742 case AF_INET: 4743 { 4744 struct sockaddr_in *sin; 4745 unsigned char *p; 4746 4747 sin = (struct sockaddr_in *)sa; 4748 p = (unsigned char *)&sin->sin_addr; 4749 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4750 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4751 break; 4752 } 4753 #endif 4754 default: 4755 SCTP_PRINTF("?\n"); 4756 break; 4757 } 4758 } 4759 4760 void 4761 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4762 struct sctp_inpcb *new_inp, 4763 struct sctp_tcb *stcb, 4764 int waitflags) 4765 { 4766 /* 4767 * go through our old INP and pull off any control structures that 4768 * belong to stcb and move them to the new inp. 4769 */ 4770 struct socket *old_so, *new_so; 4771 struct sctp_queued_to_read *control, *nctl; 4772 struct sctp_readhead tmp_queue; 4773 struct mbuf *m; 4774 int error = 0; 4775 4776 old_so = old_inp->sctp_socket; 4777 new_so = new_inp->sctp_socket; 4778 TAILQ_INIT(&tmp_queue); 4779 error = SOCK_IO_RECV_LOCK(old_so, waitflags); 4780 if (error) { 4781 /* 4782 * Gak, can't get I/O lock, we have a problem. data will be 4783 * left stranded.. and we don't dare look at it since the 4784 * other thread may be reading something. Oh well, it's a 4785 * screwed up app that does a peeloff OR an accept while 4786 * reading from the main socket... actually it's only the 4787 * peeloff() case, since I think read will fail on a 4788 * listening socket.. 4789 */ 4790 return; 4791 } 4792 /* lock the socket buffers */ 4793 SCTP_INP_READ_LOCK(old_inp); 4794 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4795 /* Pull off all for our target stcb */ 4796 if (control->stcb == stcb) { 4797 /* remove it, we want it */ 4798 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4799 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4800 m = control->data; 4801 while (m) { 4802 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4803 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ?
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4804 } 4805 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4806 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4807 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4808 } 4809 m = SCTP_BUF_NEXT(m); 4810 } 4811 } 4812 } 4813 SCTP_INP_READ_UNLOCK(old_inp); 4814 /* Remove the recv-lock on the old socket */ 4815 SOCK_IO_RECV_UNLOCK(old_so); 4816 /* Now we move them over to the new socket buffer */ 4817 SCTP_INP_READ_LOCK(new_inp); 4818 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4819 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4820 m = control->data; 4821 while (m) { 4822 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4823 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4824 } 4825 sctp_sballoc(stcb, &new_so->so_rcv, m); 4826 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4827 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4828 } 4829 m = SCTP_BUF_NEXT(m); 4830 } 4831 } 4832 SCTP_INP_READ_UNLOCK(new_inp); 4833 } 4834 4835 void 4836 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4837 struct sctp_tcb *stcb, 4838 int so_locked 4839 SCTP_UNUSED 4840 ) 4841 { 4842 if ((inp != NULL) && 4843 (inp->sctp_socket != NULL) && 4844 (((inp->sctp_flags & (SCTP_PCB_FLAGS_TCPTYPE | SCTP_PCB_FLAGS_IN_TCPPOOL)) == 0) || 4845 !SCTP_IS_LISTENING(inp))) { 4846 sctp_sorwakeup(inp, inp->sctp_socket); 4847 } 4848 } 4849 4850 void 4851 sctp_add_to_readq(struct sctp_inpcb *inp, 4852 struct sctp_tcb *stcb, 4853 struct sctp_queued_to_read *control, 4854 struct sockbuf *sb, 4855 int end, 4856 int inp_read_lock_held, 4857 int so_locked) 4858 { 4859 /* 4860 * Here we must place the control on the end of the socket read 4861 * queue AND increment sb_cc so that select will work properly on 4862 * read. 4863 */ 4864 struct mbuf *m, *prev = NULL; 4865 4866 if (inp == NULL) { 4867 /* Gak, TSNH!! */ 4868 #ifdef INVARIANTS 4869 panic("Gak, inp NULL on add_to_readq"); 4870 #endif 4871 return; 4872 } 4873 if (inp_read_lock_held == 0) 4874 SCTP_INP_READ_LOCK(inp); 4875 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4876 if (!control->on_strm_q) { 4877 sctp_free_remote_addr(control->whoFrom); 4878 if (control->data) { 4879 sctp_m_freem(control->data); 4880 control->data = NULL; 4881 } 4882 sctp_free_a_readq(stcb, control); 4883 } 4884 if (inp_read_lock_held == 0) 4885 SCTP_INP_READ_UNLOCK(inp); 4886 return; 4887 } 4888 if (!(control->spec_flags & M_NOTIFICATION)) { 4889 atomic_add_int(&inp->total_recvs, 1); 4890 if (!control->do_not_ref_stcb) { 4891 atomic_add_int(&stcb->total_recvs, 1); 4892 } 4893 } 4894 m = control->data; 4895 control->held_length = 0; 4896 control->length = 0; 4897 while (m) { 4898 if (SCTP_BUF_LEN(m) == 0) { 4899 /* Skip mbufs with NO length */ 4900 if (prev == NULL) { 4901 /* First one */ 4902 control->data = sctp_m_free(m); 4903 m = control->data; 4904 } else { 4905 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4906 m = SCTP_BUF_NEXT(prev); 4907 } 4908 if (m == NULL) { 4909 control->tail_mbuf = prev; 4910 } 4911 continue; 4912 } 4913 prev = m; 4914 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4915 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4916 } 4917 sctp_sballoc(stcb, sb, m); 4918 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4919 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4920 } 4921 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4922 m = SCTP_BUF_NEXT(m); 4923 } 4924 if (prev != NULL) { 4925 control->tail_mbuf = prev; 4926 } else { 4927 /* Everything got collapsed out?? */ 4928 if (!control->on_strm_q) { 4929 sctp_free_remote_addr(control->whoFrom); 4930 sctp_free_a_readq(stcb, control); 4931 } 4932 if (inp_read_lock_held == 0) 4933 SCTP_INP_READ_UNLOCK(inp); 4934 return; 4935 } 4936 if (end) { 4937 control->end_added = 1; 4938 } 4939 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4940 control->on_read_q = 1; 4941 if (inp_read_lock_held == 0) 4942 SCTP_INP_READ_UNLOCK(inp); 4943 if (inp && inp->sctp_socket) { 4944 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4945 } 4946 } 4947 4948 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4949 *************ALTERNATE ROUTING CODE 4950 */ 4951 4952 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4953 *************ALTERNATE ROUTING CODE 4954 */ 4955 4956 struct mbuf * 4957 sctp_generate_cause(uint16_t code, char *info) 4958 { 4959 struct mbuf *m; 4960 struct sctp_gen_error_cause *cause; 4961 size_t info_len; 4962 uint16_t len; 4963 4964 if ((code == 0) || (info == NULL)) { 4965 return (NULL); 4966 } 4967 info_len = strlen(info); 4968 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4969 return (NULL); 4970 } 4971 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4972 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4973 if (m != NULL) { 4974 SCTP_BUF_LEN(m) = len; 4975 cause = mtod(m, struct sctp_gen_error_cause *); 4976 cause->code = htons(code); 4977 cause->length = htons(len); 4978 memcpy(cause->info, info, info_len); 4979 } 4980 return (m); 4981 } 4982 4983 struct mbuf * 4984 sctp_generate_no_user_data_cause(uint32_t tsn) 4985 { 4986 struct mbuf *m; 4987 struct sctp_error_no_user_data *no_user_data_cause; 4988 uint16_t len; 4989 4990 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 4991 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4992 if (m != NULL) { 4993 SCTP_BUF_LEN(m) = len; 4994 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 4995 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 4996 no_user_data_cause->cause.length = htons(len); 4997 no_user_data_cause->tsn = htonl(tsn); 4998 } 4999 return (m); 5000 } 5001 5002 #ifdef SCTP_MBCNT_LOGGING 5003 void 5004 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 5005 struct sctp_tmit_chunk *tp1, int chk_cnt) 5006 { 5007 if (tp1->data == NULL) { 5008 return; 5009 } 5010 asoc->chunks_on_out_queue -= chk_cnt; 5011 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 5012 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 5013 asoc->total_output_queue_size, 5014 tp1->book_size, 5015 0, 5016 tp1->mbcnt); 5017 } 5018 if (asoc->total_output_queue_size >= tp1->book_size) { 5019 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 5020 } else { 5021 asoc->total_output_queue_size = 0; 5022 } 5023 5024 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 5025 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 5026 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 5027 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 5028 } else { 
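/* book_size is larger than what the send buffer still accounts for; clamp to zero instead of underflowing. */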
5029 stcb->sctp_socket->so_snd.sb_cc = 0; 5030 } 5031 } 5032 } 5033 5034 #endif 5035 5036 int 5037 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5038 uint8_t sent, int so_locked) 5039 { 5040 struct sctp_stream_out *strq; 5041 struct sctp_tmit_chunk *chk = NULL, *tp2; 5042 struct sctp_stream_queue_pending *sp; 5043 uint32_t mid; 5044 uint16_t sid; 5045 uint8_t foundeom = 0; 5046 int ret_sz = 0; 5047 int notdone; 5048 int do_wakeup_routine = 0; 5049 5050 SCTP_TCB_LOCK_ASSERT(stcb); 5051 5052 sid = tp1->rec.data.sid; 5053 mid = tp1->rec.data.mid; 5054 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5055 stcb->asoc.abandoned_sent[0]++; 5056 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5057 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5058 #if defined(SCTP_DETAILED_STR_STATS) 5059 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5060 #endif 5061 } else { 5062 stcb->asoc.abandoned_unsent[0]++; 5063 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5064 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5065 #if defined(SCTP_DETAILED_STR_STATS) 5066 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5067 #endif 5068 } 5069 do { 5070 ret_sz += tp1->book_size; 5071 if (tp1->data != NULL) { 5072 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5073 sctp_flight_size_decrease(tp1); 5074 sctp_total_flight_decrease(stcb, tp1); 5075 } 5076 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5077 stcb->asoc.peers_rwnd += tp1->send_size; 5078 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5079 if (sent) { 5080 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5081 } else { 5082 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5083 } 5084 if (tp1->data) { 5085 sctp_m_freem(tp1->data); 5086 tp1->data = NULL; 5087 } 5088 do_wakeup_routine = 1; 5089 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5090 stcb->asoc.sent_queue_cnt_removeable--; 5091 } 5092 } 5093 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5094 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5095 SCTP_DATA_NOT_FRAG) { 5096 /* not frag'ed, we are done */ 5097 notdone = 0; 5098 foundeom = 1; 5099 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5100 /* end of frag, we are done */ 5101 notdone = 0; 5102 foundeom = 1; 5103 } else { 5104 /* 5105 * It's a begin or middle piece, we must mark all of 5106 * it 5107 */ 5108 notdone = 1; 5109 tp1 = TAILQ_NEXT(tp1, sctp_next); 5110 } 5111 } while (tp1 && notdone); 5112 if (foundeom == 0) { 5113 /* 5114 * The multi-part message was scattered across the send and 5115 * sent queue. 5116 */ 5117 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5118 if ((tp1->rec.data.sid != sid) || 5119 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5120 break; 5121 } 5122 /* 5123 * save to chk in case we have some on stream out 5124 * queue. If so and we have an un-transmitted one we 5125 * don't have to fudge the TSN.
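* (That is, such a chunk can be reused below to carry the LAST_FRAG marker instead of allocating a new one.)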
5126 */ 5127 chk = tp1; 5128 ret_sz += tp1->book_size; 5129 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5130 if (sent) { 5131 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5132 } else { 5133 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5134 } 5135 if (tp1->data) { 5136 sctp_m_freem(tp1->data); 5137 tp1->data = NULL; 5138 } 5139 /* No flight involved here book the size to 0 */ 5140 tp1->book_size = 0; 5141 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5142 foundeom = 1; 5143 } 5144 do_wakeup_routine = 1; 5145 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5146 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5147 /* 5148 * on to the sent queue so we can wait for it to be 5149 * passed by. 5150 */ 5151 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5152 sctp_next); 5153 stcb->asoc.send_queue_cnt--; 5154 stcb->asoc.sent_queue_cnt++; 5155 } 5156 } 5157 if (foundeom == 0) { 5158 /* 5159 * Still no eom found. That means there is stuff left on the 5160 * stream out queue.. yuck. 5161 */ 5162 strq = &stcb->asoc.strmout[sid]; 5163 sp = TAILQ_FIRST(&strq->outqueue); 5164 if (sp != NULL) { 5165 sp->discard_rest = 1; 5166 /* 5167 * We may need to put a chunk on the queue that 5168 * holds the TSN that would have been sent with the 5169 * LAST bit. 5170 */ 5171 if (chk == NULL) { 5172 /* Yep, we have to */ 5173 sctp_alloc_a_chunk(stcb, chk); 5174 if (chk == NULL) { 5175 /* 5176 * we are hosed. All we can do is 5177 * nothing.. which will cause an 5178 * abort if the peer is paying 5179 * attention. 5180 */ 5181 goto oh_well; 5182 } 5183 memset(chk, 0, sizeof(*chk)); 5184 chk->rec.data.rcv_flags = 0; 5185 chk->sent = SCTP_FORWARD_TSN_SKIP; 5186 chk->asoc = &stcb->asoc; 5187 if (stcb->asoc.idata_supported == 0) { 5188 if (sp->sinfo_flags & SCTP_UNORDERED) { 5189 chk->rec.data.mid = 0; 5190 } else { 5191 chk->rec.data.mid = strq->next_mid_ordered; 5192 } 5193 } else { 5194 if (sp->sinfo_flags & SCTP_UNORDERED) { 5195 chk->rec.data.mid = strq->next_mid_unordered; 5196 } else { 5197 chk->rec.data.mid = strq->next_mid_ordered; 5198 } 5199 } 5200 chk->rec.data.sid = sp->sid; 5201 chk->rec.data.ppid = sp->ppid; 5202 chk->rec.data.context = sp->context; 5203 chk->flags = sp->act_flags; 5204 chk->whoTo = NULL; 5205 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5206 strq->chunks_on_queues++; 5207 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5208 stcb->asoc.sent_queue_cnt++; 5209 stcb->asoc.pr_sctp_cnt++; 5210 } 5211 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5212 if (sp->sinfo_flags & SCTP_UNORDERED) { 5213 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5214 } 5215 if (stcb->asoc.idata_supported == 0) { 5216 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5217 strq->next_mid_ordered++; 5218 } 5219 } else { 5220 if (sp->sinfo_flags & SCTP_UNORDERED) { 5221 strq->next_mid_unordered++; 5222 } else { 5223 strq->next_mid_ordered++; 5224 } 5225 } 5226 oh_well: 5227 if (sp->data) { 5228 /* 5229 * Pull any data to free up the SB and allow 5230 * sender to "add more" while we will throw 5231 * away :-) 5232 */ 5233 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5234 ret_sz += sp->length; 5235 do_wakeup_routine = 1; 5236 sp->some_taken = 1; 5237 sctp_m_freem(sp->data); 5238 sp->data = NULL; 5239 sp->tail_mbuf = NULL; 5240 sp->length = 0; 5241 } 5242 } 5243 } 5244 if (do_wakeup_routine) { 5245 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5246 } 5247 return (ret_sz); 5248 } 5249 5250 /* 5251 * checks to see if the given address, 
sa, is one that is currently known by 5252 * the kernel note: can't distinguish the same address on multiple interfaces 5253 * and doesn't handle multiple addresses with different zone/scope id's note: 5254 * ifa_ifwithaddr() compares the entire sockaddr struct 5255 */ 5256 struct sctp_ifa * 5257 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5258 int holds_lock) 5259 { 5260 struct sctp_laddr *laddr; 5261 5262 if (holds_lock == 0) { 5263 SCTP_INP_RLOCK(inp); 5264 } 5265 5266 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5267 if (laddr->ifa == NULL) 5268 continue; 5269 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5270 continue; 5271 #ifdef INET 5272 if (addr->sa_family == AF_INET) { 5273 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5274 laddr->ifa->address.sin.sin_addr.s_addr) { 5275 /* found him. */ 5276 break; 5277 } 5278 } 5279 #endif 5280 #ifdef INET6 5281 if (addr->sa_family == AF_INET6) { 5282 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5283 &laddr->ifa->address.sin6)) { 5284 /* found him. */ 5285 break; 5286 } 5287 } 5288 #endif 5289 } 5290 if (holds_lock == 0) { 5291 SCTP_INP_RUNLOCK(inp); 5292 } 5293 if (laddr != NULL) { 5294 return (laddr->ifa); 5295 } else { 5296 return (NULL); 5297 } 5298 } 5299 5300 uint32_t 5301 sctp_get_ifa_hash_val(struct sockaddr *addr) 5302 { 5303 switch (addr->sa_family) { 5304 #ifdef INET 5305 case AF_INET: 5306 { 5307 struct sockaddr_in *sin; 5308 5309 sin = (struct sockaddr_in *)addr; 5310 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5311 } 5312 #endif 5313 #ifdef INET6 5314 case AF_INET6: 5315 { 5316 struct sockaddr_in6 *sin6; 5317 uint32_t hash_of_addr; 5318 5319 sin6 = (struct sockaddr_in6 *)addr; 5320 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5321 sin6->sin6_addr.s6_addr32[1] + 5322 sin6->sin6_addr.s6_addr32[2] + 5323 sin6->sin6_addr.s6_addr32[3]); 5324 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5325 return (hash_of_addr); 5326 } 5327 #endif 5328 default: 5329 break; 5330 } 5331 return (0); 5332 } 5333 5334 struct sctp_ifa * 5335 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5336 { 5337 struct sctp_ifa *sctp_ifap; 5338 struct sctp_vrf *vrf; 5339 struct sctp_ifalist *hash_head; 5340 uint32_t hash_of_addr; 5341 5342 if (holds_lock == 0) { 5343 SCTP_IPI_ADDR_RLOCK(); 5344 } else { 5345 SCTP_IPI_ADDR_LOCK_ASSERT(); 5346 } 5347 5348 vrf = sctp_find_vrf(vrf_id); 5349 if (vrf == NULL) { 5350 if (holds_lock == 0) 5351 SCTP_IPI_ADDR_RUNLOCK(); 5352 return (NULL); 5353 } 5354 5355 hash_of_addr = sctp_get_ifa_hash_val(addr); 5356 5357 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5358 if (hash_head == NULL) { 5359 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5360 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5361 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5362 sctp_print_address(addr); 5363 SCTP_PRINTF("No such bucket for address\n"); 5364 if (holds_lock == 0) 5365 SCTP_IPI_ADDR_RUNLOCK(); 5366 5367 return (NULL); 5368 } 5369 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5370 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5371 continue; 5372 #ifdef INET 5373 if (addr->sa_family == AF_INET) { 5374 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5375 sctp_ifap->address.sin.sin_addr.s_addr) { 5376 /* found him. 
*/ 5377 break; 5378 } 5379 } 5380 #endif 5381 #ifdef INET6 5382 if (addr->sa_family == AF_INET6) { 5383 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5384 &sctp_ifap->address.sin6)) { 5385 /* found him. */ 5386 break; 5387 } 5388 } 5389 #endif 5390 } 5391 if (holds_lock == 0) 5392 SCTP_IPI_ADDR_RUNLOCK(); 5393 return (sctp_ifap); 5394 } 5395 5396 static void 5397 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5398 uint32_t rwnd_req) 5399 { 5400 /* User pulled some data, do we need a rwnd update? */ 5401 struct epoch_tracker et; 5402 int r_unlocked = 0; 5403 uint32_t dif, rwnd; 5404 struct socket *so = NULL; 5405 5406 if (stcb == NULL) 5407 return; 5408 5409 atomic_add_int(&stcb->asoc.refcnt, 1); 5410 5411 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5412 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5413 /* Pre-check: if we are freeing, no update is needed */ 5414 goto no_lock; 5415 } 5416 SCTP_INP_INCR_REF(stcb->sctp_ep); 5417 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5418 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5419 goto out; 5420 } 5421 so = stcb->sctp_socket; 5422 if (so == NULL) { 5423 goto out; 5424 } 5425 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5426 /* Have we freed enough to warrant a look? */ 5427 *freed_so_far = 0; 5428 /* Yep, it's worth a look and the lock overhead */ 5429 5430 /* Figure out what the rwnd would be */ 5431 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5432 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5433 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5434 } else { 5435 dif = 0; 5436 } 5437 if (dif >= rwnd_req) { 5438 if (hold_rlock) { 5439 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5440 r_unlocked = 1; 5441 } 5442 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5443 /* 5444 * One last check before we allow the guy possibly 5445 * to get in. There is a race, where the guy has not 5446 * reached the gate. In that case, just bail out. 5447 */ 5448 goto out; 5449 } 5450 SCTP_TCB_LOCK(stcb); 5451 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5452 /* No reports here */ 5453 SCTP_TCB_UNLOCK(stcb); 5454 goto out; 5455 } 5456 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5457 NET_EPOCH_ENTER(et); 5458 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5459 5460 sctp_chunk_output(stcb->sctp_ep, stcb, 5461 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5462 /* make sure no timer is running */ 5463 NET_EPOCH_EXIT(et); 5464 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5465 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5466 SCTP_TCB_UNLOCK(stcb); 5467 } else { 5468 /* Update how much we have pending */ 5469 stcb->freed_by_sorcv_sincelast = dif; 5470 } 5471 out: 5472 if (so && r_unlocked && hold_rlock) { 5473 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5474 } 5475 5476 SCTP_INP_DECR_REF(stcb->sctp_ep); 5477 no_lock: 5478 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5479 return; 5480 } 5481 5482 int 5483 sctp_sorecvmsg(struct socket *so, 5484 struct uio *uio, 5485 struct mbuf **mp, 5486 struct sockaddr *from, 5487 int fromlen, 5488 int *msg_flags, 5489 struct sctp_sndrcvinfo *sinfo, 5490 int filling_sinfo) 5491 { 5492 /* 5493 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO. 5494 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5495 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5496 * On the way out we may send out any combination of: 5497 * MSG_NOTIFICATION MSG_EOR 5498 * 5499 */ 5500 struct sctp_inpcb *inp = NULL; 5501 ssize_t my_len = 0; 5502 ssize_t cp_len = 0; 5503 int error = 0; 5504 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5505 struct mbuf *m = NULL; 5506 struct sctp_tcb *stcb = NULL; 5507 int wakeup_read_socket = 0; 5508 int freecnt_applied = 0; 5509 int out_flags = 0, in_flags = 0; 5510 int block_allowed = 1; 5511 uint32_t freed_so_far = 0; 5512 ssize_t copied_so_far = 0; 5513 int in_eeor_mode = 0; 5514 int no_rcv_needed = 0; 5515 uint32_t rwnd_req = 0; 5516 int hold_sblock = 0; 5517 int hold_rlock = 0; 5518 ssize_t slen = 0; 5519 uint32_t held_length = 0; 5520 int sockbuf_lock = 0; 5521 5522 if (uio == NULL) { 5523 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5524 return (EINVAL); 5525 } 5526 5527 if (msg_flags) { 5528 in_flags = *msg_flags; 5529 if (in_flags & MSG_PEEK) 5530 SCTP_STAT_INCR(sctps_read_peeks); 5531 } else { 5532 in_flags = 0; 5533 } 5534 slen = uio->uio_resid; 5535 5536 /* Pull in and set up our int flags */ 5537 if (in_flags & MSG_OOB) { 5538 /* Out of band's NOT supported */ 5539 return (EOPNOTSUPP); 5540 } 5541 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5542 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5543 return (EINVAL); 5544 } 5545 if ((in_flags & (MSG_DONTWAIT 5546 | MSG_NBIO 5547 )) || 5548 SCTP_SO_IS_NBIO(so)) { 5549 block_allowed = 0; 5550 } 5551 /* setup the endpoint */ 5552 inp = (struct sctp_inpcb *)so->so_pcb; 5553 if (inp == NULL) { 5554 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5555 return (EFAULT); 5556 } 5557 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5558 /* Must be at least a MTU's worth */ 5559 if (rwnd_req < SCTP_MIN_RWND) 5560 rwnd_req = SCTP_MIN_RWND; 5561 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5562 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5563 sctp_misc_ints(SCTP_SORECV_ENTER, 5564 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5565 } 5566 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5567 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5568 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5569 } 5570 5571 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(in_flags)); 5572 if (error) { 5573 goto release_unlocked; 5574 } 5575 sockbuf_lock = 1; 5576 restart: 5577 5578 restart_nosblocks: 5579 if (hold_sblock == 0) { 5580 SOCKBUF_LOCK(&so->so_rcv); 5581 hold_sblock = 1; 5582 } 5583 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5584 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5585 goto out; 5586 } 5587 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5588 if (so->so_error) { 5589 error = so->so_error; 5590 if ((in_flags & MSG_PEEK) == 0) 5591 so->so_error = 0; 5592 goto out; 5593 } else { 5594 if (so->so_rcv.sb_cc == 0) { 5595 /* indicate EOF */ 5596 error = 0; 5597 goto out; 5598 } 5599 } 5600 } 5601 if (so->so_rcv.sb_cc <= held_length) { 5602 if (so->so_error) { 5603 error = so->so_error; 5604 if ((in_flags & MSG_PEEK) == 0) { 5605 so->so_error = 0; 5606 } 5607 goto out; 5608 } 5609 if ((so->so_rcv.sb_cc == 0) && 5610 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5611 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5612 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5613 /* 5614 * For active open side clear flags for 5615 * re-use 
passive open is blocked by 5616 * connect. 5617 */ 5618 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5619 /* 5620 * You were aborted, passive side 5621 * always hits here 5622 */ 5623 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5624 error = ECONNRESET; 5625 } 5626 so->so_state &= ~(SS_ISCONNECTING | 5627 SS_ISDISCONNECTING | 5628 SS_ISCONFIRMING | 5629 SS_ISCONNECTED); 5630 if (error == 0) { 5631 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5632 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5633 error = ENOTCONN; 5634 } 5635 } 5636 goto out; 5637 } 5638 } 5639 if (block_allowed) { 5640 error = sbwait(&so->so_rcv); 5641 if (error) { 5642 goto out; 5643 } 5644 held_length = 0; 5645 goto restart_nosblocks; 5646 } else { 5647 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5648 error = EWOULDBLOCK; 5649 goto out; 5650 } 5651 } 5652 if (hold_sblock == 1) { 5653 SOCKBUF_UNLOCK(&so->so_rcv); 5654 hold_sblock = 0; 5655 } 5656 /* we possibly have data we can read */ 5657 /* sa_ignore FREED_MEMORY */ 5658 control = TAILQ_FIRST(&inp->read_queue); 5659 if (control == NULL) { 5660 /* 5661 * This could be happening since the appender did the 5662 * increment but as not yet did the tailq insert onto the 5663 * read_queue 5664 */ 5665 if (hold_rlock == 0) { 5666 SCTP_INP_READ_LOCK(inp); 5667 } 5668 control = TAILQ_FIRST(&inp->read_queue); 5669 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5670 #ifdef INVARIANTS 5671 panic("Huh, its non zero and nothing on control?"); 5672 #endif 5673 so->so_rcv.sb_cc = 0; 5674 } 5675 SCTP_INP_READ_UNLOCK(inp); 5676 hold_rlock = 0; 5677 goto restart; 5678 } 5679 5680 if ((control->length == 0) && 5681 (control->do_not_ref_stcb)) { 5682 /* 5683 * Clean up code for freeing assoc that left behind a 5684 * pdapi.. maybe a peer in EEOR that just closed after 5685 * sending and never indicated a EOR. 5686 */ 5687 if (hold_rlock == 0) { 5688 hold_rlock = 1; 5689 SCTP_INP_READ_LOCK(inp); 5690 } 5691 control->held_length = 0; 5692 if (control->data) { 5693 /* Hmm there is data here .. fix */ 5694 struct mbuf *m_tmp; 5695 int cnt = 0; 5696 5697 m_tmp = control->data; 5698 while (m_tmp) { 5699 cnt += SCTP_BUF_LEN(m_tmp); 5700 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5701 control->tail_mbuf = m_tmp; 5702 control->end_added = 1; 5703 } 5704 m_tmp = SCTP_BUF_NEXT(m_tmp); 5705 } 5706 control->length = cnt; 5707 } else { 5708 /* remove it */ 5709 TAILQ_REMOVE(&inp->read_queue, control, next); 5710 /* Add back any hidden data */ 5711 sctp_free_remote_addr(control->whoFrom); 5712 sctp_free_a_readq(stcb, control); 5713 } 5714 if (hold_rlock) { 5715 hold_rlock = 0; 5716 SCTP_INP_READ_UNLOCK(inp); 5717 } 5718 goto restart; 5719 } 5720 if ((control->length == 0) && 5721 (control->end_added == 1)) { 5722 /* 5723 * Do we also need to check for (control->pdapi_aborted == 5724 * 1)? 5725 */ 5726 if (hold_rlock == 0) { 5727 hold_rlock = 1; 5728 SCTP_INP_READ_LOCK(inp); 5729 } 5730 TAILQ_REMOVE(&inp->read_queue, control, next); 5731 if (control->data) { 5732 #ifdef INVARIANTS 5733 panic("control->data not null but control->length == 0"); 5734 #else 5735 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5736 sctp_m_freem(control->data); 5737 control->data = NULL; 5738 #endif 5739 } 5740 if (control->aux_data) { 5741 sctp_m_free(control->aux_data); 5742 control->aux_data = NULL; 5743 } 5744 #ifdef INVARIANTS 5745 if (control->on_strm_q) { 5746 panic("About to free ctl:%p so:%p and its in %d", 5747 control, so, control->on_strm_q); 5748 } 5749 #endif 5750 sctp_free_remote_addr(control->whoFrom); 5751 sctp_free_a_readq(stcb, control); 5752 if (hold_rlock) { 5753 hold_rlock = 0; 5754 SCTP_INP_READ_UNLOCK(inp); 5755 } 5756 goto restart; 5757 } 5758 if (control->length == 0) { 5759 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5760 (filling_sinfo)) { 5761 /* find a more suitable one than this */ 5762 ctl = TAILQ_NEXT(control, next); 5763 while (ctl) { 5764 if ((ctl->stcb != control->stcb) && (ctl->length) && 5765 (ctl->some_taken || 5766 (ctl->spec_flags & M_NOTIFICATION) || 5767 ((ctl->do_not_ref_stcb == 0) && 5768 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5769 ) { 5770 /*- 5771 * If we have a different TCB next, and there is data 5772 * present. If we have already taken some (pdapi), OR we can 5773 * ref the tcb and no delivery has started on this stream, we 5774 * take it. Note we allow a notification on a different 5775 * assoc to be delivered. 5776 */ 5777 control = ctl; 5778 goto found_one; 5779 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5780 (ctl->length) && 5781 ((ctl->some_taken) || 5782 ((ctl->do_not_ref_stcb == 0) && 5783 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5784 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5785 /*- 5786 * If we have the same tcb, and there is data present, and we 5787 * have the strm interleave feature present. Then if we have 5788 * taken some (pdapi) or we can refer to that tcb AND we have 5789 * not started a delivery for this stream, we can take it. 5790 * Note we do NOT allow a notification on the same assoc to 5791 * be delivered. 5792 */ 5793 control = ctl; 5794 goto found_one; 5795 } 5796 ctl = TAILQ_NEXT(ctl, next); 5797 } 5798 } 5799 /* 5800 * if we reach here, no suitable replacement is available 5801 * <or> fragment interleave is NOT on. So stuff the sb_cc 5802 * into our held count, and it's time to sleep again. 5803 */ 5804 held_length = so->so_rcv.sb_cc; 5805 control->held_length = so->so_rcv.sb_cc; 5806 goto restart; 5807 } 5808 /* Clear the held length since there is something to read */ 5809 control->held_length = 0; 5810 found_one: 5811 /* 5812 * If we reach here, control has some data for us to read off. 5813 * Note that stcb COULD be NULL. 5814 */ 5815 if (hold_rlock == 0) { 5816 hold_rlock = 1; 5817 SCTP_INP_READ_LOCK(inp); 5818 } 5819 control->some_taken++; 5820 stcb = control->stcb; 5821 if (stcb) { 5822 if ((control->do_not_ref_stcb == 0) && 5823 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5824 if (freecnt_applied == 0) 5825 stcb = NULL; 5826 } else if (control->do_not_ref_stcb == 0) { 5827 /* you can't free it on me please */ 5828 /* 5829 * The lock on the socket buffer protects us so the 5830 * free code will stop.
But since we used the 5831 * socketbuf lock and the sender uses the tcb_lock 5832 * to increment, we need to use the atomic add to 5833 * the refcnt 5834 */ 5835 if (freecnt_applied) { 5836 #ifdef INVARIANTS 5837 panic("refcnt already incremented"); 5838 #else 5839 SCTP_PRINTF("refcnt already incremented?\n"); 5840 #endif 5841 } else { 5842 atomic_add_int(&stcb->asoc.refcnt, 1); 5843 freecnt_applied = 1; 5844 } 5845 /* 5846 * Setup to remember how much we have not yet told 5847 * the peer our rwnd has opened up. Note we grab the 5848 * value from the tcb from last time. Note too that 5849 * sack sending clears this when a sack is sent, 5850 * which is fine. Once we hit the rwnd_req, we then 5851 * will go to the sctp_user_rcvd() that will not 5852 * lock until it KNOWs it MUST send a WUP-SACK. 5853 */ 5854 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5855 stcb->freed_by_sorcv_sincelast = 0; 5856 } 5857 } 5858 if (stcb && 5859 ((control->spec_flags & M_NOTIFICATION) == 0) && 5860 control->do_not_ref_stcb == 0) { 5861 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5862 } 5863 5864 /* First lets get off the sinfo and sockaddr info */ 5865 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5866 sinfo->sinfo_stream = control->sinfo_stream; 5867 sinfo->sinfo_ssn = (uint16_t)control->mid; 5868 sinfo->sinfo_flags = control->sinfo_flags; 5869 sinfo->sinfo_ppid = control->sinfo_ppid; 5870 sinfo->sinfo_context = control->sinfo_context; 5871 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5872 sinfo->sinfo_tsn = control->sinfo_tsn; 5873 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5874 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5875 nxt = TAILQ_NEXT(control, next); 5876 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5877 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5878 struct sctp_extrcvinfo *s_extra; 5879 5880 s_extra = (struct sctp_extrcvinfo *)sinfo; 5881 if ((nxt) && 5882 (nxt->length)) { 5883 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5884 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5885 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5886 } 5887 if (nxt->spec_flags & M_NOTIFICATION) { 5888 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5889 } 5890 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5891 s_extra->serinfo_next_length = nxt->length; 5892 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5893 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5894 if (nxt->tail_mbuf != NULL) { 5895 if (nxt->end_added) { 5896 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5897 } 5898 } 5899 } else { 5900 /* 5901 * we explicitly 0 this, since the memcpy 5902 * got some other things beyond the older 5903 * sinfo_ that is on the control's structure 5904 * :-D 5905 */ 5906 nxt = NULL; 5907 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5908 s_extra->serinfo_next_aid = 0; 5909 s_extra->serinfo_next_length = 0; 5910 s_extra->serinfo_next_ppid = 0; 5911 s_extra->serinfo_next_stream = 0; 5912 } 5913 } 5914 /* 5915 * update off the real current cum-ack, if we have an stcb. 5916 */ 5917 if ((control->do_not_ref_stcb == 0) && stcb) 5918 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5919 /* 5920 * mask off the high bits, we keep the actual chunk bits in 5921 * there. 
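 * For example, a message that arrived in an unordered DATA chunk has
 * SCTP_DATA_UNORDERED set in that upper byte and is reported to the
 * application as SCTP_UNORDERED in the low byte.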
5922 */ 5923 sinfo->sinfo_flags &= 0x00ff; 5924 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5925 sinfo->sinfo_flags |= SCTP_UNORDERED; 5926 } 5927 } 5928 #ifdef SCTP_ASOCLOG_OF_TSNS 5929 { 5930 int index, newindex; 5931 struct sctp_pcbtsn_rlog *entry; 5932 5933 do { 5934 index = inp->readlog_index; 5935 newindex = index + 1; 5936 if (newindex >= SCTP_READ_LOG_SIZE) { 5937 newindex = 0; 5938 } 5939 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5940 entry = &inp->readlog[index]; 5941 entry->vtag = control->sinfo_assoc_id; 5942 entry->strm = control->sinfo_stream; 5943 entry->seq = (uint16_t)control->mid; 5944 entry->sz = control->length; 5945 entry->flgs = control->sinfo_flags; 5946 } 5947 #endif 5948 if ((fromlen > 0) && (from != NULL)) { 5949 union sctp_sockstore store; 5950 size_t len; 5951 5952 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5953 #ifdef INET6 5954 case AF_INET6: 5955 len = sizeof(struct sockaddr_in6); 5956 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5957 store.sin6.sin6_port = control->port_from; 5958 break; 5959 #endif 5960 #ifdef INET 5961 case AF_INET: 5962 #ifdef INET6 5963 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5964 len = sizeof(struct sockaddr_in6); 5965 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5966 &store.sin6); 5967 store.sin6.sin6_port = control->port_from; 5968 } else { 5969 len = sizeof(struct sockaddr_in); 5970 store.sin = control->whoFrom->ro._l_addr.sin; 5971 store.sin.sin_port = control->port_from; 5972 } 5973 #else 5974 len = sizeof(struct sockaddr_in); 5975 store.sin = control->whoFrom->ro._l_addr.sin; 5976 store.sin.sin_port = control->port_from; 5977 #endif 5978 break; 5979 #endif 5980 default: 5981 len = 0; 5982 break; 5983 } 5984 memcpy(from, &store, min((size_t)fromlen, len)); 5985 #ifdef INET6 5986 { 5987 struct sockaddr_in6 lsa6, *from6; 5988 5989 from6 = (struct sockaddr_in6 *)from; 5990 sctp_recover_scope_mac(from6, (&lsa6)); 5991 } 5992 #endif 5993 } 5994 if (hold_rlock) { 5995 SCTP_INP_READ_UNLOCK(inp); 5996 hold_rlock = 0; 5997 } 5998 if (hold_sblock) { 5999 SOCKBUF_UNLOCK(&so->so_rcv); 6000 hold_sblock = 0; 6001 } 6002 /* now copy out what data we can */ 6003 if (mp == NULL) { 6004 /* copy out each mbuf in the chain up to length */ 6005 get_more_data: 6006 m = control->data; 6007 while (m) { 6008 /* Move out all we can */ 6009 cp_len = uio->uio_resid; 6010 my_len = SCTP_BUF_LEN(m); 6011 if (cp_len > my_len) { 6012 /* not enough in this buf */ 6013 cp_len = my_len; 6014 } 6015 if (hold_rlock) { 6016 SCTP_INP_READ_UNLOCK(inp); 6017 hold_rlock = 0; 6018 } 6019 if (cp_len > 0) 6020 error = uiomove(mtod(m, char *), (int)cp_len, uio); 6021 /* re-read */ 6022 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 6023 goto release; 6024 } 6025 6026 if ((control->do_not_ref_stcb == 0) && stcb && 6027 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 6028 no_rcv_needed = 1; 6029 } 6030 if (error) { 6031 /* error we are out of here */ 6032 goto release; 6033 } 6034 SCTP_INP_READ_LOCK(inp); 6035 hold_rlock = 1; 6036 if (cp_len == SCTP_BUF_LEN(m)) { 6037 if ((SCTP_BUF_NEXT(m) == NULL) && 6038 (control->end_added)) { 6039 out_flags |= MSG_EOR; 6040 if ((control->do_not_ref_stcb == 0) && 6041 (control->stcb != NULL) && 6042 ((control->spec_flags & M_NOTIFICATION) == 0)) 6043 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6044 } 6045 if (control->spec_flags & M_NOTIFICATION) { 6046 out_flags |= MSG_NOTIFICATION; 6047 } 6048 /* we ate up the mbuf */ 
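/*
 * Either just step past the mbuf (MSG_PEEK) or unlink and free it,
 * adjusting the socket-buffer, association and control byte counts
 * as we go.
 */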
6049 if (in_flags & MSG_PEEK) { 6050 /* just looking */ 6051 m = SCTP_BUF_NEXT(m); 6052 copied_so_far += cp_len; 6053 } else { 6054 /* dispose of the mbuf */ 6055 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6056 sctp_sblog(&so->so_rcv, 6057 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6058 } 6059 sctp_sbfree(control, stcb, &so->so_rcv, m); 6060 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6061 sctp_sblog(&so->so_rcv, 6062 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6063 } 6064 copied_so_far += cp_len; 6065 freed_so_far += (uint32_t)cp_len; 6066 freed_so_far += MSIZE; 6067 atomic_subtract_int(&control->length, (int)cp_len); 6068 control->data = sctp_m_free(m); 6069 m = control->data; 6070 /* 6071 * been through it all, must hold sb 6072 * lock ok to null tail 6073 */ 6074 if (control->data == NULL) { 6075 #ifdef INVARIANTS 6076 if ((control->end_added == 0) || 6077 (TAILQ_NEXT(control, next) == NULL)) { 6078 /* 6079 * If the end is not 6080 * added, OR the 6081 * next is NOT null 6082 * we MUST have the 6083 * lock. 6084 */ 6085 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6086 panic("Hmm we don't own the lock?"); 6087 } 6088 } 6089 #endif 6090 control->tail_mbuf = NULL; 6091 #ifdef INVARIANTS 6092 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6093 panic("end_added, nothing left and no MSG_EOR"); 6094 } 6095 #endif 6096 } 6097 } 6098 } else { 6099 /* Do we need to trim the mbuf? */ 6100 if (control->spec_flags & M_NOTIFICATION) { 6101 out_flags |= MSG_NOTIFICATION; 6102 } 6103 if ((in_flags & MSG_PEEK) == 0) { 6104 SCTP_BUF_RESV_UF(m, cp_len); 6105 SCTP_BUF_LEN(m) -= (int)cp_len; 6106 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6107 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6108 } 6109 atomic_subtract_int(&so->so_rcv.sb_cc, (int)cp_len); 6110 if ((control->do_not_ref_stcb == 0) && 6111 stcb) { 6112 atomic_subtract_int(&stcb->asoc.sb_cc, (int)cp_len); 6113 } 6114 copied_so_far += cp_len; 6115 freed_so_far += (uint32_t)cp_len; 6116 freed_so_far += MSIZE; 6117 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6118 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6119 SCTP_LOG_SBRESULT, 0); 6120 } 6121 atomic_subtract_int(&control->length, (int)cp_len); 6122 } else { 6123 copied_so_far += cp_len; 6124 } 6125 } 6126 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6127 break; 6128 } 6129 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6130 (control->do_not_ref_stcb == 0) && 6131 (freed_so_far >= rwnd_req)) { 6132 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6133 } 6134 } /* end while(m) */ 6135 /* 6136 * At this point we have looked at it all and we either have 6137 * a MSG_EOR/or read all the user wants... <OR> 6138 * control->length == 0. 6139 */ 6140 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6141 /* we are done with this control */ 6142 if (control->length == 0) { 6143 if (control->data) { 6144 #ifdef INVARIANTS 6145 panic("control->data not null at read eor?"); 6146 #else 6147 SCTP_PRINTF("Strange, data left in the control buffer .. 
invariants would panic?\n"); 6148 sctp_m_freem(control->data); 6149 control->data = NULL; 6150 #endif 6151 } 6152 done_with_control: 6153 if (hold_rlock == 0) { 6154 SCTP_INP_READ_LOCK(inp); 6155 hold_rlock = 1; 6156 } 6157 TAILQ_REMOVE(&inp->read_queue, control, next); 6158 /* Add back any hidden data */ 6159 if (control->held_length) { 6160 held_length = 0; 6161 control->held_length = 0; 6162 wakeup_read_socket = 1; 6163 } 6164 if (control->aux_data) { 6165 sctp_m_free(control->aux_data); 6166 control->aux_data = NULL; 6167 } 6168 no_rcv_needed = control->do_not_ref_stcb; 6169 sctp_free_remote_addr(control->whoFrom); 6170 control->data = NULL; 6171 #ifdef INVARIANTS 6172 if (control->on_strm_q) { 6173 panic("About to free ctl:%p so:%p and its in %d", 6174 control, so, control->on_strm_q); 6175 } 6176 #endif 6177 sctp_free_a_readq(stcb, control); 6178 control = NULL; 6179 if ((freed_so_far >= rwnd_req) && 6180 (no_rcv_needed == 0)) 6181 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6182 6183 } else { 6184 /* 6185 * The user did not read all of this 6186 * message, turn off the returned MSG_EOR 6187 * since we are leaving more behind on the 6188 * control to read. 6189 */ 6190 #ifdef INVARIANTS 6191 if (control->end_added && 6192 (control->data == NULL) && 6193 (control->tail_mbuf == NULL)) { 6194 panic("Gak, control->length is corrupt?"); 6195 } 6196 #endif 6197 no_rcv_needed = control->do_not_ref_stcb; 6198 out_flags &= ~MSG_EOR; 6199 } 6200 } 6201 if (out_flags & MSG_EOR) { 6202 goto release; 6203 } 6204 if ((uio->uio_resid == 0) || 6205 ((in_eeor_mode) && 6206 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6207 goto release; 6208 } 6209 /* 6210 * If I hit here the receiver wants more and this message is 6211 * NOT done (pd-api). So two questions. Can we block? if not 6212 * we are done. Did the user NOT set MSG_WAITALL? 6213 */ 6214 if (block_allowed == 0) { 6215 goto release; 6216 } 6217 /* 6218 * We need to wait for more data a few things: - We don't 6219 * release the I/O lock so we don't get someone else 6220 * reading. - We must be sure to account for the case where 6221 * what is added is NOT to our control when we wakeup. 6222 */ 6223 6224 /* 6225 * Do we need to tell the transport a rwnd update might be 6226 * needed before we go to sleep? 
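 * (Only when we are not peeking, we have an stcb we may reference
 * (do_not_ref_stcb == 0), no_rcv_needed is clear and at least
 * rwnd_req bytes have been freed since the last report;
 * sctp_user_rcvd() then decides whether a window-update SACK is
 * actually worth sending.)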
6227 */ 6228 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6229 ((freed_so_far >= rwnd_req) && 6230 (control->do_not_ref_stcb == 0) && 6231 (no_rcv_needed == 0))) { 6232 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6233 } 6234 wait_some_more: 6235 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6236 goto release; 6237 } 6238 6239 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6240 goto release; 6241 6242 if (hold_rlock == 1) { 6243 SCTP_INP_READ_UNLOCK(inp); 6244 hold_rlock = 0; 6245 } 6246 if (hold_sblock == 0) { 6247 SOCKBUF_LOCK(&so->so_rcv); 6248 hold_sblock = 1; 6249 } 6250 if ((copied_so_far) && (control->length == 0) && 6251 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6252 goto release; 6253 } 6254 if (so->so_rcv.sb_cc <= control->held_length) { 6255 error = sbwait(&so->so_rcv); 6256 if (error) { 6257 goto release; 6258 } 6259 control->held_length = 0; 6260 } 6261 if (hold_sblock) { 6262 SOCKBUF_UNLOCK(&so->so_rcv); 6263 hold_sblock = 0; 6264 } 6265 if (control->length == 0) { 6266 /* still nothing here */ 6267 if (control->end_added == 1) { 6268 /* he aborted, or is done i.e.did a shutdown */ 6269 out_flags |= MSG_EOR; 6270 if (control->pdapi_aborted) { 6271 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6272 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6273 6274 out_flags |= MSG_TRUNC; 6275 } else { 6276 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6277 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6278 } 6279 goto done_with_control; 6280 } 6281 if (so->so_rcv.sb_cc > held_length) { 6282 control->held_length = so->so_rcv.sb_cc; 6283 held_length = 0; 6284 } 6285 goto wait_some_more; 6286 } else if (control->data == NULL) { 6287 /* 6288 * we must re-sync since data is probably being 6289 * added 6290 */ 6291 SCTP_INP_READ_LOCK(inp); 6292 if ((control->length > 0) && (control->data == NULL)) { 6293 /* 6294 * big trouble.. we have the lock and its 6295 * corrupt? 6296 */ 6297 #ifdef INVARIANTS 6298 panic("Impossible data==NULL length !=0"); 6299 #endif 6300 out_flags |= MSG_EOR; 6301 out_flags |= MSG_TRUNC; 6302 control->length = 0; 6303 SCTP_INP_READ_UNLOCK(inp); 6304 goto done_with_control; 6305 } 6306 SCTP_INP_READ_UNLOCK(inp); 6307 /* We will fall around to get more data */ 6308 } 6309 goto get_more_data; 6310 } else { 6311 /*- 6312 * Give caller back the mbuf chain, 6313 * store in uio_resid the length 6314 */ 6315 wakeup_read_socket = 0; 6316 if ((control->end_added == 0) || 6317 (TAILQ_NEXT(control, next) == NULL)) { 6318 /* Need to get rlock */ 6319 if (hold_rlock == 0) { 6320 SCTP_INP_READ_LOCK(inp); 6321 hold_rlock = 1; 6322 } 6323 } 6324 if (control->end_added) { 6325 out_flags |= MSG_EOR; 6326 if ((control->do_not_ref_stcb == 0) && 6327 (control->stcb != NULL) && 6328 ((control->spec_flags & M_NOTIFICATION) == 0)) 6329 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6330 } 6331 if (control->spec_flags & M_NOTIFICATION) { 6332 out_flags |= MSG_NOTIFICATION; 6333 } 6334 uio->uio_resid = control->length; 6335 *mp = control->data; 6336 m = control->data; 6337 while (m) { 6338 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6339 sctp_sblog(&so->so_rcv, 6340 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6341 } 6342 sctp_sbfree(control, stcb, &so->so_rcv, m); 6343 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6344 freed_so_far += MSIZE; 6345 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6346 sctp_sblog(&so->so_rcv, 6347 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6348 } 6349 m = SCTP_BUF_NEXT(m); 6350 } 6351 control->data = control->tail_mbuf = NULL; 6352 control->length = 0; 6353 if (out_flags & MSG_EOR) { 6354 /* Done with this control */ 6355 goto done_with_control; 6356 } 6357 } 6358 release: 6359 if (hold_rlock == 1) { 6360 SCTP_INP_READ_UNLOCK(inp); 6361 hold_rlock = 0; 6362 } 6363 if (hold_sblock == 1) { 6364 SOCKBUF_UNLOCK(&so->so_rcv); 6365 hold_sblock = 0; 6366 } 6367 6368 SOCK_IO_RECV_UNLOCK(so); 6369 sockbuf_lock = 0; 6370 6371 release_unlocked: 6372 if (hold_sblock) { 6373 SOCKBUF_UNLOCK(&so->so_rcv); 6374 hold_sblock = 0; 6375 } 6376 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6377 if ((freed_so_far >= rwnd_req) && 6378 (control && (control->do_not_ref_stcb == 0)) && 6379 (no_rcv_needed == 0)) 6380 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6381 } 6382 out: 6383 if (msg_flags) { 6384 *msg_flags = out_flags; 6385 } 6386 if (((out_flags & MSG_EOR) == 0) && 6387 ((in_flags & MSG_PEEK) == 0) && 6388 (sinfo) && 6389 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6390 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6391 struct sctp_extrcvinfo *s_extra; 6392 6393 s_extra = (struct sctp_extrcvinfo *)sinfo; 6394 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6395 } 6396 if (hold_rlock == 1) { 6397 SCTP_INP_READ_UNLOCK(inp); 6398 } 6399 if (hold_sblock) { 6400 SOCKBUF_UNLOCK(&so->so_rcv); 6401 } 6402 if (sockbuf_lock) { 6403 SOCK_IO_RECV_UNLOCK(so); 6404 } 6405 6406 if (freecnt_applied) { 6407 /* 6408 * The lock on the socket buffer protects us so the free 6409 * code will stop. But since we used the socketbuf lock and 6410 * the sender uses the tcb_lock to increment, we need to use 6411 * the atomic add to the refcnt. 6412 */ 6413 if (stcb == NULL) { 6414 #ifdef INVARIANTS 6415 panic("stcb for refcnt has gone NULL?"); 6416 goto stage_left; 6417 #else 6418 goto stage_left; 6419 #endif 6420 } 6421 /* Save the value back for next time */ 6422 stcb->freed_by_sorcv_sincelast = freed_so_far; 6423 atomic_subtract_int(&stcb->asoc.refcnt, 1); 6424 } 6425 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6426 if (stcb) { 6427 sctp_misc_ints(SCTP_SORECV_DONE, 6428 freed_so_far, 6429 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6430 stcb->asoc.my_rwnd, 6431 so->so_rcv.sb_cc); 6432 } else { 6433 sctp_misc_ints(SCTP_SORECV_DONE, 6434 freed_so_far, 6435 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6436 0, 6437 so->so_rcv.sb_cc); 6438 } 6439 } 6440 stage_left: 6441 if (wakeup_read_socket) { 6442 sctp_sorwakeup(inp, so); 6443 } 6444 return (error); 6445 } 6446 6447 #ifdef SCTP_MBUF_LOGGING 6448 struct mbuf * 6449 sctp_m_free(struct mbuf *m) 6450 { 6451 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6452 sctp_log_mb(m, SCTP_MBUF_IFREE); 6453 } 6454 return (m_free(m)); 6455 } 6456 6457 void 6458 sctp_m_freem(struct mbuf *mb) 6459 { 6460 while (mb != NULL) 6461 mb = sctp_m_free(mb); 6462 } 6463 6464 #endif 6465 6466 int 6467 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6468 { 6469 /* 6470 * Given a local address. For all associations that holds the 6471 * address, request a peer-set-primary. 
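 * The request is handled asynchronously: a sctp_laddr work item
 * carrying the ifa and SCTP_SET_PRIM_ADDR is queued on the address
 * work queue and the ADDR_WQ timer is started so the iterator will
 * pick it up.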
6472 */ 6473 struct sctp_ifa *ifa; 6474 struct sctp_laddr *wi; 6475 6476 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6477 if (ifa == NULL) { 6478 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6479 return (EADDRNOTAVAIL); 6480 } 6481 /* 6482 * Now that we have the ifa we must awaken the iterator with this 6483 * message. 6484 */ 6485 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6486 if (wi == NULL) { 6487 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6488 return (ENOMEM); 6489 } 6490 /* Now incr the count and int wi structure */ 6491 SCTP_INCR_LADDR_COUNT(); 6492 memset(wi, 0, sizeof(*wi)); 6493 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6494 wi->ifa = ifa; 6495 wi->action = SCTP_SET_PRIM_ADDR; 6496 atomic_add_int(&ifa->refcount, 1); 6497 6498 /* Now add it to the work queue */ 6499 SCTP_WQ_ADDR_LOCK(); 6500 /* 6501 * Should this really be a tailq? As it is we will process the 6502 * newest first :-0 6503 */ 6504 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6505 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6506 (struct sctp_inpcb *)NULL, 6507 (struct sctp_tcb *)NULL, 6508 (struct sctp_nets *)NULL); 6509 SCTP_WQ_ADDR_UNLOCK(); 6510 return (0); 6511 } 6512 6513 int 6514 sctp_soreceive(struct socket *so, 6515 struct sockaddr **psa, 6516 struct uio *uio, 6517 struct mbuf **mp0, 6518 struct mbuf **controlp, 6519 int *flagsp) 6520 { 6521 int error, fromlen; 6522 uint8_t sockbuf[256]; 6523 struct sockaddr *from; 6524 struct sctp_extrcvinfo sinfo; 6525 int filling_sinfo = 1; 6526 int flags; 6527 struct sctp_inpcb *inp; 6528 6529 inp = (struct sctp_inpcb *)so->so_pcb; 6530 /* pickup the assoc we are reading from */ 6531 if (inp == NULL) { 6532 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6533 return (EINVAL); 6534 } 6535 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6536 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6537 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6538 (controlp == NULL)) { 6539 /* user does not want the sndrcv ctl */ 6540 filling_sinfo = 0; 6541 } 6542 if (psa) { 6543 from = (struct sockaddr *)sockbuf; 6544 fromlen = sizeof(sockbuf); 6545 from->sa_len = 0; 6546 } else { 6547 from = NULL; 6548 fromlen = 0; 6549 } 6550 6551 if (filling_sinfo) { 6552 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6553 } 6554 if (flagsp != NULL) { 6555 flags = *flagsp; 6556 } else { 6557 flags = 0; 6558 } 6559 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6560 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6561 if (flagsp != NULL) { 6562 *flagsp = flags; 6563 } 6564 if (controlp != NULL) { 6565 /* copy back the sinfo in a CMSG format */ 6566 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6567 *controlp = sctp_build_ctl_nchunk(inp, 6568 (struct sctp_sndrcvinfo *)&sinfo); 6569 } else { 6570 *controlp = NULL; 6571 } 6572 } 6573 if (psa) { 6574 /* copy back the address info */ 6575 if (from && from->sa_len) { 6576 *psa = sodupsockaddr(from, M_NOWAIT); 6577 } else { 6578 *psa = NULL; 6579 } 6580 } 6581 return (error); 6582 } 6583 6584 int 6585 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6586 int totaddr, int *error) 6587 { 6588 int added = 0; 6589 int i; 6590 struct sctp_inpcb *inp; 6591 struct sockaddr *sa; 6592 size_t incr = 0; 6593 #ifdef INET 6594 struct sockaddr_in *sin; 6595 #endif 6596 #ifdef INET6 6597 struct sockaddr_in6 *sin6; 6598 #endif 6599 6600 sa = addr; 6601 inp = 
stcb->sctp_ep; 6602 *error = 0; 6603 for (i = 0; i < totaddr; i++) { 6604 switch (sa->sa_family) { 6605 #ifdef INET 6606 case AF_INET: 6607 incr = sizeof(struct sockaddr_in); 6608 sin = (struct sockaddr_in *)sa; 6609 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6610 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6611 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6612 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6613 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6614 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6615 *error = EINVAL; 6616 goto out_now; 6617 } 6618 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6619 SCTP_DONOT_SETSCOPE, 6620 SCTP_ADDR_IS_CONFIRMED)) { 6621 /* assoc gone no un-lock */ 6622 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6623 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6624 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6625 *error = ENOBUFS; 6626 goto out_now; 6627 } 6628 added++; 6629 break; 6630 #endif 6631 #ifdef INET6 6632 case AF_INET6: 6633 incr = sizeof(struct sockaddr_in6); 6634 sin6 = (struct sockaddr_in6 *)sa; 6635 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6636 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6637 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6638 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6639 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6640 *error = EINVAL; 6641 goto out_now; 6642 } 6643 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6644 SCTP_DONOT_SETSCOPE, 6645 SCTP_ADDR_IS_CONFIRMED)) { 6646 /* assoc gone no un-lock */ 6647 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6648 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6649 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6650 *error = ENOBUFS; 6651 goto out_now; 6652 } 6653 added++; 6654 break; 6655 #endif 6656 default: 6657 break; 6658 } 6659 sa = (struct sockaddr *)((caddr_t)sa + incr); 6660 } 6661 out_now: 6662 return (added); 6663 } 6664 6665 int 6666 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6667 unsigned int totaddr, 6668 unsigned int *num_v4, unsigned int *num_v6, 6669 unsigned int limit) 6670 { 6671 struct sockaddr *sa; 6672 struct sctp_tcb *stcb; 6673 unsigned int incr, at, i; 6674 6675 at = 0; 6676 sa = addr; 6677 *num_v6 = *num_v4 = 0; 6678 /* account and validate addresses */ 6679 if (totaddr == 0) { 6680 return (EINVAL); 6681 } 6682 for (i = 0; i < totaddr; i++) { 6683 if (at + sizeof(struct sockaddr) > limit) { 6684 return (EINVAL); 6685 } 6686 switch (sa->sa_family) { 6687 #ifdef INET 6688 case AF_INET: 6689 incr = (unsigned int)sizeof(struct sockaddr_in); 6690 if (sa->sa_len != incr) { 6691 return (EINVAL); 6692 } 6693 (*num_v4) += 1; 6694 break; 6695 #endif 6696 #ifdef INET6 6697 case AF_INET6: 6698 { 6699 struct sockaddr_in6 *sin6; 6700 6701 incr = (unsigned int)sizeof(struct sockaddr_in6); 6702 if (sa->sa_len != incr) { 6703 return (EINVAL); 6704 } 6705 sin6 = (struct sockaddr_in6 *)sa; 6706 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6707 /* Must be non-mapped for connectx */ 6708 return (EINVAL); 6709 } 6710 (*num_v6) += 1; 6711 break; 6712 } 6713 #endif 6714 default: 6715 return (EINVAL); 6716 } 6717 if ((at + incr) > limit) { 6718 return (EINVAL); 6719 } 6720 SCTP_INP_INCR_REF(inp); 6721 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6722 if (stcb != NULL) { 6723 SCTP_TCB_UNLOCK(stcb); 6724 return (EALREADY); 6725 } else { 6726 SCTP_INP_DECR_REF(inp); 6727 } 6728 at += incr; 6729 sa = (struct sockaddr *)((caddr_t)sa + 
incr); 6730 } 6731 return (0); 6732 } 6733 6734 /* 6735 * sctp_bindx(ADD) for one address. 6736 * assumes all arguments are valid/checked by caller. 6737 */ 6738 void 6739 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6740 struct sockaddr *sa, uint32_t vrf_id, int *error, 6741 void *p) 6742 { 6743 #if defined(INET) && defined(INET6) 6744 struct sockaddr_in sin; 6745 #endif 6746 #ifdef INET6 6747 struct sockaddr_in6 *sin6; 6748 #endif 6749 #ifdef INET 6750 struct sockaddr_in *sinp; 6751 #endif 6752 struct sockaddr *addr_to_use; 6753 struct sctp_inpcb *lep; 6754 uint16_t port; 6755 6756 /* see if we're bound all already! */ 6757 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6758 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6759 *error = EINVAL; 6760 return; 6761 } 6762 switch (sa->sa_family) { 6763 #ifdef INET6 6764 case AF_INET6: 6765 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6766 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6767 *error = EINVAL; 6768 return; 6769 } 6770 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6771 /* can only bind v6 on PF_INET6 sockets */ 6772 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6773 *error = EINVAL; 6774 return; 6775 } 6776 sin6 = (struct sockaddr_in6 *)sa; 6777 port = sin6->sin6_port; 6778 #ifdef INET 6779 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6780 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6781 SCTP_IPV6_V6ONLY(inp)) { 6782 /* can't bind v4-mapped on PF_INET sockets */ 6783 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6784 *error = EINVAL; 6785 return; 6786 } 6787 in6_sin6_2_sin(&sin, sin6); 6788 addr_to_use = (struct sockaddr *)&sin; 6789 } else { 6790 addr_to_use = sa; 6791 } 6792 #else 6793 addr_to_use = sa; 6794 #endif 6795 break; 6796 #endif 6797 #ifdef INET 6798 case AF_INET: 6799 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6800 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6801 *error = EINVAL; 6802 return; 6803 } 6804 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6805 SCTP_IPV6_V6ONLY(inp)) { 6806 /* can't bind v4 on PF_INET sockets */ 6807 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6808 *error = EINVAL; 6809 return; 6810 } 6811 sinp = (struct sockaddr_in *)sa; 6812 port = sinp->sin_port; 6813 addr_to_use = sa; 6814 break; 6815 #endif 6816 default: 6817 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6818 *error = EINVAL; 6819 return; 6820 } 6821 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6822 if (p == NULL) { 6823 /* Can't get proc for Net/Open BSD */ 6824 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6825 *error = EINVAL; 6826 return; 6827 } 6828 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6829 return; 6830 } 6831 /* Validate the incoming port. */ 6832 if ((port != 0) && (port != inp->sctp_lport)) { 6833 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6834 *error = EINVAL; 6835 return; 6836 } 6837 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6838 if (lep == NULL) { 6839 /* add the address */ 6840 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6841 SCTP_ADD_IP_ADDRESS, vrf_id); 6842 } else { 6843 if (lep != inp) { 6844 *error = EADDRINUSE; 6845 } 6846 SCTP_INP_DECR_REF(lep); 6847 } 6848 } 6849 6850 /* 6851 * sctp_bindx(DELETE) for one address. 6852 * assumes all arguments are valid/checked by caller. 
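 * Mirrors the ADD path above: with both INET and INET6 configured, a
 * v4-mapped IPv6 address is converted to a plain IPv4 sockaddr before
 * the request is handed to sctp_addr_mgmt_ep_sa() with
 * SCTP_DEL_IP_ADDRESS.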
6853 */ 6854 void 6855 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6856 struct sockaddr *sa, uint32_t vrf_id, int *error) 6857 { 6858 struct sockaddr *addr_to_use; 6859 #if defined(INET) && defined(INET6) 6860 struct sockaddr_in6 *sin6; 6861 struct sockaddr_in sin; 6862 #endif 6863 6864 /* see if we're bound all already! */ 6865 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6866 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6867 *error = EINVAL; 6868 return; 6869 } 6870 switch (sa->sa_family) { 6871 #ifdef INET6 6872 case AF_INET6: 6873 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6874 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6875 *error = EINVAL; 6876 return; 6877 } 6878 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6879 /* can only bind v6 on PF_INET6 sockets */ 6880 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6881 *error = EINVAL; 6882 return; 6883 } 6884 #ifdef INET 6885 sin6 = (struct sockaddr_in6 *)sa; 6886 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6887 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6888 SCTP_IPV6_V6ONLY(inp)) { 6889 /* can't bind mapped-v4 on PF_INET sockets */ 6890 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6891 *error = EINVAL; 6892 return; 6893 } 6894 in6_sin6_2_sin(&sin, sin6); 6895 addr_to_use = (struct sockaddr *)&sin; 6896 } else { 6897 addr_to_use = sa; 6898 } 6899 #else 6900 addr_to_use = sa; 6901 #endif 6902 break; 6903 #endif 6904 #ifdef INET 6905 case AF_INET: 6906 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6907 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6908 *error = EINVAL; 6909 return; 6910 } 6911 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6912 SCTP_IPV6_V6ONLY(inp)) { 6913 /* can't bind v4 on PF_INET sockets */ 6914 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6915 *error = EINVAL; 6916 return; 6917 } 6918 addr_to_use = sa; 6919 break; 6920 #endif 6921 default: 6922 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6923 *error = EINVAL; 6924 return; 6925 } 6926 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6927 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6928 vrf_id); 6929 } 6930 6931 /* 6932 * returns the valid local address count for an assoc, taking into account 6933 * all scoping rules 6934 */ 6935 int 6936 sctp_local_addr_count(struct sctp_tcb *stcb) 6937 { 6938 int loopback_scope; 6939 #if defined(INET) 6940 int ipv4_local_scope, ipv4_addr_legal; 6941 #endif 6942 #if defined(INET6) 6943 int local_scope, site_scope, ipv6_addr_legal; 6944 #endif 6945 struct sctp_vrf *vrf; 6946 struct sctp_ifn *sctp_ifn; 6947 struct sctp_ifa *sctp_ifa; 6948 int count = 0; 6949 6950 /* Turn on all the appropriate scopes */ 6951 loopback_scope = stcb->asoc.scope.loopback_scope; 6952 #if defined(INET) 6953 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6954 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6955 #endif 6956 #if defined(INET6) 6957 local_scope = stcb->asoc.scope.local_scope; 6958 site_scope = stcb->asoc.scope.site_scope; 6959 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6960 #endif 6961 SCTP_IPI_ADDR_RLOCK(); 6962 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6963 if (vrf == NULL) { 6964 /* no vrf, no addresses */ 6965 SCTP_IPI_ADDR_RUNLOCK(); 6966 return (0); 6967 } 6968 6969 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6970 /* 6971 * bound all case: go through all ifns on the vrf 6972 */ 6973 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6974 if ((loopback_scope == 0) && 6975 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6976 continue; 6977 } 6978 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6979 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6980 continue; 6981 switch (sctp_ifa->address.sa.sa_family) { 6982 #ifdef INET 6983 case AF_INET: 6984 if (ipv4_addr_legal) { 6985 struct sockaddr_in *sin; 6986 6987 sin = &sctp_ifa->address.sin; 6988 if (sin->sin_addr.s_addr == 0) { 6989 /* 6990 * skip unspecified 6991 * addrs 6992 */ 6993 continue; 6994 } 6995 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 6996 &sin->sin_addr) != 0) { 6997 continue; 6998 } 6999 if ((ipv4_local_scope == 0) && 7000 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 7001 continue; 7002 } 7003 /* count this one */ 7004 count++; 7005 } else { 7006 continue; 7007 } 7008 break; 7009 #endif 7010 #ifdef INET6 7011 case AF_INET6: 7012 if (ipv6_addr_legal) { 7013 struct sockaddr_in6 *sin6; 7014 7015 sin6 = &sctp_ifa->address.sin6; 7016 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 7017 continue; 7018 } 7019 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 7020 &sin6->sin6_addr) != 0) { 7021 continue; 7022 } 7023 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 7024 if (local_scope == 0) 7025 continue; 7026 if (sin6->sin6_scope_id == 0) { 7027 if (sa6_recoverscope(sin6) != 0) 7028 /* 7029 * 7030 * bad 7031 * link 7032 * 7033 * local 7034 * 7035 * address 7036 */ 7037 continue; 7038 } 7039 } 7040 if ((site_scope == 0) && 7041 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7042 continue; 7043 } 7044 /* count this one */ 7045 count++; 7046 } 7047 break; 7048 #endif 7049 default: 7050 /* TSNH */ 7051 break; 7052 } 7053 } 7054 } 7055 } else { 7056 /* 7057 * subset bound case 7058 */ 7059 struct sctp_laddr *laddr; 7060 7061 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7062 sctp_nxt_addr) { 7063 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7064 continue; 7065 } 7066 /* count this one */ 7067 count++; 7068 } 7069 } 7070 SCTP_IPI_ADDR_RUNLOCK(); 7071 return (count); 7072 } 7073 7074 #if defined(SCTP_LOCAL_TRACE_BUF) 7075 7076 void 7077 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7078 { 7079 uint32_t saveindex, newindex; 7080 7081 do { 7082 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7083 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7084 newindex = 1; 7085 } else { 7086 newindex = saveindex + 1; 7087 } 7088 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7089 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7090 saveindex = 0; 7091 } 7092 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7093 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7094 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7095 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7096 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7097 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7098 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7099 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7100 } 7101 7102 #endif 7103 static bool 7104 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7105 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7106 { 7107 struct ip *iph; 7108 #ifdef INET6 7109 struct ip6_hdr *ip6; 7110 #endif 7111 struct mbuf *sp, *last; 7112 struct udphdr *uhdr; 7113 uint16_t port; 7114 7115 if ((m->m_flags & M_PKTHDR) == 0) { 7116 /* Can't handle one that is not a pkt hdr */ 7117 goto out; 7118 } 7119 /* Pull the src port */ 7120 iph = mtod(m, struct ip *); 7121 uhdr = (struct udphdr *)((caddr_t)iph + off); 7122 port = uhdr->uh_sport; 7123 /* 7124 * Split out the mbuf chain. Leave the IP header in m, place the 7125 * rest in the sp. 7126 */ 7127 sp = m_split(m, off, M_NOWAIT); 7128 if (sp == NULL) { 7129 /* Gak, drop packet, we can't do a split */ 7130 goto out; 7131 } 7132 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7133 /* Gak, packet can't have an SCTP header in it - too small */ 7134 m_freem(sp); 7135 goto out; 7136 } 7137 /* Now pull up the UDP header and SCTP header together */ 7138 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7139 if (sp == NULL) { 7140 /* Gak pullup failed */ 7141 goto out; 7142 } 7143 /* Trim out the UDP header */ 7144 m_adj(sp, sizeof(struct udphdr)); 7145 7146 /* Now reconstruct the mbuf chain */ 7147 for (last = m; last->m_next; last = last->m_next); 7148 last->m_next = sp; 7149 m->m_pkthdr.len += sp->m_pkthdr.len; 7150 /* 7151 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7152 * checksum and it was valid. Since CSUM_DATA_VALID == 7153 * CSUM_SCTP_VALID this would imply that the HW also verified the 7154 * SCTP checksum. Therefore, clear the bit. 
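 * In other words, the flag here really vouches for the UDP checksum
 * only, so it is cleared and the SCTP CRC32c is verified by the
 * normal input path instead.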
7155 */ 7156 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7157 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7158 m->m_pkthdr.len, 7159 if_name(m->m_pkthdr.rcvif), 7160 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7161 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7162 iph = mtod(m, struct ip *); 7163 switch (iph->ip_v) { 7164 #ifdef INET 7165 case IPVERSION: 7166 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7167 sctp_input_with_port(m, off, port); 7168 break; 7169 #endif 7170 #ifdef INET6 7171 case IPV6_VERSION >> 4: 7172 ip6 = mtod(m, struct ip6_hdr *); 7173 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7174 sctp6_input_with_port(&m, &off, port); 7175 break; 7176 #endif 7177 default: 7178 goto out; 7179 break; 7180 } 7181 return (true); 7182 out: 7183 m_freem(m); 7184 7185 return (true); 7186 } 7187 7188 #ifdef INET 7189 static void 7190 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7191 { 7192 struct ip *outer_ip, *inner_ip; 7193 struct sctphdr *sh; 7194 struct icmp *icmp; 7195 struct udphdr *udp; 7196 struct sctp_inpcb *inp; 7197 struct sctp_tcb *stcb; 7198 struct sctp_nets *net; 7199 struct sctp_init_chunk *ch; 7200 struct sockaddr_in src, dst; 7201 uint8_t type, code; 7202 7203 inner_ip = (struct ip *)vip; 7204 icmp = (struct icmp *)((caddr_t)inner_ip - 7205 (sizeof(struct icmp) - sizeof(struct ip))); 7206 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7207 if (ntohs(outer_ip->ip_len) < 7208 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7209 return; 7210 } 7211 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7212 sh = (struct sctphdr *)(udp + 1); 7213 memset(&src, 0, sizeof(struct sockaddr_in)); 7214 src.sin_family = AF_INET; 7215 src.sin_len = sizeof(struct sockaddr_in); 7216 src.sin_port = sh->src_port; 7217 src.sin_addr = inner_ip->ip_src; 7218 memset(&dst, 0, sizeof(struct sockaddr_in)); 7219 dst.sin_family = AF_INET; 7220 dst.sin_len = sizeof(struct sockaddr_in); 7221 dst.sin_port = sh->dest_port; 7222 dst.sin_addr = inner_ip->ip_dst; 7223 /* 7224 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7225 * holds our local endpoint address. Thus we reverse the dst and the 7226 * src in the lookup. 7227 */ 7228 inp = NULL; 7229 net = NULL; 7230 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7231 (struct sockaddr *)&src, 7232 &inp, &net, 1, 7233 SCTP_DEFAULT_VRFID); 7234 if ((stcb != NULL) && 7235 (net != NULL) && 7236 (inp != NULL)) { 7237 /* Check the UDP port numbers */ 7238 if ((udp->uh_dport != net->port) || 7239 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7240 SCTP_TCB_UNLOCK(stcb); 7241 return; 7242 } 7243 /* Check the verification tag */ 7244 if (ntohl(sh->v_tag) != 0) { 7245 /* 7246 * This must be the verification tag used for 7247 * sending out packets. We don't consider packets 7248 * reflecting the verification tag. 7249 */ 7250 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7251 SCTP_TCB_UNLOCK(stcb); 7252 return; 7253 } 7254 } else { 7255 if (ntohs(outer_ip->ip_len) >= 7256 sizeof(struct ip) + 7257 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7258 /* 7259 * In this case we can check if we got an 7260 * INIT chunk and if the initiate tag 7261 * matches. 
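 * (An INIT chunk is sent with a zero verification tag in the common
 * header, so the initiate tag inside the chunk is the only value we
 * can compare against.)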
7262 */ 7263 ch = (struct sctp_init_chunk *)(sh + 1); 7264 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7265 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7266 SCTP_TCB_UNLOCK(stcb); 7267 return; 7268 } 7269 } else { 7270 SCTP_TCB_UNLOCK(stcb); 7271 return; 7272 } 7273 } 7274 type = icmp->icmp_type; 7275 code = icmp->icmp_code; 7276 if ((type == ICMP_UNREACH) && 7277 (code == ICMP_UNREACH_PORT)) { 7278 code = ICMP_UNREACH_PROTOCOL; 7279 } 7280 sctp_notify(inp, stcb, net, type, code, 7281 ntohs(inner_ip->ip_len), 7282 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7283 } else { 7284 if ((stcb == NULL) && (inp != NULL)) { 7285 /* reduce ref-count */ 7286 SCTP_INP_WLOCK(inp); 7287 SCTP_INP_DECR_REF(inp); 7288 SCTP_INP_WUNLOCK(inp); 7289 } 7290 if (stcb) { 7291 SCTP_TCB_UNLOCK(stcb); 7292 } 7293 } 7294 return; 7295 } 7296 #endif 7297 7298 #ifdef INET6 7299 static void 7300 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7301 { 7302 struct ip6ctlparam *ip6cp; 7303 struct sctp_inpcb *inp; 7304 struct sctp_tcb *stcb; 7305 struct sctp_nets *net; 7306 struct sctphdr sh; 7307 struct udphdr udp; 7308 struct sockaddr_in6 src, dst; 7309 uint8_t type, code; 7310 7311 ip6cp = (struct ip6ctlparam *)d; 7312 /* 7313 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7314 */ 7315 if (ip6cp->ip6c_m == NULL) { 7316 return; 7317 } 7318 /* 7319 * Check if we can safely examine the ports and the verification tag 7320 * of the SCTP common header. 7321 */ 7322 if (ip6cp->ip6c_m->m_pkthdr.len < 7323 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7324 return; 7325 } 7326 /* Copy out the UDP header. */ 7327 memset(&udp, 0, sizeof(struct udphdr)); 7328 m_copydata(ip6cp->ip6c_m, 7329 ip6cp->ip6c_off, 7330 sizeof(struct udphdr), 7331 (caddr_t)&udp); 7332 /* Copy out the port numbers and the verification tag. */ 7333 memset(&sh, 0, sizeof(struct sctphdr)); 7334 m_copydata(ip6cp->ip6c_m, 7335 ip6cp->ip6c_off + sizeof(struct udphdr), 7336 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7337 (caddr_t)&sh); 7338 memset(&src, 0, sizeof(struct sockaddr_in6)); 7339 src.sin6_family = AF_INET6; 7340 src.sin6_len = sizeof(struct sockaddr_in6); 7341 src.sin6_port = sh.src_port; 7342 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7343 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7344 return; 7345 } 7346 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7347 dst.sin6_family = AF_INET6; 7348 dst.sin6_len = sizeof(struct sockaddr_in6); 7349 dst.sin6_port = sh.dest_port; 7350 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7351 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7352 return; 7353 } 7354 inp = NULL; 7355 net = NULL; 7356 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7357 (struct sockaddr *)&src, 7358 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7359 if ((stcb != NULL) && 7360 (net != NULL) && 7361 (inp != NULL)) { 7362 /* Check the UDP port numbers */ 7363 if ((udp.uh_dport != net->port) || 7364 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7365 SCTP_TCB_UNLOCK(stcb); 7366 return; 7367 } 7368 /* Check the verification tag */ 7369 if (ntohl(sh.v_tag) != 0) { 7370 /* 7371 * This must be the verification tag used for 7372 * sending out packets. We don't consider packets 7373 * reflecting the verification tag. 
7374 */ 7375 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7376 SCTP_TCB_UNLOCK(stcb); 7377 return; 7378 } 7379 } else { 7380 if (ip6cp->ip6c_m->m_pkthdr.len >= 7381 ip6cp->ip6c_off + sizeof(struct udphdr) + 7382 sizeof(struct sctphdr) + 7383 sizeof(struct sctp_chunkhdr) + 7384 offsetof(struct sctp_init, a_rwnd)) { 7385 /* 7386 * In this case we can check if we got an 7387 * INIT chunk and if the initiate tag 7388 * matches. 7389 */ 7390 uint32_t initiate_tag; 7391 uint8_t chunk_type; 7392 7393 m_copydata(ip6cp->ip6c_m, 7394 ip6cp->ip6c_off + 7395 sizeof(struct udphdr) + 7396 sizeof(struct sctphdr), 7397 sizeof(uint8_t), 7398 (caddr_t)&chunk_type); 7399 m_copydata(ip6cp->ip6c_m, 7400 ip6cp->ip6c_off + 7401 sizeof(struct udphdr) + 7402 sizeof(struct sctphdr) + 7403 sizeof(struct sctp_chunkhdr), 7404 sizeof(uint32_t), 7405 (caddr_t)&initiate_tag); 7406 if ((chunk_type != SCTP_INITIATION) || 7407 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7408 SCTP_TCB_UNLOCK(stcb); 7409 return; 7410 } 7411 } else { 7412 SCTP_TCB_UNLOCK(stcb); 7413 return; 7414 } 7415 } 7416 type = ip6cp->ip6c_icmp6->icmp6_type; 7417 code = ip6cp->ip6c_icmp6->icmp6_code; 7418 if ((type == ICMP6_DST_UNREACH) && 7419 (code == ICMP6_DST_UNREACH_NOPORT)) { 7420 type = ICMP6_PARAM_PROB; 7421 code = ICMP6_PARAMPROB_NEXTHEADER; 7422 } 7423 sctp6_notify(inp, stcb, net, type, code, 7424 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7425 } else { 7426 if ((stcb == NULL) && (inp != NULL)) { 7427 /* reduce inp's ref-count */ 7428 SCTP_INP_WLOCK(inp); 7429 SCTP_INP_DECR_REF(inp); 7430 SCTP_INP_WUNLOCK(inp); 7431 } 7432 if (stcb) { 7433 SCTP_TCB_UNLOCK(stcb); 7434 } 7435 } 7436 } 7437 #endif 7438 7439 void 7440 sctp_over_udp_stop(void) 7441 { 7442 /* 7443 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7444 * for writing! 7445 */ 7446 #ifdef INET 7447 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7448 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7449 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7450 } 7451 #endif 7452 #ifdef INET6 7453 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7454 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7455 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7456 } 7457 #endif 7458 } 7459 7460 int 7461 sctp_over_udp_start(void) 7462 { 7463 uint16_t port; 7464 int ret; 7465 #ifdef INET 7466 struct sockaddr_in sin; 7467 #endif 7468 #ifdef INET6 7469 struct sockaddr_in6 sin6; 7470 #endif 7471 /* 7472 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7473 * for writing! 7474 */ 7475 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7476 if (ntohs(port) == 0) { 7477 /* Must have a port set */ 7478 return (EINVAL); 7479 } 7480 #ifdef INET 7481 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7482 /* Already running -- must stop first */ 7483 return (EALREADY); 7484 } 7485 #endif 7486 #ifdef INET6 7487 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7488 /* Already running -- must stop first */ 7489 return (EALREADY); 7490 } 7491 #endif 7492 #ifdef INET 7493 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7494 SOCK_DGRAM, IPPROTO_UDP, 7495 curthread->td_ucred, curthread))) { 7496 sctp_over_udp_stop(); 7497 return (ret); 7498 } 7499 /* Call the special UDP hook. */ 7500 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7501 sctp_recv_udp_tunneled_packet, 7502 sctp_recv_icmp_tunneled_packet, 7503 NULL))) { 7504 sctp_over_udp_stop(); 7505 return (ret); 7506 } 7507 /* Ok, we have a socket, bind it to the port. 
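 * Any failure from this point on tears the tunneling setup down
 * again via sctp_over_udp_stop().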
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}

void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}
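/*
 * sctp_set_state() replaces the base state bits (SCTP_STATE_MASK) of the
 * association and clears SCTP_STATE_SHUTDOWN_PENDING when one of the
 * shutdown states is entered; sctp_add_substate() below ORs in additional
 * substate flags.  When built with KDTRACE_HOOKS, both fire the
 * state__change DTrace probe for transitions of interest.
 */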
void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	    (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}