/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
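
/*
 * Note on the tracing helpers above and below: each one fills in the
 * union member of struct sctp_cwnd_log that matches its event and then
 * reads the same storage back through sctp_clog.x.misc.log1..log4, so
 * that SCTP_CTR6() can hand four 32-bit words to the kernel trace
 * facility (KTR) in a uniform format.  Unless SCTP_LOCAL_TRACE_BUF is
 * defined, the helpers compile to empty functions.
 */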

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(SOCK_MTX(inp->sctp_socket));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_rcv));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_snd));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}
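
/*
 * sctp_auditing() below records a two byte entry in the ring buffer and
 * then cross-checks the association's cached counters against the queues
 * themselves: sent_queue_retran_cnt against the chunks marked for
 * retransmission, total_flight and total_flight_count against the booked
 * sizes on the sent queue, and each net's flight_size against the chunks
 * destined for it.  Any mismatch is logged, corrected in place, and a
 * report is printed.
 */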

void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet,
				    lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code whether a time is positive
 * and know that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
	uint64_t temp;
	uint32_t ticks;

	if (hz == 1000) {
		ticks = msecs;
	} else {
		temp = (((uint64_t)msecs * hz) + 999) / 1000;
		if (temp > UINT32_MAX) {
			ticks = UINT32_MAX;
		} else {
			ticks = (uint32_t)temp;
		}
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t msecs;

	if (hz == 1000) {
		msecs = ticks;
	} else {
		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
		if (temp > UINT32_MAX) {
			msecs = UINT32_MAX;
		} else {
			msecs = (uint32_t)temp;
		}
	}
	return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
	uint64_t temp;
	uint32_t ticks;

	temp = (uint64_t)secs * hz;
	if (temp > UINT32_MAX) {
		ticks = UINT32_MAX;
	} else {
		ticks = (uint32_t)temp;
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t secs;

	temp = ((uint64_t)ticks + (hz - 1)) / hz;
	if (temp > UINT32_MAX) {
		secs = UINT32_MAX;
	} else {
		secs = (uint32_t)temp;
	}
	return (secs);
}
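
/*
 * Worked example of the round-up behaviour above (numbers assume hz = 250,
 * i.e. a 4 ms tick, purely for illustration):
 *
 *	sctp_msecs_to_ticks(1)  = (1 * 250 + 999) / 1000  = 1 tick
 *	sctp_msecs_to_ticks(30) = (30 * 250 + 999) / 1000 = 8 ticks
 *	sctp_ticks_to_msecs(8)  = (8 * 1000 + 249) / 250  = 32 ms
 *
 * so a non-zero time always maps to a non-zero tick count, and results
 * that would overflow 32 bits are clamped to UINT32_MAX.
 */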

/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};
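
/*
 * For illustration, using the table above: sctp_get_prev_mtu(1400)
 * returns 1004 and sctp_get_next_mtu(1400) returns 1492 (both functions
 * are defined below).  Values below the smallest entry are rounded down
 * to a multiple of 4 by sctp_get_prev_mtu(), and values above the
 * largest entry are returned unchanged, rounded down to a multiple of 4,
 * by sctp_get_next_mtu().
 */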

/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4
 * smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	val &= 0xfffffffc;
	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use a random selection process to
	 * get the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}
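
/*
 * Note on the two helpers above: sctp_select_initial_TSN() reserves a
 * four byte slot in the per-endpoint random store with a lock-free
 * atomic_cmpset_int() on store_at, refills the store via
 * sctp_fill_random_store() whenever the reservation wraps back to
 * offset 0, and returns the 32-bit word at the reserved offset.
 * sctp_select_a_tag() simply redraws until it gets a non-zero value
 * that (when 'check' is set) also passes sctp_is_vtag_good() against
 * the current time.
 */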

int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t initial_tsn, uint32_t vrf_id,
    uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	if (override_tag) {
		asoc->init_seq_number = initial_tsn;
	} else {
		asoc->init_seq_number = sctp_select_initial_TSN(&inp->sctp_ep);
	}
	asoc->asconf_seq_out = asoc->init_seq_number;
	asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->sending_seq = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->init_seq_number - 1;
	/* we are optimistic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->init_seq_number - 1;
	asoc->asconf_seq_in = asoc->init_seq_number - 1;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = inp->sctp_frag_point;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
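
	/*
	 * The congestion control and stream scheduling behaviour of the
	 * association is table driven: the endpoint's default module
	 * indices select an entry from the sctp_cc_functions[] and
	 * sctp_ss_functions[] arrays declared at the top of this file, and
	 * the copied sets of function pointers are what the rest of the
	 * stack calls through for this association.
	 */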

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	SCTP_TCB_SEND_LOCK(stcb);
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc);
	SCTP_TCB_SEND_UNLOCK(stcb);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}
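
/*
 * A short note on the sizing above: the mapping arrays are bitmaps, one
 * bit per TSN relative to mapping_array_base_tsn, so "needed" additional
 * TSNs translate into (needed + 7) / 8 additional bytes, padded by
 * SCTP_MAPPING_ARRAY_INCR bytes of slack so that the arrays do not have
 * to be reallocated for every small increase.  The renegable and
 * non-renegable arrays are always kept at the same size.
 */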

static void
sctp_iterator_work(struct sctp_iterator *it)
{
	struct epoch_tracker et;
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

	NET_EPOCH_ENTER(et);
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		NET_EPOCH_EXIT(et);
		return;
	}
select_a_new_ep:
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state &&
		    ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		    ("%s: stcb %p does not belong to inp %p, but inp %p",
		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}

static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL, /* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Freeing if we are stopping or put back on the
			 * addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				sctp_asconf_iterator_end(asc, 0);
			} else {
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */

void
sctp_timeout_handler(void *t)
{
	struct epoch_tracker et;
	struct timeval tv;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
	int type;
	int i, secret;
	bool did_output, released_asoc_reference;

	/*
	 * If inp, stcb or net are not NULL, then references to these were
	 * added when the timer was started, and must be released before
	 * this function returns.
	 */
	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	NET_EPOCH_ENTER(et);
	released_asoc_reference = false;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	KASSERT(tmr->self == NULL || tmr->self == tmr,
	    ("sctp_timeout_handler: tmr->self corrupted"));
	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
	    ("sctp_timeout_handler: invalid timer type %d", tmr->type));
	type = tmr->type;
	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
	    type, stcb, stcb->sctp_ep));
	tmr->stopped_from = 0xa001;
	if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to CLOSED association.\n",
		    type);
		goto out_decr;
	}
	tmr->stopped_from = 0xa002;
	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to not being active.\n",
		    type);
		goto out_decr;
	}

	tmr->stopped_from = 0xa003;
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/*
		 * Release reference so that association can be freed if
		 * necessary below. This is safe now that we have acquired
		 * the lock.
		 */
		atomic_add_int(&stcb->asoc.refcnt, -1);
		released_asoc_reference = true;
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d handler exiting due to CLOSED association.\n",
			    type);
			goto out;
		}
	} else if (inp != NULL) {
		SCTP_INP_WLOCK(inp);
	} else {
		SCTP_WQ_ADDR_LOCK();
	}

	/* Record in stopped_from which timeout occurred. */
	tmr->stopped_from = type;
	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_SEND:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		did_output = true;
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * Safeguard. If there are chunks on the sent queue
			 * but no timers running, something is wrong... so
			 * we start a timer on the first chunk on the sent
			 * queue, on whatever net it is sent to.
			 */
1842 */ 1843 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1844 if (chk->whoTo != NULL) { 1845 break; 1846 } 1847 } 1848 if (chk != NULL) { 1849 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1850 } 1851 } 1852 break; 1853 case SCTP_TIMER_TYPE_INIT: 1854 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1855 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1856 type, inp, stcb, net)); 1857 SCTP_STAT_INCR(sctps_timoinit); 1858 stcb->asoc.timoinit++; 1859 if (sctp_t1init_timer(inp, stcb, net)) { 1860 /* no need to unlock on tcb its gone */ 1861 goto out_decr; 1862 } 1863 did_output = false; 1864 break; 1865 case SCTP_TIMER_TYPE_RECV: 1866 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1867 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1868 type, inp, stcb, net)); 1869 SCTP_STAT_INCR(sctps_timosack); 1870 stcb->asoc.timosack++; 1871 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1872 #ifdef SCTP_AUDITING_ENABLED 1873 sctp_auditing(4, inp, stcb, NULL); 1874 #endif 1875 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1876 did_output = true; 1877 break; 1878 case SCTP_TIMER_TYPE_SHUTDOWN: 1879 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1880 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1881 type, inp, stcb, net)); 1882 SCTP_STAT_INCR(sctps_timoshutdown); 1883 stcb->asoc.timoshutdown++; 1884 if (sctp_shutdown_timer(inp, stcb, net)) { 1885 /* no need to unlock on tcb its gone */ 1886 goto out_decr; 1887 } 1888 #ifdef SCTP_AUDITING_ENABLED 1889 sctp_auditing(4, inp, stcb, net); 1890 #endif 1891 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1892 did_output = true; 1893 break; 1894 case SCTP_TIMER_TYPE_HEARTBEAT: 1895 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1896 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1897 type, inp, stcb, net)); 1898 SCTP_STAT_INCR(sctps_timoheartbeat); 1899 stcb->asoc.timoheartbeat++; 1900 if (sctp_heartbeat_timer(inp, stcb, net)) { 1901 /* no need to unlock on tcb its gone */ 1902 goto out_decr; 1903 } 1904 #ifdef SCTP_AUDITING_ENABLED 1905 sctp_auditing(4, inp, stcb, net); 1906 #endif 1907 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1908 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1909 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1910 did_output = true; 1911 } else { 1912 did_output = false; 1913 } 1914 break; 1915 case SCTP_TIMER_TYPE_COOKIE: 1916 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1917 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1918 type, inp, stcb, net)); 1919 SCTP_STAT_INCR(sctps_timocookie); 1920 stcb->asoc.timocookie++; 1921 if (sctp_cookie_timer(inp, stcb, net)) { 1922 /* no need to unlock on tcb its gone */ 1923 goto out_decr; 1924 } 1925 #ifdef SCTP_AUDITING_ENABLED 1926 sctp_auditing(4, inp, stcb, net); 1927 #endif 1928 /* 1929 * We consider T3 and Cookie timer pretty much the same with 1930 * respect to where from in chunk_output. 
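 * That is why SCTP_OUTPUT_FROM_T3 is passed to sctp_chunk_output()
 * below, even though it is the cookie timer that fired.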
1931 */ 1932 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1933 did_output = true; 1934 break; 1935 case SCTP_TIMER_TYPE_NEWCOOKIE: 1936 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1937 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1938 type, inp, stcb, net)); 1939 SCTP_STAT_INCR(sctps_timosecret); 1940 (void)SCTP_GETTIME_TIMEVAL(&tv); 1941 inp->sctp_ep.time_of_secret_change = (unsigned int)tv.tv_sec; 1942 inp->sctp_ep.last_secret_number = 1943 inp->sctp_ep.current_secret_number; 1944 inp->sctp_ep.current_secret_number++; 1945 if (inp->sctp_ep.current_secret_number >= 1946 SCTP_HOW_MANY_SECRETS) { 1947 inp->sctp_ep.current_secret_number = 0; 1948 } 1949 secret = (int)inp->sctp_ep.current_secret_number; 1950 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1951 inp->sctp_ep.secret_key[secret][i] = 1952 sctp_select_initial_TSN(&inp->sctp_ep); 1953 } 1954 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1955 did_output = false; 1956 break; 1957 case SCTP_TIMER_TYPE_PATHMTURAISE: 1958 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1959 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1960 type, inp, stcb, net)); 1961 SCTP_STAT_INCR(sctps_timopathmtu); 1962 sctp_pathmtu_timer(inp, stcb, net); 1963 did_output = false; 1964 break; 1965 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1966 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1967 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1968 type, inp, stcb, net)); 1969 if (sctp_shutdownack_timer(inp, stcb, net)) { 1970 /* no need to unlock on tcb its gone */ 1971 goto out_decr; 1972 } 1973 SCTP_STAT_INCR(sctps_timoshutdownack); 1974 stcb->asoc.timoshutdownack++; 1975 #ifdef SCTP_AUDITING_ENABLED 1976 sctp_auditing(4, inp, stcb, net); 1977 #endif 1978 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1979 did_output = true; 1980 break; 1981 case SCTP_TIMER_TYPE_ASCONF: 1982 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1983 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1984 type, inp, stcb, net)); 1985 SCTP_STAT_INCR(sctps_timoasconf); 1986 if (sctp_asconf_timer(inp, stcb, net)) { 1987 /* no need to unlock on tcb its gone */ 1988 goto out_decr; 1989 } 1990 #ifdef SCTP_AUDITING_ENABLED 1991 sctp_auditing(4, inp, stcb, net); 1992 #endif 1993 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 1994 did_output = true; 1995 break; 1996 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1997 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1998 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1999 type, inp, stcb, net)); 2000 SCTP_STAT_INCR(sctps_timoshutdownguard); 2001 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 2002 "Shutdown guard timer expired"); 2003 sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED); 2004 /* no need to unlock on tcb its gone */ 2005 goto out_decr; 2006 case SCTP_TIMER_TYPE_AUTOCLOSE: 2007 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2008 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2009 type, inp, stcb, net)); 2010 SCTP_STAT_INCR(sctps_timoautoclose); 2011 sctp_autoclose_timer(inp, stcb); 2012 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2013 did_output = true; 2014 break; 2015 case SCTP_TIMER_TYPE_STRRESET: 2016 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2017 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2018 type, inp, stcb, net)); 2019 SCTP_STAT_INCR(sctps_timostrmrst); 2020 if 
(sctp_strreset_timer(inp, stcb)) { 2021 /* no need to unlock on tcb its gone */ 2022 goto out_decr; 2023 } 2024 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2025 did_output = true; 2026 break; 2027 case SCTP_TIMER_TYPE_INPKILL: 2028 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2029 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2030 type, inp, stcb, net)); 2031 SCTP_STAT_INCR(sctps_timoinpkill); 2032 /* 2033 * special case, take away our increment since WE are the 2034 * killer 2035 */ 2036 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2037 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2038 SCTP_INP_DECR_REF(inp); 2039 SCTP_INP_WUNLOCK(inp); 2040 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2041 SCTP_CALLED_FROM_INPKILL_TIMER); 2042 inp = NULL; 2043 goto out_decr; 2044 case SCTP_TIMER_TYPE_ASOCKILL: 2045 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2046 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2047 type, inp, stcb, net)); 2048 SCTP_STAT_INCR(sctps_timoassockill); 2049 /* Can we free it yet? */ 2050 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2051 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2052 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2053 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2054 /* 2055 * free asoc, always unlocks (or destroy's) so prevent 2056 * duplicate unlock or unlock of a free mtx :-0 2057 */ 2058 stcb = NULL; 2059 goto out_decr; 2060 case SCTP_TIMER_TYPE_ADDR_WQ: 2061 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2062 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2063 type, inp, stcb, net)); 2064 sctp_handle_addr_wq(); 2065 did_output = true; 2066 break; 2067 case SCTP_TIMER_TYPE_PRIM_DELETED: 2068 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2069 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2070 type, inp, stcb, net)); 2071 SCTP_STAT_INCR(sctps_timodelprim); 2072 sctp_delete_prim_timer(inp, stcb); 2073 did_output = false; 2074 break; 2075 default: 2076 #ifdef INVARIANTS 2077 panic("Unknown timer type %d", type); 2078 #else 2079 goto out; 2080 #endif 2081 } 2082 #ifdef SCTP_AUDITING_ENABLED 2083 sctp_audit_log(0xF1, (uint8_t)type); 2084 if (inp != NULL) 2085 sctp_auditing(5, inp, stcb, net); 2086 #endif 2087 if (did_output && (stcb != NULL)) { 2088 /* 2089 * Now we need to clean up the control chunk chain if an 2090 * ECNE is on it. It must be marked as UNSENT again so next 2091 * call will continue to send it until such time that we get 2092 * a CWR, to remove it. It is, however, less likely that we 2093 * will find a ecn echo on the chain though. 2094 */ 2095 sctp_fix_ecn_echo(&stcb->asoc); 2096 } 2097 out: 2098 if (stcb != NULL) { 2099 SCTP_TCB_UNLOCK(stcb); 2100 } else if (inp != NULL) { 2101 SCTP_INP_WUNLOCK(inp); 2102 } else { 2103 SCTP_WQ_ADDR_UNLOCK(); 2104 } 2105 2106 out_decr: 2107 /* These reference counts were incremented in sctp_timer_start(). */ 2108 if (inp != NULL) { 2109 SCTP_INP_DECR_REF(inp); 2110 } 2111 if ((stcb != NULL) && !released_asoc_reference) { 2112 atomic_add_int(&stcb->asoc.refcnt, -1); 2113 } 2114 if (net != NULL) { 2115 sctp_free_remote_addr(net); 2116 } 2117 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2118 CURVNET_RESTORE(); 2119 NET_EPOCH_EXIT(et); 2120 } 2121 2122 /*- 2123 * The following table shows which parameters must be provided 2124 * when calling sctp_timer_start(). For parameters not being 2125 * provided, NULL must be used. 
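 * For example, the retransmission timer for a destination needs all
 * three pointers, sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net),
 * while the address work queue timer needs none,
 * sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, NULL, NULL, NULL); the table
 * below lists the requirements for every timer type.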
2126 * 2127 * |Name |inp |stcb|net | 2128 * |-----------------------------|----|----|----| 2129 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2130 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2131 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2132 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2133 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2134 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2135 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2136 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2137 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2138 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2139 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2140 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2141 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2142 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2143 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2144 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2145 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2146 * 2147 */ 2148 2149 void 2150 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2151 struct sctp_nets *net) 2152 { 2153 struct sctp_timer *tmr; 2154 uint32_t to_ticks; 2155 uint32_t rndval, jitter; 2156 2157 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2158 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2159 t_type, stcb, stcb->sctp_ep)); 2160 tmr = NULL; 2161 if (stcb != NULL) { 2162 SCTP_TCB_LOCK_ASSERT(stcb); 2163 } else if (inp != NULL) { 2164 SCTP_INP_WLOCK_ASSERT(inp); 2165 } else { 2166 SCTP_WQ_ADDR_LOCK_ASSERT(); 2167 } 2168 if (stcb != NULL) { 2169 /* 2170 * Don't restart timer on association that's about to be 2171 * killed. 2172 */ 2173 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2174 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2175 SCTPDBG(SCTP_DEBUG_TIMER2, 2176 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2177 t_type, inp, stcb, net); 2178 return; 2179 } 2180 /* Don't restart timer on net that's been removed. */ 2181 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2182 SCTPDBG(SCTP_DEBUG_TIMER2, 2183 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2184 t_type, inp, stcb, net); 2185 return; 2186 } 2187 } 2188 switch (t_type) { 2189 case SCTP_TIMER_TYPE_SEND: 2190 /* Here we use the RTO timer. */ 2191 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2192 #ifdef INVARIANTS 2193 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2194 t_type, inp, stcb, net); 2195 #else 2196 return; 2197 #endif 2198 } 2199 tmr = &net->rxt_timer; 2200 if (net->RTO == 0) { 2201 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2202 } else { 2203 to_ticks = sctp_msecs_to_ticks(net->RTO); 2204 } 2205 break; 2206 case SCTP_TIMER_TYPE_INIT: 2207 /* 2208 * Here we use the INIT timer default usually about 1 2209 * second. 2210 */ 2211 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2212 #ifdef INVARIANTS 2213 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2214 t_type, inp, stcb, net); 2215 #else 2216 return; 2217 #endif 2218 } 2219 tmr = &net->rxt_timer; 2220 if (net->RTO == 0) { 2221 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2222 } else { 2223 to_ticks = sctp_msecs_to_ticks(net->RTO); 2224 } 2225 break; 2226 case SCTP_TIMER_TYPE_RECV: 2227 /* 2228 * Here we use the Delayed-Ack timer value from the inp, 2229 * ususually about 200ms. 
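 * The value used below is stcb->asoc.delayed_ack (in milliseconds),
 * converted to ticks via sctp_msecs_to_ticks().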
2230 */ 2231 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2232 #ifdef INVARIANTS 2233 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2234 t_type, inp, stcb, net); 2235 #else 2236 return; 2237 #endif 2238 } 2239 tmr = &stcb->asoc.dack_timer; 2240 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2241 break; 2242 case SCTP_TIMER_TYPE_SHUTDOWN: 2243 /* Here we use the RTO of the destination. */ 2244 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2245 #ifdef INVARIANTS 2246 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2247 t_type, inp, stcb, net); 2248 #else 2249 return; 2250 #endif 2251 } 2252 tmr = &net->rxt_timer; 2253 if (net->RTO == 0) { 2254 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2255 } else { 2256 to_ticks = sctp_msecs_to_ticks(net->RTO); 2257 } 2258 break; 2259 case SCTP_TIMER_TYPE_HEARTBEAT: 2260 /* 2261 * The net is used here so that we can add in the RTO. Even 2262 * though we use a different timer. We also add the HB timer 2263 * PLUS a random jitter. 2264 */ 2265 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2266 #ifdef INVARIANTS 2267 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2268 t_type, inp, stcb, net); 2269 #else 2270 return; 2271 #endif 2272 } 2273 if ((net->dest_state & SCTP_ADDR_NOHB) && 2274 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 2275 SCTPDBG(SCTP_DEBUG_TIMER2, 2276 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2277 t_type, inp, stcb, net); 2278 return; 2279 } 2280 tmr = &net->hb_timer; 2281 if (net->RTO == 0) { 2282 to_ticks = stcb->asoc.initial_rto; 2283 } else { 2284 to_ticks = net->RTO; 2285 } 2286 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2287 jitter = rndval % to_ticks; 2288 if (to_ticks > 1) { 2289 to_ticks >>= 1; 2290 } 2291 if (jitter < (UINT32_MAX - to_ticks)) { 2292 to_ticks += jitter; 2293 } else { 2294 to_ticks = UINT32_MAX; 2295 } 2296 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2297 !(net->dest_state & SCTP_ADDR_PF)) { 2298 if (net->heart_beat_delay < (UINT32_MAX - to_ticks)) { 2299 to_ticks += net->heart_beat_delay; 2300 } else { 2301 to_ticks = UINT32_MAX; 2302 } 2303 } 2304 /* 2305 * Now we must convert the to_ticks that are now in ms to 2306 * ticks. 2307 */ 2308 to_ticks = sctp_msecs_to_ticks(to_ticks); 2309 break; 2310 case SCTP_TIMER_TYPE_COOKIE: 2311 /* 2312 * Here we can use the RTO timer from the network since one 2313 * RTT was complete. If a retransmission happened then we 2314 * will be using the RTO initial value. 2315 */ 2316 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2317 #ifdef INVARIANTS 2318 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2319 t_type, inp, stcb, net); 2320 #else 2321 return; 2322 #endif 2323 } 2324 tmr = &net->rxt_timer; 2325 if (net->RTO == 0) { 2326 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2327 } else { 2328 to_ticks = sctp_msecs_to_ticks(net->RTO); 2329 } 2330 break; 2331 case SCTP_TIMER_TYPE_NEWCOOKIE: 2332 /* 2333 * Nothing needed but the endpoint here ususually about 60 2334 * minutes. 
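 * The duration below is taken from
 * inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE].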
2335 */ 2336 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2337 #ifdef INVARIANTS 2338 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2339 t_type, inp, stcb, net); 2340 #else 2341 return; 2342 #endif 2343 } 2344 tmr = &inp->sctp_ep.signature_change; 2345 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2346 break; 2347 case SCTP_TIMER_TYPE_PATHMTURAISE: 2348 /* 2349 * Here we use the value found in the EP for PMTUD, 2350 * ususually about 10 minutes. 2351 */ 2352 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2353 #ifdef INVARIANTS 2354 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2355 t_type, inp, stcb, net); 2356 #else 2357 return; 2358 #endif 2359 } 2360 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2361 SCTPDBG(SCTP_DEBUG_TIMER2, 2362 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2363 t_type, inp, stcb, net); 2364 return; 2365 } 2366 tmr = &net->pmtu_timer; 2367 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2368 break; 2369 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2370 /* Here we use the RTO of the destination. */ 2371 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2372 #ifdef INVARIANTS 2373 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2374 t_type, inp, stcb, net); 2375 #else 2376 return; 2377 #endif 2378 } 2379 tmr = &net->rxt_timer; 2380 if (net->RTO == 0) { 2381 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2382 } else { 2383 to_ticks = sctp_msecs_to_ticks(net->RTO); 2384 } 2385 break; 2386 case SCTP_TIMER_TYPE_ASCONF: 2387 /* 2388 * Here the timer comes from the stcb but its value is from 2389 * the net's RTO. 2390 */ 2391 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2392 #ifdef INVARIANTS 2393 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2394 t_type, inp, stcb, net); 2395 #else 2396 return; 2397 #endif 2398 } 2399 tmr = &stcb->asoc.asconf_timer; 2400 if (net->RTO == 0) { 2401 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2402 } else { 2403 to_ticks = sctp_msecs_to_ticks(net->RTO); 2404 } 2405 break; 2406 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2407 /* 2408 * Here we use the endpoints shutdown guard timer usually 2409 * about 3 minutes. 2410 */ 2411 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2412 #ifdef INVARIANTS 2413 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2414 t_type, inp, stcb, net); 2415 #else 2416 return; 2417 #endif 2418 } 2419 tmr = &stcb->asoc.shut_guard_timer; 2420 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2421 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2422 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2423 } else { 2424 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2425 } 2426 } else { 2427 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2428 } 2429 break; 2430 case SCTP_TIMER_TYPE_AUTOCLOSE: 2431 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2432 #ifdef INVARIANTS 2433 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2434 t_type, inp, stcb, net); 2435 #else 2436 return; 2437 #endif 2438 } 2439 tmr = &stcb->asoc.autoclose_timer; 2440 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2441 break; 2442 case SCTP_TIMER_TYPE_STRRESET: 2443 /* 2444 * Here the timer comes from the stcb but its value is from 2445 * the net's RTO. 
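 * As with the other RTO based timers, stcb->asoc.initial_rto is used
 * below as long as net->RTO is still zero.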
2446 */ 2447 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2448 #ifdef INVARIANTS 2449 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2450 t_type, inp, stcb, net); 2451 #else 2452 return; 2453 #endif 2454 } 2455 tmr = &stcb->asoc.strreset_timer; 2456 if (net->RTO == 0) { 2457 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2458 } else { 2459 to_ticks = sctp_msecs_to_ticks(net->RTO); 2460 } 2461 break; 2462 case SCTP_TIMER_TYPE_INPKILL: 2463 /* 2464 * The inp is setup to die. We re-use the signature_chage 2465 * timer since that has stopped and we are in the GONE 2466 * state. 2467 */ 2468 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2469 #ifdef INVARIANTS 2470 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2471 t_type, inp, stcb, net); 2472 #else 2473 return; 2474 #endif 2475 } 2476 tmr = &inp->sctp_ep.signature_change; 2477 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2478 break; 2479 case SCTP_TIMER_TYPE_ASOCKILL: 2480 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2481 #ifdef INVARIANTS 2482 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2483 t_type, inp, stcb, net); 2484 #else 2485 return; 2486 #endif 2487 } 2488 tmr = &stcb->asoc.strreset_timer; 2489 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2490 break; 2491 case SCTP_TIMER_TYPE_ADDR_WQ: 2492 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2493 #ifdef INVARIANTS 2494 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2495 t_type, inp, stcb, net); 2496 #else 2497 return; 2498 #endif 2499 } 2500 /* Only 1 tick away :-) */ 2501 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2502 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2503 break; 2504 case SCTP_TIMER_TYPE_PRIM_DELETED: 2505 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2506 #ifdef INVARIANTS 2507 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2508 t_type, inp, stcb, net); 2509 #else 2510 return; 2511 #endif 2512 } 2513 tmr = &stcb->asoc.delete_prim_timer; 2514 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2515 break; 2516 default: 2517 #ifdef INVARIANTS 2518 panic("Unknown timer type %d", t_type); 2519 #else 2520 return; 2521 #endif 2522 } 2523 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2524 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2525 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2526 /* 2527 * We do NOT allow you to have it already running. If it is, 2528 * we leave the current one up unchanged. 2529 */ 2530 SCTPDBG(SCTP_DEBUG_TIMER2, 2531 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2532 t_type, inp, stcb, net); 2533 return; 2534 } 2535 /* At this point we can proceed. */ 2536 if (t_type == SCTP_TIMER_TYPE_SEND) { 2537 stcb->asoc.num_send_timers_up++; 2538 } 2539 tmr->stopped_from = 0; 2540 tmr->type = t_type; 2541 tmr->ep = (void *)inp; 2542 tmr->tcb = (void *)stcb; 2543 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2544 tmr->net = NULL; 2545 } else { 2546 tmr->net = (void *)net; 2547 } 2548 tmr->self = (void *)tmr; 2549 tmr->vnet = (void *)curvnet; 2550 tmr->ticks = sctp_get_tick_count(); 2551 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2552 SCTPDBG(SCTP_DEBUG_TIMER2, 2553 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2554 t_type, to_ticks, inp, stcb, net); 2555 /* 2556 * If this is a newly scheduled callout, as opposed to a 2557 * rescheduled one, increment relevant reference counts. 
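 * One reference each is taken on the inp, the stcb and the net; they
 * are dropped again in sctp_timeout_handler() or, if the timer is
 * stopped before it fires, in sctp_timer_stop().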
2558 */ 2559 if (tmr->ep != NULL) { 2560 SCTP_INP_INCR_REF(inp); 2561 } 2562 if (tmr->tcb != NULL) { 2563 atomic_add_int(&stcb->asoc.refcnt, 1); 2564 } 2565 if (tmr->net != NULL) { 2566 atomic_add_int(&net->ref_count, 1); 2567 } 2568 } else { 2569 /* 2570 * This should not happen, since we checked for pending 2571 * above. 2572 */ 2573 SCTPDBG(SCTP_DEBUG_TIMER2, 2574 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2575 t_type, to_ticks, inp, stcb, net); 2576 } 2577 return; 2578 } 2579 2580 /*- 2581 * The following table shows which parameters must be provided 2582 * when calling sctp_timer_stop(). For parameters not being 2583 * provided, NULL must be used. 2584 * 2585 * |Name |inp |stcb|net | 2586 * |-----------------------------|----|----|----| 2587 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2588 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2589 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2590 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2591 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2592 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2593 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2594 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2595 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2596 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2597 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2598 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2599 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2600 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2601 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2602 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2603 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2604 * 2605 */ 2606 2607 void 2608 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2609 struct sctp_nets *net, uint32_t from) 2610 { 2611 struct sctp_timer *tmr; 2612 2613 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2614 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2615 t_type, stcb, stcb->sctp_ep)); 2616 if (stcb != NULL) { 2617 SCTP_TCB_LOCK_ASSERT(stcb); 2618 } else if (inp != NULL) { 2619 SCTP_INP_WLOCK_ASSERT(inp); 2620 } else { 2621 SCTP_WQ_ADDR_LOCK_ASSERT(); 2622 } 2623 tmr = NULL; 2624 switch (t_type) { 2625 case SCTP_TIMER_TYPE_SEND: 2626 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2627 #ifdef INVARIANTS 2628 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2629 t_type, inp, stcb, net); 2630 #else 2631 return; 2632 #endif 2633 } 2634 tmr = &net->rxt_timer; 2635 break; 2636 case SCTP_TIMER_TYPE_INIT: 2637 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2638 #ifdef INVARIANTS 2639 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2640 t_type, inp, stcb, net); 2641 #else 2642 return; 2643 #endif 2644 } 2645 tmr = &net->rxt_timer; 2646 break; 2647 case SCTP_TIMER_TYPE_RECV: 2648 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2649 #ifdef INVARIANTS 2650 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2651 t_type, inp, stcb, net); 2652 #else 2653 return; 2654 #endif 2655 } 2656 tmr = &stcb->asoc.dack_timer; 2657 break; 2658 case SCTP_TIMER_TYPE_SHUTDOWN: 2659 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2660 #ifdef INVARIANTS 2661 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2662 t_type, inp, stcb, net); 2663 #else 2664 return; 2665 #endif 2666 } 2667 tmr = &net->rxt_timer; 2668 break; 2669 case SCTP_TIMER_TYPE_HEARTBEAT: 2670 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2671 #ifdef INVARIANTS 2672 panic("sctp_timer_stop of type %d: inp = 
%p, stcb = %p, net = %p", 2673 t_type, inp, stcb, net); 2674 #else 2675 return; 2676 #endif 2677 } 2678 tmr = &net->hb_timer; 2679 break; 2680 case SCTP_TIMER_TYPE_COOKIE: 2681 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2682 #ifdef INVARIANTS 2683 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2684 t_type, inp, stcb, net); 2685 #else 2686 return; 2687 #endif 2688 } 2689 tmr = &net->rxt_timer; 2690 break; 2691 case SCTP_TIMER_TYPE_NEWCOOKIE: 2692 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2693 #ifdef INVARIANTS 2694 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2695 t_type, inp, stcb, net); 2696 #else 2697 return; 2698 #endif 2699 } 2700 tmr = &inp->sctp_ep.signature_change; 2701 break; 2702 case SCTP_TIMER_TYPE_PATHMTURAISE: 2703 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2704 #ifdef INVARIANTS 2705 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2706 t_type, inp, stcb, net); 2707 #else 2708 return; 2709 #endif 2710 } 2711 tmr = &net->pmtu_timer; 2712 break; 2713 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2714 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2715 #ifdef INVARIANTS 2716 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2717 t_type, inp, stcb, net); 2718 #else 2719 return; 2720 #endif 2721 } 2722 tmr = &net->rxt_timer; 2723 break; 2724 case SCTP_TIMER_TYPE_ASCONF: 2725 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2726 #ifdef INVARIANTS 2727 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2728 t_type, inp, stcb, net); 2729 #else 2730 return; 2731 #endif 2732 } 2733 tmr = &stcb->asoc.asconf_timer; 2734 break; 2735 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2736 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2737 #ifdef INVARIANTS 2738 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2739 t_type, inp, stcb, net); 2740 #else 2741 return; 2742 #endif 2743 } 2744 tmr = &stcb->asoc.shut_guard_timer; 2745 break; 2746 case SCTP_TIMER_TYPE_AUTOCLOSE: 2747 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2748 #ifdef INVARIANTS 2749 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2750 t_type, inp, stcb, net); 2751 #else 2752 return; 2753 #endif 2754 } 2755 tmr = &stcb->asoc.autoclose_timer; 2756 break; 2757 case SCTP_TIMER_TYPE_STRRESET: 2758 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2759 #ifdef INVARIANTS 2760 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2761 t_type, inp, stcb, net); 2762 #else 2763 return; 2764 #endif 2765 } 2766 tmr = &stcb->asoc.strreset_timer; 2767 break; 2768 case SCTP_TIMER_TYPE_INPKILL: 2769 /* 2770 * The inp is setup to die. We re-use the signature_chage 2771 * timer since that has stopped and we are in the GONE 2772 * state. 
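 * Several timer types can share the same sctp_timer structure, so the
 * code further below only stops the callout when the timer is unused or
 * currently holds the requested type.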
2773 */ 2774 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2775 #ifdef INVARIANTS 2776 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2777 t_type, inp, stcb, net); 2778 #else 2779 return; 2780 #endif 2781 } 2782 tmr = &inp->sctp_ep.signature_change; 2783 break; 2784 case SCTP_TIMER_TYPE_ASOCKILL: 2785 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2786 #ifdef INVARIANTS 2787 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2788 t_type, inp, stcb, net); 2789 #else 2790 return; 2791 #endif 2792 } 2793 tmr = &stcb->asoc.strreset_timer; 2794 break; 2795 case SCTP_TIMER_TYPE_ADDR_WQ: 2796 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2797 #ifdef INVARIANTS 2798 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2799 t_type, inp, stcb, net); 2800 #else 2801 return; 2802 #endif 2803 } 2804 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2805 break; 2806 case SCTP_TIMER_TYPE_PRIM_DELETED: 2807 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2808 #ifdef INVARIANTS 2809 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2810 t_type, inp, stcb, net); 2811 #else 2812 return; 2813 #endif 2814 } 2815 tmr = &stcb->asoc.delete_prim_timer; 2816 break; 2817 default: 2818 #ifdef INVARIANTS 2819 panic("Unknown timer type %d", t_type); 2820 #else 2821 return; 2822 #endif 2823 } 2824 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2825 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2826 (tmr->type != t_type)) { 2827 /* 2828 * Ok we have a timer that is under joint use. Cookie timer 2829 * per chance with the SEND timer. We therefore are NOT 2830 * running the timer that the caller wants stopped. So just 2831 * return. 2832 */ 2833 SCTPDBG(SCTP_DEBUG_TIMER2, 2834 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2835 t_type, inp, stcb, net); 2836 return; 2837 } 2838 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2839 stcb->asoc.num_send_timers_up--; 2840 if (stcb->asoc.num_send_timers_up < 0) { 2841 stcb->asoc.num_send_timers_up = 0; 2842 } 2843 } 2844 tmr->self = NULL; 2845 tmr->stopped_from = from; 2846 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2847 KASSERT(tmr->ep == inp, 2848 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2849 t_type, inp, tmr->ep)); 2850 KASSERT(tmr->tcb == stcb, 2851 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2852 t_type, stcb, tmr->tcb)); 2853 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2854 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2855 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2856 t_type, net, tmr->net)); 2857 SCTPDBG(SCTP_DEBUG_TIMER2, 2858 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2859 t_type, inp, stcb, net); 2860 /* 2861 * If the timer was actually stopped, decrement reference 2862 * counts that were incremented in sctp_timer_start(). 2863 */ 2864 if (tmr->ep != NULL) { 2865 SCTP_INP_DECR_REF(inp); 2866 tmr->ep = NULL; 2867 } 2868 if (tmr->tcb != NULL) { 2869 atomic_add_int(&stcb->asoc.refcnt, -1); 2870 tmr->tcb = NULL; 2871 } 2872 if (tmr->net != NULL) { 2873 /* 2874 * Can't use net, since it doesn't work for 2875 * SCTP_TIMER_TYPE_ASCONF. 
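 * For SCTP_TIMER_TYPE_ASCONF the caller passes net as NULL (see the
 * table above), yet the timer still holds a reference on the
 * destination it was started on, so the stored tmr->net is what must
 * be released here.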
2876 */ 2877 sctp_free_remote_addr((struct sctp_nets *)tmr->net); 2878 tmr->net = NULL; 2879 } 2880 } else { 2881 SCTPDBG(SCTP_DEBUG_TIMER2, 2882 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2883 t_type, inp, stcb, net); 2884 } 2885 return; 2886 } 2887 2888 uint32_t 2889 sctp_calculate_len(struct mbuf *m) 2890 { 2891 uint32_t tlen = 0; 2892 struct mbuf *at; 2893 2894 at = m; 2895 while (at) { 2896 tlen += SCTP_BUF_LEN(at); 2897 at = SCTP_BUF_NEXT(at); 2898 } 2899 return (tlen); 2900 } 2901 2902 void 2903 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2904 struct sctp_association *asoc, uint32_t mtu) 2905 { 2906 /* 2907 * Reset the P-MTU size on this association, this involves changing 2908 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2909 * allow the DF flag to be cleared. 2910 */ 2911 struct sctp_tmit_chunk *chk; 2912 unsigned int eff_mtu, ovh; 2913 2914 asoc->smallest_mtu = mtu; 2915 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2916 ovh = SCTP_MIN_OVERHEAD; 2917 } else { 2918 ovh = SCTP_MIN_V4_OVERHEAD; 2919 } 2920 eff_mtu = mtu - ovh; 2921 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2922 if (chk->send_size > eff_mtu) { 2923 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2924 } 2925 } 2926 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2927 if (chk->send_size > eff_mtu) { 2928 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2929 } 2930 } 2931 } 2932 2933 /* 2934 * Given an association and starting time of the current RTT period, update 2935 * RTO in number of msecs. net should point to the current network. 2936 * Return 1, if an RTO update was performed, return 0 if no update was 2937 * performed due to invalid starting point. 2938 */ 2939 2940 int 2941 sctp_calculate_rto(struct sctp_tcb *stcb, 2942 struct sctp_association *asoc, 2943 struct sctp_nets *net, 2944 struct timeval *old, 2945 int rtt_from_sack) 2946 { 2947 struct timeval now; 2948 uint64_t rtt_us; /* RTT in us */ 2949 int32_t rtt; /* RTT in ms */ 2950 uint32_t new_rto; 2951 int first_measure = 0; 2952 2953 /************************/ 2954 /* 1. calculate new RTT */ 2955 /************************/ 2956 /* get the current time */ 2957 if (stcb->asoc.use_precise_time) { 2958 (void)SCTP_GETPTIME_TIMEVAL(&now); 2959 } else { 2960 (void)SCTP_GETTIME_TIMEVAL(&now); 2961 } 2962 if ((old->tv_sec > now.tv_sec) || 2963 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2964 /* The starting point is in the future. */ 2965 return (0); 2966 } 2967 timevalsub(&now, old); 2968 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2969 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2970 /* The RTT is larger than a sane value. */ 2971 return (0); 2972 } 2973 /* store the current RTT in us */ 2974 net->rtt = rtt_us; 2975 /* compute rtt in ms */ 2976 rtt = (int32_t)(net->rtt / 1000); 2977 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2978 /* 2979 * Tell the CC module that a new update has just occurred 2980 * from a sack 2981 */ 2982 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2983 } 2984 /* 2985 * Do we need to determine the lan? We do this only on sacks i.e. 2986 * RTT being determined from data not non-data (HB/INIT->INITACK). 2987 */ 2988 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2989 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2990 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2991 net->lan_type = SCTP_LAN_INTERNET; 2992 } else { 2993 net->lan_type = SCTP_LAN_LOCAL; 2994 } 2995 } 2996 2997 /***************************/ 2998 /* 2. 
update RTTVAR & SRTT */ 2999 /***************************/ 3000 /*- 3001 * Compute the scaled average lastsa and the 3002 * scaled variance lastsv as described in van Jacobson 3003 * Paper "Congestion Avoidance and Control", Annex A. 3004 * 3005 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 3006 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 3007 */ 3008 if (net->RTO_measured) { 3009 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 3010 net->lastsa += rtt; 3011 if (rtt < 0) { 3012 rtt = -rtt; 3013 } 3014 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 3015 net->lastsv += rtt; 3016 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3017 rto_logging(net, SCTP_LOG_RTTVAR); 3018 } 3019 } else { 3020 /* First RTO measurment */ 3021 net->RTO_measured = 1; 3022 first_measure = 1; 3023 net->lastsa = rtt << SCTP_RTT_SHIFT; 3024 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3025 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3026 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3027 } 3028 } 3029 if (net->lastsv == 0) { 3030 net->lastsv = SCTP_CLOCK_GRANULARITY; 3031 } 3032 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3033 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3034 (stcb->asoc.sat_network_lockout == 0)) { 3035 stcb->asoc.sat_network = 1; 3036 } else if ((!first_measure) && stcb->asoc.sat_network) { 3037 stcb->asoc.sat_network = 0; 3038 stcb->asoc.sat_network_lockout = 1; 3039 } 3040 /* bound it, per C6/C7 in Section 5.3.1 */ 3041 if (new_rto < stcb->asoc.minrto) { 3042 new_rto = stcb->asoc.minrto; 3043 } 3044 if (new_rto > stcb->asoc.maxrto) { 3045 new_rto = stcb->asoc.maxrto; 3046 } 3047 net->RTO = new_rto; 3048 return (1); 3049 } 3050 3051 /* 3052 * return a pointer to a contiguous piece of data from the given mbuf chain 3053 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3054 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 3055 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 3056 */ 3057 caddr_t 3058 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3059 { 3060 uint32_t count; 3061 uint8_t *ptr; 3062 3063 ptr = in_ptr; 3064 if ((off < 0) || (len <= 0)) 3065 return (NULL); 3066 3067 /* find the desired start location */ 3068 while ((m != NULL) && (off > 0)) { 3069 if (off < SCTP_BUF_LEN(m)) 3070 break; 3071 off -= SCTP_BUF_LEN(m); 3072 m = SCTP_BUF_NEXT(m); 3073 } 3074 if (m == NULL) 3075 return (NULL); 3076 3077 /* is the current mbuf large enough (eg. contiguous)? */ 3078 if ((SCTP_BUF_LEN(m) - off) >= len) { 3079 return (mtod(m, caddr_t)+off); 3080 } else { 3081 /* else, it spans more than one mbuf, so save a temp copy... 
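into the caller supplied buffer: the loop below copies min(SCTP_BUF_LEN(m) - off, len) bytes from each mbuf in turn and returns in_ptr, or NULL if the chain ends before 'len' bytes could be gathered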
*/ 3082 while ((m != NULL) && (len > 0)) { 3083 count = min(SCTP_BUF_LEN(m) - off, len); 3084 memcpy(ptr, mtod(m, caddr_t)+off, count); 3085 len -= count; 3086 ptr += count; 3087 off = 0; 3088 m = SCTP_BUF_NEXT(m); 3089 } 3090 if ((m == NULL) && (len > 0)) 3091 return (NULL); 3092 else 3093 return ((caddr_t)in_ptr); 3094 } 3095 } 3096 3097 struct sctp_paramhdr * 3098 sctp_get_next_param(struct mbuf *m, 3099 int offset, 3100 struct sctp_paramhdr *pull, 3101 int pull_limit) 3102 { 3103 /* This just provides a typed signature to Peter's Pull routine */ 3104 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3105 (uint8_t *)pull)); 3106 } 3107 3108 struct mbuf * 3109 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3110 { 3111 struct mbuf *m_last; 3112 caddr_t dp; 3113 3114 if (padlen > 3) { 3115 return (NULL); 3116 } 3117 if (padlen <= M_TRAILINGSPACE(m)) { 3118 /* 3119 * The easy way. We hope the majority of the time we hit 3120 * here :) 3121 */ 3122 m_last = m; 3123 } else { 3124 /* Hard way we must grow the mbuf chain */ 3125 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3126 if (m_last == NULL) { 3127 return (NULL); 3128 } 3129 SCTP_BUF_LEN(m_last) = 0; 3130 SCTP_BUF_NEXT(m_last) = NULL; 3131 SCTP_BUF_NEXT(m) = m_last; 3132 } 3133 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3134 SCTP_BUF_LEN(m_last) += padlen; 3135 memset(dp, 0, padlen); 3136 return (m_last); 3137 } 3138 3139 struct mbuf * 3140 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3141 { 3142 /* find the last mbuf in chain and pad it */ 3143 struct mbuf *m_at; 3144 3145 if (last_mbuf != NULL) { 3146 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3147 } else { 3148 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3149 if (SCTP_BUF_NEXT(m_at) == NULL) { 3150 return (sctp_add_pad_tombuf(m_at, padval)); 3151 } 3152 } 3153 } 3154 return (NULL); 3155 } 3156 3157 static void 3158 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3159 uint16_t error, struct sctp_abort_chunk *abort, 3160 bool from_peer, bool timedout, int so_locked) 3161 { 3162 struct mbuf *m_notify; 3163 struct sctp_assoc_change *sac; 3164 struct sctp_queued_to_read *control; 3165 unsigned int notif_len; 3166 uint16_t abort_len; 3167 unsigned int i; 3168 3169 KASSERT(abort == NULL || from_peer, 3170 ("sctp_notify_assoc_change: ABORT chunk provided for local termination")); 3171 KASSERT(!from_peer || !timedout, 3172 ("sctp_notify_assoc_change: timeouts can only be local")); 3173 if (stcb == NULL) { 3174 return; 3175 } 3176 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3177 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3178 if (abort != NULL) { 3179 abort_len = ntohs(abort->ch.chunk_length); 3180 /* 3181 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3182 * contiguous. 3183 */ 3184 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3185 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3186 } 3187 } else { 3188 abort_len = 0; 3189 } 3190 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3191 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3192 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3193 notif_len += abort_len; 3194 } 3195 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3196 if (m_notify == NULL) { 3197 /* Retry with smaller value. 
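That is, fall back to just the fixed-size struct sctp_assoc_change and drop the optional payload (the supported features list or the ABORT cause).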
*/ 3198 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3199 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3200 if (m_notify == NULL) { 3201 goto set_error; 3202 } 3203 } 3204 SCTP_BUF_NEXT(m_notify) = NULL; 3205 sac = mtod(m_notify, struct sctp_assoc_change *); 3206 memset(sac, 0, notif_len); 3207 sac->sac_type = SCTP_ASSOC_CHANGE; 3208 sac->sac_flags = 0; 3209 sac->sac_length = sizeof(struct sctp_assoc_change); 3210 sac->sac_state = state; 3211 sac->sac_error = error; 3212 if (state == SCTP_CANT_STR_ASSOC) { 3213 sac->sac_outbound_streams = 0; 3214 sac->sac_inbound_streams = 0; 3215 } else { 3216 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3217 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3218 } 3219 sac->sac_assoc_id = sctp_get_associd(stcb); 3220 if (notif_len > sizeof(struct sctp_assoc_change)) { 3221 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3222 i = 0; 3223 if (stcb->asoc.prsctp_supported == 1) { 3224 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3225 } 3226 if (stcb->asoc.auth_supported == 1) { 3227 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3228 } 3229 if (stcb->asoc.asconf_supported == 1) { 3230 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3231 } 3232 if (stcb->asoc.idata_supported == 1) { 3233 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3234 } 3235 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3236 if (stcb->asoc.reconfig_supported == 1) { 3237 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3238 } 3239 sac->sac_length += i; 3240 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3241 memcpy(sac->sac_info, abort, abort_len); 3242 sac->sac_length += abort_len; 3243 } 3244 } 3245 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3246 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3247 0, 0, stcb->asoc.context, 0, 0, 0, 3248 m_notify); 3249 if (control != NULL) { 3250 control->length = SCTP_BUF_LEN(m_notify); 3251 control->spec_flags = M_NOTIFICATION; 3252 /* not that we need this */ 3253 control->tail_mbuf = m_notify; 3254 sctp_add_to_readq(stcb->sctp_ep, stcb, 3255 control, 3256 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3257 so_locked); 3258 } else { 3259 sctp_m_freem(m_notify); 3260 } 3261 } 3262 /* 3263 * For 1-to-1 style sockets, we send up and error when an ABORT 3264 * comes in. 
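 * The errno chosen below depends on how the association ended:
 * ECONNREFUSED for a peer abort while still in COOKIE_WAIT,
 * ECONNRESET for any other peer abort, ETIMEDOUT for a local
 * timeout and ECONNABORTED for other local terminations.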
3265 */ 3266 set_error: 3267 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3268 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3269 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3270 SOCK_LOCK(stcb->sctp_socket); 3271 if (from_peer) { 3272 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3273 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3274 stcb->sctp_socket->so_error = ECONNREFUSED; 3275 } else { 3276 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3277 stcb->sctp_socket->so_error = ECONNRESET; 3278 } 3279 } else { 3280 if (timedout) { 3281 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3282 stcb->sctp_socket->so_error = ETIMEDOUT; 3283 } else { 3284 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3285 stcb->sctp_socket->so_error = ECONNABORTED; 3286 } 3287 } 3288 SOCK_UNLOCK(stcb->sctp_socket); 3289 } 3290 /* Wake ANY sleepers */ 3291 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3292 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3293 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3294 socantrcvmore(stcb->sctp_socket); 3295 } 3296 sorwakeup(stcb->sctp_socket); 3297 sowwakeup(stcb->sctp_socket); 3298 } 3299 3300 static void 3301 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3302 struct sockaddr *sa, uint32_t error, int so_locked) 3303 { 3304 struct mbuf *m_notify; 3305 struct sctp_paddr_change *spc; 3306 struct sctp_queued_to_read *control; 3307 3308 if ((stcb == NULL) || 3309 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3310 /* event not enabled */ 3311 return; 3312 } 3313 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3314 if (m_notify == NULL) 3315 return; 3316 SCTP_BUF_LEN(m_notify) = 0; 3317 spc = mtod(m_notify, struct sctp_paddr_change *); 3318 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3319 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3320 spc->spc_flags = 0; 3321 spc->spc_length = sizeof(struct sctp_paddr_change); 3322 switch (sa->sa_family) { 3323 #ifdef INET 3324 case AF_INET: 3325 #ifdef INET6 3326 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3327 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3328 (struct sockaddr_in6 *)&spc->spc_aaddr); 3329 } else { 3330 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3331 } 3332 #else 3333 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3334 #endif 3335 break; 3336 #endif 3337 #ifdef INET6 3338 case AF_INET6: 3339 { 3340 struct sockaddr_in6 *sin6; 3341 3342 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3343 3344 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3345 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3346 if (sin6->sin6_scope_id == 0) { 3347 /* recover scope_id for user */ 3348 (void)sa6_recoverscope(sin6); 3349 } else { 3350 /* clear embedded scope_id for user */ 3351 in6_clearscope(&sin6->sin6_addr); 3352 } 3353 } 3354 break; 3355 } 3356 #endif 3357 default: 3358 /* TSNH */ 3359 break; 3360 } 3361 spc->spc_state = state; 3362 spc->spc_error = error; 3363 spc->spc_assoc_id = sctp_get_associd(stcb); 3364 3365 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3366 SCTP_BUF_NEXT(m_notify) = NULL; 3367 3368 /* append to socket */ 3369 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3370 0, 0, stcb->asoc.context, 0, 0, 0, 3371 m_notify); 3372 if 
(control == NULL) { 3373 /* no memory */ 3374 sctp_m_freem(m_notify); 3375 return; 3376 } 3377 control->length = SCTP_BUF_LEN(m_notify); 3378 control->spec_flags = M_NOTIFICATION; 3379 /* not that we need this */ 3380 control->tail_mbuf = m_notify; 3381 sctp_add_to_readq(stcb->sctp_ep, stcb, 3382 control, 3383 &stcb->sctp_socket->so_rcv, 1, 3384 SCTP_READ_LOCK_NOT_HELD, 3385 so_locked); 3386 } 3387 3388 static void 3389 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3390 struct sctp_tmit_chunk *chk, int so_locked) 3391 { 3392 struct mbuf *m_notify; 3393 struct sctp_send_failed *ssf; 3394 struct sctp_send_failed_event *ssfe; 3395 struct sctp_queued_to_read *control; 3396 struct sctp_chunkhdr *chkhdr; 3397 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3398 3399 if ((stcb == NULL) || 3400 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3401 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3402 /* event not enabled */ 3403 return; 3404 } 3405 3406 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3407 notifhdr_len = sizeof(struct sctp_send_failed_event); 3408 } else { 3409 notifhdr_len = sizeof(struct sctp_send_failed); 3410 } 3411 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3412 if (m_notify == NULL) 3413 /* no space left */ 3414 return; 3415 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3416 if (stcb->asoc.idata_supported) { 3417 chkhdr_len = sizeof(struct sctp_idata_chunk); 3418 } else { 3419 chkhdr_len = sizeof(struct sctp_data_chunk); 3420 } 3421 /* Use some defaults in case we can't access the chunk header */ 3422 if (chk->send_size >= chkhdr_len) { 3423 payload_len = chk->send_size - chkhdr_len; 3424 } else { 3425 payload_len = 0; 3426 } 3427 padding_len = 0; 3428 if (chk->data != NULL) { 3429 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3430 if (chkhdr != NULL) { 3431 chk_len = ntohs(chkhdr->chunk_length); 3432 if ((chk_len >= chkhdr_len) && 3433 (chk->send_size >= chk_len) && 3434 (chk->send_size - chk_len < 4)) { 3435 padding_len = chk->send_size - chk_len; 3436 payload_len = chk->send_size - chkhdr_len - padding_len; 3437 } 3438 } 3439 } 3440 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3441 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3442 memset(ssfe, 0, notifhdr_len); 3443 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3444 if (sent) { 3445 ssfe->ssfe_flags = SCTP_DATA_SENT; 3446 } else { 3447 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3448 } 3449 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3450 ssfe->ssfe_error = error; 3451 /* not exactly what the user sent in, but should be close :) */ 3452 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3453 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3454 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3455 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3456 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3457 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3458 } else { 3459 ssf = mtod(m_notify, struct sctp_send_failed *); 3460 memset(ssf, 0, notifhdr_len); 3461 ssf->ssf_type = SCTP_SEND_FAILED; 3462 if (sent) { 3463 ssf->ssf_flags = SCTP_DATA_SENT; 3464 } else { 3465 ssf->ssf_flags = SCTP_DATA_UNSENT; 3466 } 3467 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3468 ssf->ssf_error = error; 3469 /* not exactly what the user sent in, but should be close :) */ 3470 
ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3471 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3472 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3473 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3474 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3475 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3476 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3477 } 3478 if (chk->data != NULL) { 3479 /* Trim off the sctp chunk header (it should be there) */ 3480 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3481 m_adj(chk->data, chkhdr_len); 3482 m_adj(chk->data, -padding_len); 3483 sctp_mbuf_crush(chk->data); 3484 chk->send_size -= (chkhdr_len + padding_len); 3485 } 3486 } 3487 SCTP_BUF_NEXT(m_notify) = chk->data; 3488 /* Steal off the mbuf */ 3489 chk->data = NULL; 3490 /* 3491 * For this case, we check the actual socket buffer, since the assoc 3492 * is going away we don't want to overfill the socket buffer for a 3493 * non-reader 3494 */ 3495 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3496 sctp_m_freem(m_notify); 3497 return; 3498 } 3499 /* append to socket */ 3500 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3501 0, 0, stcb->asoc.context, 0, 0, 0, 3502 m_notify); 3503 if (control == NULL) { 3504 /* no memory */ 3505 sctp_m_freem(m_notify); 3506 return; 3507 } 3508 control->length = SCTP_BUF_LEN(m_notify); 3509 control->spec_flags = M_NOTIFICATION; 3510 /* not that we need this */ 3511 control->tail_mbuf = m_notify; 3512 sctp_add_to_readq(stcb->sctp_ep, stcb, 3513 control, 3514 &stcb->sctp_socket->so_rcv, 1, 3515 SCTP_READ_LOCK_NOT_HELD, 3516 so_locked); 3517 } 3518 3519 static void 3520 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3521 struct sctp_stream_queue_pending *sp, int so_locked) 3522 { 3523 struct mbuf *m_notify; 3524 struct sctp_send_failed *ssf; 3525 struct sctp_send_failed_event *ssfe; 3526 struct sctp_queued_to_read *control; 3527 int notifhdr_len; 3528 3529 if ((stcb == NULL) || 3530 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3531 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3532 /* event not enabled */ 3533 return; 3534 } 3535 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3536 notifhdr_len = sizeof(struct sctp_send_failed_event); 3537 } else { 3538 notifhdr_len = sizeof(struct sctp_send_failed); 3539 } 3540 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3541 if (m_notify == NULL) { 3542 /* no space left */ 3543 return; 3544 } 3545 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3546 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3547 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3548 memset(ssfe, 0, notifhdr_len); 3549 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3550 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3551 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3552 ssfe->ssfe_error = error; 3553 /* not exactly what the user sent in, but should be close :) */ 3554 ssfe->ssfe_info.snd_sid = sp->sid; 3555 if (sp->some_taken) { 3556 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3557 } else { 3558 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3559 } 3560 ssfe->ssfe_info.snd_ppid = sp->ppid; 3561 ssfe->ssfe_info.snd_context = sp->context; 3562 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3563 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3564 } else { 
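/*
 * SCTP_PCB_FLAGS_RECVNSENDFAILEVNT is not enabled, so build the
 * struct sctp_send_failed variant for applications that enabled
 * SCTP_PCB_FLAGS_RECVSENDFAILEVNT instead.
 */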
3565 ssf = mtod(m_notify, struct sctp_send_failed *); 3566 memset(ssf, 0, notifhdr_len); 3567 ssf->ssf_type = SCTP_SEND_FAILED; 3568 ssf->ssf_flags = SCTP_DATA_UNSENT; 3569 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3570 ssf->ssf_error = error; 3571 /* not exactly what the user sent in, but should be close :) */ 3572 ssf->ssf_info.sinfo_stream = sp->sid; 3573 ssf->ssf_info.sinfo_ssn = 0; 3574 if (sp->some_taken) { 3575 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3576 } else { 3577 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3578 } 3579 ssf->ssf_info.sinfo_ppid = sp->ppid; 3580 ssf->ssf_info.sinfo_context = sp->context; 3581 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3582 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3583 } 3584 SCTP_BUF_NEXT(m_notify) = sp->data; 3585 3586 /* Steal off the mbuf */ 3587 sp->data = NULL; 3588 /* 3589 * For this case, we check the actual socket buffer, since the assoc 3590 * is going away we don't want to overfill the socket buffer for a 3591 * non-reader 3592 */ 3593 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3594 sctp_m_freem(m_notify); 3595 return; 3596 } 3597 /* append to socket */ 3598 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3599 0, 0, stcb->asoc.context, 0, 0, 0, 3600 m_notify); 3601 if (control == NULL) { 3602 /* no memory */ 3603 sctp_m_freem(m_notify); 3604 return; 3605 } 3606 control->length = SCTP_BUF_LEN(m_notify); 3607 control->spec_flags = M_NOTIFICATION; 3608 /* not that we need this */ 3609 control->tail_mbuf = m_notify; 3610 sctp_add_to_readq(stcb->sctp_ep, stcb, 3611 control, 3612 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3613 } 3614 3615 static void 3616 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3617 { 3618 struct mbuf *m_notify; 3619 struct sctp_adaptation_event *sai; 3620 struct sctp_queued_to_read *control; 3621 3622 if ((stcb == NULL) || 3623 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3624 /* event not enabled */ 3625 return; 3626 } 3627 3628 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3629 if (m_notify == NULL) 3630 /* no space left */ 3631 return; 3632 SCTP_BUF_LEN(m_notify) = 0; 3633 sai = mtod(m_notify, struct sctp_adaptation_event *); 3634 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3635 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3636 sai->sai_flags = 0; 3637 sai->sai_length = sizeof(struct sctp_adaptation_event); 3638 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3639 sai->sai_assoc_id = sctp_get_associd(stcb); 3640 3641 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3642 SCTP_BUF_NEXT(m_notify) = NULL; 3643 3644 /* append to socket */ 3645 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3646 0, 0, stcb->asoc.context, 0, 0, 0, 3647 m_notify); 3648 if (control == NULL) { 3649 /* no memory */ 3650 sctp_m_freem(m_notify); 3651 return; 3652 } 3653 control->length = SCTP_BUF_LEN(m_notify); 3654 control->spec_flags = M_NOTIFICATION; 3655 /* not that we need this */ 3656 control->tail_mbuf = m_notify; 3657 sctp_add_to_readq(stcb->sctp_ep, stcb, 3658 control, 3659 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3660 } 3661 3662 /* This always must be called with the read-queue LOCKED in the INP */ 3663 static void 3664 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3665 uint32_t val, int 
so_locked) 3666 { 3667 struct mbuf *m_notify; 3668 struct sctp_pdapi_event *pdapi; 3669 struct sctp_queued_to_read *control; 3670 struct sockbuf *sb; 3671 3672 if ((stcb == NULL) || 3673 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3674 /* event not enabled */ 3675 return; 3676 } 3677 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3678 return; 3679 } 3680 3681 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3682 if (m_notify == NULL) 3683 /* no space left */ 3684 return; 3685 SCTP_BUF_LEN(m_notify) = 0; 3686 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3687 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3688 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3689 pdapi->pdapi_flags = 0; 3690 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3691 pdapi->pdapi_indication = error; 3692 pdapi->pdapi_stream = (val >> 16); 3693 pdapi->pdapi_seq = (val & 0x0000ffff); 3694 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3695 3696 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3697 SCTP_BUF_NEXT(m_notify) = NULL; 3698 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3699 0, 0, stcb->asoc.context, 0, 0, 0, 3700 m_notify); 3701 if (control == NULL) { 3702 /* no memory */ 3703 sctp_m_freem(m_notify); 3704 return; 3705 } 3706 control->length = SCTP_BUF_LEN(m_notify); 3707 control->spec_flags = M_NOTIFICATION; 3708 /* not that we need this */ 3709 control->tail_mbuf = m_notify; 3710 sb = &stcb->sctp_socket->so_rcv; 3711 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3712 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3713 } 3714 sctp_sballoc(stcb, sb, m_notify); 3715 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3716 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3717 } 3718 control->end_added = 1; 3719 if (stcb->asoc.control_pdapi) 3720 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3721 else { 3722 /* we really should not see this case */ 3723 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3724 } 3725 if (stcb->sctp_ep && stcb->sctp_socket) { 3726 /* This should always be the case */ 3727 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3728 } 3729 } 3730 3731 static void 3732 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3733 { 3734 struct mbuf *m_notify; 3735 struct sctp_shutdown_event *sse; 3736 struct sctp_queued_to_read *control; 3737 3738 /* 3739 * For TCP model AND UDP connected sockets we will send an error up 3740 * when an SHUTDOWN completes 3741 */ 3742 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3743 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3744 /* mark socket closed for read/write and wakeup! 
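	 * (socantsendmore() makes any further writes fail and wakes up a
	 * thread that may be blocked in a send on this socket).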
*/ 3745 socantsendmore(stcb->sctp_socket); 3746 } 3747 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3748 /* event not enabled */ 3749 return; 3750 } 3751 3752 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3753 if (m_notify == NULL) 3754 /* no space left */ 3755 return; 3756 sse = mtod(m_notify, struct sctp_shutdown_event *); 3757 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3758 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3759 sse->sse_flags = 0; 3760 sse->sse_length = sizeof(struct sctp_shutdown_event); 3761 sse->sse_assoc_id = sctp_get_associd(stcb); 3762 3763 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3764 SCTP_BUF_NEXT(m_notify) = NULL; 3765 3766 /* append to socket */ 3767 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3768 0, 0, stcb->asoc.context, 0, 0, 0, 3769 m_notify); 3770 if (control == NULL) { 3771 /* no memory */ 3772 sctp_m_freem(m_notify); 3773 return; 3774 } 3775 control->length = SCTP_BUF_LEN(m_notify); 3776 control->spec_flags = M_NOTIFICATION; 3777 /* not that we need this */ 3778 control->tail_mbuf = m_notify; 3779 sctp_add_to_readq(stcb->sctp_ep, stcb, 3780 control, 3781 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3782 } 3783 3784 static void 3785 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3786 int so_locked) 3787 { 3788 struct mbuf *m_notify; 3789 struct sctp_sender_dry_event *event; 3790 struct sctp_queued_to_read *control; 3791 3792 if ((stcb == NULL) || 3793 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3794 /* event not enabled */ 3795 return; 3796 } 3797 3798 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3799 if (m_notify == NULL) { 3800 /* no space left */ 3801 return; 3802 } 3803 SCTP_BUF_LEN(m_notify) = 0; 3804 event = mtod(m_notify, struct sctp_sender_dry_event *); 3805 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3806 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3807 event->sender_dry_flags = 0; 3808 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3809 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3810 3811 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3812 SCTP_BUF_NEXT(m_notify) = NULL; 3813 3814 /* append to socket */ 3815 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3816 0, 0, stcb->asoc.context, 0, 0, 0, 3817 m_notify); 3818 if (control == NULL) { 3819 /* no memory */ 3820 sctp_m_freem(m_notify); 3821 return; 3822 } 3823 control->length = SCTP_BUF_LEN(m_notify); 3824 control->spec_flags = M_NOTIFICATION; 3825 /* not that we need this */ 3826 control->tail_mbuf = m_notify; 3827 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3828 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3829 } 3830 3831 void 3832 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3833 { 3834 struct mbuf *m_notify; 3835 struct sctp_queued_to_read *control; 3836 struct sctp_stream_change_event *stradd; 3837 3838 if ((stcb == NULL) || 3839 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3840 /* event not enabled */ 3841 return; 3842 } 3843 if ((stcb->asoc.peer_req_out) && flag) { 3844 /* Peer made the request, don't tell the local user */ 3845 stcb->asoc.peer_req_out = 0; 3846 return; 3847 } 3848 stcb->asoc.peer_req_out = 0; 3849 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3850 if (m_notify == NULL) 3851 /* no space left */ 3852 return; 3853 SCTP_BUF_LEN(m_notify) = 0; 3854 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3855 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3856 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3857 stradd->strchange_flags = flag; 3858 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3859 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3860 stradd->strchange_instrms = numberin; 3861 stradd->strchange_outstrms = numberout; 3862 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3863 SCTP_BUF_NEXT(m_notify) = NULL; 3864 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3865 /* no space */ 3866 sctp_m_freem(m_notify); 3867 return; 3868 } 3869 /* append to socket */ 3870 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3871 0, 0, stcb->asoc.context, 0, 0, 0, 3872 m_notify); 3873 if (control == NULL) { 3874 /* no memory */ 3875 sctp_m_freem(m_notify); 3876 return; 3877 } 3878 control->length = SCTP_BUF_LEN(m_notify); 3879 control->spec_flags = M_NOTIFICATION; 3880 /* not that we need this */ 3881 control->tail_mbuf = m_notify; 3882 sctp_add_to_readq(stcb->sctp_ep, stcb, 3883 control, 3884 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3885 } 3886 3887 void 3888 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3889 { 3890 struct mbuf *m_notify; 3891 struct sctp_queued_to_read *control; 3892 struct sctp_assoc_reset_event *strasoc; 3893 3894 if ((stcb == NULL) || 3895 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3896 /* event not enabled */ 3897 return; 3898 } 3899 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3900 if (m_notify == NULL) 3901 /* no space left */ 3902 return; 3903 SCTP_BUF_LEN(m_notify) = 0; 3904 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3905 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3906 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3907 strasoc->assocreset_flags = flag; 3908 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3909 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3910 strasoc->assocreset_local_tsn = sending_tsn; 3911 strasoc->assocreset_remote_tsn = recv_tsn; 3912 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3913 SCTP_BUF_NEXT(m_notify) = NULL; 3914 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3915 /* no space */ 3916 sctp_m_freem(m_notify); 3917 return; 3918 } 3919 /* append to socket */ 3920 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3921 0, 0, stcb->asoc.context, 0, 0, 0, 3922 m_notify); 3923 if (control == NULL) { 3924 /* no memory */ 3925 sctp_m_freem(m_notify); 3926 return; 3927 } 3928 control->length = SCTP_BUF_LEN(m_notify); 3929 control->spec_flags = M_NOTIFICATION; 3930 /* not that we need this */ 3931 control->tail_mbuf = m_notify; 3932 sctp_add_to_readq(stcb->sctp_ep, stcb, 3933 control, 3934 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3935 } 3936 3937 static void 3938 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3939 int number_entries, uint16_t *list, int flag) 3940 { 3941 struct mbuf *m_notify; 3942 struct sctp_queued_to_read 
*control; 3943 struct sctp_stream_reset_event *strreset; 3944 int len; 3945 3946 if ((stcb == NULL) || 3947 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3948 /* event not enabled */ 3949 return; 3950 } 3951 3952 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3953 if (m_notify == NULL) 3954 /* no space left */ 3955 return; 3956 SCTP_BUF_LEN(m_notify) = 0; 3957 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3958 if (len > M_TRAILINGSPACE(m_notify)) { 3959 /* never enough room */ 3960 sctp_m_freem(m_notify); 3961 return; 3962 } 3963 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3964 memset(strreset, 0, len); 3965 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3966 strreset->strreset_flags = flag; 3967 strreset->strreset_length = len; 3968 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3969 if (number_entries) { 3970 int i; 3971 3972 for (i = 0; i < number_entries; i++) { 3973 strreset->strreset_stream_list[i] = ntohs(list[i]); 3974 } 3975 } 3976 SCTP_BUF_LEN(m_notify) = len; 3977 SCTP_BUF_NEXT(m_notify) = NULL; 3978 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3979 /* no space */ 3980 sctp_m_freem(m_notify); 3981 return; 3982 } 3983 /* append to socket */ 3984 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3985 0, 0, stcb->asoc.context, 0, 0, 0, 3986 m_notify); 3987 if (control == NULL) { 3988 /* no memory */ 3989 sctp_m_freem(m_notify); 3990 return; 3991 } 3992 control->length = SCTP_BUF_LEN(m_notify); 3993 control->spec_flags = M_NOTIFICATION; 3994 /* not that we need this */ 3995 control->tail_mbuf = m_notify; 3996 sctp_add_to_readq(stcb->sctp_ep, stcb, 3997 control, 3998 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3999 } 4000 4001 static void 4002 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 4003 { 4004 struct mbuf *m_notify; 4005 struct sctp_remote_error *sre; 4006 struct sctp_queued_to_read *control; 4007 unsigned int notif_len; 4008 uint16_t chunk_len; 4009 4010 if ((stcb == NULL) || 4011 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 4012 return; 4013 } 4014 if (chunk != NULL) { 4015 chunk_len = ntohs(chunk->ch.chunk_length); 4016 /* 4017 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 4018 * contiguous. 4019 */ 4020 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 4021 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4022 } 4023 } else { 4024 chunk_len = 0; 4025 } 4026 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4027 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4028 if (m_notify == NULL) { 4029 /* Retry with smaller value. 
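	 * Allocate just the sctp_remote_error header; the offending chunk is
	 * then not copied, but the error code itself still reaches the
	 * application.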
*/ 4030 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4031 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4032 if (m_notify == NULL) { 4033 return; 4034 } 4035 } 4036 SCTP_BUF_NEXT(m_notify) = NULL; 4037 sre = mtod(m_notify, struct sctp_remote_error *); 4038 memset(sre, 0, notif_len); 4039 sre->sre_type = SCTP_REMOTE_ERROR; 4040 sre->sre_flags = 0; 4041 sre->sre_length = sizeof(struct sctp_remote_error); 4042 sre->sre_error = error; 4043 sre->sre_assoc_id = sctp_get_associd(stcb); 4044 if (notif_len > sizeof(struct sctp_remote_error)) { 4045 memcpy(sre->sre_data, chunk, chunk_len); 4046 sre->sre_length += chunk_len; 4047 } 4048 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4049 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4050 0, 0, stcb->asoc.context, 0, 0, 0, 4051 m_notify); 4052 if (control != NULL) { 4053 control->length = SCTP_BUF_LEN(m_notify); 4054 control->spec_flags = M_NOTIFICATION; 4055 /* not that we need this */ 4056 control->tail_mbuf = m_notify; 4057 sctp_add_to_readq(stcb->sctp_ep, stcb, 4058 control, 4059 &stcb->sctp_socket->so_rcv, 1, 4060 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4061 } else { 4062 sctp_m_freem(m_notify); 4063 } 4064 } 4065 4066 void 4067 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4068 uint32_t error, void *data, int so_locked) 4069 { 4070 if ((stcb == NULL) || 4071 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4072 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4073 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4074 /* If the socket is gone we are out of here */ 4075 return; 4076 } 4077 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4078 return; 4079 } 4080 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4081 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4082 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4083 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4084 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4085 /* Don't report these in front states */ 4086 return; 4087 } 4088 } 4089 switch (notification) { 4090 case SCTP_NOTIFY_ASSOC_UP: 4091 if (stcb->asoc.assoc_up_sent == 0) { 4092 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, false, false, so_locked); 4093 stcb->asoc.assoc_up_sent = 1; 4094 } 4095 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4096 sctp_notify_adaptation_layer(stcb); 4097 } 4098 if (stcb->asoc.auth_supported == 0) { 4099 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4100 NULL, so_locked); 4101 } 4102 break; 4103 case SCTP_NOTIFY_ASSOC_DOWN: 4104 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, false, false, so_locked); 4105 break; 4106 case SCTP_NOTIFY_INTERFACE_DOWN: 4107 { 4108 struct sctp_nets *net; 4109 4110 net = (struct sctp_nets *)data; 4111 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4112 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4113 break; 4114 } 4115 case SCTP_NOTIFY_INTERFACE_UP: 4116 { 4117 struct sctp_nets *net; 4118 4119 net = (struct sctp_nets *)data; 4120 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4121 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4122 break; 4123 } 4124 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4125 { 4126 struct sctp_nets *net; 4127 4128 net = (struct sctp_nets *)data; 4129 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4130 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4131 break; 4132 } 4133 case 
SCTP_NOTIFY_SPECIAL_SP_FAIL: 4134 sctp_notify_send_failed2(stcb, error, 4135 (struct sctp_stream_queue_pending *)data, so_locked); 4136 break; 4137 case SCTP_NOTIFY_SENT_DG_FAIL: 4138 sctp_notify_send_failed(stcb, 1, error, 4139 (struct sctp_tmit_chunk *)data, so_locked); 4140 break; 4141 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4142 sctp_notify_send_failed(stcb, 0, error, 4143 (struct sctp_tmit_chunk *)data, so_locked); 4144 break; 4145 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4146 { 4147 uint32_t val; 4148 4149 val = *((uint32_t *)data); 4150 4151 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4152 break; 4153 } 4154 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4155 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4156 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4157 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, false, so_locked); 4158 } else { 4159 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, false, so_locked); 4160 } 4161 break; 4162 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4163 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4164 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4165 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, true, false, so_locked); 4166 } else { 4167 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, true, false, so_locked); 4168 } 4169 break; 4170 case SCTP_NOTIFY_ASSOC_TIMEDOUT: 4171 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4172 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4173 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, true, so_locked); 4174 } else { 4175 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, true, so_locked); 4176 } 4177 break; 4178 case SCTP_NOTIFY_ASSOC_RESTART: 4179 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, false, false, so_locked); 4180 if (stcb->asoc.auth_supported == 0) { 4181 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4182 NULL, so_locked); 4183 } 4184 break; 4185 case SCTP_NOTIFY_STR_RESET_SEND: 4186 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4187 break; 4188 case SCTP_NOTIFY_STR_RESET_RECV: 4189 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4190 break; 4191 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4192 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4193 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4194 break; 4195 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4196 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4197 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4198 break; 4199 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4200 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4201 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4202 break; 4203 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4204 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4205 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4206 break; 4207 case SCTP_NOTIFY_ASCONF_ADD_IP: 4208 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4209 error, so_locked); 4210 break; 4211 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4212 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4213 error, so_locked); 4214 break; 4215 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4216 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4217 error, so_locked); 4218 break; 4219 case SCTP_NOTIFY_PEER_SHUTDOWN: 4220 
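		/* The peer sent a SHUTDOWN; queue an SCTP_SHUTDOWN_EVENT for the application. */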
sctp_notify_shutdown_event(stcb); 4221 break; 4222 case SCTP_NOTIFY_AUTH_NEW_KEY: 4223 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4224 (uint16_t)(uintptr_t)data, 4225 so_locked); 4226 break; 4227 case SCTP_NOTIFY_AUTH_FREE_KEY: 4228 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4229 (uint16_t)(uintptr_t)data, 4230 so_locked); 4231 break; 4232 case SCTP_NOTIFY_NO_PEER_AUTH: 4233 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4234 (uint16_t)(uintptr_t)data, 4235 so_locked); 4236 break; 4237 case SCTP_NOTIFY_SENDER_DRY: 4238 sctp_notify_sender_dry_event(stcb, so_locked); 4239 break; 4240 case SCTP_NOTIFY_REMOTE_ERROR: 4241 sctp_notify_remote_error(stcb, error, data); 4242 break; 4243 default: 4244 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4245 __func__, notification, notification); 4246 break; 4247 } /* end switch */ 4248 } 4249 4250 void 4251 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4252 { 4253 struct sctp_association *asoc; 4254 struct sctp_stream_out *outs; 4255 struct sctp_tmit_chunk *chk, *nchk; 4256 struct sctp_stream_queue_pending *sp, *nsp; 4257 int i; 4258 4259 if (stcb == NULL) { 4260 return; 4261 } 4262 asoc = &stcb->asoc; 4263 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4264 /* already being freed */ 4265 return; 4266 } 4267 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4268 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4269 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4270 return; 4271 } 4272 /* now through all the gunk freeing chunks */ 4273 /* sent queue SHOULD be empty */ 4274 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4275 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4276 asoc->sent_queue_cnt--; 4277 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4278 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4279 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4280 #ifdef INVARIANTS 4281 } else { 4282 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4283 #endif 4284 } 4285 } 4286 if (chk->data != NULL) { 4287 sctp_free_bufspace(stcb, asoc, chk, 1); 4288 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4289 error, chk, so_locked); 4290 if (chk->data) { 4291 sctp_m_freem(chk->data); 4292 chk->data = NULL; 4293 } 4294 } 4295 sctp_free_a_chunk(stcb, chk, so_locked); 4296 /* sa_ignore FREED_MEMORY */ 4297 } 4298 /* pending send queue SHOULD be empty */ 4299 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4300 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4301 asoc->send_queue_cnt--; 4302 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4303 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4304 #ifdef INVARIANTS 4305 } else { 4306 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4307 #endif 4308 } 4309 if (chk->data != NULL) { 4310 sctp_free_bufspace(stcb, asoc, chk, 1); 4311 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4312 error, chk, so_locked); 4313 if (chk->data) { 4314 sctp_m_freem(chk->data); 4315 chk->data = NULL; 4316 } 4317 } 4318 sctp_free_a_chunk(stcb, chk, so_locked); 4319 /* sa_ignore FREED_MEMORY */ 4320 } 4321 for (i = 0; i < asoc->streamoutcnt; i++) { 4322 /* For each stream */ 4323 outs = &asoc->strmout[i]; 4324 /* clean up any sends there */ 4325 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4326 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4327 TAILQ_REMOVE(&outs->outqueue, sp, next); 4328 
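			/*
			 * Let the stream scheduler drop this entry before its
			 * buffer space is released and the failed send is
			 * reported to the user.
			 */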
stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp); 4329 sctp_free_spbufspace(stcb, asoc, sp); 4330 if (sp->data) { 4331 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4332 error, (void *)sp, so_locked); 4333 if (sp->data) { 4334 sctp_m_freem(sp->data); 4335 sp->data = NULL; 4336 sp->tail_mbuf = NULL; 4337 sp->length = 0; 4338 } 4339 } 4340 if (sp->net) { 4341 sctp_free_remote_addr(sp->net); 4342 sp->net = NULL; 4343 } 4344 /* Free the chunk */ 4345 sctp_free_a_strmoq(stcb, sp, so_locked); 4346 /* sa_ignore FREED_MEMORY */ 4347 } 4348 } 4349 } 4350 4351 void 4352 sctp_abort_notification(struct sctp_tcb *stcb, bool from_peer, bool timeout, 4353 uint16_t error, struct sctp_abort_chunk *abort, 4354 int so_locked) 4355 { 4356 if (stcb == NULL) { 4357 return; 4358 } 4359 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4360 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4361 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4362 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 4363 } 4364 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4365 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4366 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4367 return; 4368 } 4369 SCTP_TCB_SEND_LOCK(stcb); 4370 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4371 /* Tell them we lost the asoc */ 4372 sctp_report_all_outbound(stcb, error, so_locked); 4373 SCTP_TCB_SEND_UNLOCK(stcb); 4374 if (from_peer) { 4375 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4376 } else { 4377 if (timeout) { 4378 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_TIMEDOUT, stcb, error, abort, so_locked); 4379 } else { 4380 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4381 } 4382 } 4383 } 4384 4385 void 4386 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4387 struct mbuf *m, int iphlen, 4388 struct sockaddr *src, struct sockaddr *dst, 4389 struct sctphdr *sh, struct mbuf *op_err, 4390 uint8_t mflowtype, uint32_t mflowid, 4391 uint32_t vrf_id, uint16_t port) 4392 { 4393 struct sctp_gen_error_cause *cause; 4394 uint32_t vtag; 4395 uint16_t cause_code; 4396 4397 if (stcb != NULL) { 4398 vtag = stcb->asoc.peer_vtag; 4399 vrf_id = stcb->asoc.vrf_id; 4400 if (op_err != NULL) { 4401 /* Read the cause code from the error cause. 
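			 * The code is handed to sctp_abort_notification()
			 * below so the ULP learns why the association went
			 * away.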
*/ 4402 cause = mtod(op_err, struct sctp_gen_error_cause *); 4403 cause_code = ntohs(cause->code); 4404 } else { 4405 cause_code = 0; 4406 } 4407 } else { 4408 vtag = 0; 4409 } 4410 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4411 mflowtype, mflowid, inp->fibnum, 4412 vrf_id, port); 4413 if (stcb != NULL) { 4414 /* We have a TCB to abort, send notification too */ 4415 sctp_abort_notification(stcb, false, false, cause_code, NULL, SCTP_SO_NOT_LOCKED); 4416 /* Ok, now lets free it */ 4417 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4418 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4419 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4420 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4421 } 4422 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4423 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4424 } 4425 } 4426 #ifdef SCTP_ASOCLOG_OF_TSNS 4427 void 4428 sctp_print_out_track_log(struct sctp_tcb *stcb) 4429 { 4430 #ifdef NOSIY_PRINTS 4431 int i; 4432 4433 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4434 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4435 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4436 SCTP_PRINTF("None rcvd\n"); 4437 goto none_in; 4438 } 4439 if (stcb->asoc.tsn_in_wrapped) { 4440 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4441 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4442 stcb->asoc.in_tsnlog[i].tsn, 4443 stcb->asoc.in_tsnlog[i].strm, 4444 stcb->asoc.in_tsnlog[i].seq, 4445 stcb->asoc.in_tsnlog[i].flgs, 4446 stcb->asoc.in_tsnlog[i].sz); 4447 } 4448 } 4449 if (stcb->asoc.tsn_in_at) { 4450 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4451 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4452 stcb->asoc.in_tsnlog[i].tsn, 4453 stcb->asoc.in_tsnlog[i].strm, 4454 stcb->asoc.in_tsnlog[i].seq, 4455 stcb->asoc.in_tsnlog[i].flgs, 4456 stcb->asoc.in_tsnlog[i].sz); 4457 } 4458 } 4459 none_in: 4460 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4461 if ((stcb->asoc.tsn_out_at == 0) && 4462 (stcb->asoc.tsn_out_wrapped == 0)) { 4463 SCTP_PRINTF("None sent\n"); 4464 } 4465 if (stcb->asoc.tsn_out_wrapped) { 4466 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4467 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4468 stcb->asoc.out_tsnlog[i].tsn, 4469 stcb->asoc.out_tsnlog[i].strm, 4470 stcb->asoc.out_tsnlog[i].seq, 4471 stcb->asoc.out_tsnlog[i].flgs, 4472 stcb->asoc.out_tsnlog[i].sz); 4473 } 4474 } 4475 if (stcb->asoc.tsn_out_at) { 4476 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4477 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4478 stcb->asoc.out_tsnlog[i].tsn, 4479 stcb->asoc.out_tsnlog[i].strm, 4480 stcb->asoc.out_tsnlog[i].seq, 4481 stcb->asoc.out_tsnlog[i].flgs, 4482 stcb->asoc.out_tsnlog[i].sz); 4483 } 4484 } 4485 #endif 4486 } 4487 #endif 4488 4489 void 4490 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4491 struct mbuf *op_err, bool timedout, int so_locked) 4492 { 4493 struct sctp_gen_error_cause *cause; 4494 uint16_t cause_code; 4495 4496 if (stcb == NULL) { 4497 /* Got to have a TCB */ 4498 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4499 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4500 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4501 SCTP_CALLED_DIRECTLY_NOCMPSET); 4502 } 4503 } 4504 return; 4505 } 4506 if (op_err != NULL) { 4507 /* Read the cause code from the error cause. 
*/ 4508 cause = mtod(op_err, struct sctp_gen_error_cause *); 4509 cause_code = ntohs(cause->code); 4510 } else { 4511 cause_code = 0; 4512 } 4513 /* notify the peer */ 4514 sctp_send_abort_tcb(stcb, op_err, so_locked); 4515 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4516 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4517 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4518 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4519 } 4520 /* notify the ulp */ 4521 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4522 sctp_abort_notification(stcb, false, timedout, cause_code, NULL, so_locked); 4523 } 4524 /* now free the asoc */ 4525 #ifdef SCTP_ASOCLOG_OF_TSNS 4526 sctp_print_out_track_log(stcb); 4527 #endif 4528 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4529 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4530 } 4531 4532 void 4533 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4534 struct sockaddr *src, struct sockaddr *dst, 4535 struct sctphdr *sh, struct sctp_inpcb *inp, 4536 struct mbuf *cause, 4537 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4538 uint32_t vrf_id, uint16_t port) 4539 { 4540 struct sctp_chunkhdr *ch, chunk_buf; 4541 unsigned int chk_length; 4542 int contains_init_chunk; 4543 4544 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4545 /* Generate a TO address for future reference */ 4546 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4547 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4548 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4549 SCTP_CALLED_DIRECTLY_NOCMPSET); 4550 } 4551 } 4552 contains_init_chunk = 0; 4553 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4554 sizeof(*ch), (uint8_t *)&chunk_buf); 4555 while (ch != NULL) { 4556 chk_length = ntohs(ch->chunk_length); 4557 if (chk_length < sizeof(*ch)) { 4558 /* break to abort land */ 4559 break; 4560 } 4561 switch (ch->chunk_type) { 4562 case SCTP_INIT: 4563 contains_init_chunk = 1; 4564 break; 4565 case SCTP_PACKET_DROPPED: 4566 /* we don't respond to pkt-dropped */ 4567 return; 4568 case SCTP_ABORT_ASSOCIATION: 4569 /* we don't respond with an ABORT to an ABORT */ 4570 return; 4571 case SCTP_SHUTDOWN_COMPLETE: 4572 /* 4573 * we ignore it since we are not waiting for it and 4574 * peer is gone 4575 */ 4576 return; 4577 case SCTP_SHUTDOWN_ACK: 4578 sctp_send_shutdown_complete2(src, dst, sh, 4579 mflowtype, mflowid, fibnum, 4580 vrf_id, port); 4581 return; 4582 default: 4583 break; 4584 } 4585 offset += SCTP_SIZE32(chk_length); 4586 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4587 sizeof(*ch), (uint8_t *)&chunk_buf); 4588 } 4589 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4590 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4591 (contains_init_chunk == 0))) { 4592 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4593 mflowtype, mflowid, fibnum, 4594 vrf_id, port); 4595 } 4596 } 4597 4598 /* 4599 * check the inbound datagram to make sure there is not an abort inside it, 4600 * if there is return 1, else return 0. 
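 * While scanning, the initiate tag of any INIT or INIT-ACK chunk found is
 * copied into *vtag so the caller can use it when sending a reply.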
4601 */ 4602 int 4603 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtag) 4604 { 4605 struct sctp_chunkhdr *ch; 4606 struct sctp_init_chunk *init_chk, chunk_buf; 4607 int offset; 4608 unsigned int chk_length; 4609 4610 offset = iphlen + sizeof(struct sctphdr); 4611 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4612 (uint8_t *)&chunk_buf); 4613 while (ch != NULL) { 4614 chk_length = ntohs(ch->chunk_length); 4615 if (chk_length < sizeof(*ch)) { 4616 /* packet is probably corrupt */ 4617 break; 4618 } 4619 /* we seem to be ok, is it an abort? */ 4620 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4621 /* yep, tell them */ 4622 return (1); 4623 } 4624 if ((ch->chunk_type == SCTP_INITIATION) || 4625 (ch->chunk_type == SCTP_INITIATION_ACK)) { 4626 /* need to update the Vtag */ 4627 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4628 offset, sizeof(struct sctp_init_chunk), (uint8_t *)&chunk_buf); 4629 if (init_chk != NULL) { 4630 *vtag = ntohl(init_chk->init.initiate_tag); 4631 } 4632 } 4633 /* Nope, move to the next chunk */ 4634 offset += SCTP_SIZE32(chk_length); 4635 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4636 sizeof(*ch), (uint8_t *)&chunk_buf); 4637 } 4638 return (0); 4639 } 4640 4641 /* 4642 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4643 * set (i.e. it's 0) so, create this function to compare link local scopes 4644 */ 4645 #ifdef INET6 4646 uint32_t 4647 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4648 { 4649 struct sockaddr_in6 a, b; 4650 4651 /* save copies */ 4652 a = *addr1; 4653 b = *addr2; 4654 4655 if (a.sin6_scope_id == 0) 4656 if (sa6_recoverscope(&a)) { 4657 /* can't get scope, so can't match */ 4658 return (0); 4659 } 4660 if (b.sin6_scope_id == 0) 4661 if (sa6_recoverscope(&b)) { 4662 /* can't get scope, so can't match */ 4663 return (0); 4664 } 4665 if (a.sin6_scope_id != b.sin6_scope_id) 4666 return (0); 4667 4668 return (1); 4669 } 4670 4671 /* 4672 * returns a sockaddr_in6 with embedded scope recovered and removed 4673 */ 4674 struct sockaddr_in6 * 4675 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4676 { 4677 /* check and strip embedded scope junk */ 4678 if (addr->sin6_family == AF_INET6) { 4679 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4680 if (addr->sin6_scope_id == 0) { 4681 *store = *addr; 4682 if (!sa6_recoverscope(store)) { 4683 /* use the recovered scope */ 4684 addr = store; 4685 } 4686 } else { 4687 /* else, return the original "to" addr */ 4688 in6_clearscope(&addr->sin6_addr); 4689 } 4690 } 4691 } 4692 return (addr); 4693 } 4694 #endif 4695 4696 /* 4697 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4698 * if same, 0 if not 4699 */ 4700 int 4701 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4702 { 4703 4704 /* must be valid */ 4705 if (sa1 == NULL || sa2 == NULL) 4706 return (0); 4707 4708 /* must be the same family */ 4709 if (sa1->sa_family != sa2->sa_family) 4710 return (0); 4711 4712 switch (sa1->sa_family) { 4713 #ifdef INET6 4714 case AF_INET6: 4715 { 4716 /* IPv6 addresses */ 4717 struct sockaddr_in6 *sin6_1, *sin6_2; 4718 4719 sin6_1 = (struct sockaddr_in6 *)sa1; 4720 sin6_2 = (struct sockaddr_in6 *)sa2; 4721 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4722 sin6_2)); 4723 } 4724 #endif 4725 #ifdef INET 4726 case AF_INET: 4727 { 4728 /* IPv4 addresses */ 4729 struct sockaddr_in *sin_1, *sin_2; 4730 4731 sin_1 = (struct sockaddr_in *)sa1; 4732 sin_2 = (struct sockaddr_in *)sa2; 4733 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4734 } 4735 #endif 4736 default: 4737 /* we don't do these... */ 4738 return (0); 4739 } 4740 } 4741 4742 void 4743 sctp_print_address(struct sockaddr *sa) 4744 { 4745 #ifdef INET6 4746 char ip6buf[INET6_ADDRSTRLEN]; 4747 #endif 4748 4749 switch (sa->sa_family) { 4750 #ifdef INET6 4751 case AF_INET6: 4752 { 4753 struct sockaddr_in6 *sin6; 4754 4755 sin6 = (struct sockaddr_in6 *)sa; 4756 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4757 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4758 ntohs(sin6->sin6_port), 4759 sin6->sin6_scope_id); 4760 break; 4761 } 4762 #endif 4763 #ifdef INET 4764 case AF_INET: 4765 { 4766 struct sockaddr_in *sin; 4767 unsigned char *p; 4768 4769 sin = (struct sockaddr_in *)sa; 4770 p = (unsigned char *)&sin->sin_addr; 4771 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4772 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4773 break; 4774 } 4775 #endif 4776 default: 4777 SCTP_PRINTF("?\n"); 4778 break; 4779 } 4780 } 4781 4782 void 4783 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4784 struct sctp_inpcb *new_inp, 4785 struct sctp_tcb *stcb, 4786 int waitflags) 4787 { 4788 /* 4789 * go through our old INP and pull off any control structures that 4790 * belong to stcb and move then to the new inp. 4791 */ 4792 struct socket *old_so, *new_so; 4793 struct sctp_queued_to_read *control, *nctl; 4794 struct sctp_readhead tmp_queue; 4795 struct mbuf *m; 4796 int error = 0; 4797 4798 old_so = old_inp->sctp_socket; 4799 new_so = new_inp->sctp_socket; 4800 TAILQ_INIT(&tmp_queue); 4801 error = SOCK_IO_RECV_LOCK(old_so, waitflags); 4802 if (error) { 4803 /* 4804 * Gak, can't get I/O lock, we have a problem. data will be 4805 * left stranded.. and we don't dare look at it since the 4806 * other thread may be reading something. Oh well, its a 4807 * screwed up app that does a peeloff OR a accept while 4808 * reading from the main socket... actually its only the 4809 * peeloff() case, since I think read will fail on a 4810 * listening socket.. 4811 */ 4812 return; 4813 } 4814 /* lock the socket buffers */ 4815 SCTP_INP_READ_LOCK(old_inp); 4816 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4817 /* Pull off all for out target stcb */ 4818 if (control->stcb == stcb) { 4819 /* remove it we want it */ 4820 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4821 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4822 m = control->data; 4823 while (m) { 4824 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4825 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4826 } 4827 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4828 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4829 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4830 } 4831 m = SCTP_BUF_NEXT(m); 4832 } 4833 } 4834 } 4835 SCTP_INP_READ_UNLOCK(old_inp); 4836 /* Remove the recv-lock on the old socket */ 4837 SOCK_IO_RECV_UNLOCK(old_so); 4838 /* Now we move them over to the new socket buffer */ 4839 SCTP_INP_READ_LOCK(new_inp); 4840 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4841 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4842 m = control->data; 4843 while (m) { 4844 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4845 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4846 } 4847 sctp_sballoc(stcb, &new_so->so_rcv, m); 4848 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4849 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4850 } 4851 m = SCTP_BUF_NEXT(m); 4852 } 4853 } 4854 SCTP_INP_READ_UNLOCK(new_inp); 4855 } 4856 4857 void 4858 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4859 struct sctp_tcb *stcb, 4860 int so_locked 4861 SCTP_UNUSED 4862 ) 4863 { 4864 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4865 sctp_sorwakeup(inp, inp->sctp_socket); 4866 } 4867 } 4868 4869 void 4870 sctp_add_to_readq(struct sctp_inpcb *inp, 4871 struct sctp_tcb *stcb, 4872 struct sctp_queued_to_read *control, 4873 struct sockbuf *sb, 4874 int end, 4875 int inp_read_lock_held, 4876 int so_locked) 4877 { 4878 /* 4879 * Here we must place the control on the end of the socket read 4880 * queue AND increment sb_cc so that select will work properly on 4881 * read. 4882 */ 4883 struct mbuf *m, *prev = NULL; 4884 4885 if (inp == NULL) { 4886 /* Gak, TSNH!! */ 4887 #ifdef INVARIANTS 4888 panic("Gak, inp NULL on add_to_readq"); 4889 #endif 4890 return; 4891 } 4892 if (inp_read_lock_held == 0) 4893 SCTP_INP_READ_LOCK(inp); 4894 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4895 if (!control->on_strm_q) { 4896 sctp_free_remote_addr(control->whoFrom); 4897 if (control->data) { 4898 sctp_m_freem(control->data); 4899 control->data = NULL; 4900 } 4901 sctp_free_a_readq(stcb, control); 4902 } 4903 if (inp_read_lock_held == 0) 4904 SCTP_INP_READ_UNLOCK(inp); 4905 return; 4906 } 4907 if (!(control->spec_flags & M_NOTIFICATION)) { 4908 atomic_add_int(&inp->total_recvs, 1); 4909 if (!control->do_not_ref_stcb) { 4910 atomic_add_int(&stcb->total_recvs, 1); 4911 } 4912 } 4913 m = control->data; 4914 control->held_length = 0; 4915 control->length = 0; 4916 while (m) { 4917 if (SCTP_BUF_LEN(m) == 0) { 4918 /* Skip mbufs with NO length */ 4919 if (prev == NULL) { 4920 /* First one */ 4921 control->data = sctp_m_free(m); 4922 m = control->data; 4923 } else { 4924 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4925 m = SCTP_BUF_NEXT(prev); 4926 } 4927 if (m == NULL) { 4928 control->tail_mbuf = prev; 4929 } 4930 continue; 4931 } 4932 prev = m; 4933 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4934 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4935 } 4936 sctp_sballoc(stcb, sb, m); 4937 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4938 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 4939 } 4940 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4941 m = SCTP_BUF_NEXT(m); 4942 } 4943 if (prev != NULL) { 4944 control->tail_mbuf = prev; 4945 } else { 4946 /* Everything got collapsed out?? */ 4947 if (!control->on_strm_q) { 4948 sctp_free_remote_addr(control->whoFrom); 4949 sctp_free_a_readq(stcb, control); 4950 } 4951 if (inp_read_lock_held == 0) 4952 SCTP_INP_READ_UNLOCK(inp); 4953 return; 4954 } 4955 if (end) { 4956 control->end_added = 1; 4957 } 4958 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4959 control->on_read_q = 1; 4960 if (inp_read_lock_held == 0) 4961 SCTP_INP_READ_UNLOCK(inp); 4962 if (inp && inp->sctp_socket) { 4963 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4964 } 4965 } 4966 4967 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4968 *************ALTERNATE ROUTING CODE 4969 */ 4970 4971 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4972 *************ALTERNATE ROUTING CODE 4973 */ 4974 4975 struct mbuf * 4976 sctp_generate_cause(uint16_t code, char *info) 4977 { 4978 struct mbuf *m; 4979 struct sctp_gen_error_cause *cause; 4980 size_t info_len; 4981 uint16_t len; 4982 4983 if ((code == 0) || (info == NULL)) { 4984 return (NULL); 4985 } 4986 info_len = strlen(info); 4987 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4988 return (NULL); 4989 } 4990 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4991 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4992 if (m != NULL) { 4993 SCTP_BUF_LEN(m) = len; 4994 cause = mtod(m, struct sctp_gen_error_cause *); 4995 cause->code = htons(code); 4996 cause->length = htons(len); 4997 memcpy(cause->info, info, info_len); 4998 } 4999 return (m); 5000 } 5001 5002 struct mbuf * 5003 sctp_generate_no_user_data_cause(uint32_t tsn) 5004 { 5005 struct mbuf *m; 5006 struct sctp_error_no_user_data *no_user_data_cause; 5007 uint16_t len; 5008 5009 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 5010 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 5011 if (m != NULL) { 5012 SCTP_BUF_LEN(m) = len; 5013 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 5014 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 5015 no_user_data_cause->cause.length = htons(len); 5016 no_user_data_cause->tsn = htonl(tsn); 5017 } 5018 return (m); 5019 } 5020 5021 #ifdef SCTP_MBCNT_LOGGING 5022 void 5023 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 5024 struct sctp_tmit_chunk *tp1, int chk_cnt) 5025 { 5026 if (tp1->data == NULL) { 5027 return; 5028 } 5029 asoc->chunks_on_out_queue -= chk_cnt; 5030 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 5031 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 5032 asoc->total_output_queue_size, 5033 tp1->book_size, 5034 0, 5035 tp1->mbcnt); 5036 } 5037 if (asoc->total_output_queue_size >= tp1->book_size) { 5038 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 5039 } else { 5040 asoc->total_output_queue_size = 0; 5041 } 5042 5043 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 5044 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 5045 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 5046 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 5047 } else { 5048 stcb->sctp_socket->so_snd.sb_cc = 0; 5049 } 5050 } 5051 } 5052 5053 #endif 5054 5055 int 5056 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5057 uint8_t sent, int so_locked) 
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint32_t mid;
	uint16_t sid;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	sid = tp1->rec.data.sid;
	mid = tp1->rec.data.mid;
	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
		stcb->asoc.abandoned_sent[0]++;
		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	} else {
		stcb->asoc.abandoned_unsent[0]++;
		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	}
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not fragmented, we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * It's a begin or middle piece; we must mark all of
			 * it.
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.sid != sid) ||
			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
				break;
			}
			/*
			 * Save to chk in case we have some on the stream out
			 * queue. If so, and we have an un-transmitted one, we
			 * don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here; book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/*
			 * Move it on to the sent queue so we can wait for it
			 * to be passed by.
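			 * (The chunk stays marked SCTP_FORWARD_TSN_SKIP, so a
			 * FORWARD-TSN can later tell the peer to skip over
			 * it.)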
5167 */ 5168 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5169 sctp_next); 5170 stcb->asoc.send_queue_cnt--; 5171 stcb->asoc.sent_queue_cnt++; 5172 } 5173 } 5174 if (foundeom == 0) { 5175 /* 5176 * Still no eom found. That means there is stuff left on the 5177 * stream out queue.. yuck. 5178 */ 5179 SCTP_TCB_SEND_LOCK(stcb); 5180 strq = &stcb->asoc.strmout[sid]; 5181 sp = TAILQ_FIRST(&strq->outqueue); 5182 if (sp != NULL) { 5183 sp->discard_rest = 1; 5184 /* 5185 * We may need to put a chunk on the queue that 5186 * holds the TSN that would have been sent with the 5187 * LAST bit. 5188 */ 5189 if (chk == NULL) { 5190 /* Yep, we have to */ 5191 sctp_alloc_a_chunk(stcb, chk); 5192 if (chk == NULL) { 5193 /* 5194 * we are hosed. All we can do is 5195 * nothing.. which will cause an 5196 * abort if the peer is paying 5197 * attention. 5198 */ 5199 goto oh_well; 5200 } 5201 memset(chk, 0, sizeof(*chk)); 5202 chk->rec.data.rcv_flags = 0; 5203 chk->sent = SCTP_FORWARD_TSN_SKIP; 5204 chk->asoc = &stcb->asoc; 5205 if (stcb->asoc.idata_supported == 0) { 5206 if (sp->sinfo_flags & SCTP_UNORDERED) { 5207 chk->rec.data.mid = 0; 5208 } else { 5209 chk->rec.data.mid = strq->next_mid_ordered; 5210 } 5211 } else { 5212 if (sp->sinfo_flags & SCTP_UNORDERED) { 5213 chk->rec.data.mid = strq->next_mid_unordered; 5214 } else { 5215 chk->rec.data.mid = strq->next_mid_ordered; 5216 } 5217 } 5218 chk->rec.data.sid = sp->sid; 5219 chk->rec.data.ppid = sp->ppid; 5220 chk->rec.data.context = sp->context; 5221 chk->flags = sp->act_flags; 5222 chk->whoTo = NULL; 5223 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5224 strq->chunks_on_queues++; 5225 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5226 stcb->asoc.sent_queue_cnt++; 5227 stcb->asoc.pr_sctp_cnt++; 5228 } 5229 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5230 if (sp->sinfo_flags & SCTP_UNORDERED) { 5231 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5232 } 5233 if (stcb->asoc.idata_supported == 0) { 5234 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5235 strq->next_mid_ordered++; 5236 } 5237 } else { 5238 if (sp->sinfo_flags & SCTP_UNORDERED) { 5239 strq->next_mid_unordered++; 5240 } else { 5241 strq->next_mid_ordered++; 5242 } 5243 } 5244 oh_well: 5245 if (sp->data) { 5246 /* 5247 * Pull any data to free up the SB and allow 5248 * sender to "add more" while we will throw 5249 * away :-) 5250 */ 5251 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5252 ret_sz += sp->length; 5253 do_wakeup_routine = 1; 5254 sp->some_taken = 1; 5255 sctp_m_freem(sp->data); 5256 sp->data = NULL; 5257 sp->tail_mbuf = NULL; 5258 sp->length = 0; 5259 } 5260 } 5261 SCTP_TCB_SEND_UNLOCK(stcb); 5262 } 5263 if (do_wakeup_routine) { 5264 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5265 } 5266 return (ret_sz); 5267 } 5268 5269 /* 5270 * checks to see if the given address, sa, is one that is currently known by 5271 * the kernel note: can't distinguish the same address on multiple interfaces 5272 * and doesn't handle multiple addresses with different zone/scope id's note: 5273 * ifa_ifwithaddr() compares the entire sockaddr struct 5274 */ 5275 struct sctp_ifa * 5276 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5277 int holds_lock) 5278 { 5279 struct sctp_laddr *laddr; 5280 5281 if (holds_lock == 0) { 5282 SCTP_INP_RLOCK(inp); 5283 } 5284 5285 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5286 if (laddr->ifa == NULL) 5287 continue; 5288 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5289 continue; 5290 
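		/* Same address family; now compare the addresses themselves. */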
#ifdef INET 5291 if (addr->sa_family == AF_INET) { 5292 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5293 laddr->ifa->address.sin.sin_addr.s_addr) { 5294 /* found him. */ 5295 break; 5296 } 5297 } 5298 #endif 5299 #ifdef INET6 5300 if (addr->sa_family == AF_INET6) { 5301 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5302 &laddr->ifa->address.sin6)) { 5303 /* found him. */ 5304 break; 5305 } 5306 } 5307 #endif 5308 } 5309 if (holds_lock == 0) { 5310 SCTP_INP_RUNLOCK(inp); 5311 } 5312 if (laddr != NULL) { 5313 return (laddr->ifa); 5314 } else { 5315 return (NULL); 5316 } 5317 } 5318 5319 uint32_t 5320 sctp_get_ifa_hash_val(struct sockaddr *addr) 5321 { 5322 switch (addr->sa_family) { 5323 #ifdef INET 5324 case AF_INET: 5325 { 5326 struct sockaddr_in *sin; 5327 5328 sin = (struct sockaddr_in *)addr; 5329 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5330 } 5331 #endif 5332 #ifdef INET6 5333 case AF_INET6: 5334 { 5335 struct sockaddr_in6 *sin6; 5336 uint32_t hash_of_addr; 5337 5338 sin6 = (struct sockaddr_in6 *)addr; 5339 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5340 sin6->sin6_addr.s6_addr32[1] + 5341 sin6->sin6_addr.s6_addr32[2] + 5342 sin6->sin6_addr.s6_addr32[3]); 5343 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5344 return (hash_of_addr); 5345 } 5346 #endif 5347 default: 5348 break; 5349 } 5350 return (0); 5351 } 5352 5353 struct sctp_ifa * 5354 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5355 { 5356 struct sctp_ifa *sctp_ifap; 5357 struct sctp_vrf *vrf; 5358 struct sctp_ifalist *hash_head; 5359 uint32_t hash_of_addr; 5360 5361 if (holds_lock == 0) { 5362 SCTP_IPI_ADDR_RLOCK(); 5363 } else { 5364 SCTP_IPI_ADDR_LOCK_ASSERT(); 5365 } 5366 5367 vrf = sctp_find_vrf(vrf_id); 5368 if (vrf == NULL) { 5369 if (holds_lock == 0) 5370 SCTP_IPI_ADDR_RUNLOCK(); 5371 return (NULL); 5372 } 5373 5374 hash_of_addr = sctp_get_ifa_hash_val(addr); 5375 5376 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5377 if (hash_head == NULL) { 5378 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5379 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5380 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5381 sctp_print_address(addr); 5382 SCTP_PRINTF("No such bucket for address\n"); 5383 if (holds_lock == 0) 5384 SCTP_IPI_ADDR_RUNLOCK(); 5385 5386 return (NULL); 5387 } 5388 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5389 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5390 continue; 5391 #ifdef INET 5392 if (addr->sa_family == AF_INET) { 5393 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5394 sctp_ifap->address.sin.sin_addr.s_addr) { 5395 /* found him. */ 5396 break; 5397 } 5398 } 5399 #endif 5400 #ifdef INET6 5401 if (addr->sa_family == AF_INET6) { 5402 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5403 &sctp_ifap->address.sin6)) { 5404 /* found him. */ 5405 break; 5406 } 5407 } 5408 #endif 5409 } 5410 if (holds_lock == 0) 5411 SCTP_IPI_ADDR_RUNLOCK(); 5412 return (sctp_ifap); 5413 } 5414 5415 static void 5416 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5417 uint32_t rwnd_req) 5418 { 5419 /* User pulled some data, do we need a rwnd update? 
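	 * A window-update SACK is only worth sending once the freshly
	 * computed rwnd exceeds the last value we reported to the peer by at
	 * least rwnd_req bytes.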
 */
	struct epoch_tracker et;
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	atomic_add_int(&stcb->asoc.refcnt, 1);

	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
		/* Pre-check: if we are freeing, no update is needed */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have we freed enough to warrant a look? */
	*freed_so_far = 0;
	/* Yep, it's worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race where the guy has not
			 * reached the gate yet; in that case, bail out.
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		NET_EPOCH_ENTER(et);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		NET_EPOCH_EXIT(et);
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}

	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}

int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - look, don't touch :-D (only valid with an out mbuf
	 * copy, mp=NULL, thus uio is the copy method to userland).
	 * MSG_WAITALL - ??
5515 * On the way out we may send out any combination of: 5516 * MSG_NOTIFICATION MSG_EOR 5517 * 5518 */ 5519 struct sctp_inpcb *inp = NULL; 5520 ssize_t my_len = 0; 5521 ssize_t cp_len = 0; 5522 int error = 0; 5523 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5524 struct mbuf *m = NULL; 5525 struct sctp_tcb *stcb = NULL; 5526 int wakeup_read_socket = 0; 5527 int freecnt_applied = 0; 5528 int out_flags = 0, in_flags = 0; 5529 int block_allowed = 1; 5530 uint32_t freed_so_far = 0; 5531 ssize_t copied_so_far = 0; 5532 int in_eeor_mode = 0; 5533 int no_rcv_needed = 0; 5534 uint32_t rwnd_req = 0; 5535 int hold_sblock = 0; 5536 int hold_rlock = 0; 5537 ssize_t slen = 0; 5538 uint32_t held_length = 0; 5539 int sockbuf_lock = 0; 5540 5541 if (uio == NULL) { 5542 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5543 return (EINVAL); 5544 } 5545 5546 if (msg_flags) { 5547 in_flags = *msg_flags; 5548 if (in_flags & MSG_PEEK) 5549 SCTP_STAT_INCR(sctps_read_peeks); 5550 } else { 5551 in_flags = 0; 5552 } 5553 slen = uio->uio_resid; 5554 5555 /* Pull in and set up our int flags */ 5556 if (in_flags & MSG_OOB) { 5557 /* Out of band's NOT supported */ 5558 return (EOPNOTSUPP); 5559 } 5560 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5561 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5562 return (EINVAL); 5563 } 5564 if ((in_flags & (MSG_DONTWAIT 5565 | MSG_NBIO 5566 )) || 5567 SCTP_SO_IS_NBIO(so)) { 5568 block_allowed = 0; 5569 } 5570 /* setup the endpoint */ 5571 inp = (struct sctp_inpcb *)so->so_pcb; 5572 if (inp == NULL) { 5573 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5574 return (EFAULT); 5575 } 5576 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5577 /* Must be at least a MTU's worth */ 5578 if (rwnd_req < SCTP_MIN_RWND) 5579 rwnd_req = SCTP_MIN_RWND; 5580 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5581 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5582 sctp_misc_ints(SCTP_SORECV_ENTER, 5583 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5584 } 5585 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5586 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5587 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5588 } 5589 5590 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(in_flags)); 5591 if (error) { 5592 goto release_unlocked; 5593 } 5594 sockbuf_lock = 1; 5595 restart: 5596 5597 restart_nosblocks: 5598 if (hold_sblock == 0) { 5599 SOCKBUF_LOCK(&so->so_rcv); 5600 hold_sblock = 1; 5601 } 5602 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5603 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5604 goto out; 5605 } 5606 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5607 if (so->so_error) { 5608 error = so->so_error; 5609 if ((in_flags & MSG_PEEK) == 0) 5610 so->so_error = 0; 5611 goto out; 5612 } else { 5613 if (so->so_rcv.sb_cc == 0) { 5614 /* indicate EOF */ 5615 error = 0; 5616 goto out; 5617 } 5618 } 5619 } 5620 if (so->so_rcv.sb_cc <= held_length) { 5621 if (so->so_error) { 5622 error = so->so_error; 5623 if ((in_flags & MSG_PEEK) == 0) { 5624 so->so_error = 0; 5625 } 5626 goto out; 5627 } 5628 if ((so->so_rcv.sb_cc == 0) && 5629 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5630 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5631 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5632 /* 5633 * For active open side clear flags for 5634 * re-use 
passive open is blocked by 5635 * connect. 5636 */ 5637 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5638 /* 5639 * You were aborted, passive side 5640 * always hits here 5641 */ 5642 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5643 error = ECONNRESET; 5644 } 5645 so->so_state &= ~(SS_ISCONNECTING | 5646 SS_ISDISCONNECTING | 5647 SS_ISCONFIRMING | 5648 SS_ISCONNECTED); 5649 if (error == 0) { 5650 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5651 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5652 error = ENOTCONN; 5653 } 5654 } 5655 goto out; 5656 } 5657 } 5658 if (block_allowed) { 5659 error = sbwait(&so->so_rcv); 5660 if (error) { 5661 goto out; 5662 } 5663 held_length = 0; 5664 goto restart_nosblocks; 5665 } else { 5666 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5667 error = EWOULDBLOCK; 5668 goto out; 5669 } 5670 } 5671 if (hold_sblock == 1) { 5672 SOCKBUF_UNLOCK(&so->so_rcv); 5673 hold_sblock = 0; 5674 } 5675 /* we possibly have data we can read */ 5676 /* sa_ignore FREED_MEMORY */ 5677 control = TAILQ_FIRST(&inp->read_queue); 5678 if (control == NULL) { 5679 /* 5680 * This could be happening since the appender did the 5681 * increment but as not yet did the tailq insert onto the 5682 * read_queue 5683 */ 5684 if (hold_rlock == 0) { 5685 SCTP_INP_READ_LOCK(inp); 5686 } 5687 control = TAILQ_FIRST(&inp->read_queue); 5688 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5689 #ifdef INVARIANTS 5690 panic("Huh, its non zero and nothing on control?"); 5691 #endif 5692 so->so_rcv.sb_cc = 0; 5693 } 5694 SCTP_INP_READ_UNLOCK(inp); 5695 hold_rlock = 0; 5696 goto restart; 5697 } 5698 5699 if ((control->length == 0) && 5700 (control->do_not_ref_stcb)) { 5701 /* 5702 * Clean up code for freeing assoc that left behind a 5703 * pdapi.. maybe a peer in EEOR that just closed after 5704 * sending and never indicated a EOR. 5705 */ 5706 if (hold_rlock == 0) { 5707 hold_rlock = 1; 5708 SCTP_INP_READ_LOCK(inp); 5709 } 5710 control->held_length = 0; 5711 if (control->data) { 5712 /* Hmm there is data here .. fix */ 5713 struct mbuf *m_tmp; 5714 int cnt = 0; 5715 5716 m_tmp = control->data; 5717 while (m_tmp) { 5718 cnt += SCTP_BUF_LEN(m_tmp); 5719 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5720 control->tail_mbuf = m_tmp; 5721 control->end_added = 1; 5722 } 5723 m_tmp = SCTP_BUF_NEXT(m_tmp); 5724 } 5725 control->length = cnt; 5726 } else { 5727 /* remove it */ 5728 TAILQ_REMOVE(&inp->read_queue, control, next); 5729 /* Add back any hiddend data */ 5730 sctp_free_remote_addr(control->whoFrom); 5731 sctp_free_a_readq(stcb, control); 5732 } 5733 if (hold_rlock) { 5734 hold_rlock = 0; 5735 SCTP_INP_READ_UNLOCK(inp); 5736 } 5737 goto restart; 5738 } 5739 if ((control->length == 0) && 5740 (control->end_added == 1)) { 5741 /* 5742 * Do we also need to check for (control->pdapi_aborted == 5743 * 1)? 5744 */ 5745 if (hold_rlock == 0) { 5746 hold_rlock = 1; 5747 SCTP_INP_READ_LOCK(inp); 5748 } 5749 TAILQ_REMOVE(&inp->read_queue, control, next); 5750 if (control->data) { 5751 #ifdef INVARIANTS 5752 panic("control->data not null but control->length == 0"); 5753 #else 5754 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5755 sctp_m_freem(control->data); 5756 control->data = NULL; 5757 #endif 5758 } 5759 if (control->aux_data) { 5760 sctp_m_free(control->aux_data); 5761 control->aux_data = NULL; 5762 } 5763 #ifdef INVARIANTS 5764 if (control->on_strm_q) { 5765 panic("About to free ctl:%p so:%p and its in %d", 5766 control, so, control->on_strm_q); 5767 } 5768 #endif 5769 sctp_free_remote_addr(control->whoFrom); 5770 sctp_free_a_readq(stcb, control); 5771 if (hold_rlock) { 5772 hold_rlock = 0; 5773 SCTP_INP_READ_UNLOCK(inp); 5774 } 5775 goto restart; 5776 } 5777 if (control->length == 0) { 5778 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5779 (filling_sinfo)) { 5780 /* find a more suitable one than this */ 5781 ctl = TAILQ_NEXT(control, next); 5782 while (ctl) { 5783 if ((ctl->stcb != control->stcb) && (ctl->length) && 5784 (ctl->some_taken || 5785 (ctl->spec_flags & M_NOTIFICATION) || 5786 ((ctl->do_not_ref_stcb == 0) && 5787 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5788 ) { 5789 /*- 5790 * If we have a different TCB next, and there is data 5791 * present. If we have already taken some (pdapi), OR we can 5792 * ref the tcb and no delivery has started on this stream, we 5793 * take it. Note we allow a notification on a different 5794 * assoc to be delivered. 5795 */ 5796 control = ctl; 5797 goto found_one; 5798 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5799 (ctl->length) && 5800 ((ctl->some_taken) || 5801 ((ctl->do_not_ref_stcb == 0) && 5802 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5803 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5804 /*- 5805 * If we have the same tcb, and there is data present, and we 5806 * have the strm interleave feature present. Then if we have 5807 * taken some (pdapi) or we can refer to that tcb AND we have 5808 * not started a delivery for this stream, we can take it. 5809 * Note we do NOT allow a notification on the same assoc to 5810 * be delivered. 5811 */ 5812 control = ctl; 5813 goto found_one; 5814 } 5815 ctl = TAILQ_NEXT(ctl, next); 5816 } 5817 } 5818 /* 5819 * If we reach here, no suitable replacement is available 5820 * <or> fragment interleave is NOT on. So stuff the sb_cc 5821 * into our held count, and it's time to sleep again. 5822 */ 5823 held_length = so->so_rcv.sb_cc; 5824 control->held_length = so->so_rcv.sb_cc; 5825 goto restart; 5826 } 5827 /* Clear the held length since there is something to read */ 5828 control->held_length = 0; 5829 found_one: 5830 /* 5831 * If we reach here, control has some data for us to read off. 5832 * Note that stcb COULD be NULL. 5833 */ 5834 if (hold_rlock == 0) { 5835 hold_rlock = 1; 5836 SCTP_INP_READ_LOCK(inp); 5837 } 5838 control->some_taken++; 5839 stcb = control->stcb; 5840 if (stcb) { 5841 if ((control->do_not_ref_stcb == 0) && 5842 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5843 if (freecnt_applied == 0) 5844 stcb = NULL; 5845 } else if (control->do_not_ref_stcb == 0) { 5846 /* you can't free it on me please */ 5847 /* 5848 * The lock on the socket buffer protects us so the 5849 * free code will stop.
But since we used the 5850 * socketbuf lock and the sender uses the tcb_lock 5851 * to increment, we need to use the atomic add to 5852 * the refcnt 5853 */ 5854 if (freecnt_applied) { 5855 #ifdef INVARIANTS 5856 panic("refcnt already incremented"); 5857 #else 5858 SCTP_PRINTF("refcnt already incremented?\n"); 5859 #endif 5860 } else { 5861 atomic_add_int(&stcb->asoc.refcnt, 1); 5862 freecnt_applied = 1; 5863 } 5864 /* 5865 * Setup to remember how much we have not yet told 5866 * the peer our rwnd has opened up. Note we grab the 5867 * value from the tcb from last time. Note too that 5868 * sack sending clears this when a sack is sent, 5869 * which is fine. Once we hit the rwnd_req, we then 5870 * will go to the sctp_user_rcvd() that will not 5871 * lock until it KNOWs it MUST send a WUP-SACK. 5872 */ 5873 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5874 stcb->freed_by_sorcv_sincelast = 0; 5875 } 5876 } 5877 if (stcb && 5878 ((control->spec_flags & M_NOTIFICATION) == 0) && 5879 control->do_not_ref_stcb == 0) { 5880 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5881 } 5882 5883 /* First lets get off the sinfo and sockaddr info */ 5884 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5885 sinfo->sinfo_stream = control->sinfo_stream; 5886 sinfo->sinfo_ssn = (uint16_t)control->mid; 5887 sinfo->sinfo_flags = control->sinfo_flags; 5888 sinfo->sinfo_ppid = control->sinfo_ppid; 5889 sinfo->sinfo_context = control->sinfo_context; 5890 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5891 sinfo->sinfo_tsn = control->sinfo_tsn; 5892 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5893 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5894 nxt = TAILQ_NEXT(control, next); 5895 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5896 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5897 struct sctp_extrcvinfo *s_extra; 5898 5899 s_extra = (struct sctp_extrcvinfo *)sinfo; 5900 if ((nxt) && 5901 (nxt->length)) { 5902 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5903 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5904 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5905 } 5906 if (nxt->spec_flags & M_NOTIFICATION) { 5907 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5908 } 5909 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5910 s_extra->serinfo_next_length = nxt->length; 5911 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5912 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5913 if (nxt->tail_mbuf != NULL) { 5914 if (nxt->end_added) { 5915 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5916 } 5917 } 5918 } else { 5919 /* 5920 * we explicitly 0 this, since the memcpy 5921 * got some other things beyond the older 5922 * sinfo_ that is on the control's structure 5923 * :-D 5924 */ 5925 nxt = NULL; 5926 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5927 s_extra->serinfo_next_aid = 0; 5928 s_extra->serinfo_next_length = 0; 5929 s_extra->serinfo_next_ppid = 0; 5930 s_extra->serinfo_next_stream = 0; 5931 } 5932 } 5933 /* 5934 * update off the real current cum-ack, if we have an stcb. 5935 */ 5936 if ((control->do_not_ref_stcb == 0) && stcb) 5937 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5938 /* 5939 * mask off the high bits, we keep the actual chunk bits in 5940 * there. 
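 * (The upper byte carries the chunk flags from the wire; SCTP_DATA_UNORDERED
 * is translated into the user-visible SCTP_UNORDERED flag just below.)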
5941 */ 5942 sinfo->sinfo_flags &= 0x00ff; 5943 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5944 sinfo->sinfo_flags |= SCTP_UNORDERED; 5945 } 5946 } 5947 #ifdef SCTP_ASOCLOG_OF_TSNS 5948 { 5949 int index, newindex; 5950 struct sctp_pcbtsn_rlog *entry; 5951 5952 do { 5953 index = inp->readlog_index; 5954 newindex = index + 1; 5955 if (newindex >= SCTP_READ_LOG_SIZE) { 5956 newindex = 0; 5957 } 5958 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5959 entry = &inp->readlog[index]; 5960 entry->vtag = control->sinfo_assoc_id; 5961 entry->strm = control->sinfo_stream; 5962 entry->seq = (uint16_t)control->mid; 5963 entry->sz = control->length; 5964 entry->flgs = control->sinfo_flags; 5965 } 5966 #endif 5967 if ((fromlen > 0) && (from != NULL)) { 5968 union sctp_sockstore store; 5969 size_t len; 5970 5971 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5972 #ifdef INET6 5973 case AF_INET6: 5974 len = sizeof(struct sockaddr_in6); 5975 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5976 store.sin6.sin6_port = control->port_from; 5977 break; 5978 #endif 5979 #ifdef INET 5980 case AF_INET: 5981 #ifdef INET6 5982 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5983 len = sizeof(struct sockaddr_in6); 5984 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5985 &store.sin6); 5986 store.sin6.sin6_port = control->port_from; 5987 } else { 5988 len = sizeof(struct sockaddr_in); 5989 store.sin = control->whoFrom->ro._l_addr.sin; 5990 store.sin.sin_port = control->port_from; 5991 } 5992 #else 5993 len = sizeof(struct sockaddr_in); 5994 store.sin = control->whoFrom->ro._l_addr.sin; 5995 store.sin.sin_port = control->port_from; 5996 #endif 5997 break; 5998 #endif 5999 default: 6000 len = 0; 6001 break; 6002 } 6003 memcpy(from, &store, min((size_t)fromlen, len)); 6004 #ifdef INET6 6005 { 6006 struct sockaddr_in6 lsa6, *from6; 6007 6008 from6 = (struct sockaddr_in6 *)from; 6009 sctp_recover_scope_mac(from6, (&lsa6)); 6010 } 6011 #endif 6012 } 6013 if (hold_rlock) { 6014 SCTP_INP_READ_UNLOCK(inp); 6015 hold_rlock = 0; 6016 } 6017 if (hold_sblock) { 6018 SOCKBUF_UNLOCK(&so->so_rcv); 6019 hold_sblock = 0; 6020 } 6021 /* now copy out what data we can */ 6022 if (mp == NULL) { 6023 /* copy out each mbuf in the chain up to length */ 6024 get_more_data: 6025 m = control->data; 6026 while (m) { 6027 /* Move out all we can */ 6028 cp_len = uio->uio_resid; 6029 my_len = SCTP_BUF_LEN(m); 6030 if (cp_len > my_len) { 6031 /* not enough in this buf */ 6032 cp_len = my_len; 6033 } 6034 if (hold_rlock) { 6035 SCTP_INP_READ_UNLOCK(inp); 6036 hold_rlock = 0; 6037 } 6038 if (cp_len > 0) 6039 error = uiomove(mtod(m, char *), (int)cp_len, uio); 6040 /* re-read */ 6041 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 6042 goto release; 6043 } 6044 6045 if ((control->do_not_ref_stcb == 0) && stcb && 6046 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 6047 no_rcv_needed = 1; 6048 } 6049 if (error) { 6050 /* error we are out of here */ 6051 goto release; 6052 } 6053 SCTP_INP_READ_LOCK(inp); 6054 hold_rlock = 1; 6055 if (cp_len == SCTP_BUF_LEN(m)) { 6056 if ((SCTP_BUF_NEXT(m) == NULL) && 6057 (control->end_added)) { 6058 out_flags |= MSG_EOR; 6059 if ((control->do_not_ref_stcb == 0) && 6060 (control->stcb != NULL) && 6061 ((control->spec_flags & M_NOTIFICATION) == 0)) 6062 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6063 } 6064 if (control->spec_flags & M_NOTIFICATION) { 6065 out_flags |= MSG_NOTIFICATION; 6066 } 6067 /* we ate up the mbuf */ 
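/*
 * A peeking reader just steps to the next mbuf and leaves everything
 * queued; a normal reader frees the mbuf, charges the bytes to the
 * socket-buffer and rwnd accounting, and advances control->data.
 */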
6068 if (in_flags & MSG_PEEK) { 6069 /* just looking */ 6070 m = SCTP_BUF_NEXT(m); 6071 copied_so_far += cp_len; 6072 } else { 6073 /* dispose of the mbuf */ 6074 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6075 sctp_sblog(&so->so_rcv, 6076 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6077 } 6078 sctp_sbfree(control, stcb, &so->so_rcv, m); 6079 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6080 sctp_sblog(&so->so_rcv, 6081 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6082 } 6083 copied_so_far += cp_len; 6084 freed_so_far += (uint32_t)cp_len; 6085 freed_so_far += MSIZE; 6086 atomic_subtract_int(&control->length, (int)cp_len); 6087 control->data = sctp_m_free(m); 6088 m = control->data; 6089 /* 6090 * been through it all, must hold sb 6091 * lock ok to null tail 6092 */ 6093 if (control->data == NULL) { 6094 #ifdef INVARIANTS 6095 if ((control->end_added == 0) || 6096 (TAILQ_NEXT(control, next) == NULL)) { 6097 /* 6098 * If the end is not 6099 * added, OR the 6100 * next is NOT null 6101 * we MUST have the 6102 * lock. 6103 */ 6104 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6105 panic("Hmm we don't own the lock?"); 6106 } 6107 } 6108 #endif 6109 control->tail_mbuf = NULL; 6110 #ifdef INVARIANTS 6111 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6112 panic("end_added, nothing left and no MSG_EOR"); 6113 } 6114 #endif 6115 } 6116 } 6117 } else { 6118 /* Do we need to trim the mbuf? */ 6119 if (control->spec_flags & M_NOTIFICATION) { 6120 out_flags |= MSG_NOTIFICATION; 6121 } 6122 if ((in_flags & MSG_PEEK) == 0) { 6123 SCTP_BUF_RESV_UF(m, cp_len); 6124 SCTP_BUF_LEN(m) -= (int)cp_len; 6125 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6126 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6127 } 6128 atomic_subtract_int(&so->so_rcv.sb_cc, (int)cp_len); 6129 if ((control->do_not_ref_stcb == 0) && 6130 stcb) { 6131 atomic_subtract_int(&stcb->asoc.sb_cc, (int)cp_len); 6132 } 6133 copied_so_far += cp_len; 6134 freed_so_far += (uint32_t)cp_len; 6135 freed_so_far += MSIZE; 6136 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6137 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6138 SCTP_LOG_SBRESULT, 0); 6139 } 6140 atomic_subtract_int(&control->length, (int)cp_len); 6141 } else { 6142 copied_so_far += cp_len; 6143 } 6144 } 6145 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6146 break; 6147 } 6148 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6149 (control->do_not_ref_stcb == 0) && 6150 (freed_so_far >= rwnd_req)) { 6151 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6152 } 6153 } /* end while(m) */ 6154 /* 6155 * At this point we have looked at it all and we either have 6156 * a MSG_EOR/or read all the user wants... <OR> 6157 * control->length == 0. 6158 */ 6159 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6160 /* we are done with this control */ 6161 if (control->length == 0) { 6162 if (control->data) { 6163 #ifdef INVARIANTS 6164 panic("control->data not null at read eor?"); 6165 #else 6166 SCTP_PRINTF("Strange, data left in the control buffer .. 
invariants would panic?\n"); 6167 sctp_m_freem(control->data); 6168 control->data = NULL; 6169 #endif 6170 } 6171 done_with_control: 6172 if (hold_rlock == 0) { 6173 SCTP_INP_READ_LOCK(inp); 6174 hold_rlock = 1; 6175 } 6176 TAILQ_REMOVE(&inp->read_queue, control, next); 6177 /* Add back any hidden data */ 6178 if (control->held_length) { 6179 held_length = 0; 6180 control->held_length = 0; 6181 wakeup_read_socket = 1; 6182 } 6183 if (control->aux_data) { 6184 sctp_m_free(control->aux_data); 6185 control->aux_data = NULL; 6186 } 6187 no_rcv_needed = control->do_not_ref_stcb; 6188 sctp_free_remote_addr(control->whoFrom); 6189 control->data = NULL; 6190 #ifdef INVARIANTS 6191 if (control->on_strm_q) { 6192 panic("About to free ctl:%p so:%p and its in %d", 6193 control, so, control->on_strm_q); 6194 } 6195 #endif 6196 sctp_free_a_readq(stcb, control); 6197 control = NULL; 6198 if ((freed_so_far >= rwnd_req) && 6199 (no_rcv_needed == 0)) 6200 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6201 6202 } else { 6203 /* 6204 * The user did not read all of this 6205 * message, turn off the returned MSG_EOR 6206 * since we are leaving more behind on the 6207 * control to read. 6208 */ 6209 #ifdef INVARIANTS 6210 if (control->end_added && 6211 (control->data == NULL) && 6212 (control->tail_mbuf == NULL)) { 6213 panic("Gak, control->length is corrupt?"); 6214 } 6215 #endif 6216 no_rcv_needed = control->do_not_ref_stcb; 6217 out_flags &= ~MSG_EOR; 6218 } 6219 } 6220 if (out_flags & MSG_EOR) { 6221 goto release; 6222 } 6223 if ((uio->uio_resid == 0) || 6224 ((in_eeor_mode) && 6225 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6226 goto release; 6227 } 6228 /* 6229 * If I hit here the receiver wants more and this message is 6230 * NOT done (pd-api). So two questions: Can we block? If not, 6231 * we are done. Did the user NOT set MSG_WAITALL? 6232 */ 6233 if (block_allowed == 0) { 6234 goto release; 6235 } 6236 /* 6237 * We need to wait for more data. A few things: - We don't 6238 * release the I/O lock, so we don't get someone else 6239 * reading. - We must be sure to account for the case where 6240 * what is added is NOT to our control when we wake up. 6241 */ 6242 6243 /* 6244 * Do we need to tell the transport a rwnd update might be 6245 * needed before we go to sleep?
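 * (Same rwnd_req threshold as in the copy loop above, and only if the
 * stcb can still be referenced.)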
6246 */ 6247 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6248 ((freed_so_far >= rwnd_req) && 6249 (control->do_not_ref_stcb == 0) && 6250 (no_rcv_needed == 0))) { 6251 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6252 } 6253 wait_some_more: 6254 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6255 goto release; 6256 } 6257 6258 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6259 goto release; 6260 6261 if (hold_rlock == 1) { 6262 SCTP_INP_READ_UNLOCK(inp); 6263 hold_rlock = 0; 6264 } 6265 if (hold_sblock == 0) { 6266 SOCKBUF_LOCK(&so->so_rcv); 6267 hold_sblock = 1; 6268 } 6269 if ((copied_so_far) && (control->length == 0) && 6270 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6271 goto release; 6272 } 6273 if (so->so_rcv.sb_cc <= control->held_length) { 6274 error = sbwait(&so->so_rcv); 6275 if (error) { 6276 goto release; 6277 } 6278 control->held_length = 0; 6279 } 6280 if (hold_sblock) { 6281 SOCKBUF_UNLOCK(&so->so_rcv); 6282 hold_sblock = 0; 6283 } 6284 if (control->length == 0) { 6285 /* still nothing here */ 6286 if (control->end_added == 1) { 6287 /* he aborted, or is done i.e.did a shutdown */ 6288 out_flags |= MSG_EOR; 6289 if (control->pdapi_aborted) { 6290 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6291 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6292 6293 out_flags |= MSG_TRUNC; 6294 } else { 6295 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6296 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6297 } 6298 goto done_with_control; 6299 } 6300 if (so->so_rcv.sb_cc > held_length) { 6301 control->held_length = so->so_rcv.sb_cc; 6302 held_length = 0; 6303 } 6304 goto wait_some_more; 6305 } else if (control->data == NULL) { 6306 /* 6307 * we must re-sync since data is probably being 6308 * added 6309 */ 6310 SCTP_INP_READ_LOCK(inp); 6311 if ((control->length > 0) && (control->data == NULL)) { 6312 /* 6313 * big trouble.. we have the lock and its 6314 * corrupt? 6315 */ 6316 #ifdef INVARIANTS 6317 panic("Impossible data==NULL length !=0"); 6318 #endif 6319 out_flags |= MSG_EOR; 6320 out_flags |= MSG_TRUNC; 6321 control->length = 0; 6322 SCTP_INP_READ_UNLOCK(inp); 6323 goto done_with_control; 6324 } 6325 SCTP_INP_READ_UNLOCK(inp); 6326 /* We will fall around to get more data */ 6327 } 6328 goto get_more_data; 6329 } else { 6330 /*- 6331 * Give caller back the mbuf chain, 6332 * store in uio_resid the length 6333 */ 6334 wakeup_read_socket = 0; 6335 if ((control->end_added == 0) || 6336 (TAILQ_NEXT(control, next) == NULL)) { 6337 /* Need to get rlock */ 6338 if (hold_rlock == 0) { 6339 SCTP_INP_READ_LOCK(inp); 6340 hold_rlock = 1; 6341 } 6342 } 6343 if (control->end_added) { 6344 out_flags |= MSG_EOR; 6345 if ((control->do_not_ref_stcb == 0) && 6346 (control->stcb != NULL) && 6347 ((control->spec_flags & M_NOTIFICATION) == 0)) 6348 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6349 } 6350 if (control->spec_flags & M_NOTIFICATION) { 6351 out_flags |= MSG_NOTIFICATION; 6352 } 6353 uio->uio_resid = control->length; 6354 *mp = control->data; 6355 m = control->data; 6356 while (m) { 6357 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6358 sctp_sblog(&so->so_rcv, 6359 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6360 } 6361 sctp_sbfree(control, stcb, &so->so_rcv, m); 6362 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6363 freed_so_far += MSIZE; 6364 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6365 sctp_sblog(&so->so_rcv, 6366 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6367 } 6368 m = SCTP_BUF_NEXT(m); 6369 } 6370 control->data = control->tail_mbuf = NULL; 6371 control->length = 0; 6372 if (out_flags & MSG_EOR) { 6373 /* Done with this control */ 6374 goto done_with_control; 6375 } 6376 } 6377 release: 6378 if (hold_rlock == 1) { 6379 SCTP_INP_READ_UNLOCK(inp); 6380 hold_rlock = 0; 6381 } 6382 if (hold_sblock == 1) { 6383 SOCKBUF_UNLOCK(&so->so_rcv); 6384 hold_sblock = 0; 6385 } 6386 6387 SOCK_IO_RECV_UNLOCK(so); 6388 sockbuf_lock = 0; 6389 6390 release_unlocked: 6391 if (hold_sblock) { 6392 SOCKBUF_UNLOCK(&so->so_rcv); 6393 hold_sblock = 0; 6394 } 6395 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6396 if ((freed_so_far >= rwnd_req) && 6397 (control && (control->do_not_ref_stcb == 0)) && 6398 (no_rcv_needed == 0)) 6399 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6400 } 6401 out: 6402 if (msg_flags) { 6403 *msg_flags = out_flags; 6404 } 6405 if (((out_flags & MSG_EOR) == 0) && 6406 ((in_flags & MSG_PEEK) == 0) && 6407 (sinfo) && 6408 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6409 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6410 struct sctp_extrcvinfo *s_extra; 6411 6412 s_extra = (struct sctp_extrcvinfo *)sinfo; 6413 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6414 } 6415 if (hold_rlock == 1) { 6416 SCTP_INP_READ_UNLOCK(inp); 6417 } 6418 if (hold_sblock) { 6419 SOCKBUF_UNLOCK(&so->so_rcv); 6420 } 6421 if (sockbuf_lock) { 6422 SOCK_IO_RECV_UNLOCK(so); 6423 } 6424 6425 if (freecnt_applied) { 6426 /* 6427 * The lock on the socket buffer protects us so the free 6428 * code will stop. But since we used the socketbuf lock and 6429 * the sender uses the tcb_lock to increment, we need to use 6430 * the atomic add to the refcnt. 6431 */ 6432 if (stcb == NULL) { 6433 #ifdef INVARIANTS 6434 panic("stcb for refcnt has gone NULL?"); 6435 goto stage_left; 6436 #else 6437 goto stage_left; 6438 #endif 6439 } 6440 /* Save the value back for next time */ 6441 stcb->freed_by_sorcv_sincelast = freed_so_far; 6442 atomic_add_int(&stcb->asoc.refcnt, -1); 6443 } 6444 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6445 if (stcb) { 6446 sctp_misc_ints(SCTP_SORECV_DONE, 6447 freed_so_far, 6448 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6449 stcb->asoc.my_rwnd, 6450 so->so_rcv.sb_cc); 6451 } else { 6452 sctp_misc_ints(SCTP_SORECV_DONE, 6453 freed_so_far, 6454 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6455 0, 6456 so->so_rcv.sb_cc); 6457 } 6458 } 6459 stage_left: 6460 if (wakeup_read_socket) { 6461 sctp_sorwakeup(inp, so); 6462 } 6463 return (error); 6464 } 6465 6466 #ifdef SCTP_MBUF_LOGGING 6467 struct mbuf * 6468 sctp_m_free(struct mbuf *m) 6469 { 6470 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6471 sctp_log_mb(m, SCTP_MBUF_IFREE); 6472 } 6473 return (m_free(m)); 6474 } 6475 6476 void 6477 sctp_m_freem(struct mbuf *mb) 6478 { 6479 while (mb != NULL) 6480 mb = sctp_m_free(mb); 6481 } 6482 6483 #endif 6484 6485 int 6486 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6487 { 6488 /* 6489 * Given a local address. For all associations that holds the 6490 * address, request a peer-set-primary. 
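 * The request is not issued inline: we look up the matching sctp_ifa,
 * queue a work item on the address work queue and start the ADDR_WQ
 * timer; the iterator then walks the associations and asks each peer
 * to use this address as primary.
 *
 * A minimal caller sketch (illustrative only; the loopback address is
 * a placeholder and must be replaced by an address actually configured
 * on the host):
 *
 *	struct sockaddr_in sin;
 *
 *	memset(&sin, 0, sizeof(sin));
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_family = AF_INET;
 *	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *	(void)sctp_dynamic_set_primary((struct sockaddr *)&sin,
 *	    SCTP_DEFAULT_VRFID);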
6491 */ 6492 struct sctp_ifa *ifa; 6493 struct sctp_laddr *wi; 6494 6495 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6496 if (ifa == NULL) { 6497 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6498 return (EADDRNOTAVAIL); 6499 } 6500 /* 6501 * Now that we have the ifa we must awaken the iterator with this 6502 * message. 6503 */ 6504 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6505 if (wi == NULL) { 6506 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6507 return (ENOMEM); 6508 } 6509 /* Now incr the count and int wi structure */ 6510 SCTP_INCR_LADDR_COUNT(); 6511 memset(wi, 0, sizeof(*wi)); 6512 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6513 wi->ifa = ifa; 6514 wi->action = SCTP_SET_PRIM_ADDR; 6515 atomic_add_int(&ifa->refcount, 1); 6516 6517 /* Now add it to the work queue */ 6518 SCTP_WQ_ADDR_LOCK(); 6519 /* 6520 * Should this really be a tailq? As it is we will process the 6521 * newest first :-0 6522 */ 6523 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6524 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6525 (struct sctp_inpcb *)NULL, 6526 (struct sctp_tcb *)NULL, 6527 (struct sctp_nets *)NULL); 6528 SCTP_WQ_ADDR_UNLOCK(); 6529 return (0); 6530 } 6531 6532 int 6533 sctp_soreceive(struct socket *so, 6534 struct sockaddr **psa, 6535 struct uio *uio, 6536 struct mbuf **mp0, 6537 struct mbuf **controlp, 6538 int *flagsp) 6539 { 6540 int error, fromlen; 6541 uint8_t sockbuf[256]; 6542 struct sockaddr *from; 6543 struct sctp_extrcvinfo sinfo; 6544 int filling_sinfo = 1; 6545 int flags; 6546 struct sctp_inpcb *inp; 6547 6548 inp = (struct sctp_inpcb *)so->so_pcb; 6549 /* pickup the assoc we are reading from */ 6550 if (inp == NULL) { 6551 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6552 return (EINVAL); 6553 } 6554 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6555 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6556 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6557 (controlp == NULL)) { 6558 /* user does not want the sndrcv ctl */ 6559 filling_sinfo = 0; 6560 } 6561 if (psa) { 6562 from = (struct sockaddr *)sockbuf; 6563 fromlen = sizeof(sockbuf); 6564 from->sa_len = 0; 6565 } else { 6566 from = NULL; 6567 fromlen = 0; 6568 } 6569 6570 if (filling_sinfo) { 6571 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6572 } 6573 if (flagsp != NULL) { 6574 flags = *flagsp; 6575 } else { 6576 flags = 0; 6577 } 6578 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6579 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6580 if (flagsp != NULL) { 6581 *flagsp = flags; 6582 } 6583 if (controlp != NULL) { 6584 /* copy back the sinfo in a CMSG format */ 6585 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6586 *controlp = sctp_build_ctl_nchunk(inp, 6587 (struct sctp_sndrcvinfo *)&sinfo); 6588 } else { 6589 *controlp = NULL; 6590 } 6591 } 6592 if (psa) { 6593 /* copy back the address info */ 6594 if (from && from->sa_len) { 6595 *psa = sodupsockaddr(from, M_NOWAIT); 6596 } else { 6597 *psa = NULL; 6598 } 6599 } 6600 return (error); 6601 } 6602 6603 int 6604 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6605 int totaddr, int *error) 6606 { 6607 int added = 0; 6608 int i; 6609 struct sctp_inpcb *inp; 6610 struct sockaddr *sa; 6611 size_t incr = 0; 6612 #ifdef INET 6613 struct sockaddr_in *sin; 6614 #endif 6615 #ifdef INET6 6616 struct sockaddr_in6 *sin6; 6617 #endif 6618 6619 sa = addr; 6620 inp = 
stcb->sctp_ep; 6621 *error = 0; 6622 for (i = 0; i < totaddr; i++) { 6623 switch (sa->sa_family) { 6624 #ifdef INET 6625 case AF_INET: 6626 incr = sizeof(struct sockaddr_in); 6627 sin = (struct sockaddr_in *)sa; 6628 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6629 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6630 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6631 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6632 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6633 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6634 *error = EINVAL; 6635 goto out_now; 6636 } 6637 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6638 SCTP_DONOT_SETSCOPE, 6639 SCTP_ADDR_IS_CONFIRMED)) { 6640 /* assoc gone no un-lock */ 6641 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6642 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6643 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6644 *error = ENOBUFS; 6645 goto out_now; 6646 } 6647 added++; 6648 break; 6649 #endif 6650 #ifdef INET6 6651 case AF_INET6: 6652 incr = sizeof(struct sockaddr_in6); 6653 sin6 = (struct sockaddr_in6 *)sa; 6654 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6655 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6656 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6657 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6658 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6659 *error = EINVAL; 6660 goto out_now; 6661 } 6662 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6663 SCTP_DONOT_SETSCOPE, 6664 SCTP_ADDR_IS_CONFIRMED)) { 6665 /* assoc gone no un-lock */ 6666 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6667 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6668 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6669 *error = ENOBUFS; 6670 goto out_now; 6671 } 6672 added++; 6673 break; 6674 #endif 6675 default: 6676 break; 6677 } 6678 sa = (struct sockaddr *)((caddr_t)sa + incr); 6679 } 6680 out_now: 6681 return (added); 6682 } 6683 6684 int 6685 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6686 unsigned int totaddr, 6687 unsigned int *num_v4, unsigned int *num_v6, 6688 unsigned int limit) 6689 { 6690 struct sockaddr *sa; 6691 struct sctp_tcb *stcb; 6692 unsigned int incr, at, i; 6693 6694 at = 0; 6695 sa = addr; 6696 *num_v6 = *num_v4 = 0; 6697 /* account and validate addresses */ 6698 if (totaddr == 0) { 6699 return (EINVAL); 6700 } 6701 for (i = 0; i < totaddr; i++) { 6702 if (at + sizeof(struct sockaddr) > limit) { 6703 return (EINVAL); 6704 } 6705 switch (sa->sa_family) { 6706 #ifdef INET 6707 case AF_INET: 6708 incr = (unsigned int)sizeof(struct sockaddr_in); 6709 if (sa->sa_len != incr) { 6710 return (EINVAL); 6711 } 6712 (*num_v4) += 1; 6713 break; 6714 #endif 6715 #ifdef INET6 6716 case AF_INET6: 6717 { 6718 struct sockaddr_in6 *sin6; 6719 6720 incr = (unsigned int)sizeof(struct sockaddr_in6); 6721 if (sa->sa_len != incr) { 6722 return (EINVAL); 6723 } 6724 sin6 = (struct sockaddr_in6 *)sa; 6725 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6726 /* Must be non-mapped for connectx */ 6727 return (EINVAL); 6728 } 6729 (*num_v6) += 1; 6730 break; 6731 } 6732 #endif 6733 default: 6734 return (EINVAL); 6735 } 6736 if ((at + incr) > limit) { 6737 return (EINVAL); 6738 } 6739 SCTP_INP_INCR_REF(inp); 6740 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6741 if (stcb != NULL) { 6742 SCTP_TCB_UNLOCK(stcb); 6743 return (EALREADY); 6744 } else { 6745 SCTP_INP_DECR_REF(inp); 6746 } 6747 at += incr; 6748 sa = (struct sockaddr *)((caddr_t)sa + 
incr); 6749 } 6750 return (0); 6751 } 6752 6753 /* 6754 * sctp_bindx(ADD) for one address. 6755 * assumes all arguments are valid/checked by caller. 6756 */ 6757 void 6758 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6759 struct sockaddr *sa, uint32_t vrf_id, int *error, 6760 void *p) 6761 { 6762 #if defined(INET) && defined(INET6) 6763 struct sockaddr_in sin; 6764 #endif 6765 #ifdef INET6 6766 struct sockaddr_in6 *sin6; 6767 #endif 6768 #ifdef INET 6769 struct sockaddr_in *sinp; 6770 #endif 6771 struct sockaddr *addr_to_use; 6772 struct sctp_inpcb *lep; 6773 uint16_t port; 6774 6775 /* see if we're bound all already! */ 6776 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6777 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6778 *error = EINVAL; 6779 return; 6780 } 6781 switch (sa->sa_family) { 6782 #ifdef INET6 6783 case AF_INET6: 6784 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6785 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6786 *error = EINVAL; 6787 return; 6788 } 6789 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6790 /* can only bind v6 on PF_INET6 sockets */ 6791 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6792 *error = EINVAL; 6793 return; 6794 } 6795 sin6 = (struct sockaddr_in6 *)sa; 6796 port = sin6->sin6_port; 6797 #ifdef INET 6798 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6799 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6800 SCTP_IPV6_V6ONLY(inp)) { 6801 /* can't bind v4-mapped on PF_INET sockets */ 6802 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6803 *error = EINVAL; 6804 return; 6805 } 6806 in6_sin6_2_sin(&sin, sin6); 6807 addr_to_use = (struct sockaddr *)&sin; 6808 } else { 6809 addr_to_use = sa; 6810 } 6811 #else 6812 addr_to_use = sa; 6813 #endif 6814 break; 6815 #endif 6816 #ifdef INET 6817 case AF_INET: 6818 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6819 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6820 *error = EINVAL; 6821 return; 6822 } 6823 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6824 SCTP_IPV6_V6ONLY(inp)) { 6825 /* can't bind v4 on PF_INET sockets */ 6826 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6827 *error = EINVAL; 6828 return; 6829 } 6830 sinp = (struct sockaddr_in *)sa; 6831 port = sinp->sin_port; 6832 addr_to_use = sa; 6833 break; 6834 #endif 6835 default: 6836 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6837 *error = EINVAL; 6838 return; 6839 } 6840 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6841 if (p == NULL) { 6842 /* Can't get proc for Net/Open BSD */ 6843 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6844 *error = EINVAL; 6845 return; 6846 } 6847 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6848 return; 6849 } 6850 /* Validate the incoming port. */ 6851 if ((port != 0) && (port != inp->sctp_lport)) { 6852 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6853 *error = EINVAL; 6854 return; 6855 } 6856 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6857 if (lep == NULL) { 6858 /* add the address */ 6859 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6860 SCTP_ADD_IP_ADDRESS, vrf_id); 6861 } else { 6862 if (lep != inp) { 6863 *error = EADDRINUSE; 6864 } 6865 SCTP_INP_DECR_REF(lep); 6866 } 6867 } 6868 6869 /* 6870 * sctp_bindx(DELETE) for one address. 6871 * assumes all arguments are valid/checked by caller. 
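 * Unlike the ADD case, no endpoint lookup or implicit bind is done
 * here; the address is simply handed to sctp_addr_mgmt_ep_sa() with
 * SCTP_DEL_IP_ADDRESS.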
6872 */ 6873 void 6874 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6875 struct sockaddr *sa, uint32_t vrf_id, int *error) 6876 { 6877 struct sockaddr *addr_to_use; 6878 #if defined(INET) && defined(INET6) 6879 struct sockaddr_in6 *sin6; 6880 struct sockaddr_in sin; 6881 #endif 6882 6883 /* see if we're bound all already! */ 6884 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6885 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6886 *error = EINVAL; 6887 return; 6888 } 6889 switch (sa->sa_family) { 6890 #ifdef INET6 6891 case AF_INET6: 6892 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6893 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6894 *error = EINVAL; 6895 return; 6896 } 6897 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6898 /* can only bind v6 on PF_INET6 sockets */ 6899 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6900 *error = EINVAL; 6901 return; 6902 } 6903 #ifdef INET 6904 sin6 = (struct sockaddr_in6 *)sa; 6905 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6906 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6907 SCTP_IPV6_V6ONLY(inp)) { 6908 /* can't bind mapped-v4 on PF_INET sockets */ 6909 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6910 *error = EINVAL; 6911 return; 6912 } 6913 in6_sin6_2_sin(&sin, sin6); 6914 addr_to_use = (struct sockaddr *)&sin; 6915 } else { 6916 addr_to_use = sa; 6917 } 6918 #else 6919 addr_to_use = sa; 6920 #endif 6921 break; 6922 #endif 6923 #ifdef INET 6924 case AF_INET: 6925 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6926 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6927 *error = EINVAL; 6928 return; 6929 } 6930 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6931 SCTP_IPV6_V6ONLY(inp)) { 6932 /* can't bind v4 on PF_INET sockets */ 6933 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6934 *error = EINVAL; 6935 return; 6936 } 6937 addr_to_use = sa; 6938 break; 6939 #endif 6940 default: 6941 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6942 *error = EINVAL; 6943 return; 6944 } 6945 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6946 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6947 vrf_id); 6948 } 6949 6950 /* 6951 * returns the valid local address count for an assoc, taking into account 6952 * all scoping rules 6953 */ 6954 int 6955 sctp_local_addr_count(struct sctp_tcb *stcb) 6956 { 6957 int loopback_scope; 6958 #if defined(INET) 6959 int ipv4_local_scope, ipv4_addr_legal; 6960 #endif 6961 #if defined(INET6) 6962 int local_scope, site_scope, ipv6_addr_legal; 6963 #endif 6964 struct sctp_vrf *vrf; 6965 struct sctp_ifn *sctp_ifn; 6966 struct sctp_ifa *sctp_ifa; 6967 int count = 0; 6968 6969 /* Turn on all the appropriate scopes */ 6970 loopback_scope = stcb->asoc.scope.loopback_scope; 6971 #if defined(INET) 6972 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6973 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6974 #endif 6975 #if defined(INET6) 6976 local_scope = stcb->asoc.scope.local_scope; 6977 site_scope = stcb->asoc.scope.site_scope; 6978 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6979 #endif 6980 SCTP_IPI_ADDR_RLOCK(); 6981 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6982 if (vrf == NULL) { 6983 /* no vrf, no addresses */ 6984 SCTP_IPI_ADDR_RUNLOCK(); 6985 return (0); 6986 } 6987 6988 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6989 /* 6990 * bound all case: go through all ifns on the vrf 6991 */ 6992 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6993 if ((loopback_scope == 0) && 6994 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6995 continue; 6996 } 6997 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6998 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6999 continue; 7000 switch (sctp_ifa->address.sa.sa_family) { 7001 #ifdef INET 7002 case AF_INET: 7003 if (ipv4_addr_legal) { 7004 struct sockaddr_in *sin; 7005 7006 sin = &sctp_ifa->address.sin; 7007 if (sin->sin_addr.s_addr == 0) { 7008 /* 7009 * skip unspecified 7010 * addrs 7011 */ 7012 continue; 7013 } 7014 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 7015 &sin->sin_addr) != 0) { 7016 continue; 7017 } 7018 if ((ipv4_local_scope == 0) && 7019 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 7020 continue; 7021 } 7022 /* count this one */ 7023 count++; 7024 } else { 7025 continue; 7026 } 7027 break; 7028 #endif 7029 #ifdef INET6 7030 case AF_INET6: 7031 if (ipv6_addr_legal) { 7032 struct sockaddr_in6 *sin6; 7033 7034 sin6 = &sctp_ifa->address.sin6; 7035 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 7036 continue; 7037 } 7038 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 7039 &sin6->sin6_addr) != 0) { 7040 continue; 7041 } 7042 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 7043 if (local_scope == 0) 7044 continue; 7045 if (sin6->sin6_scope_id == 0) { 7046 if (sa6_recoverscope(sin6) != 0) 7047 /* 7048 * 7049 * bad 7050 * link 7051 * 7052 * local 7053 * 7054 * address 7055 */ 7056 continue; 7057 } 7058 } 7059 if ((site_scope == 0) && 7060 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7061 continue; 7062 } 7063 /* count this one */ 7064 count++; 7065 } 7066 break; 7067 #endif 7068 default: 7069 /* TSNH */ 7070 break; 7071 } 7072 } 7073 } 7074 } else { 7075 /* 7076 * subset bound case 7077 */ 7078 struct sctp_laddr *laddr; 7079 7080 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7081 sctp_nxt_addr) { 7082 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7083 continue; 7084 } 7085 /* count this one */ 7086 count++; 7087 } 7088 } 7089 SCTP_IPI_ADDR_RUNLOCK(); 7090 return (count); 7091 } 7092 7093 #if defined(SCTP_LOCAL_TRACE_BUF) 7094 7095 void 7096 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7097 { 7098 uint32_t saveindex, newindex; 7099 7100 do { 7101 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7102 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7103 newindex = 1; 7104 } else { 7105 newindex = saveindex + 1; 7106 } 7107 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7108 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7109 saveindex = 0; 7110 } 7111 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7112 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7113 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7114 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7115 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7116 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7117 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7118 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7119 } 7120 7121 #endif 7122 static void 7123 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7124 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7125 { 7126 struct ip *iph; 7127 #ifdef INET6 7128 struct ip6_hdr *ip6; 7129 #endif 7130 struct mbuf *sp, *last; 7131 struct udphdr *uhdr; 7132 uint16_t port; 7133 7134 if ((m->m_flags & M_PKTHDR) == 0) { 7135 /* Can't handle one that is not a pkt hdr */ 7136 goto out; 7137 } 7138 /* Pull the src port */ 7139 iph = mtod(m, struct ip *); 7140 uhdr = (struct udphdr *)((caddr_t)iph + off); 7141 port = uhdr->uh_sport; 7142 /* 7143 * Split out the mbuf chain. Leave the IP header in m, place the 7144 * rest in the sp. 7145 */ 7146 sp = m_split(m, off, M_NOWAIT); 7147 if (sp == NULL) { 7148 /* Gak, drop packet, we can't do a split */ 7149 goto out; 7150 } 7151 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7152 /* Gak, packet can't have an SCTP header in it - too small */ 7153 m_freem(sp); 7154 goto out; 7155 } 7156 /* Now pull up the UDP header and SCTP header together */ 7157 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7158 if (sp == NULL) { 7159 /* Gak pullup failed */ 7160 goto out; 7161 } 7162 /* Trim out the UDP header */ 7163 m_adj(sp, sizeof(struct udphdr)); 7164 7165 /* Now reconstruct the mbuf chain */ 7166 for (last = m; last->m_next; last = last->m_next); 7167 last->m_next = sp; 7168 m->m_pkthdr.len += sp->m_pkthdr.len; 7169 /* 7170 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7171 * checksum and it was valid. Since CSUM_DATA_VALID == 7172 * CSUM_SCTP_VALID this would imply that the HW also verified the 7173 * SCTP checksum. Therefore, clear the bit. 
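 * (If the bit were left set, the SCTP input path would treat the
 * tunneled packet as already checksum-verified and skip its own
 * check.)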
7174 */ 7175 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7176 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7177 m->m_pkthdr.len, 7178 if_name(m->m_pkthdr.rcvif), 7179 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7180 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7181 iph = mtod(m, struct ip *); 7182 switch (iph->ip_v) { 7183 #ifdef INET 7184 case IPVERSION: 7185 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7186 sctp_input_with_port(m, off, port); 7187 break; 7188 #endif 7189 #ifdef INET6 7190 case IPV6_VERSION >> 4: 7191 ip6 = mtod(m, struct ip6_hdr *); 7192 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7193 sctp6_input_with_port(&m, &off, port); 7194 break; 7195 #endif 7196 default: 7197 goto out; 7198 break; 7199 } 7200 return; 7201 out: 7202 m_freem(m); 7203 } 7204 7205 #ifdef INET 7206 static void 7207 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7208 { 7209 struct ip *outer_ip, *inner_ip; 7210 struct sctphdr *sh; 7211 struct icmp *icmp; 7212 struct udphdr *udp; 7213 struct sctp_inpcb *inp; 7214 struct sctp_tcb *stcb; 7215 struct sctp_nets *net; 7216 struct sctp_init_chunk *ch; 7217 struct sockaddr_in src, dst; 7218 uint8_t type, code; 7219 7220 inner_ip = (struct ip *)vip; 7221 icmp = (struct icmp *)((caddr_t)inner_ip - 7222 (sizeof(struct icmp) - sizeof(struct ip))); 7223 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7224 if (ntohs(outer_ip->ip_len) < 7225 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7226 return; 7227 } 7228 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7229 sh = (struct sctphdr *)(udp + 1); 7230 memset(&src, 0, sizeof(struct sockaddr_in)); 7231 src.sin_family = AF_INET; 7232 src.sin_len = sizeof(struct sockaddr_in); 7233 src.sin_port = sh->src_port; 7234 src.sin_addr = inner_ip->ip_src; 7235 memset(&dst, 0, sizeof(struct sockaddr_in)); 7236 dst.sin_family = AF_INET; 7237 dst.sin_len = sizeof(struct sockaddr_in); 7238 dst.sin_port = sh->dest_port; 7239 dst.sin_addr = inner_ip->ip_dst; 7240 /* 7241 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7242 * holds our local endpoint address. Thus we reverse the dst and the 7243 * src in the lookup. 7244 */ 7245 inp = NULL; 7246 net = NULL; 7247 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7248 (struct sockaddr *)&src, 7249 &inp, &net, 1, 7250 SCTP_DEFAULT_VRFID); 7251 if ((stcb != NULL) && 7252 (net != NULL) && 7253 (inp != NULL)) { 7254 /* Check the UDP port numbers */ 7255 if ((udp->uh_dport != net->port) || 7256 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7257 SCTP_TCB_UNLOCK(stcb); 7258 return; 7259 } 7260 /* Check the verification tag */ 7261 if (ntohl(sh->v_tag) != 0) { 7262 /* 7263 * This must be the verification tag used for 7264 * sending out packets. We don't consider packets 7265 * reflecting the verification tag. 7266 */ 7267 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7268 SCTP_TCB_UNLOCK(stcb); 7269 return; 7270 } 7271 } else { 7272 if (ntohs(outer_ip->ip_len) >= 7273 sizeof(struct ip) + 7274 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7275 /* 7276 * In this case we can check if we got an 7277 * INIT chunk and if the initiate tag 7278 * matches. 
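 * (An INIT is sent with a verification tag of zero and carries the
 * initiate tag instead, which is why that tag is compared against our
 * local vtag here.)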
7279 */ 7280 ch = (struct sctp_init_chunk *)(sh + 1); 7281 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7282 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7283 SCTP_TCB_UNLOCK(stcb); 7284 return; 7285 } 7286 } else { 7287 SCTP_TCB_UNLOCK(stcb); 7288 return; 7289 } 7290 } 7291 type = icmp->icmp_type; 7292 code = icmp->icmp_code; 7293 if ((type == ICMP_UNREACH) && 7294 (code == ICMP_UNREACH_PORT)) { 7295 code = ICMP_UNREACH_PROTOCOL; 7296 } 7297 sctp_notify(inp, stcb, net, type, code, 7298 ntohs(inner_ip->ip_len), 7299 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7300 } else { 7301 if ((stcb == NULL) && (inp != NULL)) { 7302 /* reduce ref-count */ 7303 SCTP_INP_WLOCK(inp); 7304 SCTP_INP_DECR_REF(inp); 7305 SCTP_INP_WUNLOCK(inp); 7306 } 7307 if (stcb) { 7308 SCTP_TCB_UNLOCK(stcb); 7309 } 7310 } 7311 return; 7312 } 7313 #endif 7314 7315 #ifdef INET6 7316 static void 7317 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7318 { 7319 struct ip6ctlparam *ip6cp; 7320 struct sctp_inpcb *inp; 7321 struct sctp_tcb *stcb; 7322 struct sctp_nets *net; 7323 struct sctphdr sh; 7324 struct udphdr udp; 7325 struct sockaddr_in6 src, dst; 7326 uint8_t type, code; 7327 7328 ip6cp = (struct ip6ctlparam *)d; 7329 /* 7330 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7331 */ 7332 if (ip6cp->ip6c_m == NULL) { 7333 return; 7334 } 7335 /* 7336 * Check if we can safely examine the ports and the verification tag 7337 * of the SCTP common header. 7338 */ 7339 if (ip6cp->ip6c_m->m_pkthdr.len < 7340 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7341 return; 7342 } 7343 /* Copy out the UDP header. */ 7344 memset(&udp, 0, sizeof(struct udphdr)); 7345 m_copydata(ip6cp->ip6c_m, 7346 ip6cp->ip6c_off, 7347 sizeof(struct udphdr), 7348 (caddr_t)&udp); 7349 /* Copy out the port numbers and the verification tag. */ 7350 memset(&sh, 0, sizeof(struct sctphdr)); 7351 m_copydata(ip6cp->ip6c_m, 7352 ip6cp->ip6c_off + sizeof(struct udphdr), 7353 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7354 (caddr_t)&sh); 7355 memset(&src, 0, sizeof(struct sockaddr_in6)); 7356 src.sin6_family = AF_INET6; 7357 src.sin6_len = sizeof(struct sockaddr_in6); 7358 src.sin6_port = sh.src_port; 7359 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7360 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7361 return; 7362 } 7363 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7364 dst.sin6_family = AF_INET6; 7365 dst.sin6_len = sizeof(struct sockaddr_in6); 7366 dst.sin6_port = sh.dest_port; 7367 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7368 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7369 return; 7370 } 7371 inp = NULL; 7372 net = NULL; 7373 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7374 (struct sockaddr *)&src, 7375 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7376 if ((stcb != NULL) && 7377 (net != NULL) && 7378 (inp != NULL)) { 7379 /* Check the UDP port numbers */ 7380 if ((udp.uh_dport != net->port) || 7381 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7382 SCTP_TCB_UNLOCK(stcb); 7383 return; 7384 } 7385 /* Check the verification tag */ 7386 if (ntohl(sh.v_tag) != 0) { 7387 /* 7388 * This must be the verification tag used for 7389 * sending out packets. We don't consider packets 7390 * reflecting the verification tag. 
7391 */ 7392 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7393 SCTP_TCB_UNLOCK(stcb); 7394 return; 7395 } 7396 } else { 7397 if (ip6cp->ip6c_m->m_pkthdr.len >= 7398 ip6cp->ip6c_off + sizeof(struct udphdr) + 7399 sizeof(struct sctphdr) + 7400 sizeof(struct sctp_chunkhdr) + 7401 offsetof(struct sctp_init, a_rwnd)) { 7402 /* 7403 * In this case we can check if we got an 7404 * INIT chunk and if the initiate tag 7405 * matches. 7406 */ 7407 uint32_t initiate_tag; 7408 uint8_t chunk_type; 7409 7410 m_copydata(ip6cp->ip6c_m, 7411 ip6cp->ip6c_off + 7412 sizeof(struct udphdr) + 7413 sizeof(struct sctphdr), 7414 sizeof(uint8_t), 7415 (caddr_t)&chunk_type); 7416 m_copydata(ip6cp->ip6c_m, 7417 ip6cp->ip6c_off + 7418 sizeof(struct udphdr) + 7419 sizeof(struct sctphdr) + 7420 sizeof(struct sctp_chunkhdr), 7421 sizeof(uint32_t), 7422 (caddr_t)&initiate_tag); 7423 if ((chunk_type != SCTP_INITIATION) || 7424 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7425 SCTP_TCB_UNLOCK(stcb); 7426 return; 7427 } 7428 } else { 7429 SCTP_TCB_UNLOCK(stcb); 7430 return; 7431 } 7432 } 7433 type = ip6cp->ip6c_icmp6->icmp6_type; 7434 code = ip6cp->ip6c_icmp6->icmp6_code; 7435 if ((type == ICMP6_DST_UNREACH) && 7436 (code == ICMP6_DST_UNREACH_NOPORT)) { 7437 type = ICMP6_PARAM_PROB; 7438 code = ICMP6_PARAMPROB_NEXTHEADER; 7439 } 7440 sctp6_notify(inp, stcb, net, type, code, 7441 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7442 } else { 7443 if ((stcb == NULL) && (inp != NULL)) { 7444 /* reduce inp's ref-count */ 7445 SCTP_INP_WLOCK(inp); 7446 SCTP_INP_DECR_REF(inp); 7447 SCTP_INP_WUNLOCK(inp); 7448 } 7449 if (stcb) { 7450 SCTP_TCB_UNLOCK(stcb); 7451 } 7452 } 7453 } 7454 #endif 7455 7456 void 7457 sctp_over_udp_stop(void) 7458 { 7459 /* 7460 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7461 * for writing! 7462 */ 7463 #ifdef INET 7464 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7465 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7466 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7467 } 7468 #endif 7469 #ifdef INET6 7470 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7471 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7472 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7473 } 7474 #endif 7475 } 7476 7477 int 7478 sctp_over_udp_start(void) 7479 { 7480 uint16_t port; 7481 int ret; 7482 #ifdef INET 7483 struct sockaddr_in sin; 7484 #endif 7485 #ifdef INET6 7486 struct sockaddr_in6 sin6; 7487 #endif 7488 /* 7489 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7490 * for writing! 7491 */ 7492 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7493 if (ntohs(port) == 0) { 7494 /* Must have a port set */ 7495 return (EINVAL); 7496 } 7497 #ifdef INET 7498 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7499 /* Already running -- must stop first */ 7500 return (EALREADY); 7501 } 7502 #endif 7503 #ifdef INET6 7504 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7505 /* Already running -- must stop first */ 7506 return (EALREADY); 7507 } 7508 #endif 7509 #ifdef INET 7510 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7511 SOCK_DGRAM, IPPROTO_UDP, 7512 curthread->td_ucred, curthread))) { 7513 sctp_over_udp_stop(); 7514 return (ret); 7515 } 7516 /* Call the special UDP hook. */ 7517 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7518 sctp_recv_udp_tunneled_packet, 7519 sctp_recv_icmp_tunneled_packet, 7520 NULL))) { 7521 sctp_over_udp_stop(); 7522 return (ret); 7523 } 7524 /* Ok, we have a socket, bind it to the port. 
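 * Both this IPv4 socket and the IPv6 socket below are bound to the
 * same sysctl-configured UDP tunneling port.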
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}

void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}

void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
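	/* Entering any shutdown state means shutdown is no longer pending. */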
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	      (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}