/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

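/*
 * General-purpose SCTP utility routines: the SCTP_CTR6-based trace loggers
 * (which only record anything when SCTP_LOCAL_TRACE_BUF is defined), the
 * optional audit ring, tick/time conversion helpers, association
 * initialization, mapping-array management, the endpoint/association
 * iterator, and the central timer handler sctp_timeout_handler().
 */
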
#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = SCTP_SBAVAIL(sb);
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(SOCK_MTX(inp->sctp_socket));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_rcv));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_snd));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque =
		    (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}

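/*
 * The audit trail is a ring of SCTP_AUDIT_SIZE two-byte records indexed by
 * sctp_audit_indx: sctp_audit_log() appends raw (event, data) pairs,
 * sctp_auditing() below appends 0xAA/0xA1/0xAF-style markers while
 * cross-checking the retransmit and flight-size bookkeeping, and
 * sctp_print_audit_report() above dumps the ring, breaking lines at the
 * 0xe001, 0xf0xx, and 0xc001 markers.
 */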
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{
	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way the code can test that a time is positive and know
 * that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
	uint64_t temp;
	uint32_t ticks;

	if (hz == 1000) {
		ticks = msecs;
	} else {
		temp = (((uint64_t)msecs * hz) + 999) / 1000;
		if (temp > UINT32_MAX) {
			ticks = UINT32_MAX;
		} else {
			ticks = (uint32_t)temp;
		}
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t msecs;

	if (hz == 1000) {
		msecs = ticks;
	} else {
		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
		if (temp > UINT32_MAX) {
			msecs = UINT32_MAX;
		} else {
			msecs = (uint32_t)temp;
		}
	}
	return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
	uint64_t temp;
	uint32_t ticks;

	temp = (uint64_t)secs * hz;
	if (temp > UINT32_MAX) {
		ticks = UINT32_MAX;
	} else {
		ticks = (uint32_t)temp;
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t secs;

	temp = ((uint64_t)ticks + (hz - 1)) / hz;
	if (temp > UINT32_MAX) {
		secs = UINT32_MAX;
	} else {
		secs = (uint32_t)temp;
	}
	return (secs);
}
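
/*
 * Example of the round-up behaviour, taking hz = 100 as an illustrative
 * value (any hz other than 1000 uses the rounded 64-bit path above):
 * sctp_msecs_to_ticks(5) = (5 * 100 + 999) / 1000 = 1 tick, and
 * sctp_ticks_to_msecs(1) = (1 * 1000 + 99) / 100 = 10 msecs, so a positive
 * time never maps to zero ticks and vice versa.
 */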

/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};
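
/*
 * For example, a value of 1400 (already a multiple of 4) falls between the
 * 1004 and 1492 entries, so the lookup helpers below return
 * sctp_get_prev_mtu(1400) == 1004 and sctp_get_next_mtu(1400) == 1492.
 */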

/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4
 * smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	val &= 0xfffffffc;
	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbledygook in the random store, which is what we
	 * want. There is a danger that two callers will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use a random selection process to
	 * get the initial stream sequence number, using RFC1750 as a good
	 * guideline.
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}
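
/*
 * Each call to sctp_select_initial_TSN() consumes four bytes of
 * inp->random_store via the atomically advanced store_at index; once the
 * index would reach SCTP_SIGNATURE_SIZE - 3 it wraps to zero and
 * sctp_fill_random_store() re-keys the store by hashing the endpoint's
 * random numbers with the incremented counter.  sctp_select_a_tag() simply
 * retries until it draws a non-zero value that sctp_is_vtag_good() accepts
 * (when check is set).
 */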

int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t initial_tsn, uint32_t vrf_id,
    uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero.
	 */

	/*
	 * Up front, select what scoping to apply on the addresses I tell my
	 * peer. Not sure what to do with these right now; we will need to
	 * come up with a way to set them. We may need to pass them through
	 * from the caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->rcv_edmid = inp->rcv_edmid;
	asoc->snd_edmid = SCTP_EDMID_NONE;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce =
	    sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	if (override_tag) {
		asoc->init_seq_number = initial_tsn;
	} else {
		asoc->init_seq_number = sctp_select_initial_TSN(&inp->sctp_ep);
	}
	asoc->asconf_seq_out = asoc->init_seq_number;
	asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->sending_seq = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->init_seq_number - 1;
	/* we are optimistic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->init_seq_number - 1;
	asoc->asconf_seq_in = asoc->init_seq_number - 1;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = 0;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

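	/*
	 * congestion_control_module and stream_scheduling_module are
	 * indices into the extern sctp_cc_functions[] and
	 * sctp_ss_functions[] tables declared at the top of this file; the
	 * selected entries are copied by value into the association.
	 */
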
	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	SCTP_TCB_LOCK(stcb);
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * The inbound side must be set to 0xffff. Also NOTE that
		 * when we get the INIT-ACK back (for the INIT sender) we
		 * MUST reduce the count (streamoutcnt), but first check if
		 * we sent to any of the upper streams that were dropped (if
		 * some were). Those that were dropped must be notified to
		 * the upper layer as failed to send.
		 */
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc);
	SCTP_TCB_UNLOCK(stcb);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}
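
/*
 * The growth arithmetic mixes bits and bytes: for example, needing room for
 * 20 more TSNs adds (20 + 7) / 8 = 3 bytes plus the fixed
 * SCTP_MAPPING_ARRAY_INCR slack to the current mapping_array_size.
 */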

static void
sctp_iterator_work(struct sctp_iterator *it)
{
	struct epoch_tracker et;
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

	NET_EPOCH_ENTER(et);
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		NET_EPOCH_EXIT(et);
		return;
	}
select_a_new_ep:
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb != NULL) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_subtract_int(&it->stcb->asoc.refcnt, 1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_subtract_int(&it->stcb->asoc.refcnt, 1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		    ("%s: stcb %p does not belong to inp %p, but inp %p",
		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));
		SCTP_INP_RLOCK_ASSERT(it->inp);
		SCTP_TCB_LOCK_ASSERT(it->stcb);

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
		SCTP_INP_RLOCK_ASSERT(it->inp);
		SCTP_TCB_LOCK_ASSERT(it->stcb);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0) {
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
			SCTP_INP_RLOCK_ASSERT(it->inp);
			SCTP_TCB_LOCK_ASSERT(it->stcb);
		}

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
		/* now let's work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}

static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL, /* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end,
		    NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Freeing if we are stopping or put back on the
			 * addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				sctp_asconf_iterator_end(asc, 0);
			} else {
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */
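
/*
 * Reading the table: a "Yes" means a reference to that object was taken when
 * the timer was started, so sctp_timeout_handler() below both asserts the
 * expected combination for each timer type and releases those references
 * before it returns.
 */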

void
sctp_timeout_handler(void *t)
{
	struct epoch_tracker et;
	struct timeval tv;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
	int type;
	int i, secret;
	bool did_output, released_asoc_reference;

	/*
	 * If inp, stcb or net are not NULL, then references to these were
	 * added when the timer was started, and must be released before
	 * this function returns.
	 */
	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	NET_EPOCH_ENTER(et);
	released_asoc_reference = false;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	KASSERT(tmr->self == NULL || tmr->self == tmr,
	    ("sctp_timeout_handler: tmr->self corrupted"));
	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
	    ("sctp_timeout_handler: invalid timer type %d", tmr->type));
	type = tmr->type;
	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
	    type, stcb, stcb->sctp_ep));
	tmr->stopped_from = 0xa001;
	if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to CLOSED association.\n",
		    type);
		goto out_decr;
	}
	tmr->stopped_from = 0xa002;
	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to not being active.\n",
		    type);
		goto out_decr;
	}

	tmr->stopped_from = 0xa003;
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/*
		 * Release reference so that association can be freed if
		 * necessary below. This is safe now that we have acquired
		 * the lock.
		 */
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		released_asoc_reference = true;
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d handler exiting due to CLOSED association.\n",
			    type);
			goto out;
		}
	} else if (inp != NULL) {
		SCTP_INP_WLOCK(inp);
	} else {
		SCTP_WQ_ADDR_LOCK();
	}

	/* Record in stopped_from which timeout occurred. */
	tmr->stopped_from = type;
	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_SEND:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */
			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		did_output = true;
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * Safeguard: if there are chunks on the sent queue
			 * somewhere but no timers running, something is
			 * wrong, so we start a timer on the first chunk on
			 * the send queue on whatever net it is sent to.
			 */
1848 */ 1849 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1850 if (chk->whoTo != NULL) { 1851 break; 1852 } 1853 } 1854 if (chk != NULL) { 1855 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1856 } 1857 } 1858 break; 1859 case SCTP_TIMER_TYPE_INIT: 1860 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1861 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1862 type, inp, stcb, net)); 1863 SCTP_STAT_INCR(sctps_timoinit); 1864 stcb->asoc.timoinit++; 1865 if (sctp_t1init_timer(inp, stcb, net)) { 1866 /* no need to unlock on tcb its gone */ 1867 goto out_decr; 1868 } 1869 did_output = false; 1870 break; 1871 case SCTP_TIMER_TYPE_RECV: 1872 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1873 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1874 type, inp, stcb, net)); 1875 SCTP_STAT_INCR(sctps_timosack); 1876 stcb->asoc.timosack++; 1877 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1878 #ifdef SCTP_AUDITING_ENABLED 1879 sctp_auditing(4, inp, stcb, NULL); 1880 #endif 1881 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1882 did_output = true; 1883 break; 1884 case SCTP_TIMER_TYPE_SHUTDOWN: 1885 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1886 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1887 type, inp, stcb, net)); 1888 SCTP_STAT_INCR(sctps_timoshutdown); 1889 stcb->asoc.timoshutdown++; 1890 if (sctp_shutdown_timer(inp, stcb, net)) { 1891 /* no need to unlock on tcb its gone */ 1892 goto out_decr; 1893 } 1894 #ifdef SCTP_AUDITING_ENABLED 1895 sctp_auditing(4, inp, stcb, net); 1896 #endif 1897 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1898 did_output = true; 1899 break; 1900 case SCTP_TIMER_TYPE_HEARTBEAT: 1901 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1902 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1903 type, inp, stcb, net)); 1904 SCTP_STAT_INCR(sctps_timoheartbeat); 1905 stcb->asoc.timoheartbeat++; 1906 if (sctp_heartbeat_timer(inp, stcb, net)) { 1907 /* no need to unlock on tcb its gone */ 1908 goto out_decr; 1909 } 1910 #ifdef SCTP_AUDITING_ENABLED 1911 sctp_auditing(4, inp, stcb, net); 1912 #endif 1913 if ((net->dest_state & SCTP_ADDR_NOHB) == 0) { 1914 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1915 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1916 did_output = true; 1917 } else { 1918 did_output = false; 1919 } 1920 break; 1921 case SCTP_TIMER_TYPE_COOKIE: 1922 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1923 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1924 type, inp, stcb, net)); 1925 SCTP_STAT_INCR(sctps_timocookie); 1926 stcb->asoc.timocookie++; 1927 if (sctp_cookie_timer(inp, stcb, net)) { 1928 /* no need to unlock on tcb its gone */ 1929 goto out_decr; 1930 } 1931 #ifdef SCTP_AUDITING_ENABLED 1932 sctp_auditing(4, inp, stcb, net); 1933 #endif 1934 /* 1935 * We consider T3 and Cookie timer pretty much the same with 1936 * respect to where from in chunk_output. 
1937 */ 1938 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1939 did_output = true; 1940 break; 1941 case SCTP_TIMER_TYPE_NEWCOOKIE: 1942 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1943 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1944 type, inp, stcb, net)); 1945 SCTP_STAT_INCR(sctps_timosecret); 1946 (void)SCTP_GETTIME_TIMEVAL(&tv); 1947 inp->sctp_ep.time_of_secret_change = (unsigned int)tv.tv_sec; 1948 inp->sctp_ep.last_secret_number = 1949 inp->sctp_ep.current_secret_number; 1950 inp->sctp_ep.current_secret_number++; 1951 if (inp->sctp_ep.current_secret_number >= 1952 SCTP_HOW_MANY_SECRETS) { 1953 inp->sctp_ep.current_secret_number = 0; 1954 } 1955 secret = (int)inp->sctp_ep.current_secret_number; 1956 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1957 inp->sctp_ep.secret_key[secret][i] = 1958 sctp_select_initial_TSN(&inp->sctp_ep); 1959 } 1960 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1961 did_output = false; 1962 break; 1963 case SCTP_TIMER_TYPE_PATHMTURAISE: 1964 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1965 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1966 type, inp, stcb, net)); 1967 SCTP_STAT_INCR(sctps_timopathmtu); 1968 sctp_pathmtu_timer(inp, stcb, net); 1969 did_output = false; 1970 break; 1971 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1972 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1973 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1974 type, inp, stcb, net)); 1975 if (sctp_shutdownack_timer(inp, stcb, net)) { 1976 /* no need to unlock on tcb its gone */ 1977 goto out_decr; 1978 } 1979 SCTP_STAT_INCR(sctps_timoshutdownack); 1980 stcb->asoc.timoshutdownack++; 1981 #ifdef SCTP_AUDITING_ENABLED 1982 sctp_auditing(4, inp, stcb, net); 1983 #endif 1984 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1985 did_output = true; 1986 break; 1987 case SCTP_TIMER_TYPE_ASCONF: 1988 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1989 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1990 type, inp, stcb, net)); 1991 SCTP_STAT_INCR(sctps_timoasconf); 1992 if (sctp_asconf_timer(inp, stcb, net)) { 1993 /* no need to unlock on tcb its gone */ 1994 goto out_decr; 1995 } 1996 #ifdef SCTP_AUDITING_ENABLED 1997 sctp_auditing(4, inp, stcb, net); 1998 #endif 1999 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 2000 did_output = true; 2001 break; 2002 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2003 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2004 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2005 type, inp, stcb, net)); 2006 SCTP_STAT_INCR(sctps_timoshutdownguard); 2007 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 2008 "Shutdown guard timer expired"); 2009 sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED); 2010 /* no need to unlock on tcb its gone */ 2011 goto out_decr; 2012 case SCTP_TIMER_TYPE_AUTOCLOSE: 2013 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2014 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2015 type, inp, stcb, net)); 2016 SCTP_STAT_INCR(sctps_timoautoclose); 2017 sctp_autoclose_timer(inp, stcb); 2018 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2019 did_output = true; 2020 break; 2021 case SCTP_TIMER_TYPE_STRRESET: 2022 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2023 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2024 type, inp, stcb, net)); 2025 SCTP_STAT_INCR(sctps_timostrmrst); 2026 if 
(sctp_strreset_timer(inp, stcb)) { 2027 /* no need to unlock on tcb its gone */ 2028 goto out_decr; 2029 } 2030 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2031 did_output = true; 2032 break; 2033 case SCTP_TIMER_TYPE_INPKILL: 2034 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2035 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2036 type, inp, stcb, net)); 2037 SCTP_STAT_INCR(sctps_timoinpkill); 2038 /* 2039 * special case, take away our increment since WE are the 2040 * killer 2041 */ 2042 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2043 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2044 SCTP_INP_DECR_REF(inp); 2045 SCTP_INP_WUNLOCK(inp); 2046 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2047 SCTP_CALLED_FROM_INPKILL_TIMER); 2048 inp = NULL; 2049 goto out_decr; 2050 case SCTP_TIMER_TYPE_ASOCKILL: 2051 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2052 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2053 type, inp, stcb, net)); 2054 SCTP_STAT_INCR(sctps_timoassockill); 2055 /* Can we free it yet? */ 2056 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2057 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2058 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2059 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2060 /* 2061 * free asoc, always unlocks (or destroy's) so prevent 2062 * duplicate unlock or unlock of a free mtx :-0 2063 */ 2064 stcb = NULL; 2065 goto out_decr; 2066 case SCTP_TIMER_TYPE_ADDR_WQ: 2067 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2068 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2069 type, inp, stcb, net)); 2070 sctp_handle_addr_wq(); 2071 did_output = true; 2072 break; 2073 case SCTP_TIMER_TYPE_PRIM_DELETED: 2074 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2075 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2076 type, inp, stcb, net)); 2077 SCTP_STAT_INCR(sctps_timodelprim); 2078 sctp_delete_prim_timer(inp, stcb); 2079 did_output = false; 2080 break; 2081 default: 2082 #ifdef INVARIANTS 2083 panic("Unknown timer type %d", type); 2084 #else 2085 goto out; 2086 #endif 2087 } 2088 #ifdef SCTP_AUDITING_ENABLED 2089 sctp_audit_log(0xF1, (uint8_t)type); 2090 if (inp != NULL) 2091 sctp_auditing(5, inp, stcb, net); 2092 #endif 2093 if (did_output && (stcb != NULL)) { 2094 /* 2095 * Now we need to clean up the control chunk chain if an 2096 * ECNE is on it. It must be marked as UNSENT again so next 2097 * call will continue to send it until such time that we get 2098 * a CWR, to remove it. It is, however, less likely that we 2099 * will find a ecn echo on the chain though. 2100 */ 2101 sctp_fix_ecn_echo(&stcb->asoc); 2102 } 2103 out: 2104 if (stcb != NULL) { 2105 SCTP_TCB_UNLOCK(stcb); 2106 } else if (inp != NULL) { 2107 SCTP_INP_WUNLOCK(inp); 2108 } else { 2109 SCTP_WQ_ADDR_UNLOCK(); 2110 } 2111 2112 out_decr: 2113 /* These reference counts were incremented in sctp_timer_start(). */ 2114 if (inp != NULL) { 2115 SCTP_INP_DECR_REF(inp); 2116 } 2117 if ((stcb != NULL) && !released_asoc_reference) { 2118 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2119 } 2120 if (net != NULL) { 2121 sctp_free_remote_addr(net); 2122 } 2123 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2124 CURVNET_RESTORE(); 2125 NET_EPOCH_EXIT(et); 2126 } 2127 2128 /*- 2129 * The following table shows which parameters must be provided 2130 * when calling sctp_timer_start(). For parameters not being 2131 * provided, NULL must be used. 
2132 * 2133 * |Name |inp |stcb|net | 2134 * |-----------------------------|----|----|----| 2135 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2136 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2137 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2138 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2139 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2140 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2141 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2142 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2143 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2144 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2145 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2146 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2147 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2148 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2149 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2150 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2151 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2152 * 2153 */ 2154 2155 void 2156 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2157 struct sctp_nets *net) 2158 { 2159 struct sctp_timer *tmr; 2160 uint32_t to_ticks; 2161 uint32_t rndval, jitter; 2162 2163 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2164 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2165 t_type, stcb, stcb->sctp_ep)); 2166 tmr = NULL; 2167 if (stcb != NULL) { 2168 SCTP_TCB_LOCK_ASSERT(stcb); 2169 } else if (inp != NULL) { 2170 SCTP_INP_WLOCK_ASSERT(inp); 2171 } else { 2172 SCTP_WQ_ADDR_LOCK_ASSERT(); 2173 } 2174 if (stcb != NULL) { 2175 /* 2176 * Don't restart timer on association that's about to be 2177 * killed. 2178 */ 2179 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2180 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2181 SCTPDBG(SCTP_DEBUG_TIMER2, 2182 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2183 t_type, inp, stcb, net); 2184 return; 2185 } 2186 /* Don't restart timer on net that's been removed. */ 2187 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2188 SCTPDBG(SCTP_DEBUG_TIMER2, 2189 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2190 t_type, inp, stcb, net); 2191 return; 2192 } 2193 } 2194 switch (t_type) { 2195 case SCTP_TIMER_TYPE_SEND: 2196 /* Here we use the RTO timer. */ 2197 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2198 #ifdef INVARIANTS 2199 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2200 t_type, inp, stcb, net); 2201 #else 2202 return; 2203 #endif 2204 } 2205 tmr = &net->rxt_timer; 2206 if (net->RTO == 0) { 2207 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2208 } else { 2209 to_ticks = sctp_msecs_to_ticks(net->RTO); 2210 } 2211 break; 2212 case SCTP_TIMER_TYPE_INIT: 2213 /* 2214 * Here we use the INIT timer default usually about 1 2215 * second. 2216 */ 2217 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2218 #ifdef INVARIANTS 2219 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2220 t_type, inp, stcb, net); 2221 #else 2222 return; 2223 #endif 2224 } 2225 tmr = &net->rxt_timer; 2226 if (net->RTO == 0) { 2227 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2228 } else { 2229 to_ticks = sctp_msecs_to_ticks(net->RTO); 2230 } 2231 break; 2232 case SCTP_TIMER_TYPE_RECV: 2233 /* 2234 * Here we use the Delayed-Ack timer value from the inp, 2235 * usually about 200ms. 
2236 */ 2237 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2238 #ifdef INVARIANTS 2239 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2240 t_type, inp, stcb, net); 2241 #else 2242 return; 2243 #endif 2244 } 2245 tmr = &stcb->asoc.dack_timer; 2246 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2247 break; 2248 case SCTP_TIMER_TYPE_SHUTDOWN: 2249 /* Here we use the RTO of the destination. */ 2250 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2251 #ifdef INVARIANTS 2252 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2253 t_type, inp, stcb, net); 2254 #else 2255 return; 2256 #endif 2257 } 2258 tmr = &net->rxt_timer; 2259 if (net->RTO == 0) { 2260 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2261 } else { 2262 to_ticks = sctp_msecs_to_ticks(net->RTO); 2263 } 2264 break; 2265 case SCTP_TIMER_TYPE_HEARTBEAT: 2266 /* 2267 * The net is used here so that we can add in the RTO. Even 2268 * though we use a different timer. We also add the HB timer 2269 * PLUS a random jitter. 2270 */ 2271 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2272 #ifdef INVARIANTS 2273 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2274 t_type, inp, stcb, net); 2275 #else 2276 return; 2277 #endif 2278 } 2279 if ((net->dest_state & SCTP_ADDR_NOHB) && 2280 ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) { 2281 SCTPDBG(SCTP_DEBUG_TIMER2, 2282 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2283 t_type, inp, stcb, net); 2284 return; 2285 } 2286 tmr = &net->hb_timer; 2287 if (net->RTO == 0) { 2288 to_ticks = stcb->asoc.initial_rto; 2289 } else { 2290 to_ticks = net->RTO; 2291 } 2292 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2293 jitter = rndval % to_ticks; 2294 if (to_ticks > 1) { 2295 to_ticks >>= 1; 2296 } 2297 if (jitter < (UINT32_MAX - to_ticks)) { 2298 to_ticks += jitter; 2299 } else { 2300 to_ticks = UINT32_MAX; 2301 } 2302 if (!((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2303 (net->dest_state & SCTP_ADDR_REACHABLE)) && 2304 ((net->dest_state & SCTP_ADDR_PF) == 0)) { 2305 if (net->heart_beat_delay < (UINT32_MAX - to_ticks)) { 2306 to_ticks += net->heart_beat_delay; 2307 } else { 2308 to_ticks = UINT32_MAX; 2309 } 2310 } 2311 /* 2312 * Now we must convert the to_ticks that are now in ms to 2313 * ticks. 2314 */ 2315 to_ticks = sctp_msecs_to_ticks(to_ticks); 2316 break; 2317 case SCTP_TIMER_TYPE_COOKIE: 2318 /* 2319 * Here we can use the RTO timer from the network since one 2320 * RTT was complete. If a retransmission happened then we 2321 * will be using the RTO initial value. 2322 */ 2323 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2324 #ifdef INVARIANTS 2325 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2326 t_type, inp, stcb, net); 2327 #else 2328 return; 2329 #endif 2330 } 2331 tmr = &net->rxt_timer; 2332 if (net->RTO == 0) { 2333 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2334 } else { 2335 to_ticks = sctp_msecs_to_ticks(net->RTO); 2336 } 2337 break; 2338 case SCTP_TIMER_TYPE_NEWCOOKIE: 2339 /* 2340 * Nothing needed but the endpoint here usually about 60 2341 * minutes. 
2342 */ 2343 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2344 #ifdef INVARIANTS 2345 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2346 t_type, inp, stcb, net); 2347 #else 2348 return; 2349 #endif 2350 } 2351 tmr = &inp->sctp_ep.signature_change; 2352 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2353 break; 2354 case SCTP_TIMER_TYPE_PATHMTURAISE: 2355 /* 2356 * Here we use the value found in the EP for PMTUD, usually 2357 * about 10 minutes. 2358 */ 2359 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2360 #ifdef INVARIANTS 2361 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2362 t_type, inp, stcb, net); 2363 #else 2364 return; 2365 #endif 2366 } 2367 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2368 SCTPDBG(SCTP_DEBUG_TIMER2, 2369 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2370 t_type, inp, stcb, net); 2371 return; 2372 } 2373 tmr = &net->pmtu_timer; 2374 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2375 break; 2376 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2377 /* Here we use the RTO of the destination. */ 2378 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2379 #ifdef INVARIANTS 2380 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2381 t_type, inp, stcb, net); 2382 #else 2383 return; 2384 #endif 2385 } 2386 tmr = &net->rxt_timer; 2387 if (net->RTO == 0) { 2388 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2389 } else { 2390 to_ticks = sctp_msecs_to_ticks(net->RTO); 2391 } 2392 break; 2393 case SCTP_TIMER_TYPE_ASCONF: 2394 /* 2395 * Here the timer comes from the stcb but its value is from 2396 * the net's RTO. 2397 */ 2398 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2399 #ifdef INVARIANTS 2400 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2401 t_type, inp, stcb, net); 2402 #else 2403 return; 2404 #endif 2405 } 2406 tmr = &stcb->asoc.asconf_timer; 2407 if (net->RTO == 0) { 2408 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2409 } else { 2410 to_ticks = sctp_msecs_to_ticks(net->RTO); 2411 } 2412 break; 2413 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2414 /* 2415 * Here we use the endpoints shutdown guard timer usually 2416 * about 3 minutes. 2417 */ 2418 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2419 #ifdef INVARIANTS 2420 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2421 t_type, inp, stcb, net); 2422 #else 2423 return; 2424 #endif 2425 } 2426 tmr = &stcb->asoc.shut_guard_timer; 2427 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2428 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2429 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2430 } else { 2431 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2432 } 2433 } else { 2434 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2435 } 2436 break; 2437 case SCTP_TIMER_TYPE_AUTOCLOSE: 2438 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2439 #ifdef INVARIANTS 2440 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2441 t_type, inp, stcb, net); 2442 #else 2443 return; 2444 #endif 2445 } 2446 tmr = &stcb->asoc.autoclose_timer; 2447 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2448 break; 2449 case SCTP_TIMER_TYPE_STRRESET: 2450 /* 2451 * Here the timer comes from the stcb but its value is from 2452 * the net's RTO. 
2453 */ 2454 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2455 #ifdef INVARIANTS 2456 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2457 t_type, inp, stcb, net); 2458 #else 2459 return; 2460 #endif 2461 } 2462 tmr = &stcb->asoc.strreset_timer; 2463 if (net->RTO == 0) { 2464 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2465 } else { 2466 to_ticks = sctp_msecs_to_ticks(net->RTO); 2467 } 2468 break; 2469 case SCTP_TIMER_TYPE_INPKILL: 2470 /* 2471 * The inp is setup to die. We re-use the signature_change 2472 * timer since that has stopped and we are in the GONE 2473 * state. 2474 */ 2475 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2476 #ifdef INVARIANTS 2477 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2478 t_type, inp, stcb, net); 2479 #else 2480 return; 2481 #endif 2482 } 2483 tmr = &inp->sctp_ep.signature_change; 2484 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2485 break; 2486 case SCTP_TIMER_TYPE_ASOCKILL: 2487 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2488 #ifdef INVARIANTS 2489 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2490 t_type, inp, stcb, net); 2491 #else 2492 return; 2493 #endif 2494 } 2495 tmr = &stcb->asoc.strreset_timer; 2496 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2497 break; 2498 case SCTP_TIMER_TYPE_ADDR_WQ: 2499 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2500 #ifdef INVARIANTS 2501 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2502 t_type, inp, stcb, net); 2503 #else 2504 return; 2505 #endif 2506 } 2507 /* Only 1 tick away :-) */ 2508 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2509 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2510 break; 2511 case SCTP_TIMER_TYPE_PRIM_DELETED: 2512 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2513 #ifdef INVARIANTS 2514 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2515 t_type, inp, stcb, net); 2516 #else 2517 return; 2518 #endif 2519 } 2520 tmr = &stcb->asoc.delete_prim_timer; 2521 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2522 break; 2523 default: 2524 #ifdef INVARIANTS 2525 panic("Unknown timer type %d", t_type); 2526 #else 2527 return; 2528 #endif 2529 } 2530 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2531 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2532 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2533 /* 2534 * We do NOT allow you to have it already running. If it is, 2535 * we leave the current one up unchanged. 2536 */ 2537 SCTPDBG(SCTP_DEBUG_TIMER2, 2538 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2539 t_type, inp, stcb, net); 2540 return; 2541 } 2542 /* At this point we can proceed. */ 2543 if (t_type == SCTP_TIMER_TYPE_SEND) { 2544 stcb->asoc.num_send_timers_up++; 2545 } 2546 tmr->stopped_from = 0; 2547 tmr->type = t_type; 2548 tmr->ep = (void *)inp; 2549 tmr->tcb = (void *)stcb; 2550 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2551 tmr->net = NULL; 2552 } else { 2553 tmr->net = (void *)net; 2554 } 2555 tmr->self = (void *)tmr; 2556 tmr->vnet = (void *)curvnet; 2557 tmr->ticks = sctp_get_tick_count(); 2558 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2559 SCTPDBG(SCTP_DEBUG_TIMER2, 2560 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2561 t_type, to_ticks, inp, stcb, net); 2562 /* 2563 * If this is a newly scheduled callout, as opposed to a 2564 * rescheduled one, increment relevant reference counts. 
2565 */ 2566 if (tmr->ep != NULL) { 2567 SCTP_INP_INCR_REF(inp); 2568 } 2569 if (tmr->tcb != NULL) { 2570 atomic_add_int(&stcb->asoc.refcnt, 1); 2571 } 2572 if (tmr->net != NULL) { 2573 atomic_add_int(&net->ref_count, 1); 2574 } 2575 } else { 2576 /* 2577 * This should not happen, since we checked for pending 2578 * above. 2579 */ 2580 SCTPDBG(SCTP_DEBUG_TIMER2, 2581 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2582 t_type, to_ticks, inp, stcb, net); 2583 } 2584 return; 2585 } 2586 2587 /*- 2588 * The following table shows which parameters must be provided 2589 * when calling sctp_timer_stop(). For parameters not being 2590 * provided, NULL must be used. 2591 * 2592 * |Name |inp |stcb|net | 2593 * |-----------------------------|----|----|----| 2594 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2595 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2596 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2597 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2598 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2599 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2600 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2601 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2602 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2603 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2604 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2605 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2606 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2607 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2608 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2609 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2610 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2611 * 2612 */ 2613 2614 void 2615 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2616 struct sctp_nets *net, uint32_t from) 2617 { 2618 struct sctp_timer *tmr; 2619 2620 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2621 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2622 t_type, stcb, stcb->sctp_ep)); 2623 if (stcb != NULL) { 2624 SCTP_TCB_LOCK_ASSERT(stcb); 2625 } else if (inp != NULL) { 2626 SCTP_INP_WLOCK_ASSERT(inp); 2627 } else { 2628 SCTP_WQ_ADDR_LOCK_ASSERT(); 2629 } 2630 tmr = NULL; 2631 switch (t_type) { 2632 case SCTP_TIMER_TYPE_SEND: 2633 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2634 #ifdef INVARIANTS 2635 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2636 t_type, inp, stcb, net); 2637 #else 2638 return; 2639 #endif 2640 } 2641 tmr = &net->rxt_timer; 2642 break; 2643 case SCTP_TIMER_TYPE_INIT: 2644 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2645 #ifdef INVARIANTS 2646 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2647 t_type, inp, stcb, net); 2648 #else 2649 return; 2650 #endif 2651 } 2652 tmr = &net->rxt_timer; 2653 break; 2654 case SCTP_TIMER_TYPE_RECV: 2655 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2656 #ifdef INVARIANTS 2657 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2658 t_type, inp, stcb, net); 2659 #else 2660 return; 2661 #endif 2662 } 2663 tmr = &stcb->asoc.dack_timer; 2664 break; 2665 case SCTP_TIMER_TYPE_SHUTDOWN: 2666 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2667 #ifdef INVARIANTS 2668 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2669 t_type, inp, stcb, net); 2670 #else 2671 return; 2672 #endif 2673 } 2674 tmr = &net->rxt_timer; 2675 break; 2676 case SCTP_TIMER_TYPE_HEARTBEAT: 2677 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2678 #ifdef INVARIANTS 2679 panic("sctp_timer_stop of type %d: inp = 
%p, stcb = %p, net = %p", 2680 t_type, inp, stcb, net); 2681 #else 2682 return; 2683 #endif 2684 } 2685 tmr = &net->hb_timer; 2686 break; 2687 case SCTP_TIMER_TYPE_COOKIE: 2688 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2689 #ifdef INVARIANTS 2690 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2691 t_type, inp, stcb, net); 2692 #else 2693 return; 2694 #endif 2695 } 2696 tmr = &net->rxt_timer; 2697 break; 2698 case SCTP_TIMER_TYPE_NEWCOOKIE: 2699 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2700 #ifdef INVARIANTS 2701 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2702 t_type, inp, stcb, net); 2703 #else 2704 return; 2705 #endif 2706 } 2707 tmr = &inp->sctp_ep.signature_change; 2708 break; 2709 case SCTP_TIMER_TYPE_PATHMTURAISE: 2710 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2711 #ifdef INVARIANTS 2712 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2713 t_type, inp, stcb, net); 2714 #else 2715 return; 2716 #endif 2717 } 2718 tmr = &net->pmtu_timer; 2719 break; 2720 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2721 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2722 #ifdef INVARIANTS 2723 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2724 t_type, inp, stcb, net); 2725 #else 2726 return; 2727 #endif 2728 } 2729 tmr = &net->rxt_timer; 2730 break; 2731 case SCTP_TIMER_TYPE_ASCONF: 2732 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2733 #ifdef INVARIANTS 2734 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2735 t_type, inp, stcb, net); 2736 #else 2737 return; 2738 #endif 2739 } 2740 tmr = &stcb->asoc.asconf_timer; 2741 break; 2742 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2743 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2744 #ifdef INVARIANTS 2745 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2746 t_type, inp, stcb, net); 2747 #else 2748 return; 2749 #endif 2750 } 2751 tmr = &stcb->asoc.shut_guard_timer; 2752 break; 2753 case SCTP_TIMER_TYPE_AUTOCLOSE: 2754 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2755 #ifdef INVARIANTS 2756 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2757 t_type, inp, stcb, net); 2758 #else 2759 return; 2760 #endif 2761 } 2762 tmr = &stcb->asoc.autoclose_timer; 2763 break; 2764 case SCTP_TIMER_TYPE_STRRESET: 2765 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2766 #ifdef INVARIANTS 2767 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2768 t_type, inp, stcb, net); 2769 #else 2770 return; 2771 #endif 2772 } 2773 tmr = &stcb->asoc.strreset_timer; 2774 break; 2775 case SCTP_TIMER_TYPE_INPKILL: 2776 /* 2777 * The inp is setup to die. We re-use the signature_change 2778 * timer since that has stopped and we are in the GONE 2779 * state. 
2780 */ 2781 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2782 #ifdef INVARIANTS 2783 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2784 t_type, inp, stcb, net); 2785 #else 2786 return; 2787 #endif 2788 } 2789 tmr = &inp->sctp_ep.signature_change; 2790 break; 2791 case SCTP_TIMER_TYPE_ASOCKILL: 2792 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2793 #ifdef INVARIANTS 2794 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2795 t_type, inp, stcb, net); 2796 #else 2797 return; 2798 #endif 2799 } 2800 tmr = &stcb->asoc.strreset_timer; 2801 break; 2802 case SCTP_TIMER_TYPE_ADDR_WQ: 2803 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2804 #ifdef INVARIANTS 2805 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2806 t_type, inp, stcb, net); 2807 #else 2808 return; 2809 #endif 2810 } 2811 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2812 break; 2813 case SCTP_TIMER_TYPE_PRIM_DELETED: 2814 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2815 #ifdef INVARIANTS 2816 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2817 t_type, inp, stcb, net); 2818 #else 2819 return; 2820 #endif 2821 } 2822 tmr = &stcb->asoc.delete_prim_timer; 2823 break; 2824 default: 2825 #ifdef INVARIANTS 2826 panic("Unknown timer type %d", t_type); 2827 #else 2828 return; 2829 #endif 2830 } 2831 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2832 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2833 (tmr->type != t_type)) { 2834 /* 2835 * Ok we have a timer that is under joint use. Cookie timer 2836 * per chance with the SEND timer. We therefore are NOT 2837 * running the timer that the caller wants stopped. So just 2838 * return. 2839 */ 2840 SCTPDBG(SCTP_DEBUG_TIMER2, 2841 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2842 t_type, inp, stcb, net); 2843 return; 2844 } 2845 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2846 stcb->asoc.num_send_timers_up--; 2847 if (stcb->asoc.num_send_timers_up < 0) { 2848 stcb->asoc.num_send_timers_up = 0; 2849 } 2850 } 2851 tmr->self = NULL; 2852 tmr->stopped_from = from; 2853 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2854 KASSERT(tmr->ep == inp, 2855 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2856 t_type, inp, tmr->ep)); 2857 KASSERT(tmr->tcb == stcb, 2858 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2859 t_type, stcb, tmr->tcb)); 2860 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2861 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2862 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2863 t_type, net, tmr->net)); 2864 SCTPDBG(SCTP_DEBUG_TIMER2, 2865 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2866 t_type, inp, stcb, net); 2867 /* 2868 * If the timer was actually stopped, decrement reference 2869 * counts that were incremented in sctp_timer_start(). 2870 */ 2871 if (tmr->ep != NULL) { 2872 tmr->ep = NULL; 2873 SCTP_INP_DECR_REF(inp); 2874 } 2875 if (tmr->tcb != NULL) { 2876 tmr->tcb = NULL; 2877 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2878 } 2879 if (tmr->net != NULL) { 2880 struct sctp_nets *tmr_net; 2881 2882 /* 2883 * Can't use net, since it doesn't work for 2884 * SCTP_TIMER_TYPE_ASCONF. 
2885 */ 2886 tmr_net = tmr->net; 2887 tmr->net = NULL; 2888 sctp_free_remote_addr(tmr_net); 2889 } 2890 } else { 2891 SCTPDBG(SCTP_DEBUG_TIMER2, 2892 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2893 t_type, inp, stcb, net); 2894 } 2895 return; 2896 } 2897 2898 uint32_t 2899 sctp_calculate_len(struct mbuf *m) 2900 { 2901 struct mbuf *at; 2902 uint32_t tlen; 2903 2904 tlen = 0; 2905 for (at = m; at != NULL; at = SCTP_BUF_NEXT(at)) { 2906 tlen += SCTP_BUF_LEN(at); 2907 } 2908 return (tlen); 2909 } 2910 2911 /* 2912 * Given an association and starting time of the current RTT period, update 2913 * RTO in number of msecs. net should point to the current network. 2914 * Return 1, if an RTO update was performed, return 0 if no update was 2915 * performed due to invalid starting point. 2916 */ 2917 2918 int 2919 sctp_calculate_rto(struct sctp_tcb *stcb, 2920 struct sctp_association *asoc, 2921 struct sctp_nets *net, 2922 struct timeval *old, 2923 int rtt_from_sack) 2924 { 2925 struct timeval now; 2926 uint64_t rtt_us; /* RTT in us */ 2927 int32_t rtt; /* RTT in ms */ 2928 uint32_t new_rto; 2929 int first_measure = 0; 2930 2931 /************************/ 2932 /* 1. calculate new RTT */ 2933 /************************/ 2934 /* get the current time */ 2935 if (stcb->asoc.use_precise_time) { 2936 (void)SCTP_GETPTIME_TIMEVAL(&now); 2937 } else { 2938 (void)SCTP_GETTIME_TIMEVAL(&now); 2939 } 2940 if ((old->tv_sec > now.tv_sec) || 2941 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2942 /* The starting point is in the future. */ 2943 return (0); 2944 } 2945 timevalsub(&now, old); 2946 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2947 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2948 /* The RTT is larger than a sane value. */ 2949 return (0); 2950 } 2951 /* store the current RTT in us */ 2952 net->rtt = rtt_us; 2953 /* compute rtt in ms */ 2954 rtt = (int32_t)(net->rtt / 1000); 2955 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2956 /* 2957 * Tell the CC module that a new update has just occurred 2958 * from a sack 2959 */ 2960 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2961 } 2962 /* 2963 * Do we need to determine the lan? We do this only on sacks i.e. 2964 * RTT being determined from data not non-data (HB/INIT->INITACK). 2965 */ 2966 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2967 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2968 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2969 net->lan_type = SCTP_LAN_INTERNET; 2970 } else { 2971 net->lan_type = SCTP_LAN_LOCAL; 2972 } 2973 } 2974 2975 /***************************/ 2976 /* 2. update RTTVAR & SRTT */ 2977 /***************************/ 2978 /*- 2979 * Compute the scaled average lastsa and the 2980 * scaled variance lastsv as described in van Jacobson 2981 * Paper "Congestion Avoidance and Control", Annex A. 
2982 * 2983 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2984 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2985 */ 2986 if (net->RTO_measured) { 2987 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2988 net->lastsa += rtt; 2989 if (rtt < 0) { 2990 rtt = -rtt; 2991 } 2992 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 2993 net->lastsv += rtt; 2994 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2995 rto_logging(net, SCTP_LOG_RTTVAR); 2996 } 2997 } else { 2998 /* First RTO measurement */ 2999 net->RTO_measured = 1; 3000 first_measure = 1; 3001 net->lastsa = rtt << SCTP_RTT_SHIFT; 3002 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3003 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3004 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3005 } 3006 } 3007 if (net->lastsv == 0) { 3008 net->lastsv = SCTP_CLOCK_GRANULARITY; 3009 } 3010 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3011 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3012 (stcb->asoc.sat_network_lockout == 0)) { 3013 stcb->asoc.sat_network = 1; 3014 } else if ((!first_measure) && stcb->asoc.sat_network) { 3015 stcb->asoc.sat_network = 0; 3016 stcb->asoc.sat_network_lockout = 1; 3017 } 3018 /* bound it, per C6/C7 in Section 5.3.1 */ 3019 if (new_rto < stcb->asoc.minrto) { 3020 new_rto = stcb->asoc.minrto; 3021 } 3022 if (new_rto > stcb->asoc.maxrto) { 3023 new_rto = stcb->asoc.maxrto; 3024 } 3025 net->RTO = new_rto; 3026 return (1); 3027 } 3028 3029 /* 3030 * return a pointer to a contiguous piece of data from the given mbuf chain 3031 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3032 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 3033 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 3034 */ 3035 caddr_t 3036 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3037 { 3038 uint32_t count; 3039 uint8_t *ptr; 3040 3041 ptr = in_ptr; 3042 if ((off < 0) || (len <= 0)) 3043 return (NULL); 3044 3045 /* find the desired start location */ 3046 while ((m != NULL) && (off > 0)) { 3047 if (off < SCTP_BUF_LEN(m)) 3048 break; 3049 off -= SCTP_BUF_LEN(m); 3050 m = SCTP_BUF_NEXT(m); 3051 } 3052 if (m == NULL) 3053 return (NULL); 3054 3055 /* is the current mbuf large enough (eg. contiguous)? */ 3056 if ((SCTP_BUF_LEN(m) - off) >= len) { 3057 return (mtod(m, caddr_t)+off); 3058 } else { 3059 /* else, it spans more than one mbuf, so save a temp copy... */ 3060 while ((m != NULL) && (len > 0)) { 3061 count = min(SCTP_BUF_LEN(m) - off, len); 3062 memcpy(ptr, mtod(m, caddr_t)+off, count); 3063 len -= count; 3064 ptr += count; 3065 off = 0; 3066 m = SCTP_BUF_NEXT(m); 3067 } 3068 if ((m == NULL) && (len > 0)) 3069 return (NULL); 3070 else 3071 return ((caddr_t)in_ptr); 3072 } 3073 } 3074 3075 struct sctp_paramhdr * 3076 sctp_get_next_param(struct mbuf *m, 3077 int offset, 3078 struct sctp_paramhdr *pull, 3079 int pull_limit) 3080 { 3081 /* This just provides a typed signature to Peter's Pull routine */ 3082 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3083 (uint8_t *)pull)); 3084 } 3085 3086 struct mbuf * 3087 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3088 { 3089 struct mbuf *m_last; 3090 caddr_t dp; 3091 3092 if (padlen > 3) { 3093 return (NULL); 3094 } 3095 if (padlen <= M_TRAILINGSPACE(m)) { 3096 /* 3097 * The easy way. 
We hope the majority of the time we hit 3098 * here :) 3099 */ 3100 m_last = m; 3101 } else { 3102 /* Hard way we must grow the mbuf chain */ 3103 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3104 if (m_last == NULL) { 3105 return (NULL); 3106 } 3107 SCTP_BUF_LEN(m_last) = 0; 3108 SCTP_BUF_NEXT(m_last) = NULL; 3109 SCTP_BUF_NEXT(m) = m_last; 3110 } 3111 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3112 SCTP_BUF_LEN(m_last) += padlen; 3113 memset(dp, 0, padlen); 3114 return (m_last); 3115 } 3116 3117 struct mbuf * 3118 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3119 { 3120 /* find the last mbuf in chain and pad it */ 3121 struct mbuf *m_at; 3122 3123 if (last_mbuf != NULL) { 3124 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3125 } else { 3126 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3127 if (SCTP_BUF_NEXT(m_at) == NULL) { 3128 return (sctp_add_pad_tombuf(m_at, padval)); 3129 } 3130 } 3131 } 3132 return (NULL); 3133 } 3134 3135 static void 3136 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3137 uint16_t error, struct sctp_abort_chunk *abort, 3138 bool from_peer, bool timedout, int so_locked) 3139 { 3140 struct mbuf *m_notify; 3141 struct sctp_assoc_change *sac; 3142 struct sctp_queued_to_read *control; 3143 unsigned int notif_len; 3144 uint16_t abort_len; 3145 unsigned int i; 3146 3147 KASSERT(abort == NULL || from_peer, 3148 ("sctp_notify_assoc_change: ABORT chunk provided for local termination")); 3149 KASSERT(!from_peer || !timedout, 3150 ("sctp_notify_assoc_change: timeouts can only be local")); 3151 if (stcb == NULL) { 3152 return; 3153 } 3154 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3155 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3156 if (abort != NULL) { 3157 abort_len = ntohs(abort->ch.chunk_length); 3158 /* 3159 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3160 * contiguous. 3161 */ 3162 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3163 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3164 } 3165 } else { 3166 abort_len = 0; 3167 } 3168 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3169 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3170 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3171 notif_len += abort_len; 3172 } 3173 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3174 if (m_notify == NULL) { 3175 /* Retry with smaller value. 
*/ 3176 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3177 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3178 if (m_notify == NULL) { 3179 goto set_error; 3180 } 3181 } 3182 SCTP_BUF_NEXT(m_notify) = NULL; 3183 sac = mtod(m_notify, struct sctp_assoc_change *); 3184 memset(sac, 0, notif_len); 3185 sac->sac_type = SCTP_ASSOC_CHANGE; 3186 sac->sac_flags = 0; 3187 sac->sac_length = sizeof(struct sctp_assoc_change); 3188 sac->sac_state = state; 3189 sac->sac_error = error; 3190 if (state == SCTP_CANT_STR_ASSOC) { 3191 sac->sac_outbound_streams = 0; 3192 sac->sac_inbound_streams = 0; 3193 } else { 3194 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3195 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3196 } 3197 sac->sac_assoc_id = sctp_get_associd(stcb); 3198 if (notif_len > sizeof(struct sctp_assoc_change)) { 3199 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3200 i = 0; 3201 if (stcb->asoc.prsctp_supported == 1) { 3202 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3203 } 3204 if (stcb->asoc.auth_supported == 1) { 3205 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3206 } 3207 if (stcb->asoc.asconf_supported == 1) { 3208 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3209 } 3210 if (stcb->asoc.idata_supported == 1) { 3211 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3212 } 3213 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3214 if (stcb->asoc.reconfig_supported == 1) { 3215 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3216 } 3217 sac->sac_length += i; 3218 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3219 memcpy(sac->sac_info, abort, abort_len); 3220 sac->sac_length += abort_len; 3221 } 3222 } 3223 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3224 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3225 0, 0, stcb->asoc.context, 0, 0, 0, 3226 m_notify); 3227 if (control != NULL) { 3228 control->length = SCTP_BUF_LEN(m_notify); 3229 control->spec_flags = M_NOTIFICATION; 3230 /* not that we need this */ 3231 control->tail_mbuf = m_notify; 3232 sctp_add_to_readq(stcb->sctp_ep, stcb, 3233 control, 3234 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3235 so_locked); 3236 } else { 3237 sctp_m_freem(m_notify); 3238 } 3239 } 3240 /* 3241 * For 1-to-1 style sockets, we send up an error when an ABORT 3242 * comes in.
3243 */ 3244 set_error: 3245 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3246 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3247 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3248 SOCK_LOCK(stcb->sctp_socket); 3249 if (from_peer) { 3250 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3251 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3252 stcb->sctp_socket->so_error = ECONNREFUSED; 3253 } else { 3254 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3255 stcb->sctp_socket->so_error = ECONNRESET; 3256 } 3257 } else { 3258 if (timedout) { 3259 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3260 stcb->sctp_socket->so_error = ETIMEDOUT; 3261 } else { 3262 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3263 stcb->sctp_socket->so_error = ECONNABORTED; 3264 } 3265 } 3266 SOCK_UNLOCK(stcb->sctp_socket); 3267 } 3268 /* Wake ANY sleepers */ 3269 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3270 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3271 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3272 socantrcvmore(stcb->sctp_socket); 3273 } 3274 sorwakeup(stcb->sctp_socket); 3275 sowwakeup(stcb->sctp_socket); 3276 } 3277 3278 static void 3279 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3280 struct sockaddr *sa, uint32_t error, int so_locked) 3281 { 3282 struct mbuf *m_notify; 3283 struct sctp_paddr_change *spc; 3284 struct sctp_queued_to_read *control; 3285 3286 if ((stcb == NULL) || 3287 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3288 /* event not enabled */ 3289 return; 3290 } 3291 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3292 if (m_notify == NULL) 3293 return; 3294 SCTP_BUF_LEN(m_notify) = 0; 3295 spc = mtod(m_notify, struct sctp_paddr_change *); 3296 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3297 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3298 spc->spc_flags = 0; 3299 spc->spc_length = sizeof(struct sctp_paddr_change); 3300 switch (sa->sa_family) { 3301 #ifdef INET 3302 case AF_INET: 3303 #ifdef INET6 3304 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3305 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3306 (struct sockaddr_in6 *)&spc->spc_aaddr); 3307 } else { 3308 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3309 } 3310 #else 3311 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3312 #endif 3313 break; 3314 #endif 3315 #ifdef INET6 3316 case AF_INET6: 3317 { 3318 struct sockaddr_in6 *sin6; 3319 3320 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3321 3322 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3323 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3324 if (sin6->sin6_scope_id == 0) { 3325 /* recover scope_id for user */ 3326 (void)sa6_recoverscope(sin6); 3327 } else { 3328 /* clear embedded scope_id for user */ 3329 in6_clearscope(&sin6->sin6_addr); 3330 } 3331 } 3332 break; 3333 } 3334 #endif 3335 default: 3336 /* TSNH */ 3337 break; 3338 } 3339 spc->spc_state = state; 3340 spc->spc_error = error; 3341 spc->spc_assoc_id = sctp_get_associd(stcb); 3342 3343 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3344 SCTP_BUF_NEXT(m_notify) = NULL; 3345 3346 /* append to socket */ 3347 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3348 0, 0, stcb->asoc.context, 0, 0, 0, 3349 m_notify); 3350 if 
(control == NULL) { 3351 /* no memory */ 3352 sctp_m_freem(m_notify); 3353 return; 3354 } 3355 control->length = SCTP_BUF_LEN(m_notify); 3356 control->spec_flags = M_NOTIFICATION; 3357 /* not that we need this */ 3358 control->tail_mbuf = m_notify; 3359 sctp_add_to_readq(stcb->sctp_ep, stcb, 3360 control, 3361 &stcb->sctp_socket->so_rcv, 1, 3362 SCTP_READ_LOCK_NOT_HELD, 3363 so_locked); 3364 } 3365 3366 static void 3367 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3368 struct sctp_tmit_chunk *chk, int so_locked) 3369 { 3370 struct mbuf *m_notify; 3371 struct sctp_send_failed *ssf; 3372 struct sctp_send_failed_event *ssfe; 3373 struct sctp_queued_to_read *control; 3374 struct sctp_chunkhdr *chkhdr; 3375 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3376 3377 if ((stcb == NULL) || 3378 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3379 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3380 /* event not enabled */ 3381 return; 3382 } 3383 3384 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3385 notifhdr_len = sizeof(struct sctp_send_failed_event); 3386 } else { 3387 notifhdr_len = sizeof(struct sctp_send_failed); 3388 } 3389 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3390 if (m_notify == NULL) 3391 /* no space left */ 3392 return; 3393 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3394 if (stcb->asoc.idata_supported) { 3395 chkhdr_len = sizeof(struct sctp_idata_chunk); 3396 } else { 3397 chkhdr_len = sizeof(struct sctp_data_chunk); 3398 } 3399 /* Use some defaults in case we can't access the chunk header */ 3400 if (chk->send_size >= chkhdr_len) { 3401 payload_len = chk->send_size - chkhdr_len; 3402 } else { 3403 payload_len = 0; 3404 } 3405 padding_len = 0; 3406 if (chk->data != NULL) { 3407 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3408 if (chkhdr != NULL) { 3409 chk_len = ntohs(chkhdr->chunk_length); 3410 if ((chk_len >= chkhdr_len) && 3411 (chk->send_size >= chk_len) && 3412 (chk->send_size - chk_len < 4)) { 3413 padding_len = chk->send_size - chk_len; 3414 payload_len = chk->send_size - chkhdr_len - padding_len; 3415 } 3416 } 3417 } 3418 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3419 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3420 memset(ssfe, 0, notifhdr_len); 3421 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3422 if (sent) { 3423 ssfe->ssfe_flags = SCTP_DATA_SENT; 3424 } else { 3425 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3426 } 3427 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3428 ssfe->ssfe_error = error; 3429 /* not exactly what the user sent in, but should be close :) */ 3430 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3431 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3432 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3433 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3434 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3435 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3436 } else { 3437 ssf = mtod(m_notify, struct sctp_send_failed *); 3438 memset(ssf, 0, notifhdr_len); 3439 ssf->ssf_type = SCTP_SEND_FAILED; 3440 if (sent) { 3441 ssf->ssf_flags = SCTP_DATA_SENT; 3442 } else { 3443 ssf->ssf_flags = SCTP_DATA_UNSENT; 3444 } 3445 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3446 ssf->ssf_error = error; 3447 /* not exactly what the user sent in, but should be close :) */ 3448 
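	/*
	 * This branch fills the older sctp_send_failed layout; the newer
	 * SCTP_SEND_FAILED_EVENT form is built in the branch above when
	 * SCTP_PCB_FLAGS_RECVNSENDFAILEVNT is enabled. Since sinfo_ssn is
	 * only 16 bits wide, it carries the low 16 bits of the 32-bit
	 * message identifier kept in chk->rec.data.mid.
	 */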
ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3449 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3450 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3451 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3452 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3453 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3454 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3455 } 3456 if (chk->data != NULL) { 3457 /* Trim off the sctp chunk header (it should be there) */ 3458 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3459 m_adj(chk->data, chkhdr_len); 3460 m_adj(chk->data, -padding_len); 3461 sctp_mbuf_crush(chk->data); 3462 chk->send_size -= (chkhdr_len + padding_len); 3463 } 3464 } 3465 SCTP_BUF_NEXT(m_notify) = chk->data; 3466 /* Steal off the mbuf */ 3467 chk->data = NULL; 3468 /* 3469 * For this case, we check the actual socket buffer, since the assoc 3470 * is going away we don't want to overfill the socket buffer for a 3471 * non-reader 3472 */ 3473 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3474 sctp_m_freem(m_notify); 3475 return; 3476 } 3477 /* append to socket */ 3478 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3479 0, 0, stcb->asoc.context, 0, 0, 0, 3480 m_notify); 3481 if (control == NULL) { 3482 /* no memory */ 3483 sctp_m_freem(m_notify); 3484 return; 3485 } 3486 control->length = SCTP_BUF_LEN(m_notify); 3487 control->spec_flags = M_NOTIFICATION; 3488 /* not that we need this */ 3489 control->tail_mbuf = m_notify; 3490 sctp_add_to_readq(stcb->sctp_ep, stcb, 3491 control, 3492 &stcb->sctp_socket->so_rcv, 1, 3493 SCTP_READ_LOCK_NOT_HELD, 3494 so_locked); 3495 } 3496 3497 static void 3498 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3499 struct sctp_stream_queue_pending *sp, int so_locked) 3500 { 3501 struct mbuf *m_notify; 3502 struct sctp_send_failed *ssf; 3503 struct sctp_send_failed_event *ssfe; 3504 struct sctp_queued_to_read *control; 3505 int notifhdr_len; 3506 3507 if ((stcb == NULL) || 3508 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3509 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3510 /* event not enabled */ 3511 return; 3512 } 3513 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3514 notifhdr_len = sizeof(struct sctp_send_failed_event); 3515 } else { 3516 notifhdr_len = sizeof(struct sctp_send_failed); 3517 } 3518 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3519 if (m_notify == NULL) { 3520 /* no space left */ 3521 return; 3522 } 3523 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3524 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3525 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3526 memset(ssfe, 0, notifhdr_len); 3527 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3528 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3529 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3530 ssfe->ssfe_error = error; 3531 /* not exactly what the user sent in, but should be close :) */ 3532 ssfe->ssfe_info.snd_sid = sp->sid; 3533 if (sp->some_taken) { 3534 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3535 } else { 3536 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3537 } 3538 ssfe->ssfe_info.snd_ppid = sp->ppid; 3539 ssfe->ssfe_info.snd_context = sp->context; 3540 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3541 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3542 } else { 
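		/*
		 * Older sctp_send_failed layout for data that was never put
		 * on the wire: the flags are SCTP_DATA_UNSENT and sinfo_ssn
		 * stays 0, since no message identifier was ever assigned to
		 * this queued item.
		 */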
3543 ssf = mtod(m_notify, struct sctp_send_failed *); 3544 memset(ssf, 0, notifhdr_len); 3545 ssf->ssf_type = SCTP_SEND_FAILED; 3546 ssf->ssf_flags = SCTP_DATA_UNSENT; 3547 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3548 ssf->ssf_error = error; 3549 /* not exactly what the user sent in, but should be close :) */ 3550 ssf->ssf_info.sinfo_stream = sp->sid; 3551 ssf->ssf_info.sinfo_ssn = 0; 3552 if (sp->some_taken) { 3553 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3554 } else { 3555 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3556 } 3557 ssf->ssf_info.sinfo_ppid = sp->ppid; 3558 ssf->ssf_info.sinfo_context = sp->context; 3559 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3560 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3561 } 3562 SCTP_BUF_NEXT(m_notify) = sp->data; 3563 3564 /* Steal off the mbuf */ 3565 sp->data = NULL; 3566 /* 3567 * For this case, we check the actual socket buffer, since the assoc 3568 * is going away we don't want to overfill the socket buffer for a 3569 * non-reader 3570 */ 3571 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3572 sctp_m_freem(m_notify); 3573 return; 3574 } 3575 /* append to socket */ 3576 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3577 0, 0, stcb->asoc.context, 0, 0, 0, 3578 m_notify); 3579 if (control == NULL) { 3580 /* no memory */ 3581 sctp_m_freem(m_notify); 3582 return; 3583 } 3584 control->length = SCTP_BUF_LEN(m_notify); 3585 control->spec_flags = M_NOTIFICATION; 3586 /* not that we need this */ 3587 control->tail_mbuf = m_notify; 3588 sctp_add_to_readq(stcb->sctp_ep, stcb, 3589 control, 3590 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3591 } 3592 3593 static void 3594 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3595 { 3596 struct mbuf *m_notify; 3597 struct sctp_adaptation_event *sai; 3598 struct sctp_queued_to_read *control; 3599 3600 if ((stcb == NULL) || 3601 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3602 /* event not enabled */ 3603 return; 3604 } 3605 3606 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3607 if (m_notify == NULL) 3608 /* no space left */ 3609 return; 3610 SCTP_BUF_LEN(m_notify) = 0; 3611 sai = mtod(m_notify, struct sctp_adaptation_event *); 3612 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3613 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3614 sai->sai_flags = 0; 3615 sai->sai_length = sizeof(struct sctp_adaptation_event); 3616 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3617 sai->sai_assoc_id = sctp_get_associd(stcb); 3618 3619 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3620 SCTP_BUF_NEXT(m_notify) = NULL; 3621 3622 /* append to socket */ 3623 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3624 0, 0, stcb->asoc.context, 0, 0, 0, 3625 m_notify); 3626 if (control == NULL) { 3627 /* no memory */ 3628 sctp_m_freem(m_notify); 3629 return; 3630 } 3631 control->length = SCTP_BUF_LEN(m_notify); 3632 control->spec_flags = M_NOTIFICATION; 3633 /* not that we need this */ 3634 control->tail_mbuf = m_notify; 3635 sctp_add_to_readq(stcb->sctp_ep, stcb, 3636 control, 3637 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3638 } 3639 3640 static void 3641 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3642 struct sctp_queued_to_read *aborted_control, 3643 int so_locked) 3644 { 3645 struct mbuf *m_notify; 
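	/*
	 * These locals are used to build the SCTP_PARTIAL_DELIVERY_EVENT
	 * notification and queue it on the socket receive buffer directly
	 * behind the aborted message (see the TAILQ_INSERT_AFTER below).
	 */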
3646 struct sctp_pdapi_event *pdapi; 3647 struct sctp_queued_to_read *control; 3648 struct sockbuf *sb; 3649 3650 if ((stcb == NULL) || 3651 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3652 /* event not enabled */ 3653 return; 3654 } 3655 3656 KASSERT(aborted_control != NULL, ("aborted_control is NULL")); 3657 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 3658 3659 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3660 if (m_notify == NULL) 3661 /* no space left */ 3662 return; 3663 SCTP_BUF_LEN(m_notify) = 0; 3664 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3665 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3666 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3667 pdapi->pdapi_flags = 0; 3668 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3669 pdapi->pdapi_indication = error; 3670 pdapi->pdapi_stream = aborted_control->sinfo_stream; 3671 pdapi->pdapi_seq = (uint16_t)aborted_control->mid; 3672 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3673 3674 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3675 SCTP_BUF_NEXT(m_notify) = NULL; 3676 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3677 0, 0, stcb->asoc.context, 0, 0, 0, 3678 m_notify); 3679 if (control == NULL) { 3680 /* no memory */ 3681 sctp_m_freem(m_notify); 3682 return; 3683 } 3684 control->length = SCTP_BUF_LEN(m_notify); 3685 control->spec_flags = M_NOTIFICATION; 3686 /* not that we need this */ 3687 control->tail_mbuf = m_notify; 3688 sb = &stcb->sctp_socket->so_rcv; 3689 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3690 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3691 } 3692 sctp_sballoc(stcb, sb, m_notify); 3693 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3694 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3695 } 3696 control->end_added = 1; 3697 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, aborted_control, control, next); 3698 if (stcb->sctp_ep && stcb->sctp_socket) { 3699 /* This should always be the case */ 3700 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3701 } 3702 } 3703 3704 static void 3705 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3706 { 3707 struct mbuf *m_notify; 3708 struct sctp_shutdown_event *sse; 3709 struct sctp_queued_to_read *control; 3710 3711 /* 3712 * For TCP model AND UDP connected sockets we will send an error up 3713 * when an SHUTDOWN completes 3714 */ 3715 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3716 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3717 /* mark socket closed for read/write and wakeup! 
*/ 3718 socantsendmore(stcb->sctp_socket); 3719 } 3720 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3721 /* event not enabled */ 3722 return; 3723 } 3724 3725 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3726 if (m_notify == NULL) 3727 /* no space left */ 3728 return; 3729 sse = mtod(m_notify, struct sctp_shutdown_event *); 3730 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3731 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3732 sse->sse_flags = 0; 3733 sse->sse_length = sizeof(struct sctp_shutdown_event); 3734 sse->sse_assoc_id = sctp_get_associd(stcb); 3735 3736 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3737 SCTP_BUF_NEXT(m_notify) = NULL; 3738 3739 /* append to socket */ 3740 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3741 0, 0, stcb->asoc.context, 0, 0, 0, 3742 m_notify); 3743 if (control == NULL) { 3744 /* no memory */ 3745 sctp_m_freem(m_notify); 3746 return; 3747 } 3748 control->length = SCTP_BUF_LEN(m_notify); 3749 control->spec_flags = M_NOTIFICATION; 3750 /* not that we need this */ 3751 control->tail_mbuf = m_notify; 3752 sctp_add_to_readq(stcb->sctp_ep, stcb, 3753 control, 3754 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3755 } 3756 3757 static void 3758 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3759 int so_locked) 3760 { 3761 struct mbuf *m_notify; 3762 struct sctp_sender_dry_event *event; 3763 struct sctp_queued_to_read *control; 3764 3765 if ((stcb == NULL) || 3766 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3767 /* event not enabled */ 3768 return; 3769 } 3770 3771 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3772 if (m_notify == NULL) { 3773 /* no space left */ 3774 return; 3775 } 3776 SCTP_BUF_LEN(m_notify) = 0; 3777 event = mtod(m_notify, struct sctp_sender_dry_event *); 3778 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3779 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3780 event->sender_dry_flags = 0; 3781 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3782 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3783 3784 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3785 SCTP_BUF_NEXT(m_notify) = NULL; 3786 3787 /* append to socket */ 3788 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3789 0, 0, stcb->asoc.context, 0, 0, 0, 3790 m_notify); 3791 if (control == NULL) { 3792 /* no memory */ 3793 sctp_m_freem(m_notify); 3794 return; 3795 } 3796 control->length = SCTP_BUF_LEN(m_notify); 3797 control->spec_flags = M_NOTIFICATION; 3798 /* not that we need this */ 3799 control->tail_mbuf = m_notify; 3800 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3801 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3802 } 3803 3804 void 3805 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3806 { 3807 struct mbuf *m_notify; 3808 struct sctp_queued_to_read *control; 3809 struct sctp_stream_change_event *stradd; 3810 3811 if ((stcb == NULL) || 3812 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3813 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3814 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3815 /* If the socket is gone we are out of here. 
*/ 3816 return; 3817 } 3818 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT)) { 3819 /* event not enabled */ 3820 return; 3821 } 3822 3823 if ((stcb->asoc.peer_req_out) && flag) { 3824 /* Peer made the request, don't tell the local user */ 3825 stcb->asoc.peer_req_out = 0; 3826 return; 3827 } 3828 stcb->asoc.peer_req_out = 0; 3829 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3830 if (m_notify == NULL) 3831 /* no space left */ 3832 return; 3833 SCTP_BUF_LEN(m_notify) = 0; 3834 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3835 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3836 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3837 stradd->strchange_flags = flag; 3838 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3839 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3840 stradd->strchange_instrms = numberin; 3841 stradd->strchange_outstrms = numberout; 3842 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3843 SCTP_BUF_NEXT(m_notify) = NULL; 3844 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3845 /* no space */ 3846 sctp_m_freem(m_notify); 3847 return; 3848 } 3849 /* append to socket */ 3850 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3851 0, 0, stcb->asoc.context, 0, 0, 0, 3852 m_notify); 3853 if (control == NULL) { 3854 /* no memory */ 3855 sctp_m_freem(m_notify); 3856 return; 3857 } 3858 control->length = SCTP_BUF_LEN(m_notify); 3859 control->spec_flags = M_NOTIFICATION; 3860 /* not that we need this */ 3861 control->tail_mbuf = m_notify; 3862 sctp_add_to_readq(stcb->sctp_ep, stcb, 3863 control, 3864 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3865 } 3866 3867 void 3868 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3869 { 3870 struct mbuf *m_notify; 3871 struct sctp_queued_to_read *control; 3872 struct sctp_assoc_reset_event *strasoc; 3873 3874 if ((stcb == NULL) || 3875 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3876 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3877 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3878 /* If the socket is gone we are out of here. 
*/ 3879 return; 3880 } 3881 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT)) { 3882 /* event not enabled */ 3883 return; 3884 } 3885 3886 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3887 if (m_notify == NULL) 3888 /* no space left */ 3889 return; 3890 SCTP_BUF_LEN(m_notify) = 0; 3891 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3892 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3893 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3894 strasoc->assocreset_flags = flag; 3895 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3896 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3897 strasoc->assocreset_local_tsn = sending_tsn; 3898 strasoc->assocreset_remote_tsn = recv_tsn; 3899 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3900 SCTP_BUF_NEXT(m_notify) = NULL; 3901 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3902 /* no space */ 3903 sctp_m_freem(m_notify); 3904 return; 3905 } 3906 /* append to socket */ 3907 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3908 0, 0, stcb->asoc.context, 0, 0, 0, 3909 m_notify); 3910 if (control == NULL) { 3911 /* no memory */ 3912 sctp_m_freem(m_notify); 3913 return; 3914 } 3915 control->length = SCTP_BUF_LEN(m_notify); 3916 control->spec_flags = M_NOTIFICATION; 3917 /* not that we need this */ 3918 control->tail_mbuf = m_notify; 3919 sctp_add_to_readq(stcb->sctp_ep, stcb, 3920 control, 3921 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3922 } 3923 3924 static void 3925 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3926 int number_entries, uint16_t *list, int flag) 3927 { 3928 struct mbuf *m_notify; 3929 struct sctp_queued_to_read *control; 3930 struct sctp_stream_reset_event *strreset; 3931 int len; 3932 3933 if ((stcb == NULL) || 3934 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3935 /* event not enabled */ 3936 return; 3937 } 3938 3939 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3940 if (m_notify == NULL) 3941 /* no space left */ 3942 return; 3943 SCTP_BUF_LEN(m_notify) = 0; 3944 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3945 if (len > M_TRAILINGSPACE(m_notify)) { 3946 /* never enough room */ 3947 sctp_m_freem(m_notify); 3948 return; 3949 } 3950 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3951 memset(strreset, 0, len); 3952 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3953 strreset->strreset_flags = flag; 3954 strreset->strreset_length = len; 3955 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3956 if (number_entries) { 3957 int i; 3958 3959 for (i = 0; i < number_entries; i++) { 3960 strreset->strreset_stream_list[i] = ntohs(list[i]); 3961 } 3962 } 3963 SCTP_BUF_LEN(m_notify) = len; 3964 SCTP_BUF_NEXT(m_notify) = NULL; 3965 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3966 /* no space */ 3967 sctp_m_freem(m_notify); 3968 return; 3969 } 3970 /* append to socket */ 3971 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3972 0, 0, stcb->asoc.context, 0, 0, 0, 3973 m_notify); 3974 if (control == NULL) { 3975 /* no memory */ 3976 sctp_m_freem(m_notify); 3977 return; 3978 } 3979 control->length = SCTP_BUF_LEN(m_notify); 3980 control->spec_flags = M_NOTIFICATION; 3981 /* not that we need this */ 3982 
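/*
 * Since spec_flags is set to M_NOTIFICATION above, the socket layer
 * reports this readq entry to the application with MSG_NOTIFICATION set
 * in msg_flags.  A minimal userland sketch of picking the event back up
 * (hypothetical descriptor "fd", buffer size and handler):
 *
 *	char buf[2048];
 *	struct iovec iov = { buf, sizeof(buf) };
 *	struct msghdr msg;
 *	struct sctp_stream_reset_event *strrst;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	if (recvmsg(fd, &msg, 0) > 0 &&
 *	    (msg.msg_flags & MSG_NOTIFICATION) != 0) {
 *		strrst = (struct sctp_stream_reset_event *)buf;
 *		if (strrst->strreset_type == SCTP_STREAM_RESET_EVENT)
 *			handle_reset(strrst);
 *	}
 */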
control->tail_mbuf = m_notify; 3983 sctp_add_to_readq(stcb->sctp_ep, stcb, 3984 control, 3985 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3986 } 3987 3988 static void 3989 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3990 { 3991 struct mbuf *m_notify; 3992 struct sctp_remote_error *sre; 3993 struct sctp_queued_to_read *control; 3994 unsigned int notif_len; 3995 uint16_t chunk_len; 3996 3997 if ((stcb == NULL) || 3998 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3999 return; 4000 } 4001 if (chunk != NULL) { 4002 chunk_len = ntohs(chunk->ch.chunk_length); 4003 /* 4004 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 4005 * contiguous. 4006 */ 4007 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 4008 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4009 } 4010 } else { 4011 chunk_len = 0; 4012 } 4013 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4014 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4015 if (m_notify == NULL) { 4016 /* Retry with smaller value. */ 4017 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4018 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4019 if (m_notify == NULL) { 4020 return; 4021 } 4022 } 4023 SCTP_BUF_NEXT(m_notify) = NULL; 4024 sre = mtod(m_notify, struct sctp_remote_error *); 4025 memset(sre, 0, notif_len); 4026 sre->sre_type = SCTP_REMOTE_ERROR; 4027 sre->sre_flags = 0; 4028 sre->sre_length = sizeof(struct sctp_remote_error); 4029 sre->sre_error = error; 4030 sre->sre_assoc_id = sctp_get_associd(stcb); 4031 if (notif_len > sizeof(struct sctp_remote_error)) { 4032 memcpy(sre->sre_data, chunk, chunk_len); 4033 sre->sre_length += chunk_len; 4034 } 4035 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4036 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4037 0, 0, stcb->asoc.context, 0, 0, 0, 4038 m_notify); 4039 if (control != NULL) { 4040 control->length = SCTP_BUF_LEN(m_notify); 4041 control->spec_flags = M_NOTIFICATION; 4042 /* not that we need this */ 4043 control->tail_mbuf = m_notify; 4044 sctp_add_to_readq(stcb->sctp_ep, stcb, 4045 control, 4046 &stcb->sctp_socket->so_rcv, 1, 4047 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4048 } else { 4049 sctp_m_freem(m_notify); 4050 } 4051 } 4052 4053 void 4054 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4055 uint32_t error, void *data, int so_locked) 4056 { 4057 struct sctp_inpcb *inp; 4058 struct sctp_nets *net; 4059 4060 KASSERT(stcb != NULL, ("stcb == NULL")); 4061 SCTP_TCB_LOCK_ASSERT(stcb); 4062 4063 inp = stcb->sctp_ep; 4064 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4065 return; 4066 } 4067 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4068 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4069 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4070 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4071 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4072 /* Don't report these in front states */ 4073 return; 4074 } 4075 } 4076 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4077 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4078 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) { 4079 return; 4080 } 4081 4082 switch (notification) { 4083 case SCTP_NOTIFY_ASSOC_UP: 4084 if (stcb->asoc.assoc_up_sent == 0) { 4085 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, false, false, so_locked); 4086 stcb->asoc.assoc_up_sent = 1; 4087 } 4088 if 
(stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4089 sctp_notify_adaptation_layer(stcb); 4090 } 4091 if (stcb->asoc.auth_supported == 0) { 4092 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4093 NULL, so_locked); 4094 } 4095 break; 4096 case SCTP_NOTIFY_ASSOC_DOWN: 4097 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, false, false, so_locked); 4098 break; 4099 case SCTP_NOTIFY_INTERFACE_DOWN: 4100 net = (struct sctp_nets *)data; 4101 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4102 &net->ro._l_addr.sa, error, so_locked); 4103 break; 4104 case SCTP_NOTIFY_INTERFACE_UP: 4105 net = (struct sctp_nets *)data; 4106 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4107 &net->ro._l_addr.sa, error, so_locked); 4108 break; 4109 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4110 net = (struct sctp_nets *)data; 4111 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4112 &net->ro._l_addr.sa, error, so_locked); 4113 break; 4114 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4115 sctp_notify_send_failed2(stcb, error, 4116 (struct sctp_stream_queue_pending *)data, so_locked); 4117 break; 4118 case SCTP_NOTIFY_SENT_DG_FAIL: 4119 sctp_notify_send_failed(stcb, 1, error, 4120 (struct sctp_tmit_chunk *)data, so_locked); 4121 break; 4122 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4123 sctp_notify_send_failed(stcb, 0, error, 4124 (struct sctp_tmit_chunk *)data, so_locked); 4125 break; 4126 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4127 sctp_notify_partial_delivery_indication(stcb, error, 4128 (struct sctp_queued_to_read *)data, 4129 so_locked); 4130 break; 4131 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4132 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4133 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4134 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, false, so_locked); 4135 } else { 4136 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, false, so_locked); 4137 } 4138 break; 4139 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4140 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4141 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4142 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, true, false, so_locked); 4143 } else { 4144 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, true, false, so_locked); 4145 } 4146 break; 4147 case SCTP_NOTIFY_ASSOC_TIMEDOUT: 4148 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4149 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4150 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, true, so_locked); 4151 } else { 4152 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, true, so_locked); 4153 } 4154 break; 4155 case SCTP_NOTIFY_ASSOC_RESTART: 4156 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, false, false, so_locked); 4157 if (stcb->asoc.auth_supported == 0) { 4158 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4159 NULL, so_locked); 4160 } 4161 break; 4162 case SCTP_NOTIFY_STR_RESET_SEND: 4163 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4164 break; 4165 case SCTP_NOTIFY_STR_RESET_RECV: 4166 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4167 break; 4168 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4169 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4170 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4171 break; 4172 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4173 
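/* The peer denied our request to reset outgoing streams. */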
sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4174 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4175 break; 4176 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4177 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4178 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4179 break; 4180 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4181 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4182 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4183 break; 4184 case SCTP_NOTIFY_ASCONF_ADD_IP: 4185 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4186 error, so_locked); 4187 break; 4188 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4189 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4190 error, so_locked); 4191 break; 4192 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4193 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4194 error, so_locked); 4195 break; 4196 case SCTP_NOTIFY_PEER_SHUTDOWN: 4197 sctp_notify_shutdown_event(stcb); 4198 break; 4199 case SCTP_NOTIFY_AUTH_NEW_KEY: 4200 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4201 (uint16_t)(uintptr_t)data, 4202 so_locked); 4203 break; 4204 case SCTP_NOTIFY_AUTH_FREE_KEY: 4205 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4206 (uint16_t)(uintptr_t)data, 4207 so_locked); 4208 break; 4209 case SCTP_NOTIFY_NO_PEER_AUTH: 4210 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4211 (uint16_t)(uintptr_t)data, 4212 so_locked); 4213 break; 4214 case SCTP_NOTIFY_SENDER_DRY: 4215 sctp_notify_sender_dry_event(stcb, so_locked); 4216 break; 4217 case SCTP_NOTIFY_REMOTE_ERROR: 4218 sctp_notify_remote_error(stcb, error, data); 4219 break; 4220 default: 4221 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4222 __func__, notification, notification); 4223 break; 4224 } 4225 } 4226 4227 void 4228 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4229 { 4230 struct sctp_association *asoc; 4231 struct sctp_stream_out *outs; 4232 struct sctp_tmit_chunk *chk, *nchk; 4233 struct sctp_stream_queue_pending *sp, *nsp; 4234 int i; 4235 4236 if (stcb == NULL) { 4237 return; 4238 } 4239 asoc = &stcb->asoc; 4240 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4241 /* already being freed */ 4242 return; 4243 } 4244 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4245 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4246 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4247 return; 4248 } 4249 /* now through all the gunk freeing chunks */ 4250 /* sent queue SHOULD be empty */ 4251 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4252 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4253 asoc->sent_queue_cnt--; 4254 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4255 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4256 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4257 #ifdef INVARIANTS 4258 } else { 4259 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4260 #endif 4261 } 4262 } 4263 if (chk->data != NULL) { 4264 sctp_free_bufspace(stcb, asoc, chk, 1); 4265 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4266 error, chk, so_locked); 4267 if (chk->data) { 4268 sctp_m_freem(chk->data); 4269 chk->data = NULL; 4270 } 4271 } 4272 sctp_free_a_chunk(stcb, chk, so_locked); 4273 /* sa_ignore FREED_MEMORY */ 4274 } 4275 /* pending send queue SHOULD be empty */ 4276 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4277 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 
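/* This chunk never made it onto the wire; it is reported below via SCTP_NOTIFY_UNSENT_DG_FAIL. */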
4278 asoc->send_queue_cnt--; 4279 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4280 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4281 #ifdef INVARIANTS 4282 } else { 4283 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4284 #endif 4285 } 4286 if (chk->data != NULL) { 4287 sctp_free_bufspace(stcb, asoc, chk, 1); 4288 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4289 error, chk, so_locked); 4290 if (chk->data) { 4291 sctp_m_freem(chk->data); 4292 chk->data = NULL; 4293 } 4294 } 4295 sctp_free_a_chunk(stcb, chk, so_locked); 4296 /* sa_ignore FREED_MEMORY */ 4297 } 4298 for (i = 0; i < asoc->streamoutcnt; i++) { 4299 /* For each stream */ 4300 outs = &asoc->strmout[i]; 4301 /* clean up any sends there */ 4302 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4303 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4304 TAILQ_REMOVE(&outs->outqueue, sp, next); 4305 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp); 4306 sctp_free_spbufspace(stcb, asoc, sp); 4307 if (sp->data) { 4308 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4309 error, (void *)sp, so_locked); 4310 if (sp->data) { 4311 sctp_m_freem(sp->data); 4312 sp->data = NULL; 4313 sp->tail_mbuf = NULL; 4314 sp->length = 0; 4315 } 4316 } 4317 if (sp->net) { 4318 sctp_free_remote_addr(sp->net); 4319 sp->net = NULL; 4320 } 4321 /* Free the chunk */ 4322 sctp_free_a_strmoq(stcb, sp, so_locked); 4323 /* sa_ignore FREED_MEMORY */ 4324 } 4325 } 4326 } 4327 4328 void 4329 sctp_abort_notification(struct sctp_tcb *stcb, bool from_peer, bool timeout, 4330 uint16_t error, struct sctp_abort_chunk *abort, 4331 int so_locked) 4332 { 4333 if (stcb == NULL) { 4334 return; 4335 } 4336 SCTP_TCB_LOCK_ASSERT(stcb); 4337 4338 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4339 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4340 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4341 sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_WAS_ABORTED); 4342 } 4343 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4344 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4345 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4346 return; 4347 } 4348 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4349 /* Tell them we lost the asoc */ 4350 sctp_report_all_outbound(stcb, error, so_locked); 4351 if (from_peer) { 4352 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4353 } else { 4354 if (timeout) { 4355 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_TIMEDOUT, stcb, error, abort, so_locked); 4356 } else { 4357 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4358 } 4359 } 4360 } 4361 4362 void 4363 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4364 struct mbuf *m, int iphlen, 4365 struct sockaddr *src, struct sockaddr *dst, 4366 struct sctphdr *sh, struct mbuf *op_err, 4367 uint8_t mflowtype, uint32_t mflowid, 4368 uint32_t vrf_id, uint16_t port) 4369 { 4370 struct sctp_gen_error_cause *cause; 4371 uint32_t vtag; 4372 uint16_t cause_code; 4373 4374 if (stcb != NULL) { 4375 vtag = stcb->asoc.peer_vtag; 4376 vrf_id = stcb->asoc.vrf_id; 4377 if (op_err != NULL) { 4378 /* Read the cause code from the error cause. 
*/ 4379 cause = mtod(op_err, struct sctp_gen_error_cause *); 4380 cause_code = ntohs(cause->code); 4381 } else { 4382 cause_code = 0; 4383 } 4384 } else { 4385 vtag = 0; 4386 } 4387 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4388 mflowtype, mflowid, inp->fibnum, 4389 vrf_id, port); 4390 if (stcb != NULL) { 4391 /* We have a TCB to abort, send notification too */ 4392 sctp_abort_notification(stcb, false, false, cause_code, NULL, SCTP_SO_NOT_LOCKED); 4393 /* Ok, now lets free it */ 4394 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4395 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4396 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4397 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4398 } 4399 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4400 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4401 } 4402 } 4403 #ifdef SCTP_ASOCLOG_OF_TSNS 4404 void 4405 sctp_print_out_track_log(struct sctp_tcb *stcb) 4406 { 4407 #ifdef NOSIY_PRINTS 4408 int i; 4409 4410 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4411 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4412 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4413 SCTP_PRINTF("None rcvd\n"); 4414 goto none_in; 4415 } 4416 if (stcb->asoc.tsn_in_wrapped) { 4417 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4418 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4419 stcb->asoc.in_tsnlog[i].tsn, 4420 stcb->asoc.in_tsnlog[i].strm, 4421 stcb->asoc.in_tsnlog[i].seq, 4422 stcb->asoc.in_tsnlog[i].flgs, 4423 stcb->asoc.in_tsnlog[i].sz); 4424 } 4425 } 4426 if (stcb->asoc.tsn_in_at) { 4427 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4428 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4429 stcb->asoc.in_tsnlog[i].tsn, 4430 stcb->asoc.in_tsnlog[i].strm, 4431 stcb->asoc.in_tsnlog[i].seq, 4432 stcb->asoc.in_tsnlog[i].flgs, 4433 stcb->asoc.in_tsnlog[i].sz); 4434 } 4435 } 4436 none_in: 4437 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4438 if ((stcb->asoc.tsn_out_at == 0) && 4439 (stcb->asoc.tsn_out_wrapped == 0)) { 4440 SCTP_PRINTF("None sent\n"); 4441 } 4442 if (stcb->asoc.tsn_out_wrapped) { 4443 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4444 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4445 stcb->asoc.out_tsnlog[i].tsn, 4446 stcb->asoc.out_tsnlog[i].strm, 4447 stcb->asoc.out_tsnlog[i].seq, 4448 stcb->asoc.out_tsnlog[i].flgs, 4449 stcb->asoc.out_tsnlog[i].sz); 4450 } 4451 } 4452 if (stcb->asoc.tsn_out_at) { 4453 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4454 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4455 stcb->asoc.out_tsnlog[i].tsn, 4456 stcb->asoc.out_tsnlog[i].strm, 4457 stcb->asoc.out_tsnlog[i].seq, 4458 stcb->asoc.out_tsnlog[i].flgs, 4459 stcb->asoc.out_tsnlog[i].sz); 4460 } 4461 } 4462 #endif 4463 } 4464 #endif 4465 4466 void 4467 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4468 struct mbuf *op_err, bool timedout, int so_locked) 4469 { 4470 struct sctp_gen_error_cause *cause; 4471 uint16_t cause_code; 4472 4473 if (stcb == NULL) { 4474 /* Got to have a TCB */ 4475 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4476 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4477 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4478 SCTP_CALLED_DIRECTLY_NOCMPSET); 4479 } 4480 } 4481 return; 4482 } 4483 if (op_err != NULL) { 4484 /* Read the cause code from the error cause. 
*/ 4485 cause = mtod(op_err, struct sctp_gen_error_cause *); 4486 cause_code = ntohs(cause->code); 4487 } else { 4488 cause_code = 0; 4489 } 4490 /* notify the peer */ 4491 sctp_send_abort_tcb(stcb, op_err, so_locked); 4492 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4493 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4494 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4495 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4496 } 4497 /* notify the ulp */ 4498 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4499 sctp_abort_notification(stcb, false, timedout, cause_code, NULL, so_locked); 4500 } 4501 /* now free the asoc */ 4502 #ifdef SCTP_ASOCLOG_OF_TSNS 4503 sctp_print_out_track_log(stcb); 4504 #endif 4505 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4506 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4507 } 4508 4509 void 4510 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4511 struct sockaddr *src, struct sockaddr *dst, 4512 struct sctphdr *sh, struct sctp_inpcb *inp, 4513 struct mbuf *cause, 4514 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4515 uint32_t vrf_id, uint16_t port) 4516 { 4517 struct sctp_chunkhdr *ch, chunk_buf; 4518 unsigned int chk_length; 4519 int contains_init_chunk; 4520 4521 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4522 /* Generate a TO address for future reference */ 4523 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4524 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4525 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4526 SCTP_CALLED_DIRECTLY_NOCMPSET); 4527 } 4528 } 4529 contains_init_chunk = 0; 4530 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4531 sizeof(*ch), (uint8_t *)&chunk_buf); 4532 while (ch != NULL) { 4533 chk_length = ntohs(ch->chunk_length); 4534 if (chk_length < sizeof(*ch)) { 4535 /* break to abort land */ 4536 break; 4537 } 4538 switch (ch->chunk_type) { 4539 case SCTP_INIT: 4540 contains_init_chunk = 1; 4541 break; 4542 case SCTP_PACKET_DROPPED: 4543 /* we don't respond to pkt-dropped */ 4544 return; 4545 case SCTP_ABORT_ASSOCIATION: 4546 /* we don't respond with an ABORT to an ABORT */ 4547 return; 4548 case SCTP_SHUTDOWN_COMPLETE: 4549 /* 4550 * we ignore it since we are not waiting for it and 4551 * peer is gone 4552 */ 4553 return; 4554 case SCTP_SHUTDOWN_ACK: 4555 sctp_send_shutdown_complete2(src, dst, sh, 4556 mflowtype, mflowid, fibnum, 4557 vrf_id, port); 4558 return; 4559 default: 4560 break; 4561 } 4562 offset += SCTP_SIZE32(chk_length); 4563 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4564 sizeof(*ch), (uint8_t *)&chunk_buf); 4565 } 4566 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4567 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4568 (contains_init_chunk == 0))) { 4569 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4570 mflowtype, mflowid, fibnum, 4571 vrf_id, port); 4572 } 4573 } 4574 4575 /* 4576 * check the inbound datagram to make sure there is not an abort inside it, 4577 * if there is return 1, else return 0. 
4578 */ 4579 int 4580 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtag) 4581 { 4582 struct sctp_chunkhdr *ch; 4583 struct sctp_init_chunk *init_chk, chunk_buf; 4584 int offset; 4585 unsigned int chk_length; 4586 4587 offset = iphlen + sizeof(struct sctphdr); 4588 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4589 (uint8_t *)&chunk_buf); 4590 while (ch != NULL) { 4591 chk_length = ntohs(ch->chunk_length); 4592 if (chk_length < sizeof(*ch)) { 4593 /* packet is probably corrupt */ 4594 break; 4595 } 4596 /* we seem to be ok, is it an abort? */ 4597 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4598 /* yep, tell them */ 4599 return (1); 4600 } 4601 if ((ch->chunk_type == SCTP_INITIATION) || 4602 (ch->chunk_type == SCTP_INITIATION_ACK)) { 4603 /* need to update the Vtag */ 4604 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4605 offset, sizeof(struct sctp_init_chunk), (uint8_t *)&chunk_buf); 4606 if (init_chk != NULL) { 4607 *vtag = ntohl(init_chk->init.initiate_tag); 4608 } 4609 } 4610 /* Nope, move to the next chunk */ 4611 offset += SCTP_SIZE32(chk_length); 4612 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4613 sizeof(*ch), (uint8_t *)&chunk_buf); 4614 } 4615 return (0); 4616 } 4617 4618 /* 4619 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4620 * set (i.e. it's 0) so, create this function to compare link local scopes 4621 */ 4622 #ifdef INET6 4623 uint32_t 4624 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4625 { 4626 struct sockaddr_in6 a, b; 4627 4628 /* save copies */ 4629 a = *addr1; 4630 b = *addr2; 4631 4632 if (a.sin6_scope_id == 0) 4633 if (sa6_recoverscope(&a)) { 4634 /* can't get scope, so can't match */ 4635 return (0); 4636 } 4637 if (b.sin6_scope_id == 0) 4638 if (sa6_recoverscope(&b)) { 4639 /* can't get scope, so can't match */ 4640 return (0); 4641 } 4642 if (a.sin6_scope_id != b.sin6_scope_id) 4643 return (0); 4644 4645 return (1); 4646 } 4647 4648 /* 4649 * returns a sockaddr_in6 with embedded scope recovered and removed 4650 */ 4651 struct sockaddr_in6 * 4652 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4653 { 4654 /* check and strip embedded scope junk */ 4655 if (addr->sin6_family == AF_INET6) { 4656 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4657 if (addr->sin6_scope_id == 0) { 4658 *store = *addr; 4659 if (!sa6_recoverscope(store)) { 4660 /* use the recovered scope */ 4661 addr = store; 4662 } 4663 } else { 4664 /* else, return the original "to" addr */ 4665 in6_clearscope(&addr->sin6_addr); 4666 } 4667 } 4668 } 4669 return (addr); 4670 } 4671 #endif 4672 4673 /* 4674 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4675 * if same, 0 if not 4676 */ 4677 int 4678 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4679 { 4680 4681 /* must be valid */ 4682 if (sa1 == NULL || sa2 == NULL) 4683 return (0); 4684 4685 /* must be the same family */ 4686 if (sa1->sa_family != sa2->sa_family) 4687 return (0); 4688 4689 switch (sa1->sa_family) { 4690 #ifdef INET6 4691 case AF_INET6: 4692 { 4693 /* IPv6 addresses */ 4694 struct sockaddr_in6 *sin6_1, *sin6_2; 4695 4696 sin6_1 = (struct sockaddr_in6 *)sa1; 4697 sin6_2 = (struct sockaddr_in6 *)sa2; 4698 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4699 sin6_2)); 4700 } 4701 #endif 4702 #ifdef INET 4703 case AF_INET: 4704 { 4705 /* IPv4 addresses */ 4706 struct sockaddr_in *sin_1, *sin_2; 4707 4708 sin_1 = (struct sockaddr_in *)sa1; 4709 sin_2 = (struct sockaddr_in *)sa2; 4710 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4711 } 4712 #endif 4713 default: 4714 /* we don't do these... */ 4715 return (0); 4716 } 4717 } 4718 4719 void 4720 sctp_print_address(struct sockaddr *sa) 4721 { 4722 #ifdef INET6 4723 char ip6buf[INET6_ADDRSTRLEN]; 4724 #endif 4725 4726 switch (sa->sa_family) { 4727 #ifdef INET6 4728 case AF_INET6: 4729 { 4730 struct sockaddr_in6 *sin6; 4731 4732 sin6 = (struct sockaddr_in6 *)sa; 4733 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4734 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4735 ntohs(sin6->sin6_port), 4736 sin6->sin6_scope_id); 4737 break; 4738 } 4739 #endif 4740 #ifdef INET 4741 case AF_INET: 4742 { 4743 struct sockaddr_in *sin; 4744 unsigned char *p; 4745 4746 sin = (struct sockaddr_in *)sa; 4747 p = (unsigned char *)&sin->sin_addr; 4748 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4749 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4750 break; 4751 } 4752 #endif 4753 default: 4754 SCTP_PRINTF("?\n"); 4755 break; 4756 } 4757 } 4758 4759 void 4760 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4761 struct sctp_inpcb *new_inp, 4762 struct sctp_tcb *stcb, 4763 int waitflags) 4764 { 4765 /* 4766 * go through our old INP and pull off any control structures that 4767 * belong to stcb and move then to the new inp. 4768 */ 4769 struct socket *old_so, *new_so; 4770 struct sctp_queued_to_read *control, *nctl; 4771 struct sctp_readhead tmp_queue; 4772 struct mbuf *m; 4773 int error = 0; 4774 4775 old_so = old_inp->sctp_socket; 4776 new_so = new_inp->sctp_socket; 4777 TAILQ_INIT(&tmp_queue); 4778 error = SOCK_IO_RECV_LOCK(old_so, waitflags); 4779 if (error) { 4780 /* 4781 * Gak, can't get I/O lock, we have a problem. data will be 4782 * left stranded.. and we don't dare look at it since the 4783 * other thread may be reading something. Oh well, its a 4784 * screwed up app that does a peeloff OR a accept while 4785 * reading from the main socket... actually its only the 4786 * peeloff() case, since I think read will fail on a 4787 * listening socket.. 4788 */ 4789 return; 4790 } 4791 /* lock the socket buffers */ 4792 SCTP_INP_READ_LOCK(old_inp); 4793 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4794 /* Pull off all for out target stcb */ 4795 if (control->stcb == stcb) { 4796 /* remove it we want it */ 4797 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4798 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4799 m = control->data; 4800 while (m) { 4801 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4802 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4803 } 4804 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4805 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4806 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4807 } 4808 m = SCTP_BUF_NEXT(m); 4809 } 4810 } 4811 } 4812 SCTP_INP_READ_UNLOCK(old_inp); 4813 /* Remove the recv-lock on the old socket */ 4814 SOCK_IO_RECV_UNLOCK(old_so); 4815 /* Now we move them over to the new socket buffer */ 4816 SCTP_INP_READ_LOCK(new_inp); 4817 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4818 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4819 m = control->data; 4820 while (m) { 4821 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4822 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4823 } 4824 sctp_sballoc(stcb, &new_so->so_rcv, m); 4825 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4826 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4827 } 4828 m = SCTP_BUF_NEXT(m); 4829 } 4830 } 4831 SCTP_INP_READ_UNLOCK(new_inp); 4832 } 4833 4834 void 4835 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4836 struct sctp_tcb *stcb, 4837 int so_locked 4838 SCTP_UNUSED 4839 ) 4840 { 4841 if ((inp != NULL) && 4842 (inp->sctp_socket != NULL) && 4843 (((inp->sctp_flags & (SCTP_PCB_FLAGS_TCPTYPE | SCTP_PCB_FLAGS_IN_TCPPOOL)) == 0) || 4844 !SCTP_IS_LISTENING(inp))) { 4845 sctp_sorwakeup(inp, inp->sctp_socket); 4846 } 4847 } 4848 4849 void 4850 sctp_add_to_readq(struct sctp_inpcb *inp, 4851 struct sctp_tcb *stcb, 4852 struct sctp_queued_to_read *control, 4853 struct sockbuf *sb, 4854 int end, 4855 int inp_read_lock_held, 4856 int so_locked) 4857 { 4858 /* 4859 * Here we must place the control on the end of the socket read 4860 * queue AND increment sb_cc so that select will work properly on 4861 * read. 4862 */ 4863 struct mbuf *m, *prev = NULL; 4864 4865 if (inp == NULL) { 4866 /* Gak, TSNH!! */ 4867 #ifdef INVARIANTS 4868 panic("Gak, inp NULL on add_to_readq"); 4869 #endif 4870 return; 4871 } 4872 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4873 SCTP_INP_READ_LOCK(inp); 4874 } 4875 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4876 if (!control->on_strm_q) { 4877 sctp_free_remote_addr(control->whoFrom); 4878 if (control->data) { 4879 sctp_m_freem(control->data); 4880 control->data = NULL; 4881 } 4882 sctp_free_a_readq(stcb, control); 4883 } 4884 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4885 SCTP_INP_READ_UNLOCK(inp); 4886 } 4887 return; 4888 } 4889 if ((control->spec_flags & M_NOTIFICATION) == 0) { 4890 atomic_add_int(&inp->total_recvs, 1); 4891 if (!control->do_not_ref_stcb) { 4892 atomic_add_int(&stcb->total_recvs, 1); 4893 } 4894 } 4895 m = control->data; 4896 control->held_length = 0; 4897 control->length = 0; 4898 while (m != NULL) { 4899 if (SCTP_BUF_LEN(m) == 0) { 4900 /* Skip mbufs with NO length */ 4901 if (prev == NULL) { 4902 /* First one */ 4903 control->data = sctp_m_free(m); 4904 m = control->data; 4905 } else { 4906 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4907 m = SCTP_BUF_NEXT(prev); 4908 } 4909 if (m == NULL) { 4910 control->tail_mbuf = prev; 4911 } 4912 continue; 4913 } 4914 prev = m; 4915 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4916 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4917 } 4918 sctp_sballoc(stcb, sb, m); 4919 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4920 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4921 } 4922 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4923 m = SCTP_BUF_NEXT(m); 4924 } 4925 if (prev != NULL) { 4926 control->tail_mbuf = prev; 4927 } else { 4928 /* Everything got collapsed out?? */ 4929 if (!control->on_strm_q) { 4930 sctp_free_remote_addr(control->whoFrom); 4931 sctp_free_a_readq(stcb, control); 4932 } 4933 if (inp_read_lock_held == 0) 4934 SCTP_INP_READ_UNLOCK(inp); 4935 return; 4936 } 4937 if (end) { 4938 control->end_added = 1; 4939 } 4940 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4941 control->on_read_q = 1; 4942 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4943 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4944 } 4945 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4946 SCTP_INP_READ_UNLOCK(inp); 4947 } 4948 } 4949 4950 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4951 *************ALTERNATE ROUTING CODE 4952 */ 4953 4954 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4955 *************ALTERNATE ROUTING CODE 4956 */ 4957 4958 struct mbuf * 4959 sctp_generate_cause(uint16_t code, char *info) 4960 { 4961 struct mbuf *m; 4962 struct sctp_gen_error_cause *cause; 4963 size_t info_len; 4964 uint16_t len; 4965 4966 if ((code == 0) || (info == NULL)) { 4967 return (NULL); 4968 } 4969 info_len = strlen(info); 4970 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4971 return (NULL); 4972 } 4973 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4974 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4975 if (m != NULL) { 4976 SCTP_BUF_LEN(m) = len; 4977 cause = mtod(m, struct sctp_gen_error_cause *); 4978 cause->code = htons(code); 4979 cause->length = htons(len); 4980 memcpy(cause->info, info, info_len); 4981 } 4982 return (m); 4983 } 4984 4985 struct mbuf * 4986 sctp_generate_no_user_data_cause(uint32_t tsn) 4987 { 4988 struct mbuf *m; 4989 struct sctp_error_no_user_data *no_user_data_cause; 4990 uint16_t len; 4991 4992 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 4993 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4994 if (m != NULL) { 4995 SCTP_BUF_LEN(m) = len; 4996 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 4997 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 4998 no_user_data_cause->cause.length = htons(len); 4999 no_user_data_cause->tsn = htonl(tsn); 5000 } 5001 return (m); 5002 } 5003 5004 void 5005 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 5006 struct sctp_tmit_chunk *tp1, int chk_cnt) 5007 { 5008 if (tp1->data == NULL) { 5009 return; 5010 } 5011 atomic_subtract_int(&asoc->chunks_on_out_queue, chk_cnt); 5012 #ifdef SCTP_MBCNT_LOGGING 5013 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 5014 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 5015 asoc->total_output_queue_size, 5016 tp1->book_size, 5017 0, 5018 tp1->mbcnt); 5019 } 5020 #endif 5021 if (asoc->total_output_queue_size >= tp1->book_size) { 5022 atomic_subtract_int(&asoc->total_output_queue_size, tp1->book_size); 5023 } else { 5024 asoc->total_output_queue_size = 0; 5025 } 5026 if ((stcb->sctp_socket != NULL) && 5027 (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 5028 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 5029 
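/*
 * One-to-one style sockets charge so_snd by book_size when user data is
 * queued; release that charge now that the chunk is being freed.
 */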
SCTP_SB_DECR(&stcb->sctp_socket->so_snd, tp1->book_size); 5030 } 5031 } 5032 5033 int 5034 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5035 uint8_t sent, int so_locked) 5036 { 5037 struct sctp_stream_out *strq; 5038 struct sctp_tmit_chunk *chk = NULL, *tp2; 5039 struct sctp_stream_queue_pending *sp; 5040 uint32_t mid; 5041 uint16_t sid; 5042 uint8_t foundeom = 0; 5043 int ret_sz = 0; 5044 int notdone; 5045 int do_wakeup_routine = 0; 5046 5047 SCTP_TCB_LOCK_ASSERT(stcb); 5048 5049 sid = tp1->rec.data.sid; 5050 mid = tp1->rec.data.mid; 5051 if (sent || ((tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0)) { 5052 stcb->asoc.abandoned_sent[0]++; 5053 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5054 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5055 #if defined(SCTP_DETAILED_STR_STATS) 5056 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5057 #endif 5058 } else { 5059 stcb->asoc.abandoned_unsent[0]++; 5060 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5061 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5062 #if defined(SCTP_DETAILED_STR_STATS) 5063 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5064 #endif 5065 } 5066 do { 5067 ret_sz += tp1->book_size; 5068 if (tp1->data != NULL) { 5069 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5070 sctp_flight_size_decrease(tp1); 5071 sctp_total_flight_decrease(stcb, tp1); 5072 } 5073 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5074 stcb->asoc.peers_rwnd += tp1->send_size; 5075 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5076 if (sent) { 5077 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5078 } else { 5079 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5080 } 5081 if (tp1->data) { 5082 sctp_m_freem(tp1->data); 5083 tp1->data = NULL; 5084 } 5085 do_wakeup_routine = 1; 5086 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5087 stcb->asoc.sent_queue_cnt_removeable--; 5088 } 5089 } 5090 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5091 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5092 SCTP_DATA_NOT_FRAG) { 5093 /* not frag'ed we ae done */ 5094 notdone = 0; 5095 foundeom = 1; 5096 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5097 /* end of frag, we are done */ 5098 notdone = 0; 5099 foundeom = 1; 5100 } else { 5101 /* 5102 * Its a begin or middle piece, we must mark all of 5103 * it 5104 */ 5105 notdone = 1; 5106 tp1 = TAILQ_NEXT(tp1, sctp_next); 5107 } 5108 } while (tp1 && notdone); 5109 if (foundeom == 0) { 5110 /* 5111 * The multi-part message was scattered across the send and 5112 * sent queue. 5113 */ 5114 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5115 if ((tp1->rec.data.sid != sid) || 5116 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5117 break; 5118 } 5119 /* 5120 * save to chk in case we have some on stream out 5121 * queue. If so and we have an un-transmitted one we 5122 * don't have to fudge the TSN. 
5123 */ 5124 chk = tp1; 5125 ret_sz += tp1->book_size; 5126 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5127 if (sent) { 5128 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5129 } else { 5130 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5131 } 5132 if (tp1->data) { 5133 sctp_m_freem(tp1->data); 5134 tp1->data = NULL; 5135 } 5136 /* No flight involved here book the size to 0 */ 5137 tp1->book_size = 0; 5138 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5139 foundeom = 1; 5140 } 5141 do_wakeup_routine = 1; 5142 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5143 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5144 /* 5145 * on to the sent queue so we can wait for it to be 5146 * passed by. 5147 */ 5148 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5149 sctp_next); 5150 stcb->asoc.send_queue_cnt--; 5151 stcb->asoc.sent_queue_cnt++; 5152 } 5153 } 5154 if (foundeom == 0) { 5155 /* 5156 * Still no eom found. That means there is stuff left on the 5157 * stream out queue.. yuck. 5158 */ 5159 strq = &stcb->asoc.strmout[sid]; 5160 sp = TAILQ_FIRST(&strq->outqueue); 5161 if (sp != NULL) { 5162 sp->discard_rest = 1; 5163 /* 5164 * We may need to put a chunk on the queue that 5165 * holds the TSN that would have been sent with the 5166 * LAST bit. 5167 */ 5168 if (chk == NULL) { 5169 /* Yep, we have to */ 5170 sctp_alloc_a_chunk(stcb, chk); 5171 if (chk == NULL) { 5172 /* 5173 * we are hosed. All we can do is 5174 * nothing.. which will cause an 5175 * abort if the peer is paying 5176 * attention. 5177 */ 5178 goto oh_well; 5179 } 5180 memset(chk, 0, sizeof(*chk)); 5181 chk->rec.data.rcv_flags = 0; 5182 chk->sent = SCTP_FORWARD_TSN_SKIP; 5183 chk->asoc = &stcb->asoc; 5184 if (stcb->asoc.idata_supported == 0) { 5185 if (sp->sinfo_flags & SCTP_UNORDERED) { 5186 chk->rec.data.mid = 0; 5187 } else { 5188 chk->rec.data.mid = strq->next_mid_ordered; 5189 } 5190 } else { 5191 if (sp->sinfo_flags & SCTP_UNORDERED) { 5192 chk->rec.data.mid = strq->next_mid_unordered; 5193 } else { 5194 chk->rec.data.mid = strq->next_mid_ordered; 5195 } 5196 } 5197 chk->rec.data.sid = sp->sid; 5198 chk->rec.data.ppid = sp->ppid; 5199 chk->rec.data.context = sp->context; 5200 chk->flags = sp->act_flags; 5201 chk->whoTo = NULL; 5202 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5203 strq->chunks_on_queues++; 5204 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5205 stcb->asoc.sent_queue_cnt++; 5206 stcb->asoc.pr_sctp_cnt++; 5207 } 5208 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5209 if (sp->sinfo_flags & SCTP_UNORDERED) { 5210 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5211 } 5212 if (stcb->asoc.idata_supported == 0) { 5213 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5214 strq->next_mid_ordered++; 5215 } 5216 } else { 5217 if (sp->sinfo_flags & SCTP_UNORDERED) { 5218 strq->next_mid_unordered++; 5219 } else { 5220 strq->next_mid_ordered++; 5221 } 5222 } 5223 oh_well: 5224 if (sp->data) { 5225 /* 5226 * Pull any data to free up the SB and allow 5227 * sender to "add more" while we will throw 5228 * away :-) 5229 */ 5230 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5231 ret_sz += sp->length; 5232 do_wakeup_routine = 1; 5233 sp->some_taken = 1; 5234 sctp_m_freem(sp->data); 5235 sp->data = NULL; 5236 sp->tail_mbuf = NULL; 5237 sp->length = 0; 5238 } 5239 } 5240 } 5241 if (do_wakeup_routine) { 5242 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5243 } 5244 return (ret_sz); 5245 } 5246 5247 /* 5248 * checks to see if the given address, 
sa, is one that is currently known by 5249 * the kernel note: can't distinguish the same address on multiple interfaces 5250 * and doesn't handle multiple addresses with different zone/scope id's note: 5251 * ifa_ifwithaddr() compares the entire sockaddr struct 5252 */ 5253 struct sctp_ifa * 5254 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5255 int holds_lock) 5256 { 5257 struct sctp_laddr *laddr; 5258 5259 if (holds_lock == 0) { 5260 SCTP_INP_RLOCK(inp); 5261 } 5262 5263 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5264 if (laddr->ifa == NULL) 5265 continue; 5266 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5267 continue; 5268 #ifdef INET 5269 if (addr->sa_family == AF_INET) { 5270 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5271 laddr->ifa->address.sin.sin_addr.s_addr) { 5272 /* found him. */ 5273 break; 5274 } 5275 } 5276 #endif 5277 #ifdef INET6 5278 if (addr->sa_family == AF_INET6) { 5279 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5280 &laddr->ifa->address.sin6)) { 5281 /* found him. */ 5282 break; 5283 } 5284 } 5285 #endif 5286 } 5287 if (holds_lock == 0) { 5288 SCTP_INP_RUNLOCK(inp); 5289 } 5290 if (laddr != NULL) { 5291 return (laddr->ifa); 5292 } else { 5293 return (NULL); 5294 } 5295 } 5296 5297 uint32_t 5298 sctp_get_ifa_hash_val(struct sockaddr *addr) 5299 { 5300 switch (addr->sa_family) { 5301 #ifdef INET 5302 case AF_INET: 5303 { 5304 struct sockaddr_in *sin; 5305 5306 sin = (struct sockaddr_in *)addr; 5307 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5308 } 5309 #endif 5310 #ifdef INET6 5311 case AF_INET6: 5312 { 5313 struct sockaddr_in6 *sin6; 5314 uint32_t hash_of_addr; 5315 5316 sin6 = (struct sockaddr_in6 *)addr; 5317 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5318 sin6->sin6_addr.s6_addr32[1] + 5319 sin6->sin6_addr.s6_addr32[2] + 5320 sin6->sin6_addr.s6_addr32[3]); 5321 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5322 return (hash_of_addr); 5323 } 5324 #endif 5325 default: 5326 break; 5327 } 5328 return (0); 5329 } 5330 5331 struct sctp_ifa * 5332 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5333 { 5334 struct sctp_ifa *sctp_ifap; 5335 struct sctp_vrf *vrf; 5336 struct sctp_ifalist *hash_head; 5337 uint32_t hash_of_addr; 5338 5339 if (holds_lock == 0) { 5340 SCTP_IPI_ADDR_RLOCK(); 5341 } else { 5342 SCTP_IPI_ADDR_LOCK_ASSERT(); 5343 } 5344 5345 vrf = sctp_find_vrf(vrf_id); 5346 if (vrf == NULL) { 5347 if (holds_lock == 0) 5348 SCTP_IPI_ADDR_RUNLOCK(); 5349 return (NULL); 5350 } 5351 5352 hash_of_addr = sctp_get_ifa_hash_val(addr); 5353 5354 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5355 if (hash_head == NULL) { 5356 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5357 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5358 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5359 sctp_print_address(addr); 5360 SCTP_PRINTF("No such bucket for address\n"); 5361 if (holds_lock == 0) 5362 SCTP_IPI_ADDR_RUNLOCK(); 5363 5364 return (NULL); 5365 } 5366 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5367 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5368 continue; 5369 #ifdef INET 5370 if (addr->sa_family == AF_INET) { 5371 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5372 sctp_ifap->address.sin.sin_addr.s_addr) { 5373 /* found him. 
*/ 5374 break; 5375 } 5376 } 5377 #endif 5378 #ifdef INET6 5379 if (addr->sa_family == AF_INET6) { 5380 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5381 &sctp_ifap->address.sin6)) { 5382 /* found him. */ 5383 break; 5384 } 5385 } 5386 #endif 5387 } 5388 if (holds_lock == 0) 5389 SCTP_IPI_ADDR_RUNLOCK(); 5390 return (sctp_ifap); 5391 } 5392 5393 static void 5394 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5395 uint32_t rwnd_req) 5396 { 5397 /* User pulled some data, do we need a rwnd update? */ 5398 struct epoch_tracker et; 5399 int r_unlocked = 0; 5400 uint32_t dif, rwnd; 5401 struct socket *so = NULL; 5402 5403 if (stcb == NULL) 5404 return; 5405 5406 atomic_add_int(&stcb->asoc.refcnt, 1); 5407 5408 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5409 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5410 /* Pre-check If we are freeing no update */ 5411 goto no_lock; 5412 } 5413 SCTP_INP_INCR_REF(stcb->sctp_ep); 5414 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5415 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5416 goto out; 5417 } 5418 so = stcb->sctp_socket; 5419 if (so == NULL) { 5420 goto out; 5421 } 5422 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5423 /* Have you have freed enough to look */ 5424 *freed_so_far = 0; 5425 /* Yep, its worth a look and the lock overhead */ 5426 5427 /* Figure out what the rwnd would be */ 5428 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5429 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5430 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5431 } else { 5432 dif = 0; 5433 } 5434 if (dif >= rwnd_req) { 5435 if (hold_rlock) { 5436 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5437 r_unlocked = 1; 5438 } 5439 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5440 /* 5441 * One last check before we allow the guy possibly 5442 * to get in. There is a race, where the guy has not 5443 * reached the gate. In that case 5444 */ 5445 goto out; 5446 } 5447 SCTP_TCB_LOCK(stcb); 5448 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5449 /* No reports here */ 5450 SCTP_TCB_UNLOCK(stcb); 5451 goto out; 5452 } 5453 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5454 NET_EPOCH_ENTER(et); 5455 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5456 5457 sctp_chunk_output(stcb->sctp_ep, stcb, 5458 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5459 /* make sure no timer is running */ 5460 NET_EPOCH_EXIT(et); 5461 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5462 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5463 SCTP_TCB_UNLOCK(stcb); 5464 } else { 5465 /* Update how much we have pending */ 5466 stcb->freed_by_sorcv_sincelast = dif; 5467 } 5468 out: 5469 if (so && r_unlocked && hold_rlock) { 5470 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5471 } 5472 5473 SCTP_INP_DECR_REF(stcb->sctp_ep); 5474 no_lock: 5475 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5476 return; 5477 } 5478 5479 int 5480 sctp_sorecvmsg(struct socket *so, 5481 struct uio *uio, 5482 struct mbuf **mp, 5483 struct sockaddr *from, 5484 int fromlen, 5485 int *msg_flags, 5486 struct sctp_sndrcvinfo *sinfo, 5487 int filling_sinfo) 5488 { 5489 /* 5490 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5491 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5492 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
5493 * On the way out we may send out any combination of: 5494 * MSG_NOTIFICATION MSG_EOR 5495 * 5496 */ 5497 struct sctp_inpcb *inp = NULL; 5498 ssize_t my_len = 0; 5499 ssize_t cp_len = 0; 5500 int error = 0; 5501 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5502 struct mbuf *m = NULL; 5503 struct sctp_tcb *stcb = NULL; 5504 int wakeup_read_socket = 0; 5505 int freecnt_applied = 0; 5506 int out_flags = 0, in_flags = 0; 5507 int block_allowed = 1; 5508 uint32_t freed_so_far = 0; 5509 ssize_t copied_so_far = 0; 5510 int in_eeor_mode = 0; 5511 int no_rcv_needed = 0; 5512 uint32_t rwnd_req = 0; 5513 int hold_sblock = 0; 5514 int hold_rlock = 0; 5515 ssize_t slen = 0; 5516 uint32_t held_length = 0; 5517 int sockbuf_lock = 0; 5518 5519 if (uio == NULL) { 5520 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5521 return (EINVAL); 5522 } 5523 5524 if (msg_flags) { 5525 in_flags = *msg_flags; 5526 if (in_flags & MSG_PEEK) 5527 SCTP_STAT_INCR(sctps_read_peeks); 5528 } else { 5529 in_flags = 0; 5530 } 5531 slen = uio->uio_resid; 5532 5533 /* Pull in and set up our int flags */ 5534 if (in_flags & MSG_OOB) { 5535 /* Out of band's NOT supported */ 5536 return (EOPNOTSUPP); 5537 } 5538 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5539 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5540 return (EINVAL); 5541 } 5542 if ((in_flags & (MSG_DONTWAIT 5543 | MSG_NBIO 5544 )) || 5545 SCTP_SO_IS_NBIO(so)) { 5546 block_allowed = 0; 5547 } 5548 /* setup the endpoint */ 5549 inp = (struct sctp_inpcb *)so->so_pcb; 5550 if (inp == NULL) { 5551 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5552 return (EFAULT); 5553 } 5554 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5555 /* Must be at least a MTU's worth */ 5556 if (rwnd_req < SCTP_MIN_RWND) 5557 rwnd_req = SCTP_MIN_RWND; 5558 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5559 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5560 sctp_misc_ints(SCTP_SORECV_ENTER, 5561 rwnd_req, in_eeor_mode, SCTP_SBAVAIL(&so->so_rcv), (uint32_t)uio->uio_resid); 5562 } 5563 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5564 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5565 rwnd_req, block_allowed, SCTP_SBAVAIL(&so->so_rcv), (uint32_t)uio->uio_resid); 5566 } 5567 5568 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(in_flags)); 5569 if (error) { 5570 goto release_unlocked; 5571 } 5572 sockbuf_lock = 1; 5573 restart: 5574 5575 restart_nosblocks: 5576 if (hold_sblock == 0) { 5577 SOCKBUF_LOCK(&so->so_rcv); 5578 hold_sblock = 1; 5579 } 5580 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5581 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5582 goto out; 5583 } 5584 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && SCTP_SBAVAIL(&so->so_rcv) == 0) { 5585 if (so->so_error) { 5586 error = so->so_error; 5587 if ((in_flags & MSG_PEEK) == 0) 5588 so->so_error = 0; 5589 goto out; 5590 } else { 5591 if (SCTP_SBAVAIL(&so->so_rcv) == 0) { 5592 /* indicate EOF */ 5593 error = 0; 5594 goto out; 5595 } 5596 } 5597 } 5598 if (SCTP_SBAVAIL(&so->so_rcv) <= held_length) { 5599 if (so->so_error) { 5600 error = so->so_error; 5601 if ((in_flags & MSG_PEEK) == 0) { 5602 so->so_error = 0; 5603 } 5604 goto out; 5605 } 5606 if ((SCTP_SBAVAIL(&so->so_rcv) == 0) && 5607 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5608 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5609 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5610 /* 5611 * 
For active open side clear flags for 5612 * re-use passive open is blocked by 5613 * connect. 5614 */ 5615 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5616 /* 5617 * You were aborted, passive side 5618 * always hits here 5619 */ 5620 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5621 error = ECONNRESET; 5622 } 5623 so->so_state &= ~(SS_ISCONNECTING | 5624 SS_ISDISCONNECTING | 5625 SS_ISCONFIRMING | 5626 SS_ISCONNECTED); 5627 if (error == 0) { 5628 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5629 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5630 error = ENOTCONN; 5631 } 5632 } 5633 goto out; 5634 } 5635 } 5636 if (block_allowed) { 5637 error = sbwait(so, SO_RCV); 5638 if (error) { 5639 goto out; 5640 } 5641 held_length = 0; 5642 goto restart_nosblocks; 5643 } else { 5644 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5645 error = EWOULDBLOCK; 5646 goto out; 5647 } 5648 } 5649 if (hold_sblock == 1) { 5650 SOCKBUF_UNLOCK(&so->so_rcv); 5651 hold_sblock = 0; 5652 } 5653 /* we possibly have data we can read */ 5654 /* sa_ignore FREED_MEMORY */ 5655 control = TAILQ_FIRST(&inp->read_queue); 5656 if (control == NULL) { 5657 /* 5658 * This could be happening since the appender did the 5659 * increment but as not yet did the tailq insert onto the 5660 * read_queue 5661 */ 5662 if (hold_rlock == 0) { 5663 SCTP_INP_READ_LOCK(inp); 5664 } 5665 control = TAILQ_FIRST(&inp->read_queue); 5666 if ((control == NULL) && (SCTP_SBAVAIL(&so->so_rcv) > 0)) { 5667 #ifdef INVARIANTS 5668 panic("Huh, its non zero and nothing on control?"); 5669 #endif 5670 SCTP_SB_CLEAR(so->so_rcv); 5671 } 5672 SCTP_INP_READ_UNLOCK(inp); 5673 hold_rlock = 0; 5674 goto restart; 5675 } 5676 5677 if ((control->length == 0) && 5678 (control->do_not_ref_stcb)) { 5679 /* 5680 * Clean up code for freeing assoc that left behind a 5681 * pdapi.. maybe a peer in EEOR that just closed after 5682 * sending and never indicated a EOR. 5683 */ 5684 if (hold_rlock == 0) { 5685 hold_rlock = 1; 5686 SCTP_INP_READ_LOCK(inp); 5687 } 5688 control->held_length = 0; 5689 if (control->data) { 5690 /* Hmm there is data here .. fix */ 5691 struct mbuf *m_tmp; 5692 int cnt = 0; 5693 5694 m_tmp = control->data; 5695 while (m_tmp) { 5696 cnt += SCTP_BUF_LEN(m_tmp); 5697 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5698 control->tail_mbuf = m_tmp; 5699 control->end_added = 1; 5700 } 5701 m_tmp = SCTP_BUF_NEXT(m_tmp); 5702 } 5703 control->length = cnt; 5704 } else { 5705 /* remove it */ 5706 TAILQ_REMOVE(&inp->read_queue, control, next); 5707 /* Add back any hidden data */ 5708 sctp_free_remote_addr(control->whoFrom); 5709 sctp_free_a_readq(stcb, control); 5710 } 5711 if (hold_rlock) { 5712 hold_rlock = 0; 5713 SCTP_INP_READ_UNLOCK(inp); 5714 } 5715 goto restart; 5716 } 5717 if ((control->length == 0) && 5718 (control->end_added == 1)) { 5719 /* 5720 * Do we also need to check for (control->pdapi_aborted == 5721 * 1)? 5722 */ 5723 if (hold_rlock == 0) { 5724 hold_rlock = 1; 5725 SCTP_INP_READ_LOCK(inp); 5726 } 5727 TAILQ_REMOVE(&inp->read_queue, control, next); 5728 if (control->data) { 5729 #ifdef INVARIANTS 5730 panic("control->data not null but control->length == 0"); 5731 #else 5732 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5733 sctp_m_freem(control->data); 5734 control->data = NULL; 5735 #endif 5736 } 5737 if (control->aux_data) { 5738 sctp_m_free(control->aux_data); 5739 control->aux_data = NULL; 5740 } 5741 #ifdef INVARIANTS 5742 if (control->on_strm_q) { 5743 panic("About to free ctl:%p so:%p and its in %d", 5744 control, so, control->on_strm_q); 5745 } 5746 #endif 5747 sctp_free_remote_addr(control->whoFrom); 5748 sctp_free_a_readq(stcb, control); 5749 if (hold_rlock) { 5750 hold_rlock = 0; 5751 SCTP_INP_READ_UNLOCK(inp); 5752 } 5753 goto restart; 5754 } 5755 if (control->length == 0) { 5756 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5757 (filling_sinfo)) { 5758 /* find a more suitable one then this */ 5759 ctl = TAILQ_NEXT(control, next); 5760 while (ctl) { 5761 if ((ctl->stcb != control->stcb) && (ctl->length) && 5762 (ctl->some_taken || 5763 (ctl->spec_flags & M_NOTIFICATION) || 5764 ((ctl->do_not_ref_stcb == 0) && 5765 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5766 ) { 5767 /*- 5768 * If we have a different TCB next, and there is data 5769 * present. If we have already taken some (pdapi), OR we can 5770 * ref the tcb and no delivery as started on this stream, we 5771 * take it. Note we allow a notification on a different 5772 * assoc to be delivered.. 5773 */ 5774 control = ctl; 5775 goto found_one; 5776 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5777 (ctl->length) && 5778 ((ctl->some_taken) || 5779 ((ctl->do_not_ref_stcb == 0) && 5780 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5781 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5782 /*- 5783 * If we have the same tcb, and there is data present, and we 5784 * have the strm interleave feature present. Then if we have 5785 * taken some (pdapi) or we can refer to tht tcb AND we have 5786 * not started a delivery for this stream, we can take it. 5787 * Note we do NOT allow a notification on the same assoc to 5788 * be delivered. 5789 */ 5790 control = ctl; 5791 goto found_one; 5792 } 5793 ctl = TAILQ_NEXT(ctl, next); 5794 } 5795 } 5796 /* 5797 * if we reach here, not suitable replacement is available 5798 * <or> fragment interleave is NOT on. So stuff the sb_cc 5799 * into the our held count, and its time to sleep again. 5800 */ 5801 held_length = SCTP_SBAVAIL(&so->so_rcv); 5802 control->held_length = SCTP_SBAVAIL(&so->so_rcv); 5803 goto restart; 5804 } 5805 /* Clear the held length since there is something to read */ 5806 control->held_length = 0; 5807 found_one: 5808 /* 5809 * If we reach here, control has a some data for us to read off. 5810 * Note that stcb COULD be NULL. 5811 */ 5812 if (hold_rlock == 0) { 5813 hold_rlock = 1; 5814 SCTP_INP_READ_LOCK(inp); 5815 } 5816 control->some_taken++; 5817 stcb = control->stcb; 5818 if (stcb) { 5819 if ((control->do_not_ref_stcb == 0) && 5820 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5821 if (freecnt_applied == 0) 5822 stcb = NULL; 5823 } else if (control->do_not_ref_stcb == 0) { 5824 /* you can't free it on me please */ 5825 /* 5826 * The lock on the socket buffer protects us so the 5827 * free code will stop. 
But since we used the 5828 * socketbuf lock and the sender uses the tcb_lock 5829 * to increment, we need to use the atomic add to 5830 * the refcnt 5831 */ 5832 if (freecnt_applied) { 5833 #ifdef INVARIANTS 5834 panic("refcnt already incremented"); 5835 #else 5836 SCTP_PRINTF("refcnt already incremented?\n"); 5837 #endif 5838 } else { 5839 atomic_add_int(&stcb->asoc.refcnt, 1); 5840 freecnt_applied = 1; 5841 } 5842 /* 5843 * Setup to remember how much we have not yet told 5844 * the peer our rwnd has opened up. Note we grab the 5845 * value from the tcb from last time. Note too that 5846 * sack sending clears this when a sack is sent, 5847 * which is fine. Once we hit the rwnd_req, we then 5848 * will go to the sctp_user_rcvd() that will not 5849 * lock until it KNOWs it MUST send a WUP-SACK. 5850 */ 5851 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5852 stcb->freed_by_sorcv_sincelast = 0; 5853 } 5854 } 5855 if (stcb && 5856 ((control->spec_flags & M_NOTIFICATION) == 0) && 5857 control->do_not_ref_stcb == 0) { 5858 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5859 } 5860 5861 /* First lets get off the sinfo and sockaddr info */ 5862 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5863 sinfo->sinfo_stream = control->sinfo_stream; 5864 sinfo->sinfo_ssn = (uint16_t)control->mid; 5865 sinfo->sinfo_flags = control->sinfo_flags; 5866 sinfo->sinfo_ppid = control->sinfo_ppid; 5867 sinfo->sinfo_context = control->sinfo_context; 5868 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5869 sinfo->sinfo_tsn = control->sinfo_tsn; 5870 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5871 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5872 nxt = TAILQ_NEXT(control, next); 5873 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5874 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5875 struct sctp_extrcvinfo *s_extra; 5876 5877 s_extra = (struct sctp_extrcvinfo *)sinfo; 5878 if ((nxt) && 5879 (nxt->length)) { 5880 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5881 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5882 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5883 } 5884 if (nxt->spec_flags & M_NOTIFICATION) { 5885 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5886 } 5887 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5888 s_extra->serinfo_next_length = nxt->length; 5889 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5890 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5891 if (nxt->tail_mbuf != NULL) { 5892 if (nxt->end_added) { 5893 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5894 } 5895 } 5896 } else { 5897 /* 5898 * we explicitly 0 this, since the memcpy 5899 * got some other things beyond the older 5900 * sinfo_ that is on the control's structure 5901 * :-D 5902 */ 5903 nxt = NULL; 5904 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5905 s_extra->serinfo_next_aid = 0; 5906 s_extra->serinfo_next_length = 0; 5907 s_extra->serinfo_next_ppid = 0; 5908 s_extra->serinfo_next_stream = 0; 5909 } 5910 } 5911 /* 5912 * update off the real current cum-ack, if we have an stcb. 5913 */ 5914 if ((control->do_not_ref_stcb == 0) && stcb) 5915 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5916 /* 5917 * mask off the high bits, we keep the actual chunk bits in 5918 * there. 
5919 */ 5920 sinfo->sinfo_flags &= 0x00ff; 5921 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5922 sinfo->sinfo_flags |= SCTP_UNORDERED; 5923 } 5924 } 5925 #ifdef SCTP_ASOCLOG_OF_TSNS 5926 { 5927 int index, newindex; 5928 struct sctp_pcbtsn_rlog *entry; 5929 5930 do { 5931 index = inp->readlog_index; 5932 newindex = index + 1; 5933 if (newindex >= SCTP_READ_LOG_SIZE) { 5934 newindex = 0; 5935 } 5936 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5937 entry = &inp->readlog[index]; 5938 entry->vtag = control->sinfo_assoc_id; 5939 entry->strm = control->sinfo_stream; 5940 entry->seq = (uint16_t)control->mid; 5941 entry->sz = control->length; 5942 entry->flgs = control->sinfo_flags; 5943 } 5944 #endif 5945 if ((fromlen > 0) && (from != NULL)) { 5946 union sctp_sockstore store; 5947 size_t len; 5948 5949 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5950 #ifdef INET6 5951 case AF_INET6: 5952 len = sizeof(struct sockaddr_in6); 5953 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5954 store.sin6.sin6_port = control->port_from; 5955 break; 5956 #endif 5957 #ifdef INET 5958 case AF_INET: 5959 #ifdef INET6 5960 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5961 len = sizeof(struct sockaddr_in6); 5962 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5963 &store.sin6); 5964 store.sin6.sin6_port = control->port_from; 5965 } else { 5966 len = sizeof(struct sockaddr_in); 5967 store.sin = control->whoFrom->ro._l_addr.sin; 5968 store.sin.sin_port = control->port_from; 5969 } 5970 #else 5971 len = sizeof(struct sockaddr_in); 5972 store.sin = control->whoFrom->ro._l_addr.sin; 5973 store.sin.sin_port = control->port_from; 5974 #endif 5975 break; 5976 #endif 5977 default: 5978 len = 0; 5979 break; 5980 } 5981 memcpy(from, &store, min((size_t)fromlen, len)); 5982 #ifdef INET6 5983 { 5984 struct sockaddr_in6 lsa6, *from6; 5985 5986 from6 = (struct sockaddr_in6 *)from; 5987 sctp_recover_scope_mac(from6, (&lsa6)); 5988 } 5989 #endif 5990 } 5991 if (hold_rlock) { 5992 SCTP_INP_READ_UNLOCK(inp); 5993 hold_rlock = 0; 5994 } 5995 if (hold_sblock) { 5996 SOCKBUF_UNLOCK(&so->so_rcv); 5997 hold_sblock = 0; 5998 } 5999 /* now copy out what data we can */ 6000 if (mp == NULL) { 6001 /* copy out each mbuf in the chain up to length */ 6002 get_more_data: 6003 m = control->data; 6004 while (m) { 6005 /* Move out all we can */ 6006 cp_len = uio->uio_resid; 6007 my_len = SCTP_BUF_LEN(m); 6008 if (cp_len > my_len) { 6009 /* not enough in this buf */ 6010 cp_len = my_len; 6011 } 6012 if (hold_rlock) { 6013 SCTP_INP_READ_UNLOCK(inp); 6014 hold_rlock = 0; 6015 } 6016 if (cp_len > 0) 6017 error = uiomove(mtod(m, char *), (int)cp_len, uio); 6018 /* re-read */ 6019 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 6020 goto release; 6021 } 6022 6023 if ((control->do_not_ref_stcb == 0) && stcb && 6024 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 6025 no_rcv_needed = 1; 6026 } 6027 if (error) { 6028 /* error we are out of here */ 6029 goto release; 6030 } 6031 SCTP_INP_READ_LOCK(inp); 6032 hold_rlock = 1; 6033 if (cp_len == SCTP_BUF_LEN(m)) { 6034 if ((SCTP_BUF_NEXT(m) == NULL) && 6035 (control->end_added)) { 6036 out_flags |= MSG_EOR; 6037 if ((control->do_not_ref_stcb == 0) && 6038 (control->stcb != NULL) && 6039 ((control->spec_flags & M_NOTIFICATION) == 0)) 6040 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6041 } 6042 if (control->spec_flags & M_NOTIFICATION) { 6043 out_flags |= MSG_NOTIFICATION; 6044 } 6045 /* we ate up the mbuf */ 
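				/*
				 * Either leave the data queued (MSG_PEEK just
				 * walks past the mbuf) or consume it: adjust
				 * the socket-buffer accounting, free the mbuf
				 * and advance control->data to the next one.
				 */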
6046 if (in_flags & MSG_PEEK) { 6047 /* just looking */ 6048 m = SCTP_BUF_NEXT(m); 6049 copied_so_far += cp_len; 6050 } else { 6051 /* dispose of the mbuf */ 6052 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6053 sctp_sblog(&so->so_rcv, 6054 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6055 } 6056 sctp_sbfree(control, stcb, &so->so_rcv, m); 6057 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6058 sctp_sblog(&so->so_rcv, 6059 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6060 } 6061 copied_so_far += cp_len; 6062 freed_so_far += (uint32_t)cp_len; 6063 freed_so_far += MSIZE; 6064 atomic_subtract_int(&control->length, (int)cp_len); 6065 control->data = sctp_m_free(m); 6066 m = control->data; 6067 /* 6068 * been through it all, must hold sb 6069 * lock ok to null tail 6070 */ 6071 if (control->data == NULL) { 6072 #ifdef INVARIANTS 6073 if ((control->end_added == 0) || 6074 (TAILQ_NEXT(control, next) == NULL)) { 6075 /* 6076 * If the end is not 6077 * added, OR the 6078 * next is NOT null 6079 * we MUST have the 6080 * lock. 6081 */ 6082 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6083 panic("Hmm we don't own the lock?"); 6084 } 6085 } 6086 #endif 6087 control->tail_mbuf = NULL; 6088 #ifdef INVARIANTS 6089 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6090 panic("end_added, nothing left and no MSG_EOR"); 6091 } 6092 #endif 6093 } 6094 } 6095 } else { 6096 /* Do we need to trim the mbuf? */ 6097 if (control->spec_flags & M_NOTIFICATION) { 6098 out_flags |= MSG_NOTIFICATION; 6099 } 6100 if ((in_flags & MSG_PEEK) == 0) { 6101 SCTP_BUF_RESV_UF(m, cp_len); 6102 SCTP_BUF_LEN(m) -= (int)cp_len; 6103 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6104 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6105 } 6106 SCTP_SB_DECR(&so->so_rcv, cp_len); 6107 if ((control->do_not_ref_stcb == 0) && 6108 stcb) { 6109 atomic_subtract_int(&stcb->asoc.sb_cc, (int)cp_len); 6110 } 6111 copied_so_far += cp_len; 6112 freed_so_far += (uint32_t)cp_len; 6113 freed_so_far += MSIZE; 6114 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6115 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6116 SCTP_LOG_SBRESULT, 0); 6117 } 6118 atomic_subtract_int(&control->length, (int)cp_len); 6119 } else { 6120 copied_so_far += cp_len; 6121 } 6122 } 6123 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6124 break; 6125 } 6126 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6127 (control->do_not_ref_stcb == 0) && 6128 (freed_so_far >= rwnd_req)) { 6129 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6130 } 6131 } /* end while(m) */ 6132 /* 6133 * At this point we have looked at it all and we either have 6134 * a MSG_EOR/or read all the user wants... <OR> 6135 * control->length == 0. 6136 */ 6137 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6138 /* we are done with this control */ 6139 if (control->length == 0) { 6140 if (control->data) { 6141 #ifdef INVARIANTS 6142 panic("control->data not null at read eor?"); 6143 #else 6144 SCTP_PRINTF("Strange, data left in the control buffer .. 
invariants would panic?\n"); 6145 sctp_m_freem(control->data); 6146 control->data = NULL; 6147 #endif 6148 } 6149 done_with_control: 6150 if (hold_rlock == 0) { 6151 SCTP_INP_READ_LOCK(inp); 6152 hold_rlock = 1; 6153 } 6154 TAILQ_REMOVE(&inp->read_queue, control, next); 6155 /* Add back any hidden data */ 6156 if (control->held_length) { 6157 held_length = 0; 6158 control->held_length = 0; 6159 wakeup_read_socket = 1; 6160 } 6161 if (control->aux_data) { 6162 sctp_m_free(control->aux_data); 6163 control->aux_data = NULL; 6164 } 6165 no_rcv_needed = control->do_not_ref_stcb; 6166 sctp_free_remote_addr(control->whoFrom); 6167 control->data = NULL; 6168 #ifdef INVARIANTS 6169 if (control->on_strm_q) { 6170 panic("About to free ctl:%p so:%p and its in %d", 6171 control, so, control->on_strm_q); 6172 } 6173 #endif 6174 sctp_free_a_readq(stcb, control); 6175 control = NULL; 6176 if ((freed_so_far >= rwnd_req) && 6177 (no_rcv_needed == 0)) 6178 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6179 6180 } else { 6181 /* 6182 * The user did not read all of this 6183 * message, turn off the returned MSG_EOR 6184 * since we are leaving more behind on the 6185 * control to read. 6186 */ 6187 #ifdef INVARIANTS 6188 if (control->end_added && 6189 (control->data == NULL) && 6190 (control->tail_mbuf == NULL)) { 6191 panic("Gak, control->length is corrupt?"); 6192 } 6193 #endif 6194 no_rcv_needed = control->do_not_ref_stcb; 6195 out_flags &= ~MSG_EOR; 6196 } 6197 } 6198 if (out_flags & MSG_EOR) { 6199 goto release; 6200 } 6201 if ((uio->uio_resid == 0) || 6202 ((in_eeor_mode) && 6203 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6204 goto release; 6205 } 6206 /* 6207 * If I hit here the receiver wants more and this message is 6208 * NOT done (pd-api). So two questions. Can we block? if not 6209 * we are done. Did the user NOT set MSG_WAITALL? 6210 */ 6211 if (block_allowed == 0) { 6212 goto release; 6213 } 6214 /* 6215 * We need to wait for more data a few things: - We don't 6216 * release the I/O lock so we don't get someone else 6217 * reading. - We must be sure to account for the case where 6218 * what is added is NOT to our control when we wakeup. 6219 */ 6220 6221 /* 6222 * Do we need to tell the transport a rwnd update might be 6223 * needed before we go to sleep? 
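		 * We do only when we are not peeking, freed_so_far has
		 * reached rwnd_req, the stcb may be referenced
		 * (do_not_ref_stcb == 0) and no_rcv_needed is not set.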
6224 */ 6225 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6226 ((freed_so_far >= rwnd_req) && 6227 (control->do_not_ref_stcb == 0) && 6228 (no_rcv_needed == 0))) { 6229 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6230 } 6231 wait_some_more: 6232 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6233 goto release; 6234 } 6235 6236 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6237 goto release; 6238 6239 if (hold_rlock == 1) { 6240 SCTP_INP_READ_UNLOCK(inp); 6241 hold_rlock = 0; 6242 } 6243 if (hold_sblock == 0) { 6244 SOCKBUF_LOCK(&so->so_rcv); 6245 hold_sblock = 1; 6246 } 6247 if ((copied_so_far) && (control->length == 0) && 6248 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6249 goto release; 6250 } 6251 if (SCTP_SBAVAIL(&so->so_rcv) <= control->held_length) { 6252 error = sbwait(so, SO_RCV); 6253 if (error) { 6254 goto release; 6255 } 6256 control->held_length = 0; 6257 } 6258 if (hold_sblock) { 6259 SOCKBUF_UNLOCK(&so->so_rcv); 6260 hold_sblock = 0; 6261 } 6262 if (control->length == 0) { 6263 /* still nothing here */ 6264 if (control->end_added == 1) { 6265 /* he aborted, or is done i.e.did a shutdown */ 6266 out_flags |= MSG_EOR; 6267 if (control->pdapi_aborted) { 6268 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6269 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6270 6271 out_flags |= MSG_TRUNC; 6272 } else { 6273 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6274 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6275 } 6276 goto done_with_control; 6277 } 6278 if (SCTP_SBAVAIL(&so->so_rcv) > held_length) { 6279 control->held_length = SCTP_SBAVAIL(&so->so_rcv); 6280 held_length = 0; 6281 } 6282 goto wait_some_more; 6283 } else if (control->data == NULL) { 6284 /* 6285 * we must re-sync since data is probably being 6286 * added 6287 */ 6288 SCTP_INP_READ_LOCK(inp); 6289 if ((control->length > 0) && (control->data == NULL)) { 6290 /* 6291 * big trouble.. we have the lock and its 6292 * corrupt? 6293 */ 6294 #ifdef INVARIANTS 6295 panic("Impossible data==NULL length !=0"); 6296 #endif 6297 out_flags |= MSG_EOR; 6298 out_flags |= MSG_TRUNC; 6299 control->length = 0; 6300 SCTP_INP_READ_UNLOCK(inp); 6301 goto done_with_control; 6302 } 6303 SCTP_INP_READ_UNLOCK(inp); 6304 /* We will fall around to get more data */ 6305 } 6306 goto get_more_data; 6307 } else { 6308 /*- 6309 * Give caller back the mbuf chain, 6310 * store in uio_resid the length 6311 */ 6312 wakeup_read_socket = 0; 6313 if ((control->end_added == 0) || 6314 (TAILQ_NEXT(control, next) == NULL)) { 6315 /* Need to get rlock */ 6316 if (hold_rlock == 0) { 6317 SCTP_INP_READ_LOCK(inp); 6318 hold_rlock = 1; 6319 } 6320 } 6321 if (control->end_added) { 6322 out_flags |= MSG_EOR; 6323 if ((control->do_not_ref_stcb == 0) && 6324 (control->stcb != NULL) && 6325 ((control->spec_flags & M_NOTIFICATION) == 0)) 6326 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6327 } 6328 if (control->spec_flags & M_NOTIFICATION) { 6329 out_flags |= MSG_NOTIFICATION; 6330 } 6331 uio->uio_resid = control->length; 6332 *mp = control->data; 6333 m = control->data; 6334 while (m) { 6335 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6336 sctp_sblog(&so->so_rcv, 6337 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6338 } 6339 sctp_sbfree(control, stcb, &so->so_rcv, m); 6340 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6341 freed_so_far += MSIZE; 6342 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6343 sctp_sblog(&so->so_rcv, 6344 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6345 } 6346 m = SCTP_BUF_NEXT(m); 6347 } 6348 control->data = control->tail_mbuf = NULL; 6349 control->length = 0; 6350 if (out_flags & MSG_EOR) { 6351 /* Done with this control */ 6352 goto done_with_control; 6353 } 6354 } 6355 release: 6356 if (hold_rlock == 1) { 6357 SCTP_INP_READ_UNLOCK(inp); 6358 hold_rlock = 0; 6359 } 6360 if (hold_sblock == 1) { 6361 SOCKBUF_UNLOCK(&so->so_rcv); 6362 hold_sblock = 0; 6363 } 6364 6365 SOCK_IO_RECV_UNLOCK(so); 6366 sockbuf_lock = 0; 6367 6368 release_unlocked: 6369 if (hold_sblock) { 6370 SOCKBUF_UNLOCK(&so->so_rcv); 6371 hold_sblock = 0; 6372 } 6373 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6374 if ((freed_so_far >= rwnd_req) && 6375 (control && (control->do_not_ref_stcb == 0)) && 6376 (no_rcv_needed == 0)) 6377 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6378 } 6379 out: 6380 if (msg_flags) { 6381 *msg_flags = out_flags; 6382 } 6383 if (((out_flags & MSG_EOR) == 0) && 6384 ((in_flags & MSG_PEEK) == 0) && 6385 (sinfo) && 6386 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6387 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6388 struct sctp_extrcvinfo *s_extra; 6389 6390 s_extra = (struct sctp_extrcvinfo *)sinfo; 6391 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6392 } 6393 if (hold_rlock == 1) { 6394 SCTP_INP_READ_UNLOCK(inp); 6395 } 6396 if (hold_sblock) { 6397 SOCKBUF_UNLOCK(&so->so_rcv); 6398 } 6399 if (sockbuf_lock) { 6400 SOCK_IO_RECV_UNLOCK(so); 6401 } 6402 6403 if (freecnt_applied) { 6404 /* 6405 * The lock on the socket buffer protects us so the free 6406 * code will stop. But since we used the socketbuf lock and 6407 * the sender uses the tcb_lock to increment, we need to use 6408 * the atomic add to the refcnt. 6409 */ 6410 if (stcb == NULL) { 6411 #ifdef INVARIANTS 6412 panic("stcb for refcnt has gone NULL?"); 6413 goto stage_left; 6414 #else 6415 goto stage_left; 6416 #endif 6417 } 6418 /* Save the value back for next time */ 6419 stcb->freed_by_sorcv_sincelast = freed_so_far; 6420 atomic_subtract_int(&stcb->asoc.refcnt, 1); 6421 } 6422 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6423 if (stcb) { 6424 sctp_misc_ints(SCTP_SORECV_DONE, 6425 freed_so_far, 6426 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6427 stcb->asoc.my_rwnd, 6428 SCTP_SBAVAIL(&so->so_rcv)); 6429 } else { 6430 sctp_misc_ints(SCTP_SORECV_DONE, 6431 freed_so_far, 6432 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6433 0, 6434 SCTP_SBAVAIL(&so->so_rcv)); 6435 } 6436 } 6437 stage_left: 6438 if (wakeup_read_socket) { 6439 sctp_sorwakeup(inp, so); 6440 } 6441 return (error); 6442 } 6443 6444 #ifdef SCTP_MBUF_LOGGING 6445 struct mbuf * 6446 sctp_m_free(struct mbuf *m) 6447 { 6448 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6449 sctp_log_mb(m, SCTP_MBUF_IFREE); 6450 } 6451 return (m_free(m)); 6452 } 6453 6454 void 6455 sctp_m_freem(struct mbuf *mb) 6456 { 6457 while (mb != NULL) 6458 mb = sctp_m_free(mb); 6459 } 6460 6461 #endif 6462 6463 int 6464 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6465 { 6466 /* 6467 * Given a local address. 
For all associations that holds the 6468 * address, request a peer-set-primary. 6469 */ 6470 struct sctp_ifa *ifa; 6471 struct sctp_laddr *wi; 6472 6473 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6474 if (ifa == NULL) { 6475 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6476 return (EADDRNOTAVAIL); 6477 } 6478 /* 6479 * Now that we have the ifa we must awaken the iterator with this 6480 * message. 6481 */ 6482 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6483 if (wi == NULL) { 6484 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6485 return (ENOMEM); 6486 } 6487 /* Now incr the count and int wi structure */ 6488 SCTP_INCR_LADDR_COUNT(); 6489 memset(wi, 0, sizeof(*wi)); 6490 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6491 wi->ifa = ifa; 6492 wi->action = SCTP_SET_PRIM_ADDR; 6493 atomic_add_int(&ifa->refcount, 1); 6494 6495 /* Now add it to the work queue */ 6496 SCTP_WQ_ADDR_LOCK(); 6497 /* 6498 * Should this really be a tailq? As it is we will process the 6499 * newest first :-0 6500 */ 6501 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6502 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6503 (struct sctp_inpcb *)NULL, 6504 (struct sctp_tcb *)NULL, 6505 (struct sctp_nets *)NULL); 6506 SCTP_WQ_ADDR_UNLOCK(); 6507 return (0); 6508 } 6509 6510 int 6511 sctp_soreceive(struct socket *so, 6512 struct sockaddr **psa, 6513 struct uio *uio, 6514 struct mbuf **mp0, 6515 struct mbuf **controlp, 6516 int *flagsp) 6517 { 6518 int error, fromlen; 6519 uint8_t sockbuf[256]; 6520 struct sockaddr *from; 6521 struct sctp_extrcvinfo sinfo; 6522 int filling_sinfo = 1; 6523 int flags; 6524 struct sctp_inpcb *inp; 6525 6526 inp = (struct sctp_inpcb *)so->so_pcb; 6527 /* pickup the assoc we are reading from */ 6528 if (inp == NULL) { 6529 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6530 return (EINVAL); 6531 } 6532 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6533 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6534 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6535 (controlp == NULL)) { 6536 /* user does not want the sndrcv ctl */ 6537 filling_sinfo = 0; 6538 } 6539 if (psa) { 6540 from = (struct sockaddr *)sockbuf; 6541 fromlen = sizeof(sockbuf); 6542 from->sa_len = 0; 6543 } else { 6544 from = NULL; 6545 fromlen = 0; 6546 } 6547 6548 if (filling_sinfo) { 6549 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6550 } 6551 if (flagsp != NULL) { 6552 flags = *flagsp; 6553 } else { 6554 flags = 0; 6555 } 6556 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6557 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6558 if (flagsp != NULL) { 6559 *flagsp = flags; 6560 } 6561 if (controlp != NULL) { 6562 /* copy back the sinfo in a CMSG format */ 6563 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6564 *controlp = sctp_build_ctl_nchunk(inp, 6565 (struct sctp_sndrcvinfo *)&sinfo); 6566 } else { 6567 *controlp = NULL; 6568 } 6569 } 6570 if (psa) { 6571 /* copy back the address info */ 6572 if (from && from->sa_len) { 6573 *psa = sodupsockaddr(from, M_NOWAIT); 6574 } else { 6575 *psa = NULL; 6576 } 6577 } 6578 return (error); 6579 } 6580 6581 int 6582 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6583 int totaddr, int *error) 6584 { 6585 int added = 0; 6586 int i; 6587 struct sctp_inpcb *inp; 6588 struct sockaddr *sa; 6589 size_t incr = 0; 6590 #ifdef INET 6591 struct sockaddr_in *sin; 6592 #endif 6593 #ifdef 
INET6 6594 struct sockaddr_in6 *sin6; 6595 #endif 6596 6597 sa = addr; 6598 inp = stcb->sctp_ep; 6599 *error = 0; 6600 for (i = 0; i < totaddr; i++) { 6601 switch (sa->sa_family) { 6602 #ifdef INET 6603 case AF_INET: 6604 incr = sizeof(struct sockaddr_in); 6605 sin = (struct sockaddr_in *)sa; 6606 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6607 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6608 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6609 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6610 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6611 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6612 *error = EINVAL; 6613 goto out_now; 6614 } 6615 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6616 SCTP_DONOT_SETSCOPE, 6617 SCTP_ADDR_IS_CONFIRMED)) { 6618 /* assoc gone no un-lock */ 6619 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6620 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6621 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6622 *error = ENOBUFS; 6623 goto out_now; 6624 } 6625 added++; 6626 break; 6627 #endif 6628 #ifdef INET6 6629 case AF_INET6: 6630 incr = sizeof(struct sockaddr_in6); 6631 sin6 = (struct sockaddr_in6 *)sa; 6632 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6633 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6634 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6635 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6636 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6637 *error = EINVAL; 6638 goto out_now; 6639 } 6640 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6641 SCTP_DONOT_SETSCOPE, 6642 SCTP_ADDR_IS_CONFIRMED)) { 6643 /* assoc gone no un-lock */ 6644 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6645 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6646 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6647 *error = ENOBUFS; 6648 goto out_now; 6649 } 6650 added++; 6651 break; 6652 #endif 6653 default: 6654 break; 6655 } 6656 sa = (struct sockaddr *)((caddr_t)sa + incr); 6657 } 6658 out_now: 6659 return (added); 6660 } 6661 6662 int 6663 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6664 unsigned int totaddr, 6665 unsigned int *num_v4, unsigned int *num_v6, 6666 unsigned int limit) 6667 { 6668 struct sockaddr *sa; 6669 struct sctp_tcb *stcb; 6670 unsigned int incr, at, i; 6671 6672 at = 0; 6673 sa = addr; 6674 *num_v6 = *num_v4 = 0; 6675 /* account and validate addresses */ 6676 if (totaddr == 0) { 6677 return (EINVAL); 6678 } 6679 for (i = 0; i < totaddr; i++) { 6680 if (at + sizeof(struct sockaddr) > limit) { 6681 return (EINVAL); 6682 } 6683 switch (sa->sa_family) { 6684 #ifdef INET 6685 case AF_INET: 6686 incr = (unsigned int)sizeof(struct sockaddr_in); 6687 if (sa->sa_len != incr) { 6688 return (EINVAL); 6689 } 6690 (*num_v4) += 1; 6691 break; 6692 #endif 6693 #ifdef INET6 6694 case AF_INET6: 6695 { 6696 struct sockaddr_in6 *sin6; 6697 6698 incr = (unsigned int)sizeof(struct sockaddr_in6); 6699 if (sa->sa_len != incr) { 6700 return (EINVAL); 6701 } 6702 sin6 = (struct sockaddr_in6 *)sa; 6703 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6704 /* Must be non-mapped for connectx */ 6705 return (EINVAL); 6706 } 6707 (*num_v6) += 1; 6708 break; 6709 } 6710 #endif 6711 default: 6712 return (EINVAL); 6713 } 6714 if ((at + incr) > limit) { 6715 return (EINVAL); 6716 } 6717 SCTP_INP_INCR_REF(inp); 6718 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6719 if (stcb != NULL) { 6720 SCTP_TCB_UNLOCK(stcb); 6721 return (EALREADY); 6722 } else { 6723 
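			/*
			 * No association exists yet for this address; drop
			 * the inp reference taken just before the lookup and
			 * keep checking the remaining addresses.
			 */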
SCTP_INP_DECR_REF(inp); 6724 } 6725 at += incr; 6726 sa = (struct sockaddr *)((caddr_t)sa + incr); 6727 } 6728 return (0); 6729 } 6730 6731 /* 6732 * sctp_bindx(ADD) for one address. 6733 * assumes all arguments are valid/checked by caller. 6734 */ 6735 void 6736 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6737 struct sockaddr *sa, uint32_t vrf_id, int *error, 6738 void *p) 6739 { 6740 #if defined(INET) && defined(INET6) 6741 struct sockaddr_in sin; 6742 #endif 6743 #ifdef INET6 6744 struct sockaddr_in6 *sin6; 6745 #endif 6746 #ifdef INET 6747 struct sockaddr_in *sinp; 6748 #endif 6749 struct sockaddr *addr_to_use; 6750 struct sctp_inpcb *lep; 6751 uint16_t port; 6752 6753 /* see if we're bound all already! */ 6754 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6755 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6756 *error = EINVAL; 6757 return; 6758 } 6759 switch (sa->sa_family) { 6760 #ifdef INET6 6761 case AF_INET6: 6762 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6763 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6764 *error = EINVAL; 6765 return; 6766 } 6767 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6768 /* can only bind v6 on PF_INET6 sockets */ 6769 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6770 *error = EINVAL; 6771 return; 6772 } 6773 sin6 = (struct sockaddr_in6 *)sa; 6774 port = sin6->sin6_port; 6775 #ifdef INET 6776 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6777 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6778 SCTP_IPV6_V6ONLY(inp)) { 6779 /* can't bind v4-mapped on PF_INET sockets */ 6780 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6781 *error = EINVAL; 6782 return; 6783 } 6784 in6_sin6_2_sin(&sin, sin6); 6785 addr_to_use = (struct sockaddr *)&sin; 6786 } else { 6787 addr_to_use = sa; 6788 } 6789 #else 6790 addr_to_use = sa; 6791 #endif 6792 break; 6793 #endif 6794 #ifdef INET 6795 case AF_INET: 6796 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6797 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6798 *error = EINVAL; 6799 return; 6800 } 6801 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6802 SCTP_IPV6_V6ONLY(inp)) { 6803 /* can't bind v4 on PF_INET sockets */ 6804 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6805 *error = EINVAL; 6806 return; 6807 } 6808 sinp = (struct sockaddr_in *)sa; 6809 port = sinp->sin_port; 6810 addr_to_use = sa; 6811 break; 6812 #endif 6813 default: 6814 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6815 *error = EINVAL; 6816 return; 6817 } 6818 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6819 if (p == NULL) { 6820 /* Can't get proc for Net/Open BSD */ 6821 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6822 *error = EINVAL; 6823 return; 6824 } 6825 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6826 return; 6827 } 6828 /* Validate the incoming port. */ 6829 if ((port != 0) && (port != inp->sctp_lport)) { 6830 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6831 *error = EINVAL; 6832 return; 6833 } 6834 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6835 if (lep == NULL) { 6836 /* add the address */ 6837 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6838 SCTP_ADD_IP_ADDRESS, vrf_id); 6839 } else { 6840 if (lep != inp) { 6841 *error = EADDRINUSE; 6842 } 6843 SCTP_INP_DECR_REF(lep); 6844 } 6845 } 6846 6847 /* 6848 * sctp_bindx(DELETE) for one address. 
6849 * assumes all arguments are valid/checked by caller. 6850 */ 6851 void 6852 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6853 struct sockaddr *sa, uint32_t vrf_id, int *error) 6854 { 6855 struct sockaddr *addr_to_use; 6856 #if defined(INET) && defined(INET6) 6857 struct sockaddr_in6 *sin6; 6858 struct sockaddr_in sin; 6859 #endif 6860 6861 /* see if we're bound all already! */ 6862 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6863 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6864 *error = EINVAL; 6865 return; 6866 } 6867 switch (sa->sa_family) { 6868 #ifdef INET6 6869 case AF_INET6: 6870 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6871 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6872 *error = EINVAL; 6873 return; 6874 } 6875 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6876 /* can only bind v6 on PF_INET6 sockets */ 6877 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6878 *error = EINVAL; 6879 return; 6880 } 6881 #ifdef INET 6882 sin6 = (struct sockaddr_in6 *)sa; 6883 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6884 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6885 SCTP_IPV6_V6ONLY(inp)) { 6886 /* can't bind mapped-v4 on PF_INET sockets */ 6887 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6888 *error = EINVAL; 6889 return; 6890 } 6891 in6_sin6_2_sin(&sin, sin6); 6892 addr_to_use = (struct sockaddr *)&sin; 6893 } else { 6894 addr_to_use = sa; 6895 } 6896 #else 6897 addr_to_use = sa; 6898 #endif 6899 break; 6900 #endif 6901 #ifdef INET 6902 case AF_INET: 6903 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6904 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6905 *error = EINVAL; 6906 return; 6907 } 6908 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6909 SCTP_IPV6_V6ONLY(inp)) { 6910 /* can't bind v4 on PF_INET sockets */ 6911 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6912 *error = EINVAL; 6913 return; 6914 } 6915 addr_to_use = sa; 6916 break; 6917 #endif 6918 default: 6919 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6920 *error = EINVAL; 6921 return; 6922 } 6923 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6924 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6925 vrf_id); 6926 } 6927 6928 /* 6929 * returns the valid local address count for an assoc, taking into account 6930 * all scoping rules 6931 */ 6932 int 6933 sctp_local_addr_count(struct sctp_tcb *stcb) 6934 { 6935 int loopback_scope; 6936 #if defined(INET) 6937 int ipv4_local_scope, ipv4_addr_legal; 6938 #endif 6939 #if defined(INET6) 6940 int local_scope, site_scope, ipv6_addr_legal; 6941 #endif 6942 struct sctp_vrf *vrf; 6943 struct sctp_ifn *sctp_ifn; 6944 struct sctp_ifa *sctp_ifa; 6945 int count = 0; 6946 6947 /* Turn on all the appropriate scopes */ 6948 loopback_scope = stcb->asoc.scope.loopback_scope; 6949 #if defined(INET) 6950 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6951 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6952 #endif 6953 #if defined(INET6) 6954 local_scope = stcb->asoc.scope.local_scope; 6955 site_scope = stcb->asoc.scope.site_scope; 6956 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6957 #endif 6958 SCTP_IPI_ADDR_RLOCK(); 6959 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6960 if (vrf == NULL) { 6961 /* no vrf, no addresses */ 6962 SCTP_IPI_ADDR_RUNLOCK(); 6963 return (0); 6964 } 6965 6966 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6967 /* 6968 * bound all case: go through all ifns on the vrf 6969 */ 6970 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6971 if ((loopback_scope == 0) && 6972 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6973 continue; 6974 } 6975 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6976 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6977 continue; 6978 switch (sctp_ifa->address.sa.sa_family) { 6979 #ifdef INET 6980 case AF_INET: 6981 if (ipv4_addr_legal) { 6982 struct sockaddr_in *sin; 6983 6984 sin = &sctp_ifa->address.sin; 6985 if (sin->sin_addr.s_addr == 0) { 6986 /* 6987 * skip unspecified 6988 * addrs 6989 */ 6990 continue; 6991 } 6992 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 6993 &sin->sin_addr) != 0) { 6994 continue; 6995 } 6996 if ((ipv4_local_scope == 0) && 6997 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 6998 continue; 6999 } 7000 /* count this one */ 7001 count++; 7002 } else { 7003 continue; 7004 } 7005 break; 7006 #endif 7007 #ifdef INET6 7008 case AF_INET6: 7009 if (ipv6_addr_legal) { 7010 struct sockaddr_in6 *sin6; 7011 7012 sin6 = &sctp_ifa->address.sin6; 7013 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 7014 continue; 7015 } 7016 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 7017 &sin6->sin6_addr) != 0) { 7018 continue; 7019 } 7020 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 7021 if (local_scope == 0) 7022 continue; 7023 if (sin6->sin6_scope_id == 0) { 7024 if (sa6_recoverscope(sin6) != 0) 7025 /* 7026 * 7027 * bad 7028 * link 7029 * 7030 * local 7031 * 7032 * address 7033 */ 7034 continue; 7035 } 7036 } 7037 if ((site_scope == 0) && 7038 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7039 continue; 7040 } 7041 /* count this one */ 7042 count++; 7043 } 7044 break; 7045 #endif 7046 default: 7047 /* TSNH */ 7048 break; 7049 } 7050 } 7051 } 7052 } else { 7053 /* 7054 * subset bound case 7055 */ 7056 struct sctp_laddr *laddr; 7057 7058 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7059 sctp_nxt_addr) { 7060 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7061 continue; 7062 } 7063 /* count this one */ 7064 count++; 7065 } 7066 } 7067 SCTP_IPI_ADDR_RUNLOCK(); 7068 return (count); 7069 } 7070 7071 #if defined(SCTP_LOCAL_TRACE_BUF) 7072 7073 void 7074 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7075 { 7076 uint32_t saveindex, newindex; 7077 7078 do { 7079 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7080 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7081 newindex = 1; 7082 } else { 7083 newindex = saveindex + 1; 7084 } 7085 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7086 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7087 saveindex = 0; 7088 } 7089 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7090 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7091 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7092 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7093 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7094 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7095 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7096 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7097 } 7098 7099 #endif 7100 static bool 7101 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7102 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7103 { 7104 struct ip *iph; 7105 #ifdef INET6 7106 struct ip6_hdr *ip6; 7107 #endif 7108 struct mbuf *sp, *last; 7109 struct udphdr *uhdr; 7110 uint16_t port; 7111 7112 if ((m->m_flags & M_PKTHDR) == 0) { 7113 /* Can't handle one that is not a pkt hdr */ 7114 goto out; 7115 } 7116 /* Pull the src port */ 7117 iph = mtod(m, struct ip *); 7118 uhdr = (struct udphdr *)((caddr_t)iph + off); 7119 port = uhdr->uh_sport; 7120 /* 7121 * Split out the mbuf chain. Leave the IP header in m, place the 7122 * rest in the sp. 7123 */ 7124 sp = m_split(m, off, M_NOWAIT); 7125 if (sp == NULL) { 7126 /* Gak, drop packet, we can't do a split */ 7127 goto out; 7128 } 7129 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7130 /* Gak, packet can't have an SCTP header in it - too small */ 7131 m_freem(sp); 7132 goto out; 7133 } 7134 /* Now pull up the UDP header and SCTP header together */ 7135 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7136 if (sp == NULL) { 7137 /* Gak pullup failed */ 7138 goto out; 7139 } 7140 /* Trim out the UDP header */ 7141 m_adj(sp, sizeof(struct udphdr)); 7142 7143 /* Now reconstruct the mbuf chain */ 7144 for (last = m; last->m_next; last = last->m_next); 7145 last->m_next = sp; 7146 m->m_pkthdr.len += sp->m_pkthdr.len; 7147 /* 7148 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7149 * checksum and it was valid. Since CSUM_DATA_VALID == 7150 * CSUM_SCTP_VALID this would imply that the HW also verified the 7151 * SCTP checksum. Therefore, clear the bit. 
7152 */ 7153 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7154 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7155 m->m_pkthdr.len, 7156 if_name(m->m_pkthdr.rcvif), 7157 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7158 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7159 iph = mtod(m, struct ip *); 7160 switch (iph->ip_v) { 7161 #ifdef INET 7162 case IPVERSION: 7163 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7164 sctp_input_with_port(m, off, port); 7165 break; 7166 #endif 7167 #ifdef INET6 7168 case IPV6_VERSION >> 4: 7169 ip6 = mtod(m, struct ip6_hdr *); 7170 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7171 sctp6_input_with_port(&m, &off, port); 7172 break; 7173 #endif 7174 default: 7175 goto out; 7176 break; 7177 } 7178 return (true); 7179 out: 7180 m_freem(m); 7181 7182 return (true); 7183 } 7184 7185 #ifdef INET 7186 static void 7187 sctp_recv_icmp_tunneled_packet(udp_tun_icmp_param_t param) 7188 { 7189 struct icmp *icmp = param.icmp; 7190 struct ip *outer_ip, *inner_ip; 7191 struct sctphdr *sh; 7192 struct udphdr *udp; 7193 struct sctp_inpcb *inp; 7194 struct sctp_tcb *stcb; 7195 struct sctp_nets *net; 7196 struct sctp_init_chunk *ch; 7197 struct sockaddr_in src, dst; 7198 uint8_t type, code; 7199 7200 inner_ip = &icmp->icmp_ip; 7201 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7202 if (ntohs(outer_ip->ip_len) < 7203 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7204 return; 7205 } 7206 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7207 sh = (struct sctphdr *)(udp + 1); 7208 memset(&src, 0, sizeof(struct sockaddr_in)); 7209 src.sin_family = AF_INET; 7210 src.sin_len = sizeof(struct sockaddr_in); 7211 src.sin_port = sh->src_port; 7212 src.sin_addr = inner_ip->ip_src; 7213 memset(&dst, 0, sizeof(struct sockaddr_in)); 7214 dst.sin_family = AF_INET; 7215 dst.sin_len = sizeof(struct sockaddr_in); 7216 dst.sin_port = sh->dest_port; 7217 dst.sin_addr = inner_ip->ip_dst; 7218 /* 7219 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7220 * holds our local endpoint address. Thus we reverse the dst and the 7221 * src in the lookup. 7222 */ 7223 inp = NULL; 7224 net = NULL; 7225 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7226 (struct sockaddr *)&src, 7227 &inp, &net, 1, 7228 SCTP_DEFAULT_VRFID); 7229 if ((stcb != NULL) && 7230 (net != NULL) && 7231 (inp != NULL)) { 7232 /* Check the UDP port numbers */ 7233 if ((udp->uh_dport != net->port) || 7234 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7235 SCTP_TCB_UNLOCK(stcb); 7236 return; 7237 } 7238 /* Check the verification tag */ 7239 if (ntohl(sh->v_tag) != 0) { 7240 /* 7241 * This must be the verification tag used for 7242 * sending out packets. We don't consider packets 7243 * reflecting the verification tag. 7244 */ 7245 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7246 SCTP_TCB_UNLOCK(stcb); 7247 return; 7248 } 7249 } else { 7250 if (ntohs(outer_ip->ip_len) >= 7251 sizeof(struct ip) + 7252 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7253 /* 7254 * In this case we can check if we got an 7255 * INIT chunk and if the initiate tag 7256 * matches. 
7257 */ 7258 ch = (struct sctp_init_chunk *)(sh + 1); 7259 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7260 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7261 SCTP_TCB_UNLOCK(stcb); 7262 return; 7263 } 7264 } else { 7265 SCTP_TCB_UNLOCK(stcb); 7266 return; 7267 } 7268 } 7269 type = icmp->icmp_type; 7270 code = icmp->icmp_code; 7271 if ((type == ICMP_UNREACH) && 7272 (code == ICMP_UNREACH_PORT)) { 7273 code = ICMP_UNREACH_PROTOCOL; 7274 } 7275 sctp_notify(inp, stcb, net, type, code, 7276 ntohs(inner_ip->ip_len), 7277 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7278 } else { 7279 if ((stcb == NULL) && (inp != NULL)) { 7280 /* reduce ref-count */ 7281 SCTP_INP_WLOCK(inp); 7282 SCTP_INP_DECR_REF(inp); 7283 SCTP_INP_WUNLOCK(inp); 7284 } 7285 if (stcb) { 7286 SCTP_TCB_UNLOCK(stcb); 7287 } 7288 } 7289 return; 7290 } 7291 #endif 7292 7293 #ifdef INET6 7294 static void 7295 sctp_recv_icmp6_tunneled_packet(udp_tun_icmp_param_t param) 7296 { 7297 struct ip6ctlparam *ip6cp = param.ip6cp; 7298 struct sctp_inpcb *inp; 7299 struct sctp_tcb *stcb; 7300 struct sctp_nets *net; 7301 struct sctphdr sh; 7302 struct udphdr udp; 7303 struct sockaddr_in6 src, dst; 7304 uint8_t type, code; 7305 7306 /* 7307 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7308 */ 7309 if (ip6cp->ip6c_m == NULL) { 7310 return; 7311 } 7312 /* 7313 * Check if we can safely examine the ports and the verification tag 7314 * of the SCTP common header. 7315 */ 7316 if (ip6cp->ip6c_m->m_pkthdr.len < 7317 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7318 return; 7319 } 7320 /* Copy out the UDP header. */ 7321 memset(&udp, 0, sizeof(struct udphdr)); 7322 m_copydata(ip6cp->ip6c_m, 7323 ip6cp->ip6c_off, 7324 sizeof(struct udphdr), 7325 (caddr_t)&udp); 7326 /* Copy out the port numbers and the verification tag. */ 7327 memset(&sh, 0, sizeof(struct sctphdr)); 7328 m_copydata(ip6cp->ip6c_m, 7329 ip6cp->ip6c_off + sizeof(struct udphdr), 7330 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7331 (caddr_t)&sh); 7332 memset(&src, 0, sizeof(struct sockaddr_in6)); 7333 src.sin6_family = AF_INET6; 7334 src.sin6_len = sizeof(struct sockaddr_in6); 7335 src.sin6_port = sh.src_port; 7336 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7337 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7338 return; 7339 } 7340 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7341 dst.sin6_family = AF_INET6; 7342 dst.sin6_len = sizeof(struct sockaddr_in6); 7343 dst.sin6_port = sh.dest_port; 7344 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7345 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7346 return; 7347 } 7348 inp = NULL; 7349 net = NULL; 7350 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7351 (struct sockaddr *)&src, 7352 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7353 if ((stcb != NULL) && 7354 (net != NULL) && 7355 (inp != NULL)) { 7356 /* Check the UDP port numbers */ 7357 if ((udp.uh_dport != net->port) || 7358 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7359 SCTP_TCB_UNLOCK(stcb); 7360 return; 7361 } 7362 /* Check the verification tag */ 7363 if (ntohl(sh.v_tag) != 0) { 7364 /* 7365 * This must be the verification tag used for 7366 * sending out packets. We don't consider packets 7367 * reflecting the verification tag. 
7368 */ 7369 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7370 SCTP_TCB_UNLOCK(stcb); 7371 return; 7372 } 7373 } else { 7374 if (ip6cp->ip6c_m->m_pkthdr.len >= 7375 ip6cp->ip6c_off + sizeof(struct udphdr) + 7376 sizeof(struct sctphdr) + 7377 sizeof(struct sctp_chunkhdr) + 7378 offsetof(struct sctp_init, a_rwnd)) { 7379 /* 7380 * In this case we can check if we got an 7381 * INIT chunk and if the initiate tag 7382 * matches. 7383 */ 7384 uint32_t initiate_tag; 7385 uint8_t chunk_type; 7386 7387 m_copydata(ip6cp->ip6c_m, 7388 ip6cp->ip6c_off + 7389 sizeof(struct udphdr) + 7390 sizeof(struct sctphdr), 7391 sizeof(uint8_t), 7392 (caddr_t)&chunk_type); 7393 m_copydata(ip6cp->ip6c_m, 7394 ip6cp->ip6c_off + 7395 sizeof(struct udphdr) + 7396 sizeof(struct sctphdr) + 7397 sizeof(struct sctp_chunkhdr), 7398 sizeof(uint32_t), 7399 (caddr_t)&initiate_tag); 7400 if ((chunk_type != SCTP_INITIATION) || 7401 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7402 SCTP_TCB_UNLOCK(stcb); 7403 return; 7404 } 7405 } else { 7406 SCTP_TCB_UNLOCK(stcb); 7407 return; 7408 } 7409 } 7410 type = ip6cp->ip6c_icmp6->icmp6_type; 7411 code = ip6cp->ip6c_icmp6->icmp6_code; 7412 if ((type == ICMP6_DST_UNREACH) && 7413 (code == ICMP6_DST_UNREACH_NOPORT)) { 7414 type = ICMP6_PARAM_PROB; 7415 code = ICMP6_PARAMPROB_NEXTHEADER; 7416 } 7417 sctp6_notify(inp, stcb, net, type, code, 7418 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7419 } else { 7420 if ((stcb == NULL) && (inp != NULL)) { 7421 /* reduce inp's ref-count */ 7422 SCTP_INP_WLOCK(inp); 7423 SCTP_INP_DECR_REF(inp); 7424 SCTP_INP_WUNLOCK(inp); 7425 } 7426 if (stcb) { 7427 SCTP_TCB_UNLOCK(stcb); 7428 } 7429 } 7430 } 7431 #endif 7432 7433 void 7434 sctp_over_udp_stop(void) 7435 { 7436 /* 7437 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7438 * for writing! 7439 */ 7440 #ifdef INET 7441 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7442 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7443 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7444 } 7445 #endif 7446 #ifdef INET6 7447 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7448 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7449 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7450 } 7451 #endif 7452 } 7453 7454 int 7455 sctp_over_udp_start(void) 7456 { 7457 uint16_t port; 7458 int ret; 7459 #ifdef INET 7460 struct sockaddr_in sin; 7461 #endif 7462 #ifdef INET6 7463 struct sockaddr_in6 sin6; 7464 #endif 7465 /* 7466 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7467 * for writing! 7468 */ 7469 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7470 if (ntohs(port) == 0) { 7471 /* Must have a port set */ 7472 return (EINVAL); 7473 } 7474 #ifdef INET 7475 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7476 /* Already running -- must stop first */ 7477 return (EALREADY); 7478 } 7479 #endif 7480 #ifdef INET6 7481 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7482 /* Already running -- must stop first */ 7483 return (EALREADY); 7484 } 7485 #endif 7486 #ifdef INET 7487 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7488 SOCK_DGRAM, IPPROTO_UDP, 7489 curthread->td_ucred, curthread))) { 7490 sctp_over_udp_stop(); 7491 return (ret); 7492 } 7493 /* Call the special UDP hook. */ 7494 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7495 sctp_recv_udp_tunneled_packet, 7496 sctp_recv_icmp_tunneled_packet, 7497 NULL))) { 7498 sctp_over_udp_stop(); 7499 return (ret); 7500 } 7501 /* Ok, we have a socket, bind it to the port. 

int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}
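
/*
 * Worked example (added for illustration): sctp_min_mtu(1500, 0, 1280)
 * returns 1280, since zero arguments are ignored, and sctp_min_mtu(0, 0, 0)
 * returns 0.
 */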

void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}

void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	      (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	     ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	     ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
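
/*
 * Usage sketch (added, informal): a caller that wants to shut down an
 * association while data is still in flight typically records the intent
 * first and switches the main state once the send queues drain, with the
 * TCB lock assumed to be held:
 *
 *	sctp_add_substate(stcb, SCTP_STATE_SHUTDOWN_PENDING);
 *	...
 *	sctp_set_state(stcb, SCTP_STATE_SHUTDOWN_SENT);
 *
 * The second call clears SCTP_STATE_SHUTDOWN_PENDING again, as implemented
 * in sctp_set_state() above.
 */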