/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = SCTP_SBAVAIL(sb);
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
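
/*
 * The sctp_log_*() helpers in this file all follow the same pattern: each
 * one fills in a single member of the sctp_clog.x union (struct
 * sctp_cwnd_log, declared in sctp_uio.h in this tree) and then hands the
 * four overlapping 32-bit words x.misc.log1..log4 to SCTP_CTR6(), so every
 * event ends up in the KTR trace buffer in the same compact format.
 */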

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(SOCK_MTX(inp->sctp_socket));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_rcv));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_snd));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}

void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code the time to be positive and
 * know that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
	uint64_t temp;
	uint32_t ticks;

	if (hz == 1000) {
		ticks = msecs;
	} else {
		temp = (((uint64_t)msecs * hz) + 999) / 1000;
		if (temp > UINT32_MAX) {
			ticks = UINT32_MAX;
		} else {
			ticks = (uint32_t)temp;
		}
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t msecs;

	if (hz == 1000) {
		msecs = ticks;
	} else {
		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
		if (temp > UINT32_MAX) {
			msecs = UINT32_MAX;
		} else {
			msecs = (uint32_t)temp;
		}
	}
	return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
	uint64_t temp;
	uint32_t ticks;

	temp = (uint64_t)secs * hz;
	if (temp > UINT32_MAX) {
		ticks = UINT32_MAX;
	} else {
		ticks = (uint32_t)temp;
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t secs;

	temp = ((uint64_t)ticks + (hz - 1)) / hz;
	if (temp > UINT32_MAX) {
		secs = UINT32_MAX;
	} else {
		secs = (uint32_t)temp;
	}
	return (secs);
}
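
/*
 * Illustration of the rounding described above: with hz = 100,
 * sctp_msecs_to_ticks(15) = (15 * 100 + 999) / 1000 = 2 ticks, and
 * sctp_ticks_to_msecs(2) = (2 * 1000 + 99) / 100 = 20 msecs, so any
 * positive time maps to a positive number of ticks and never rounds
 * down to zero.
 */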

/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}

/*
 * A list of sizes based on typical MTUs, used only if the next hop size is
 * not returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};

/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4
 * smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	val &= 0xfffffffc;
	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}
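
/*
 * Illustration of the table lookups above: sctp_get_prev_mtu(1500)
 * returns 1492 and sctp_get_next_mtu(1500) returns 1536, while a value
 * below the smallest entry, e.g. sctp_get_prev_mtu(50), simply comes
 * back rounded down to a multiple of 4 (48).
 */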

void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 hash over our good random numbers and
	 * our counter. The result becomes our good random numbers and we
	 * then set up to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store, which is what we
	 * want. There is a danger that two callers will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use a random selection process to
	 * get the initial stream sequence number, using RFC 1750 as a good
	 * guideline.
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}

int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}
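
/*
 * For example, an association whose kernel state is SCTP_STATE_OPEN with
 * SCTP_STATE_SHUTDOWN_PENDING set is reported as SCTP_SHUTDOWN_PENDING,
 * while any state carrying SCTP_STATE_WAS_ABORTED is reported as
 * SCTP_CLOSED regardless of the remaining bits.
 */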

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t initial_tsn, uint32_t vrf_id,
    uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero.
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer.
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->zero_checksum = inp->zero_checksum;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	if (override_tag) {
		asoc->init_seq_number = initial_tsn;
	} else {
		asoc->init_seq_number = sctp_select_initial_TSN(&inp->sctp_ep);
	}
	asoc->asconf_seq_out = asoc->init_seq_number;
	asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->sending_seq = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->init_seq_number - 1;
	/* we are optimistic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->init_seq_number - 1;
	asoc->asconf_seq_in = asoc->init_seq_number - 1;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = 0;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	SCTP_TCB_LOCK(stcb);
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc);
	SCTP_TCB_UNLOCK(stcb);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}
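
/*
 * Sizing note: the mapping arrays hold one bit per TSN, so "needed"
 * additional TSNs translate into (needed + 7) / 8 extra bytes, rounded
 * up, plus SCTP_MAPPING_ARRAY_INCR bytes of slack; e.g. needed = 20
 * grows the arrays by 3 + SCTP_MAPPING_ARRAY_INCR bytes.
 */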

static void
sctp_iterator_work(struct sctp_iterator *it)
{
	struct epoch_tracker et;
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

	NET_EPOCH_ENTER(et);
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		NET_EPOCH_EXIT(et);
		return;
	}
select_a_new_ep:
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb != NULL) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_subtract_int(&it->stcb->asoc.refcnt, 1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_subtract_int(&it->stcb->asoc.refcnt, 1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		    ("%s: stcb %p does not belong to inp %p, but inp %p",
		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));
		SCTP_INP_RLOCK_ASSERT(it->inp);
		SCTP_TCB_LOCK_ASSERT(it->stcb);

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
		SCTP_INP_RLOCK_ASSERT(it->inp);
		SCTP_TCB_LOCK_ASSERT(it->stcb);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0) {
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
			SCTP_INP_RLOCK_ASSERT(it->inp);
			SCTP_TCB_LOCK_ASSERT(it->stcb);
		}

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}

static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL,	/* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Freeing if we are stopping or put back on the
			 * addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				sctp_asconf_iterator_end(asc, 0);
			} else {
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */
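
/*
 * The table above also documents which pointers sctp_timeout_handler()
 * expects to be non-NULL for each timer type; the KASSERTs in the switch
 * statement below check exactly those combinations (e.g. a
 * SCTP_TIMER_TYPE_SEND timeout must carry inp, stcb and net, while a
 * SCTP_TIMER_TYPE_NEWCOOKIE timeout only carries inp).
 */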
*/ 1752 KASSERT(tmr->self == NULL || tmr->self == tmr, 1753 ("sctp_timeout_handler: tmr->self corrupted")); 1754 KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type), 1755 ("sctp_timeout_handler: invalid timer type %d", tmr->type)); 1756 type = tmr->type; 1757 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 1758 ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p", 1759 type, stcb, stcb->sctp_ep)); 1760 tmr->stopped_from = 0xa001; 1761 if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) { 1762 SCTPDBG(SCTP_DEBUG_TIMER2, 1763 "Timer type %d handler exiting due to CLOSED association.\n", 1764 type); 1765 goto out_decr; 1766 } 1767 tmr->stopped_from = 0xa002; 1768 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type); 1769 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1770 SCTPDBG(SCTP_DEBUG_TIMER2, 1771 "Timer type %d handler exiting due to not being active.\n", 1772 type); 1773 goto out_decr; 1774 } 1775 1776 tmr->stopped_from = 0xa003; 1777 if (stcb) { 1778 SCTP_TCB_LOCK(stcb); 1779 /* 1780 * Release reference so that association can be freed if 1781 * necessary below. This is safe now that we have acquired 1782 * the lock. 1783 */ 1784 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1785 released_asoc_reference = true; 1786 if ((type != SCTP_TIMER_TYPE_ASOCKILL) && 1787 ((stcb->asoc.state == SCTP_STATE_EMPTY) || 1788 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) { 1789 SCTPDBG(SCTP_DEBUG_TIMER2, 1790 "Timer type %d handler exiting due to CLOSED association.\n", 1791 type); 1792 goto out; 1793 } 1794 } else if (inp != NULL) { 1795 SCTP_INP_WLOCK(inp); 1796 } else { 1797 SCTP_WQ_ADDR_LOCK(); 1798 } 1799 1800 /* Record in stopped_from which timeout occurred. */ 1801 tmr->stopped_from = type; 1802 /* mark as being serviced now */ 1803 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 1804 /* 1805 * Callout has been rescheduled. 1806 */ 1807 goto out; 1808 } 1809 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1810 /* 1811 * Not active, so no action. 1812 */ 1813 goto out; 1814 } 1815 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer); 1816 1817 /* call the handler for the appropriate timer type */ 1818 switch (type) { 1819 case SCTP_TIMER_TYPE_SEND: 1820 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1821 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1822 type, inp, stcb, net)); 1823 SCTP_STAT_INCR(sctps_timodata); 1824 stcb->asoc.timodata++; 1825 stcb->asoc.num_send_timers_up--; 1826 if (stcb->asoc.num_send_timers_up < 0) { 1827 stcb->asoc.num_send_timers_up = 0; 1828 } 1829 SCTP_TCB_LOCK_ASSERT(stcb); 1830 if (sctp_t3rxt_timer(inp, stcb, net)) { 1831 /* no need to unlock on tcb its gone */ 1832 1833 goto out_decr; 1834 } 1835 SCTP_TCB_LOCK_ASSERT(stcb); 1836 #ifdef SCTP_AUDITING_ENABLED 1837 sctp_auditing(4, inp, stcb, net); 1838 #endif 1839 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1840 did_output = true; 1841 if ((stcb->asoc.num_send_timers_up == 0) && 1842 (stcb->asoc.sent_queue_cnt > 0)) { 1843 struct sctp_tmit_chunk *chk; 1844 1845 /* 1846 * Safeguard. If there on some on the sent queue 1847 * somewhere but no timers running something is 1848 * wrong... so we start a timer on the first chunk 1849 * on the send queue on whatever net it is sent to. 
1850 */ 1851 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1852 if (chk->whoTo != NULL) { 1853 break; 1854 } 1855 } 1856 if (chk != NULL) { 1857 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1858 } 1859 } 1860 break; 1861 case SCTP_TIMER_TYPE_INIT: 1862 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1863 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1864 type, inp, stcb, net)); 1865 SCTP_STAT_INCR(sctps_timoinit); 1866 stcb->asoc.timoinit++; 1867 if (sctp_t1init_timer(inp, stcb, net)) { 1868 /* no need to unlock on tcb its gone */ 1869 goto out_decr; 1870 } 1871 did_output = false; 1872 break; 1873 case SCTP_TIMER_TYPE_RECV: 1874 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1875 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1876 type, inp, stcb, net)); 1877 SCTP_STAT_INCR(sctps_timosack); 1878 stcb->asoc.timosack++; 1879 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1880 #ifdef SCTP_AUDITING_ENABLED 1881 sctp_auditing(4, inp, stcb, NULL); 1882 #endif 1883 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1884 did_output = true; 1885 break; 1886 case SCTP_TIMER_TYPE_SHUTDOWN: 1887 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1888 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1889 type, inp, stcb, net)); 1890 SCTP_STAT_INCR(sctps_timoshutdown); 1891 stcb->asoc.timoshutdown++; 1892 if (sctp_shutdown_timer(inp, stcb, net)) { 1893 /* no need to unlock on tcb its gone */ 1894 goto out_decr; 1895 } 1896 #ifdef SCTP_AUDITING_ENABLED 1897 sctp_auditing(4, inp, stcb, net); 1898 #endif 1899 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1900 did_output = true; 1901 break; 1902 case SCTP_TIMER_TYPE_HEARTBEAT: 1903 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1904 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1905 type, inp, stcb, net)); 1906 SCTP_STAT_INCR(sctps_timoheartbeat); 1907 stcb->asoc.timoheartbeat++; 1908 if (sctp_heartbeat_timer(inp, stcb, net)) { 1909 /* no need to unlock on tcb its gone */ 1910 goto out_decr; 1911 } 1912 #ifdef SCTP_AUDITING_ENABLED 1913 sctp_auditing(4, inp, stcb, net); 1914 #endif 1915 if ((net->dest_state & SCTP_ADDR_NOHB) == 0) { 1916 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1917 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1918 did_output = true; 1919 } else { 1920 did_output = false; 1921 } 1922 break; 1923 case SCTP_TIMER_TYPE_COOKIE: 1924 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1925 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1926 type, inp, stcb, net)); 1927 SCTP_STAT_INCR(sctps_timocookie); 1928 stcb->asoc.timocookie++; 1929 if (sctp_cookie_timer(inp, stcb, net)) { 1930 /* no need to unlock on tcb its gone */ 1931 goto out_decr; 1932 } 1933 #ifdef SCTP_AUDITING_ENABLED 1934 sctp_auditing(4, inp, stcb, net); 1935 #endif 1936 /* 1937 * We consider T3 and Cookie timer pretty much the same with 1938 * respect to where from in chunk_output. 
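	 * That is why the call below passes SCTP_OUTPUT_FROM_T3 instead
	 * of a dedicated cookie-timer source.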
1939 */ 1940 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1941 did_output = true; 1942 break; 1943 case SCTP_TIMER_TYPE_NEWCOOKIE: 1944 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1945 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1946 type, inp, stcb, net)); 1947 SCTP_STAT_INCR(sctps_timosecret); 1948 (void)SCTP_GETTIME_TIMEVAL(&tv); 1949 inp->sctp_ep.time_of_secret_change = (unsigned int)tv.tv_sec; 1950 inp->sctp_ep.last_secret_number = 1951 inp->sctp_ep.current_secret_number; 1952 inp->sctp_ep.current_secret_number++; 1953 if (inp->sctp_ep.current_secret_number >= 1954 SCTP_HOW_MANY_SECRETS) { 1955 inp->sctp_ep.current_secret_number = 0; 1956 } 1957 secret = (int)inp->sctp_ep.current_secret_number; 1958 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1959 inp->sctp_ep.secret_key[secret][i] = 1960 sctp_select_initial_TSN(&inp->sctp_ep); 1961 } 1962 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1963 did_output = false; 1964 break; 1965 case SCTP_TIMER_TYPE_PATHMTURAISE: 1966 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1967 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1968 type, inp, stcb, net)); 1969 SCTP_STAT_INCR(sctps_timopathmtu); 1970 sctp_pathmtu_timer(inp, stcb, net); 1971 did_output = false; 1972 break; 1973 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1974 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1975 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1976 type, inp, stcb, net)); 1977 if (sctp_shutdownack_timer(inp, stcb, net)) { 1978 /* no need to unlock on tcb its gone */ 1979 goto out_decr; 1980 } 1981 SCTP_STAT_INCR(sctps_timoshutdownack); 1982 stcb->asoc.timoshutdownack++; 1983 #ifdef SCTP_AUDITING_ENABLED 1984 sctp_auditing(4, inp, stcb, net); 1985 #endif 1986 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1987 did_output = true; 1988 break; 1989 case SCTP_TIMER_TYPE_ASCONF: 1990 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1991 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1992 type, inp, stcb, net)); 1993 SCTP_STAT_INCR(sctps_timoasconf); 1994 if (sctp_asconf_timer(inp, stcb, net)) { 1995 /* no need to unlock on tcb its gone */ 1996 goto out_decr; 1997 } 1998 #ifdef SCTP_AUDITING_ENABLED 1999 sctp_auditing(4, inp, stcb, net); 2000 #endif 2001 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 2002 did_output = true; 2003 break; 2004 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2005 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2006 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2007 type, inp, stcb, net)); 2008 SCTP_STAT_INCR(sctps_timoshutdownguard); 2009 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 2010 "Shutdown guard timer expired"); 2011 sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED); 2012 /* no need to unlock on tcb its gone */ 2013 goto out_decr; 2014 case SCTP_TIMER_TYPE_AUTOCLOSE: 2015 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2016 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2017 type, inp, stcb, net)); 2018 SCTP_STAT_INCR(sctps_timoautoclose); 2019 sctp_autoclose_timer(inp, stcb); 2020 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2021 did_output = true; 2022 break; 2023 case SCTP_TIMER_TYPE_STRRESET: 2024 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2025 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2026 type, inp, stcb, net)); 2027 SCTP_STAT_INCR(sctps_timostrmrst); 2028 if 
(sctp_strreset_timer(inp, stcb)) { 2029 /* no need to unlock on tcb its gone */ 2030 goto out_decr; 2031 } 2032 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2033 did_output = true; 2034 break; 2035 case SCTP_TIMER_TYPE_INPKILL: 2036 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2037 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2038 type, inp, stcb, net)); 2039 SCTP_STAT_INCR(sctps_timoinpkill); 2040 /* 2041 * special case, take away our increment since WE are the 2042 * killer 2043 */ 2044 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2045 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2046 SCTP_INP_DECR_REF(inp); 2047 SCTP_INP_WUNLOCK(inp); 2048 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2049 SCTP_CALLED_FROM_INPKILL_TIMER); 2050 inp = NULL; 2051 goto out_decr; 2052 case SCTP_TIMER_TYPE_ASOCKILL: 2053 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2054 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2055 type, inp, stcb, net)); 2056 SCTP_STAT_INCR(sctps_timoassockill); 2057 /* Can we free it yet? */ 2058 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2059 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2060 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2061 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2062 /* 2063 * free asoc, always unlocks (or destroy's) so prevent 2064 * duplicate unlock or unlock of a free mtx :-0 2065 */ 2066 stcb = NULL; 2067 goto out_decr; 2068 case SCTP_TIMER_TYPE_ADDR_WQ: 2069 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2070 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2071 type, inp, stcb, net)); 2072 sctp_handle_addr_wq(); 2073 did_output = true; 2074 break; 2075 case SCTP_TIMER_TYPE_PRIM_DELETED: 2076 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2077 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2078 type, inp, stcb, net)); 2079 SCTP_STAT_INCR(sctps_timodelprim); 2080 sctp_delete_prim_timer(inp, stcb); 2081 did_output = false; 2082 break; 2083 default: 2084 #ifdef INVARIANTS 2085 panic("Unknown timer type %d", type); 2086 #else 2087 goto out; 2088 #endif 2089 } 2090 #ifdef SCTP_AUDITING_ENABLED 2091 sctp_audit_log(0xF1, (uint8_t)type); 2092 if (inp != NULL) 2093 sctp_auditing(5, inp, stcb, net); 2094 #endif 2095 if (did_output && (stcb != NULL)) { 2096 /* 2097 * Now we need to clean up the control chunk chain if an 2098 * ECNE is on it. It must be marked as UNSENT again so next 2099 * call will continue to send it until such time that we get 2100 * a CWR, to remove it. It is, however, less likely that we 2101 * will find a ecn echo on the chain though. 2102 */ 2103 sctp_fix_ecn_echo(&stcb->asoc); 2104 } 2105 out: 2106 if (stcb != NULL) { 2107 SCTP_TCB_UNLOCK(stcb); 2108 } else if (inp != NULL) { 2109 SCTP_INP_WUNLOCK(inp); 2110 } else { 2111 SCTP_WQ_ADDR_UNLOCK(); 2112 } 2113 2114 out_decr: 2115 /* These reference counts were incremented in sctp_timer_start(). */ 2116 if (inp != NULL) { 2117 SCTP_INP_DECR_REF(inp); 2118 } 2119 if ((stcb != NULL) && !released_asoc_reference) { 2120 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2121 } 2122 if (net != NULL) { 2123 sctp_free_remote_addr(net); 2124 } 2125 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2126 CURVNET_RESTORE(); 2127 NET_EPOCH_EXIT(et); 2128 } 2129 2130 /*- 2131 * The following table shows which parameters must be provided 2132 * when calling sctp_timer_start(). For parameters not being 2133 * provided, NULL must be used. 
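 * As an illustrative example (not taken from this file), arming the
 * retransmission timer for one destination and arming the global
 * address work queue timer would look like:
 *
 *   sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
 *   sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, NULL, NULL, NULL);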
2134 * 2135 * |Name |inp |stcb|net | 2136 * |-----------------------------|----|----|----| 2137 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2138 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2139 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2140 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2141 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2142 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2143 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2144 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2145 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2146 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2147 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2148 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2149 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2150 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2151 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2152 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2153 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2154 * 2155 */ 2156 2157 void 2158 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2159 struct sctp_nets *net) 2160 { 2161 struct sctp_timer *tmr; 2162 uint32_t to_ticks; 2163 uint32_t rndval, jitter; 2164 2165 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2166 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2167 t_type, stcb, stcb->sctp_ep)); 2168 tmr = NULL; 2169 if (stcb != NULL) { 2170 SCTP_TCB_LOCK_ASSERT(stcb); 2171 } else if (inp != NULL) { 2172 SCTP_INP_WLOCK_ASSERT(inp); 2173 } else { 2174 SCTP_WQ_ADDR_LOCK_ASSERT(); 2175 } 2176 if (stcb != NULL) { 2177 /* 2178 * Don't restart timer on association that's about to be 2179 * killed. 2180 */ 2181 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2182 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2183 SCTPDBG(SCTP_DEBUG_TIMER2, 2184 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2185 t_type, inp, stcb, net); 2186 return; 2187 } 2188 /* Don't restart timer on net that's been removed. */ 2189 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2190 SCTPDBG(SCTP_DEBUG_TIMER2, 2191 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2192 t_type, inp, stcb, net); 2193 return; 2194 } 2195 } 2196 switch (t_type) { 2197 case SCTP_TIMER_TYPE_SEND: 2198 /* Here we use the RTO timer. */ 2199 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2200 #ifdef INVARIANTS 2201 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2202 t_type, inp, stcb, net); 2203 #else 2204 return; 2205 #endif 2206 } 2207 tmr = &net->rxt_timer; 2208 if (net->RTO == 0) { 2209 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2210 } else { 2211 to_ticks = sctp_msecs_to_ticks(net->RTO); 2212 } 2213 break; 2214 case SCTP_TIMER_TYPE_INIT: 2215 /* 2216 * Here we use the INIT timer default usually about 1 2217 * second. 2218 */ 2219 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2220 #ifdef INVARIANTS 2221 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2222 t_type, inp, stcb, net); 2223 #else 2224 return; 2225 #endif 2226 } 2227 tmr = &net->rxt_timer; 2228 if (net->RTO == 0) { 2229 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2230 } else { 2231 to_ticks = sctp_msecs_to_ticks(net->RTO); 2232 } 2233 break; 2234 case SCTP_TIMER_TYPE_RECV: 2235 /* 2236 * Here we use the Delayed-Ack timer value from the inp, 2237 * usually about 200ms. 
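		 * (i.e. stcb->asoc.delayed_ack, which is typically
		 * configured via the SCTP_DELAYED_SACK socket option)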
2238 */ 2239 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2240 #ifdef INVARIANTS 2241 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2242 t_type, inp, stcb, net); 2243 #else 2244 return; 2245 #endif 2246 } 2247 tmr = &stcb->asoc.dack_timer; 2248 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2249 break; 2250 case SCTP_TIMER_TYPE_SHUTDOWN: 2251 /* Here we use the RTO of the destination. */ 2252 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2253 #ifdef INVARIANTS 2254 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2255 t_type, inp, stcb, net); 2256 #else 2257 return; 2258 #endif 2259 } 2260 tmr = &net->rxt_timer; 2261 if (net->RTO == 0) { 2262 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2263 } else { 2264 to_ticks = sctp_msecs_to_ticks(net->RTO); 2265 } 2266 break; 2267 case SCTP_TIMER_TYPE_HEARTBEAT: 2268 /* 2269 * The net is used here so that we can add in the RTO. Even 2270 * though we use a different timer. We also add the HB timer 2271 * PLUS a random jitter. 2272 */ 2273 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2274 #ifdef INVARIANTS 2275 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2276 t_type, inp, stcb, net); 2277 #else 2278 return; 2279 #endif 2280 } 2281 if ((net->dest_state & SCTP_ADDR_NOHB) && 2282 ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) { 2283 SCTPDBG(SCTP_DEBUG_TIMER2, 2284 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2285 t_type, inp, stcb, net); 2286 return; 2287 } 2288 tmr = &net->hb_timer; 2289 if (net->RTO == 0) { 2290 to_ticks = stcb->asoc.initial_rto; 2291 } else { 2292 to_ticks = net->RTO; 2293 } 2294 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2295 jitter = rndval % to_ticks; 2296 if (to_ticks > 1) { 2297 to_ticks >>= 1; 2298 } 2299 if (jitter < (UINT32_MAX - to_ticks)) { 2300 to_ticks += jitter; 2301 } else { 2302 to_ticks = UINT32_MAX; 2303 } 2304 if (!((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2305 (net->dest_state & SCTP_ADDR_REACHABLE)) && 2306 ((net->dest_state & SCTP_ADDR_PF) == 0)) { 2307 if (net->heart_beat_delay < (UINT32_MAX - to_ticks)) { 2308 to_ticks += net->heart_beat_delay; 2309 } else { 2310 to_ticks = UINT32_MAX; 2311 } 2312 } 2313 /* 2314 * Now we must convert the to_ticks that are now in ms to 2315 * ticks. 2316 */ 2317 to_ticks = sctp_msecs_to_ticks(to_ticks); 2318 break; 2319 case SCTP_TIMER_TYPE_COOKIE: 2320 /* 2321 * Here we can use the RTO timer from the network since one 2322 * RTT was complete. If a retransmission happened then we 2323 * will be using the RTO initial value. 2324 */ 2325 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2326 #ifdef INVARIANTS 2327 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2328 t_type, inp, stcb, net); 2329 #else 2330 return; 2331 #endif 2332 } 2333 tmr = &net->rxt_timer; 2334 if (net->RTO == 0) { 2335 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2336 } else { 2337 to_ticks = sctp_msecs_to_ticks(net->RTO); 2338 } 2339 break; 2340 case SCTP_TIMER_TYPE_NEWCOOKIE: 2341 /* 2342 * Nothing needed but the endpoint here usually about 60 2343 * minutes. 
2344 */ 2345 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2346 #ifdef INVARIANTS 2347 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2348 t_type, inp, stcb, net); 2349 #else 2350 return; 2351 #endif 2352 } 2353 tmr = &inp->sctp_ep.signature_change; 2354 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2355 break; 2356 case SCTP_TIMER_TYPE_PATHMTURAISE: 2357 /* 2358 * Here we use the value found in the EP for PMTUD, usually 2359 * about 10 minutes. 2360 */ 2361 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2362 #ifdef INVARIANTS 2363 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2364 t_type, inp, stcb, net); 2365 #else 2366 return; 2367 #endif 2368 } 2369 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2370 SCTPDBG(SCTP_DEBUG_TIMER2, 2371 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2372 t_type, inp, stcb, net); 2373 return; 2374 } 2375 tmr = &net->pmtu_timer; 2376 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2377 break; 2378 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2379 /* Here we use the RTO of the destination. */ 2380 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2381 #ifdef INVARIANTS 2382 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2383 t_type, inp, stcb, net); 2384 #else 2385 return; 2386 #endif 2387 } 2388 tmr = &net->rxt_timer; 2389 if (net->RTO == 0) { 2390 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2391 } else { 2392 to_ticks = sctp_msecs_to_ticks(net->RTO); 2393 } 2394 break; 2395 case SCTP_TIMER_TYPE_ASCONF: 2396 /* 2397 * Here the timer comes from the stcb but its value is from 2398 * the net's RTO. 2399 */ 2400 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2401 #ifdef INVARIANTS 2402 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2403 t_type, inp, stcb, net); 2404 #else 2405 return; 2406 #endif 2407 } 2408 tmr = &stcb->asoc.asconf_timer; 2409 if (net->RTO == 0) { 2410 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2411 } else { 2412 to_ticks = sctp_msecs_to_ticks(net->RTO); 2413 } 2414 break; 2415 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2416 /* 2417 * Here we use the endpoints shutdown guard timer usually 2418 * about 3 minutes. 2419 */ 2420 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2421 #ifdef INVARIANTS 2422 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2423 t_type, inp, stcb, net); 2424 #else 2425 return; 2426 #endif 2427 } 2428 tmr = &stcb->asoc.shut_guard_timer; 2429 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2430 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2431 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2432 } else { 2433 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2434 } 2435 } else { 2436 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2437 } 2438 break; 2439 case SCTP_TIMER_TYPE_AUTOCLOSE: 2440 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2441 #ifdef INVARIANTS 2442 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2443 t_type, inp, stcb, net); 2444 #else 2445 return; 2446 #endif 2447 } 2448 tmr = &stcb->asoc.autoclose_timer; 2449 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2450 break; 2451 case SCTP_TIMER_TYPE_STRRESET: 2452 /* 2453 * Here the timer comes from the stcb but its value is from 2454 * the net's RTO. 
2455 */ 2456 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2457 #ifdef INVARIANTS 2458 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2459 t_type, inp, stcb, net); 2460 #else 2461 return; 2462 #endif 2463 } 2464 tmr = &stcb->asoc.strreset_timer; 2465 if (net->RTO == 0) { 2466 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2467 } else { 2468 to_ticks = sctp_msecs_to_ticks(net->RTO); 2469 } 2470 break; 2471 case SCTP_TIMER_TYPE_INPKILL: 2472 /* 2473 * The inp is setup to die. We re-use the signature_change 2474 * timer since that has stopped and we are in the GONE 2475 * state. 2476 */ 2477 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2478 #ifdef INVARIANTS 2479 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2480 t_type, inp, stcb, net); 2481 #else 2482 return; 2483 #endif 2484 } 2485 tmr = &inp->sctp_ep.signature_change; 2486 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2487 break; 2488 case SCTP_TIMER_TYPE_ASOCKILL: 2489 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2490 #ifdef INVARIANTS 2491 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2492 t_type, inp, stcb, net); 2493 #else 2494 return; 2495 #endif 2496 } 2497 tmr = &stcb->asoc.strreset_timer; 2498 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2499 break; 2500 case SCTP_TIMER_TYPE_ADDR_WQ: 2501 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2502 #ifdef INVARIANTS 2503 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2504 t_type, inp, stcb, net); 2505 #else 2506 return; 2507 #endif 2508 } 2509 /* Only 1 tick away :-) */ 2510 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2511 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2512 break; 2513 case SCTP_TIMER_TYPE_PRIM_DELETED: 2514 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2515 #ifdef INVARIANTS 2516 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2517 t_type, inp, stcb, net); 2518 #else 2519 return; 2520 #endif 2521 } 2522 tmr = &stcb->asoc.delete_prim_timer; 2523 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2524 break; 2525 default: 2526 #ifdef INVARIANTS 2527 panic("Unknown timer type %d", t_type); 2528 #else 2529 return; 2530 #endif 2531 } 2532 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2533 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2534 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2535 /* 2536 * We do NOT allow you to have it already running. If it is, 2537 * we leave the current one up unchanged. 2538 */ 2539 SCTPDBG(SCTP_DEBUG_TIMER2, 2540 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2541 t_type, inp, stcb, net); 2542 return; 2543 } 2544 /* At this point we can proceed. */ 2545 if (t_type == SCTP_TIMER_TYPE_SEND) { 2546 stcb->asoc.num_send_timers_up++; 2547 } 2548 tmr->stopped_from = 0; 2549 tmr->type = t_type; 2550 tmr->ep = (void *)inp; 2551 tmr->tcb = (void *)stcb; 2552 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2553 tmr->net = NULL; 2554 } else { 2555 tmr->net = (void *)net; 2556 } 2557 tmr->self = (void *)tmr; 2558 tmr->vnet = (void *)curvnet; 2559 tmr->ticks = sctp_get_tick_count(); 2560 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2561 SCTPDBG(SCTP_DEBUG_TIMER2, 2562 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2563 t_type, to_ticks, inp, stcb, net); 2564 /* 2565 * If this is a newly scheduled callout, as opposed to a 2566 * rescheduled one, increment relevant reference counts. 
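		 * These references are dropped again either in
		 * sctp_timer_stop() or on the out_decr path of
		 * sctp_timeout_handler().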
2567 */ 2568 if (tmr->ep != NULL) { 2569 SCTP_INP_INCR_REF(inp); 2570 } 2571 if (tmr->tcb != NULL) { 2572 atomic_add_int(&stcb->asoc.refcnt, 1); 2573 } 2574 if (tmr->net != NULL) { 2575 atomic_add_int(&net->ref_count, 1); 2576 } 2577 } else { 2578 /* 2579 * This should not happen, since we checked for pending 2580 * above. 2581 */ 2582 SCTPDBG(SCTP_DEBUG_TIMER2, 2583 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2584 t_type, to_ticks, inp, stcb, net); 2585 } 2586 return; 2587 } 2588 2589 /*- 2590 * The following table shows which parameters must be provided 2591 * when calling sctp_timer_stop(). For parameters not being 2592 * provided, NULL must be used. 2593 * 2594 * |Name |inp |stcb|net | 2595 * |-----------------------------|----|----|----| 2596 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2597 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2598 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2599 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2600 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2601 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2602 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2603 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2604 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2605 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2606 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2607 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2608 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2609 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2610 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2611 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2612 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2613 * 2614 */ 2615 2616 void 2617 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2618 struct sctp_nets *net, uint32_t from) 2619 { 2620 struct sctp_timer *tmr; 2621 2622 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2623 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2624 t_type, stcb, stcb->sctp_ep)); 2625 if (stcb != NULL) { 2626 SCTP_TCB_LOCK_ASSERT(stcb); 2627 } else if (inp != NULL) { 2628 SCTP_INP_WLOCK_ASSERT(inp); 2629 } else { 2630 SCTP_WQ_ADDR_LOCK_ASSERT(); 2631 } 2632 tmr = NULL; 2633 switch (t_type) { 2634 case SCTP_TIMER_TYPE_SEND: 2635 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2636 #ifdef INVARIANTS 2637 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2638 t_type, inp, stcb, net); 2639 #else 2640 return; 2641 #endif 2642 } 2643 tmr = &net->rxt_timer; 2644 break; 2645 case SCTP_TIMER_TYPE_INIT: 2646 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2647 #ifdef INVARIANTS 2648 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2649 t_type, inp, stcb, net); 2650 #else 2651 return; 2652 #endif 2653 } 2654 tmr = &net->rxt_timer; 2655 break; 2656 case SCTP_TIMER_TYPE_RECV: 2657 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2658 #ifdef INVARIANTS 2659 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2660 t_type, inp, stcb, net); 2661 #else 2662 return; 2663 #endif 2664 } 2665 tmr = &stcb->asoc.dack_timer; 2666 break; 2667 case SCTP_TIMER_TYPE_SHUTDOWN: 2668 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2669 #ifdef INVARIANTS 2670 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2671 t_type, inp, stcb, net); 2672 #else 2673 return; 2674 #endif 2675 } 2676 tmr = &net->rxt_timer; 2677 break; 2678 case SCTP_TIMER_TYPE_HEARTBEAT: 2679 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2680 #ifdef INVARIANTS 2681 panic("sctp_timer_stop of type %d: inp = 
%p, stcb = %p, net = %p", 2682 t_type, inp, stcb, net); 2683 #else 2684 return; 2685 #endif 2686 } 2687 tmr = &net->hb_timer; 2688 break; 2689 case SCTP_TIMER_TYPE_COOKIE: 2690 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2691 #ifdef INVARIANTS 2692 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2693 t_type, inp, stcb, net); 2694 #else 2695 return; 2696 #endif 2697 } 2698 tmr = &net->rxt_timer; 2699 break; 2700 case SCTP_TIMER_TYPE_NEWCOOKIE: 2701 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2702 #ifdef INVARIANTS 2703 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2704 t_type, inp, stcb, net); 2705 #else 2706 return; 2707 #endif 2708 } 2709 tmr = &inp->sctp_ep.signature_change; 2710 break; 2711 case SCTP_TIMER_TYPE_PATHMTURAISE: 2712 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2713 #ifdef INVARIANTS 2714 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2715 t_type, inp, stcb, net); 2716 #else 2717 return; 2718 #endif 2719 } 2720 tmr = &net->pmtu_timer; 2721 break; 2722 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2723 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2724 #ifdef INVARIANTS 2725 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2726 t_type, inp, stcb, net); 2727 #else 2728 return; 2729 #endif 2730 } 2731 tmr = &net->rxt_timer; 2732 break; 2733 case SCTP_TIMER_TYPE_ASCONF: 2734 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2735 #ifdef INVARIANTS 2736 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2737 t_type, inp, stcb, net); 2738 #else 2739 return; 2740 #endif 2741 } 2742 tmr = &stcb->asoc.asconf_timer; 2743 break; 2744 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2745 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2746 #ifdef INVARIANTS 2747 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2748 t_type, inp, stcb, net); 2749 #else 2750 return; 2751 #endif 2752 } 2753 tmr = &stcb->asoc.shut_guard_timer; 2754 break; 2755 case SCTP_TIMER_TYPE_AUTOCLOSE: 2756 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2757 #ifdef INVARIANTS 2758 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2759 t_type, inp, stcb, net); 2760 #else 2761 return; 2762 #endif 2763 } 2764 tmr = &stcb->asoc.autoclose_timer; 2765 break; 2766 case SCTP_TIMER_TYPE_STRRESET: 2767 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2768 #ifdef INVARIANTS 2769 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2770 t_type, inp, stcb, net); 2771 #else 2772 return; 2773 #endif 2774 } 2775 tmr = &stcb->asoc.strreset_timer; 2776 break; 2777 case SCTP_TIMER_TYPE_INPKILL: 2778 /* 2779 * The inp is setup to die. We re-use the signature_change 2780 * timer since that has stopped and we are in the GONE 2781 * state. 
2782 */ 2783 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2784 #ifdef INVARIANTS 2785 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2786 t_type, inp, stcb, net); 2787 #else 2788 return; 2789 #endif 2790 } 2791 tmr = &inp->sctp_ep.signature_change; 2792 break; 2793 case SCTP_TIMER_TYPE_ASOCKILL: 2794 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2795 #ifdef INVARIANTS 2796 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2797 t_type, inp, stcb, net); 2798 #else 2799 return; 2800 #endif 2801 } 2802 tmr = &stcb->asoc.strreset_timer; 2803 break; 2804 case SCTP_TIMER_TYPE_ADDR_WQ: 2805 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2806 #ifdef INVARIANTS 2807 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2808 t_type, inp, stcb, net); 2809 #else 2810 return; 2811 #endif 2812 } 2813 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2814 break; 2815 case SCTP_TIMER_TYPE_PRIM_DELETED: 2816 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2817 #ifdef INVARIANTS 2818 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2819 t_type, inp, stcb, net); 2820 #else 2821 return; 2822 #endif 2823 } 2824 tmr = &stcb->asoc.delete_prim_timer; 2825 break; 2826 default: 2827 #ifdef INVARIANTS 2828 panic("Unknown timer type %d", t_type); 2829 #else 2830 return; 2831 #endif 2832 } 2833 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2834 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2835 (tmr->type != t_type)) { 2836 /* 2837 * Ok we have a timer that is under joint use. Cookie timer 2838 * per chance with the SEND timer. We therefore are NOT 2839 * running the timer that the caller wants stopped. So just 2840 * return. 2841 */ 2842 SCTPDBG(SCTP_DEBUG_TIMER2, 2843 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2844 t_type, inp, stcb, net); 2845 return; 2846 } 2847 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2848 stcb->asoc.num_send_timers_up--; 2849 if (stcb->asoc.num_send_timers_up < 0) { 2850 stcb->asoc.num_send_timers_up = 0; 2851 } 2852 } 2853 tmr->self = NULL; 2854 tmr->stopped_from = from; 2855 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2856 KASSERT(tmr->ep == inp, 2857 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2858 t_type, inp, tmr->ep)); 2859 KASSERT(tmr->tcb == stcb, 2860 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2861 t_type, stcb, tmr->tcb)); 2862 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2863 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2864 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2865 t_type, net, tmr->net)); 2866 SCTPDBG(SCTP_DEBUG_TIMER2, 2867 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2868 t_type, inp, stcb, net); 2869 /* 2870 * If the timer was actually stopped, decrement reference 2871 * counts that were incremented in sctp_timer_start(). 2872 */ 2873 if (tmr->ep != NULL) { 2874 tmr->ep = NULL; 2875 SCTP_INP_DECR_REF(inp); 2876 } 2877 if (tmr->tcb != NULL) { 2878 tmr->tcb = NULL; 2879 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2880 } 2881 if (tmr->net != NULL) { 2882 struct sctp_nets *tmr_net; 2883 2884 /* 2885 * Can't use net, since it doesn't work for 2886 * SCTP_TIMER_TYPE_ASCONF. 
2887 */ 2888 tmr_net = tmr->net; 2889 tmr->net = NULL; 2890 sctp_free_remote_addr(tmr_net); 2891 } 2892 } else { 2893 SCTPDBG(SCTP_DEBUG_TIMER2, 2894 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2895 t_type, inp, stcb, net); 2896 } 2897 return; 2898 } 2899 2900 uint32_t 2901 sctp_calculate_len(struct mbuf *m) 2902 { 2903 struct mbuf *at; 2904 uint32_t tlen; 2905 2906 tlen = 0; 2907 for (at = m; at != NULL; at = SCTP_BUF_NEXT(at)) { 2908 tlen += SCTP_BUF_LEN(at); 2909 } 2910 return (tlen); 2911 } 2912 2913 /* 2914 * Given an association and starting time of the current RTT period, update 2915 * RTO in number of msecs. net should point to the current network. 2916 * Return 1, if an RTO update was performed, return 0 if no update was 2917 * performed due to invalid starting point. 2918 */ 2919 2920 int 2921 sctp_calculate_rto(struct sctp_tcb *stcb, 2922 struct sctp_association *asoc, 2923 struct sctp_nets *net, 2924 struct timeval *old, 2925 int rtt_from_sack) 2926 { 2927 struct timeval now; 2928 uint64_t rtt_us; /* RTT in us */ 2929 int32_t rtt; /* RTT in ms */ 2930 uint32_t new_rto; 2931 int first_measure = 0; 2932 2933 /************************/ 2934 /* 1. calculate new RTT */ 2935 /************************/ 2936 /* get the current time */ 2937 if (stcb->asoc.use_precise_time) { 2938 (void)SCTP_GETPTIME_TIMEVAL(&now); 2939 } else { 2940 (void)SCTP_GETTIME_TIMEVAL(&now); 2941 } 2942 if ((old->tv_sec > now.tv_sec) || 2943 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2944 /* The starting point is in the future. */ 2945 return (0); 2946 } 2947 timevalsub(&now, old); 2948 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2949 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2950 /* The RTT is larger than a sane value. */ 2951 return (0); 2952 } 2953 /* store the current RTT in us */ 2954 net->rtt = rtt_us; 2955 /* compute rtt in ms */ 2956 rtt = (int32_t)(net->rtt / 1000); 2957 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2958 /* 2959 * Tell the CC module that a new update has just occurred 2960 * from a sack 2961 */ 2962 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2963 } 2964 /* 2965 * Do we need to determine the lan? We do this only on sacks i.e. 2966 * RTT being determined from data not non-data (HB/INIT->INITACK). 2967 */ 2968 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2969 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2970 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2971 net->lan_type = SCTP_LAN_INTERNET; 2972 } else { 2973 net->lan_type = SCTP_LAN_LOCAL; 2974 } 2975 } 2976 2977 /***************************/ 2978 /* 2. update RTTVAR & SRTT */ 2979 /***************************/ 2980 /*- 2981 * Compute the scaled average lastsa and the 2982 * scaled variance lastsv as described in van Jacobson 2983 * Paper "Congestion Avoidance and Control", Annex A. 
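	 *
	 * In un-scaled form this update amounts to (cf. RFC 4960,
	 * Section 6.3.1; shown here for illustration only):
	 *
	 *   RTTVAR = (1 - beta)  * RTTVAR + beta  * |SRTT - RTT|
	 *   SRTT   = (1 - alpha) * SRTT   + alpha * RTT
	 *   RTO    = SRTT + 4 * RTTVAR
	 *
	 * with alpha = 1/8 and beta = 1/4.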
2984 * 2985 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2986 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2987 */ 2988 if (net->RTO_measured) { 2989 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2990 net->lastsa += rtt; 2991 if (rtt < 0) { 2992 rtt = -rtt; 2993 } 2994 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 2995 net->lastsv += rtt; 2996 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2997 rto_logging(net, SCTP_LOG_RTTVAR); 2998 } 2999 } else { 3000 /* First RTO measurement */ 3001 net->RTO_measured = 1; 3002 first_measure = 1; 3003 net->lastsa = rtt << SCTP_RTT_SHIFT; 3004 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3005 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3006 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3007 } 3008 } 3009 if (net->lastsv == 0) { 3010 net->lastsv = SCTP_CLOCK_GRANULARITY; 3011 } 3012 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3013 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3014 (stcb->asoc.sat_network_lockout == 0)) { 3015 stcb->asoc.sat_network = 1; 3016 } else if ((!first_measure) && stcb->asoc.sat_network) { 3017 stcb->asoc.sat_network = 0; 3018 stcb->asoc.sat_network_lockout = 1; 3019 } 3020 /* bound it, per C6/C7 in Section 5.3.1 */ 3021 if (new_rto < stcb->asoc.minrto) { 3022 new_rto = stcb->asoc.minrto; 3023 } 3024 if (new_rto > stcb->asoc.maxrto) { 3025 new_rto = stcb->asoc.maxrto; 3026 } 3027 net->RTO = new_rto; 3028 return (1); 3029 } 3030 3031 /* 3032 * return a pointer to a contiguous piece of data from the given mbuf chain 3033 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3034 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 3035 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 3036 */ 3037 caddr_t 3038 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3039 { 3040 uint32_t count; 3041 uint8_t *ptr; 3042 3043 ptr = in_ptr; 3044 if ((off < 0) || (len <= 0)) 3045 return (NULL); 3046 3047 /* find the desired start location */ 3048 while ((m != NULL) && (off > 0)) { 3049 if (off < SCTP_BUF_LEN(m)) 3050 break; 3051 off -= SCTP_BUF_LEN(m); 3052 m = SCTP_BUF_NEXT(m); 3053 } 3054 if (m == NULL) 3055 return (NULL); 3056 3057 /* is the current mbuf large enough (eg. contiguous)? */ 3058 if ((SCTP_BUF_LEN(m) - off) >= len) { 3059 return (mtod(m, caddr_t)+off); 3060 } else { 3061 /* else, it spans more than one mbuf, so save a temp copy... */ 3062 while ((m != NULL) && (len > 0)) { 3063 count = min(SCTP_BUF_LEN(m) - off, len); 3064 memcpy(ptr, mtod(m, caddr_t)+off, count); 3065 len -= count; 3066 ptr += count; 3067 off = 0; 3068 m = SCTP_BUF_NEXT(m); 3069 } 3070 if ((m == NULL) && (len > 0)) 3071 return (NULL); 3072 else 3073 return ((caddr_t)in_ptr); 3074 } 3075 } 3076 3077 struct sctp_paramhdr * 3078 sctp_get_next_param(struct mbuf *m, 3079 int offset, 3080 struct sctp_paramhdr *pull, 3081 int pull_limit) 3082 { 3083 /* This just provides a typed signature to Peter's Pull routine */ 3084 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3085 (uint8_t *)pull)); 3086 } 3087 3088 struct mbuf * 3089 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3090 { 3091 struct mbuf *m_last; 3092 caddr_t dp; 3093 3094 if (padlen > 3) { 3095 return (NULL); 3096 } 3097 if (padlen <= M_TRAILINGSPACE(m)) { 3098 /* 3099 * The easy way. 
We hope the majority of the time we hit 3100 * here :) 3101 */ 3102 m_last = m; 3103 } else { 3104 /* Hard way we must grow the mbuf chain */ 3105 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3106 if (m_last == NULL) { 3107 return (NULL); 3108 } 3109 SCTP_BUF_LEN(m_last) = 0; 3110 SCTP_BUF_NEXT(m_last) = NULL; 3111 SCTP_BUF_NEXT(m) = m_last; 3112 } 3113 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3114 SCTP_BUF_LEN(m_last) += padlen; 3115 memset(dp, 0, padlen); 3116 return (m_last); 3117 } 3118 3119 struct mbuf * 3120 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3121 { 3122 /* find the last mbuf in chain and pad it */ 3123 struct mbuf *m_at; 3124 3125 if (last_mbuf != NULL) { 3126 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3127 } else { 3128 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3129 if (SCTP_BUF_NEXT(m_at) == NULL) { 3130 return (sctp_add_pad_tombuf(m_at, padval)); 3131 } 3132 } 3133 } 3134 return (NULL); 3135 } 3136 3137 static void 3138 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3139 uint16_t error, struct sctp_abort_chunk *abort, 3140 bool from_peer, bool timedout, int so_locked) 3141 { 3142 struct mbuf *m_notify; 3143 struct sctp_assoc_change *sac; 3144 struct sctp_queued_to_read *control; 3145 unsigned int notif_len; 3146 uint16_t abort_len; 3147 unsigned int i; 3148 3149 KASSERT(abort == NULL || from_peer, 3150 ("sctp_notify_assoc_change: ABORT chunk provided for local termination")); 3151 KASSERT(!from_peer || !timedout, 3152 ("sctp_notify_assoc_change: timeouts can only be local")); 3153 if (stcb == NULL) { 3154 return; 3155 } 3156 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3157 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3158 if (abort != NULL) { 3159 abort_len = ntohs(abort->ch.chunk_length); 3160 /* 3161 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3162 * contiguous. 3163 */ 3164 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3165 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3166 } 3167 } else { 3168 abort_len = 0; 3169 } 3170 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3171 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3172 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3173 notif_len += abort_len; 3174 } 3175 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3176 if (m_notify == NULL) { 3177 /* Retry with smaller value. 
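			 * (i.e. without the space reserved for the
			 * optional sac_info data)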
*/ 3178 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3179 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3180 if (m_notify == NULL) { 3181 goto set_error; 3182 } 3183 } 3184 SCTP_BUF_NEXT(m_notify) = NULL; 3185 sac = mtod(m_notify, struct sctp_assoc_change *); 3186 memset(sac, 0, notif_len); 3187 sac->sac_type = SCTP_ASSOC_CHANGE; 3188 sac->sac_flags = 0; 3189 sac->sac_length = sizeof(struct sctp_assoc_change); 3190 sac->sac_state = state; 3191 sac->sac_error = error; 3192 if (state == SCTP_CANT_STR_ASSOC) { 3193 sac->sac_outbound_streams = 0; 3194 sac->sac_inbound_streams = 0; 3195 } else { 3196 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3197 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3198 } 3199 sac->sac_assoc_id = sctp_get_associd(stcb);
3200 if (notif_len > sizeof(struct sctp_assoc_change)) { 3201 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3202 i = 0; 3203 if (stcb->asoc.prsctp_supported == 1) { 3204 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3205 } 3206 if (stcb->asoc.auth_supported == 1) { 3207 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3208 } 3209 if (stcb->asoc.asconf_supported == 1) { 3210 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3211 } 3212 if (stcb->asoc.idata_supported == 1) { 3213 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3214 } 3215 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3216 if (stcb->asoc.reconfig_supported == 1) { 3217 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3218 } 3219 sac->sac_length += i; 3220 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3221 memcpy(sac->sac_info, abort, abort_len); 3222 sac->sac_length += abort_len; 3223 } 3224 }
3225 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3226 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3227 0, 0, stcb->asoc.context, 0, 0, 0, 3228 m_notify); 3229 if (control != NULL) { 3230 control->length = SCTP_BUF_LEN(m_notify); 3231 control->spec_flags = M_NOTIFICATION; 3232 /* not that we need this */ 3233 control->tail_mbuf = m_notify; 3234 sctp_add_to_readq(stcb->sctp_ep, stcb, 3235 control, 3236 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3237 so_locked); 3238 } else { 3239 sctp_m_freem(m_notify); 3240 } 3241 } 3242 /* 3243 * For 1-to-1 style sockets, we send up an error when an ABORT 3244 * comes in.
3245 */ 3246 set_error: 3247 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3248 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3249 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3250 SOCK_LOCK(stcb->sctp_socket); 3251 if (from_peer) { 3252 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3253 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3254 stcb->sctp_socket->so_error = ECONNREFUSED; 3255 } else { 3256 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3257 stcb->sctp_socket->so_error = ECONNRESET; 3258 } 3259 } else { 3260 if (timedout) { 3261 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3262 stcb->sctp_socket->so_error = ETIMEDOUT; 3263 } else { 3264 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3265 stcb->sctp_socket->so_error = ECONNABORTED; 3266 } 3267 } 3268 SOCK_UNLOCK(stcb->sctp_socket); 3269 } 3270 /* Wake ANY sleepers */ 3271 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3272 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3273 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3274 socantrcvmore(stcb->sctp_socket); 3275 } 3276 sorwakeup(stcb->sctp_socket); 3277 sowwakeup(stcb->sctp_socket); 3278 } 3279 3280 static void 3281 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3282 struct sockaddr *sa, uint32_t error, int so_locked) 3283 { 3284 struct mbuf *m_notify; 3285 struct sctp_paddr_change *spc; 3286 struct sctp_queued_to_read *control; 3287 3288 if ((stcb == NULL) || 3289 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3290 /* event not enabled */ 3291 return; 3292 } 3293 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3294 if (m_notify == NULL) 3295 return; 3296 SCTP_BUF_LEN(m_notify) = 0; 3297 spc = mtod(m_notify, struct sctp_paddr_change *); 3298 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3299 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3300 spc->spc_flags = 0; 3301 spc->spc_length = sizeof(struct sctp_paddr_change); 3302 switch (sa->sa_family) { 3303 #ifdef INET 3304 case AF_INET: 3305 #ifdef INET6 3306 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3307 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3308 (struct sockaddr_in6 *)&spc->spc_aaddr); 3309 } else { 3310 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3311 } 3312 #else 3313 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3314 #endif 3315 break; 3316 #endif 3317 #ifdef INET6 3318 case AF_INET6: 3319 { 3320 struct sockaddr_in6 *sin6; 3321 3322 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3323 3324 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3325 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3326 if (sin6->sin6_scope_id == 0) { 3327 /* recover scope_id for user */ 3328 (void)sa6_recoverscope(sin6); 3329 } else { 3330 /* clear embedded scope_id for user */ 3331 in6_clearscope(&sin6->sin6_addr); 3332 } 3333 } 3334 break; 3335 } 3336 #endif 3337 default: 3338 /* TSNH */ 3339 break; 3340 } 3341 spc->spc_state = state; 3342 spc->spc_error = error; 3343 spc->spc_assoc_id = sctp_get_associd(stcb); 3344 3345 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3346 SCTP_BUF_NEXT(m_notify) = NULL; 3347 3348 /* append to socket */ 3349 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3350 0, 0, stcb->asoc.context, 0, 0, 0, 3351 m_notify); 3352 if 
(control == NULL) { 3353 /* no memory */ 3354 sctp_m_freem(m_notify); 3355 return; 3356 } 3357 control->length = SCTP_BUF_LEN(m_notify); 3358 control->spec_flags = M_NOTIFICATION; 3359 /* not that we need this */ 3360 control->tail_mbuf = m_notify; 3361 sctp_add_to_readq(stcb->sctp_ep, stcb, 3362 control, 3363 &stcb->sctp_socket->so_rcv, 1, 3364 SCTP_READ_LOCK_NOT_HELD, 3365 so_locked); 3366 } 3367 3368 static void 3369 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3370 struct sctp_tmit_chunk *chk, int so_locked) 3371 { 3372 struct mbuf *m_notify; 3373 struct sctp_send_failed *ssf; 3374 struct sctp_send_failed_event *ssfe; 3375 struct sctp_queued_to_read *control; 3376 struct sctp_chunkhdr *chkhdr; 3377 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3378 3379 if ((stcb == NULL) || 3380 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3381 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3382 /* event not enabled */ 3383 return; 3384 } 3385 3386 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3387 notifhdr_len = sizeof(struct sctp_send_failed_event); 3388 } else { 3389 notifhdr_len = sizeof(struct sctp_send_failed); 3390 } 3391 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3392 if (m_notify == NULL) 3393 /* no space left */ 3394 return; 3395 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3396 if (stcb->asoc.idata_supported) { 3397 chkhdr_len = sizeof(struct sctp_idata_chunk); 3398 } else { 3399 chkhdr_len = sizeof(struct sctp_data_chunk); 3400 } 3401 /* Use some defaults in case we can't access the chunk header */ 3402 if (chk->send_size >= chkhdr_len) { 3403 payload_len = chk->send_size - chkhdr_len; 3404 } else { 3405 payload_len = 0; 3406 } 3407 padding_len = 0; 3408 if (chk->data != NULL) { 3409 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3410 if (chkhdr != NULL) { 3411 chk_len = ntohs(chkhdr->chunk_length); 3412 if ((chk_len >= chkhdr_len) && 3413 (chk->send_size >= chk_len) && 3414 (chk->send_size - chk_len < 4)) { 3415 padding_len = chk->send_size - chk_len; 3416 payload_len = chk->send_size - chkhdr_len - padding_len; 3417 } 3418 } 3419 } 3420 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3421 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3422 memset(ssfe, 0, notifhdr_len); 3423 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3424 if (sent) { 3425 ssfe->ssfe_flags = SCTP_DATA_SENT; 3426 } else { 3427 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3428 } 3429 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3430 ssfe->ssfe_error = error; 3431 /* not exactly what the user sent in, but should be close :) */ 3432 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3433 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3434 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3435 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3436 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3437 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3438 } else { 3439 ssf = mtod(m_notify, struct sctp_send_failed *); 3440 memset(ssf, 0, notifhdr_len); 3441 ssf->ssf_type = SCTP_SEND_FAILED; 3442 if (sent) { 3443 ssf->ssf_flags = SCTP_DATA_SENT; 3444 } else { 3445 ssf->ssf_flags = SCTP_DATA_UNSENT; 3446 } 3447 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3448 ssf->ssf_error = error; 3449 /* not exactly what the user sent in, but should be close :) */ 3450 
ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3451 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3452 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3453 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3454 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3455 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3456 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3457 } 3458 if (chk->data != NULL) { 3459 /* Trim off the sctp chunk header (it should be there) */ 3460 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3461 m_adj(chk->data, chkhdr_len); 3462 m_adj(chk->data, -padding_len); 3463 sctp_mbuf_crush(chk->data); 3464 chk->send_size -= (chkhdr_len + padding_len); 3465 } 3466 } 3467 SCTP_BUF_NEXT(m_notify) = chk->data; 3468 /* Steal off the mbuf */ 3469 chk->data = NULL; 3470 /* 3471 * For this case, we check the actual socket buffer, since the assoc 3472 * is going away we don't want to overfill the socket buffer for a 3473 * non-reader 3474 */ 3475 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3476 sctp_m_freem(m_notify); 3477 return; 3478 } 3479 /* append to socket */ 3480 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3481 0, 0, stcb->asoc.context, 0, 0, 0, 3482 m_notify); 3483 if (control == NULL) { 3484 /* no memory */ 3485 sctp_m_freem(m_notify); 3486 return; 3487 } 3488 control->length = SCTP_BUF_LEN(m_notify); 3489 control->spec_flags = M_NOTIFICATION; 3490 /* not that we need this */ 3491 control->tail_mbuf = m_notify; 3492 sctp_add_to_readq(stcb->sctp_ep, stcb, 3493 control, 3494 &stcb->sctp_socket->so_rcv, 1, 3495 SCTP_READ_LOCK_NOT_HELD, 3496 so_locked); 3497 } 3498 3499 static void 3500 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3501 struct sctp_stream_queue_pending *sp, int so_locked) 3502 { 3503 struct mbuf *m_notify; 3504 struct sctp_send_failed *ssf; 3505 struct sctp_send_failed_event *ssfe; 3506 struct sctp_queued_to_read *control; 3507 int notifhdr_len; 3508 3509 if ((stcb == NULL) || 3510 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3511 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3512 /* event not enabled */ 3513 return; 3514 } 3515 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3516 notifhdr_len = sizeof(struct sctp_send_failed_event); 3517 } else { 3518 notifhdr_len = sizeof(struct sctp_send_failed); 3519 } 3520 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3521 if (m_notify == NULL) { 3522 /* no space left */ 3523 return; 3524 } 3525 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3526 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3527 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3528 memset(ssfe, 0, notifhdr_len); 3529 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3530 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3531 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3532 ssfe->ssfe_error = error; 3533 /* not exactly what the user sent in, but should be close :) */ 3534 ssfe->ssfe_info.snd_sid = sp->sid; 3535 if (sp->some_taken) { 3536 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3537 } else { 3538 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3539 } 3540 ssfe->ssfe_info.snd_ppid = sp->ppid; 3541 ssfe->ssfe_info.snd_context = sp->context; 3542 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3543 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3544 } else { 
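		/*
		 * This branch fills in the older sctp_send_failed layout;
		 * RFC 6458 prefers the sctp_send_failed_event /
		 * SCTP_SEND_FAILED_EVENT form handled above, but the old
		 * layout is kept for existing consumers.
		 */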
3545 ssf = mtod(m_notify, struct sctp_send_failed *); 3546 memset(ssf, 0, notifhdr_len); 3547 ssf->ssf_type = SCTP_SEND_FAILED; 3548 ssf->ssf_flags = SCTP_DATA_UNSENT; 3549 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3550 ssf->ssf_error = error; 3551 /* not exactly what the user sent in, but should be close :) */ 3552 ssf->ssf_info.sinfo_stream = sp->sid; 3553 ssf->ssf_info.sinfo_ssn = 0; 3554 if (sp->some_taken) { 3555 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3556 } else { 3557 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3558 } 3559 ssf->ssf_info.sinfo_ppid = sp->ppid; 3560 ssf->ssf_info.sinfo_context = sp->context; 3561 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3562 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3563 } 3564 SCTP_BUF_NEXT(m_notify) = sp->data; 3565 3566 /* Steal off the mbuf */ 3567 sp->data = NULL; 3568 /* 3569 * For this case, we check the actual socket buffer, since the assoc 3570 * is going away we don't want to overfill the socket buffer for a 3571 * non-reader 3572 */ 3573 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3574 sctp_m_freem(m_notify); 3575 return; 3576 } 3577 /* append to socket */ 3578 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3579 0, 0, stcb->asoc.context, 0, 0, 0, 3580 m_notify); 3581 if (control == NULL) { 3582 /* no memory */ 3583 sctp_m_freem(m_notify); 3584 return; 3585 } 3586 control->length = SCTP_BUF_LEN(m_notify); 3587 control->spec_flags = M_NOTIFICATION; 3588 /* not that we need this */ 3589 control->tail_mbuf = m_notify; 3590 sctp_add_to_readq(stcb->sctp_ep, stcb, 3591 control, 3592 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3593 } 3594 3595 static void 3596 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3597 { 3598 struct mbuf *m_notify; 3599 struct sctp_adaptation_event *sai; 3600 struct sctp_queued_to_read *control; 3601 3602 if ((stcb == NULL) || 3603 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3604 /* event not enabled */ 3605 return; 3606 } 3607 3608 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3609 if (m_notify == NULL) 3610 /* no space left */ 3611 return; 3612 SCTP_BUF_LEN(m_notify) = 0; 3613 sai = mtod(m_notify, struct sctp_adaptation_event *); 3614 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3615 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3616 sai->sai_flags = 0; 3617 sai->sai_length = sizeof(struct sctp_adaptation_event); 3618 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3619 sai->sai_assoc_id = sctp_get_associd(stcb); 3620 3621 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3622 SCTP_BUF_NEXT(m_notify) = NULL; 3623 3624 /* append to socket */ 3625 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3626 0, 0, stcb->asoc.context, 0, 0, 0, 3627 m_notify); 3628 if (control == NULL) { 3629 /* no memory */ 3630 sctp_m_freem(m_notify); 3631 return; 3632 } 3633 control->length = SCTP_BUF_LEN(m_notify); 3634 control->spec_flags = M_NOTIFICATION; 3635 /* not that we need this */ 3636 control->tail_mbuf = m_notify; 3637 sctp_add_to_readq(stcb->sctp_ep, stcb, 3638 control, 3639 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3640 } 3641 3642 /* This always must be called with the read-queue LOCKED in the INP */ 3643 static void 3644 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3645 uint32_t val, int 
so_locked) 3646 { 3647 struct mbuf *m_notify; 3648 struct sctp_pdapi_event *pdapi; 3649 struct sctp_queued_to_read *control; 3650 struct sockbuf *sb; 3651 3652 if ((stcb == NULL) || 3653 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3654 /* event not enabled */ 3655 return; 3656 } 3657 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3658 return; 3659 } 3660 3661 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3662 if (m_notify == NULL) 3663 /* no space left */ 3664 return; 3665 SCTP_BUF_LEN(m_notify) = 0; 3666 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3667 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3668 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3669 pdapi->pdapi_flags = 0; 3670 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3671 pdapi->pdapi_indication = error; 3672 pdapi->pdapi_stream = (val >> 16); 3673 pdapi->pdapi_seq = (val & 0x0000ffff); 3674 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3675 3676 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3677 SCTP_BUF_NEXT(m_notify) = NULL; 3678 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3679 0, 0, stcb->asoc.context, 0, 0, 0, 3680 m_notify); 3681 if (control == NULL) { 3682 /* no memory */ 3683 sctp_m_freem(m_notify); 3684 return; 3685 } 3686 control->length = SCTP_BUF_LEN(m_notify); 3687 control->spec_flags = M_NOTIFICATION; 3688 /* not that we need this */ 3689 control->tail_mbuf = m_notify; 3690 sb = &stcb->sctp_socket->so_rcv; 3691 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3692 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3693 } 3694 sctp_sballoc(stcb, sb, m_notify); 3695 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3696 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3697 } 3698 control->end_added = 1; 3699 if (stcb->asoc.control_pdapi) 3700 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3701 else { 3702 /* we really should not see this case */ 3703 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3704 } 3705 if (stcb->sctp_ep && stcb->sctp_socket) { 3706 /* This should always be the case */ 3707 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3708 } 3709 } 3710 3711 static void 3712 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3713 { 3714 struct mbuf *m_notify; 3715 struct sctp_shutdown_event *sse; 3716 struct sctp_queued_to_read *control; 3717 3718 /* 3719 * For TCP model AND UDP connected sockets we will send an error up 3720 * when an SHUTDOWN completes 3721 */ 3722 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3723 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3724 /* mark socket closed for read/write and wakeup! 
*/ 3725 socantsendmore(stcb->sctp_socket); 3726 } 3727 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3728 /* event not enabled */ 3729 return; 3730 } 3731 3732 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3733 if (m_notify == NULL) 3734 /* no space left */ 3735 return; 3736 sse = mtod(m_notify, struct sctp_shutdown_event *); 3737 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3738 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3739 sse->sse_flags = 0; 3740 sse->sse_length = sizeof(struct sctp_shutdown_event); 3741 sse->sse_assoc_id = sctp_get_associd(stcb); 3742 3743 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3744 SCTP_BUF_NEXT(m_notify) = NULL; 3745 3746 /* append to socket */ 3747 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3748 0, 0, stcb->asoc.context, 0, 0, 0, 3749 m_notify); 3750 if (control == NULL) { 3751 /* no memory */ 3752 sctp_m_freem(m_notify); 3753 return; 3754 } 3755 control->length = SCTP_BUF_LEN(m_notify); 3756 control->spec_flags = M_NOTIFICATION; 3757 /* not that we need this */ 3758 control->tail_mbuf = m_notify; 3759 sctp_add_to_readq(stcb->sctp_ep, stcb, 3760 control, 3761 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3762 } 3763 3764 static void 3765 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3766 int so_locked) 3767 { 3768 struct mbuf *m_notify; 3769 struct sctp_sender_dry_event *event; 3770 struct sctp_queued_to_read *control; 3771 3772 if ((stcb == NULL) || 3773 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3774 /* event not enabled */ 3775 return; 3776 } 3777 3778 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3779 if (m_notify == NULL) { 3780 /* no space left */ 3781 return; 3782 } 3783 SCTP_BUF_LEN(m_notify) = 0; 3784 event = mtod(m_notify, struct sctp_sender_dry_event *); 3785 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3786 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3787 event->sender_dry_flags = 0; 3788 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3789 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3790 3791 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3792 SCTP_BUF_NEXT(m_notify) = NULL; 3793 3794 /* append to socket */ 3795 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3796 0, 0, stcb->asoc.context, 0, 0, 0, 3797 m_notify); 3798 if (control == NULL) { 3799 /* no memory */ 3800 sctp_m_freem(m_notify); 3801 return; 3802 } 3803 control->length = SCTP_BUF_LEN(m_notify); 3804 control->spec_flags = M_NOTIFICATION; 3805 /* not that we need this */ 3806 control->tail_mbuf = m_notify; 3807 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3808 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3809 } 3810 3811 void 3812 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3813 { 3814 struct mbuf *m_notify; 3815 struct sctp_queued_to_read *control; 3816 struct sctp_stream_change_event *stradd; 3817 3818 if ((stcb == NULL) || 3819 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3820 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3821 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3822 /* If the socket is gone we are out of here. 
*/ 3823 return; 3824 } 3825 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT)) { 3826 /* event not enabled */ 3827 return; 3828 } 3829 3830 if ((stcb->asoc.peer_req_out) && flag) { 3831 /* Peer made the request, don't tell the local user */ 3832 stcb->asoc.peer_req_out = 0; 3833 return; 3834 } 3835 stcb->asoc.peer_req_out = 0; 3836 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3837 if (m_notify == NULL) 3838 /* no space left */ 3839 return; 3840 SCTP_BUF_LEN(m_notify) = 0; 3841 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3842 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3843 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3844 stradd->strchange_flags = flag; 3845 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3846 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3847 stradd->strchange_instrms = numberin; 3848 stradd->strchange_outstrms = numberout; 3849 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3850 SCTP_BUF_NEXT(m_notify) = NULL; 3851 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3852 /* no space */ 3853 sctp_m_freem(m_notify); 3854 return; 3855 } 3856 /* append to socket */ 3857 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3858 0, 0, stcb->asoc.context, 0, 0, 0, 3859 m_notify); 3860 if (control == NULL) { 3861 /* no memory */ 3862 sctp_m_freem(m_notify); 3863 return; 3864 } 3865 control->length = SCTP_BUF_LEN(m_notify); 3866 control->spec_flags = M_NOTIFICATION; 3867 /* not that we need this */ 3868 control->tail_mbuf = m_notify; 3869 sctp_add_to_readq(stcb->sctp_ep, stcb, 3870 control, 3871 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3872 } 3873 3874 void 3875 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3876 { 3877 struct mbuf *m_notify; 3878 struct sctp_queued_to_read *control; 3879 struct sctp_assoc_reset_event *strasoc; 3880 3881 if ((stcb == NULL) || 3882 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3883 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3884 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3885 /* If the socket is gone we are out of here. 
*/ 3886 return; 3887 } 3888 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT)) { 3889 /* event not enabled */ 3890 return; 3891 } 3892 3893 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3894 if (m_notify == NULL) 3895 /* no space left */ 3896 return; 3897 SCTP_BUF_LEN(m_notify) = 0; 3898 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3899 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3900 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3901 strasoc->assocreset_flags = flag; 3902 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3903 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3904 strasoc->assocreset_local_tsn = sending_tsn; 3905 strasoc->assocreset_remote_tsn = recv_tsn; 3906 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3907 SCTP_BUF_NEXT(m_notify) = NULL; 3908 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3909 /* no space */ 3910 sctp_m_freem(m_notify); 3911 return; 3912 } 3913 /* append to socket */ 3914 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3915 0, 0, stcb->asoc.context, 0, 0, 0, 3916 m_notify); 3917 if (control == NULL) { 3918 /* no memory */ 3919 sctp_m_freem(m_notify); 3920 return; 3921 } 3922 control->length = SCTP_BUF_LEN(m_notify); 3923 control->spec_flags = M_NOTIFICATION; 3924 /* not that we need this */ 3925 control->tail_mbuf = m_notify; 3926 sctp_add_to_readq(stcb->sctp_ep, stcb, 3927 control, 3928 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3929 } 3930 3931 static void 3932 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3933 int number_entries, uint16_t *list, int flag) 3934 { 3935 struct mbuf *m_notify; 3936 struct sctp_queued_to_read *control; 3937 struct sctp_stream_reset_event *strreset; 3938 int len; 3939 3940 if ((stcb == NULL) || 3941 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3942 /* event not enabled */ 3943 return; 3944 } 3945 3946 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3947 if (m_notify == NULL) 3948 /* no space left */ 3949 return; 3950 SCTP_BUF_LEN(m_notify) = 0; 3951 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3952 if (len > M_TRAILINGSPACE(m_notify)) { 3953 /* never enough room */ 3954 sctp_m_freem(m_notify); 3955 return; 3956 } 3957 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3958 memset(strreset, 0, len); 3959 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3960 strreset->strreset_flags = flag; 3961 strreset->strreset_length = len; 3962 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3963 if (number_entries) { 3964 int i; 3965 3966 for (i = 0; i < number_entries; i++) { 3967 strreset->strreset_stream_list[i] = ntohs(list[i]); 3968 } 3969 } 3970 SCTP_BUF_LEN(m_notify) = len; 3971 SCTP_BUF_NEXT(m_notify) = NULL; 3972 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3973 /* no space */ 3974 sctp_m_freem(m_notify); 3975 return; 3976 } 3977 /* append to socket */ 3978 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3979 0, 0, stcb->asoc.context, 0, 0, 0, 3980 m_notify); 3981 if (control == NULL) { 3982 /* no memory */ 3983 sctp_m_freem(m_notify); 3984 return; 3985 } 3986 control->length = SCTP_BUF_LEN(m_notify); 3987 control->spec_flags = M_NOTIFICATION; 3988 /* not that we need this */ 3989 
control->tail_mbuf = m_notify; 3990 sctp_add_to_readq(stcb->sctp_ep, stcb, 3991 control, 3992 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3993 } 3994 3995 static void 3996 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3997 { 3998 struct mbuf *m_notify; 3999 struct sctp_remote_error *sre; 4000 struct sctp_queued_to_read *control; 4001 unsigned int notif_len; 4002 uint16_t chunk_len; 4003 4004 if ((stcb == NULL) || 4005 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 4006 return; 4007 } 4008 if (chunk != NULL) { 4009 chunk_len = ntohs(chunk->ch.chunk_length); 4010 /* 4011 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 4012 * contiguous. 4013 */ 4014 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 4015 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4016 } 4017 } else { 4018 chunk_len = 0; 4019 } 4020 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4021 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4022 if (m_notify == NULL) { 4023 /* Retry with smaller value. */ 4024 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4025 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4026 if (m_notify == NULL) { 4027 return; 4028 } 4029 } 4030 SCTP_BUF_NEXT(m_notify) = NULL; 4031 sre = mtod(m_notify, struct sctp_remote_error *); 4032 memset(sre, 0, notif_len); 4033 sre->sre_type = SCTP_REMOTE_ERROR; 4034 sre->sre_flags = 0; 4035 sre->sre_length = sizeof(struct sctp_remote_error); 4036 sre->sre_error = error; 4037 sre->sre_assoc_id = sctp_get_associd(stcb); 4038 if (notif_len > sizeof(struct sctp_remote_error)) { 4039 memcpy(sre->sre_data, chunk, chunk_len); 4040 sre->sre_length += chunk_len; 4041 } 4042 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4043 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4044 0, 0, stcb->asoc.context, 0, 0, 0, 4045 m_notify); 4046 if (control != NULL) { 4047 control->length = SCTP_BUF_LEN(m_notify); 4048 control->spec_flags = M_NOTIFICATION; 4049 /* not that we need this */ 4050 control->tail_mbuf = m_notify; 4051 sctp_add_to_readq(stcb->sctp_ep, stcb, 4052 control, 4053 &stcb->sctp_socket->so_rcv, 1, 4054 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4055 } else { 4056 sctp_m_freem(m_notify); 4057 } 4058 } 4059 4060 void 4061 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4062 uint32_t error, void *data, int so_locked) 4063 { 4064 if ((stcb == NULL) || 4065 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4066 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4067 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4068 /* If the socket is gone we are out of here */ 4069 return; 4070 } 4071 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4072 return; 4073 } 4074 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4075 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4076 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4077 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4078 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4079 /* Don't report these in front states */ 4080 return; 4081 } 4082 } 4083 switch (notification) { 4084 case SCTP_NOTIFY_ASSOC_UP: 4085 if (stcb->asoc.assoc_up_sent == 0) { 4086 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, false, false, so_locked); 4087 stcb->asoc.assoc_up_sent = 1; 4088 } 4089 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4090 
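			/*
			 * The peer advertised an adaptation layer
			 * indication that has not been reported to the
			 * application yet.
			 */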
sctp_notify_adaptation_layer(stcb); 4091 } 4092 if (stcb->asoc.auth_supported == 0) { 4093 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4094 NULL, so_locked); 4095 } 4096 break; 4097 case SCTP_NOTIFY_ASSOC_DOWN: 4098 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, false, false, so_locked); 4099 break; 4100 case SCTP_NOTIFY_INTERFACE_DOWN: 4101 { 4102 struct sctp_nets *net; 4103 4104 net = (struct sctp_nets *)data; 4105 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4106 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4107 break; 4108 } 4109 case SCTP_NOTIFY_INTERFACE_UP: 4110 { 4111 struct sctp_nets *net; 4112 4113 net = (struct sctp_nets *)data; 4114 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4115 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4116 break; 4117 } 4118 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4119 { 4120 struct sctp_nets *net; 4121 4122 net = (struct sctp_nets *)data; 4123 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4124 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4125 break; 4126 } 4127 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4128 sctp_notify_send_failed2(stcb, error, 4129 (struct sctp_stream_queue_pending *)data, so_locked); 4130 break; 4131 case SCTP_NOTIFY_SENT_DG_FAIL: 4132 sctp_notify_send_failed(stcb, 1, error, 4133 (struct sctp_tmit_chunk *)data, so_locked); 4134 break; 4135 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4136 sctp_notify_send_failed(stcb, 0, error, 4137 (struct sctp_tmit_chunk *)data, so_locked); 4138 break; 4139 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4140 { 4141 uint32_t val; 4142 4143 val = *((uint32_t *)data); 4144 4145 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4146 break; 4147 } 4148 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4149 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4150 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4151 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, false, so_locked); 4152 } else { 4153 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, false, so_locked); 4154 } 4155 break; 4156 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4157 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4158 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4159 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, true, false, so_locked); 4160 } else { 4161 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, true, false, so_locked); 4162 } 4163 break; 4164 case SCTP_NOTIFY_ASSOC_TIMEDOUT: 4165 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4166 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4167 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, true, so_locked); 4168 } else { 4169 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, true, so_locked); 4170 } 4171 break; 4172 case SCTP_NOTIFY_ASSOC_RESTART: 4173 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, false, false, so_locked); 4174 if (stcb->asoc.auth_supported == 0) { 4175 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4176 NULL, so_locked); 4177 } 4178 break; 4179 case SCTP_NOTIFY_STR_RESET_SEND: 4180 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4181 break; 4182 case SCTP_NOTIFY_STR_RESET_RECV: 4183 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4184 break; 4185 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4186 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4187 
(SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4188 break; 4189 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4190 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4191 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4192 break; 4193 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4194 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4195 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4196 break; 4197 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4198 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4199 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4200 break; 4201 case SCTP_NOTIFY_ASCONF_ADD_IP: 4202 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4203 error, so_locked); 4204 break; 4205 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4206 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4207 error, so_locked); 4208 break; 4209 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4210 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4211 error, so_locked); 4212 break; 4213 case SCTP_NOTIFY_PEER_SHUTDOWN: 4214 sctp_notify_shutdown_event(stcb); 4215 break; 4216 case SCTP_NOTIFY_AUTH_NEW_KEY: 4217 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4218 (uint16_t)(uintptr_t)data, 4219 so_locked); 4220 break; 4221 case SCTP_NOTIFY_AUTH_FREE_KEY: 4222 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4223 (uint16_t)(uintptr_t)data, 4224 so_locked); 4225 break; 4226 case SCTP_NOTIFY_NO_PEER_AUTH: 4227 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4228 (uint16_t)(uintptr_t)data, 4229 so_locked); 4230 break; 4231 case SCTP_NOTIFY_SENDER_DRY: 4232 sctp_notify_sender_dry_event(stcb, so_locked); 4233 break; 4234 case SCTP_NOTIFY_REMOTE_ERROR: 4235 sctp_notify_remote_error(stcb, error, data); 4236 break; 4237 default: 4238 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4239 __func__, notification, notification); 4240 break; 4241 } /* end switch */ 4242 } 4243 4244 void 4245 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4246 { 4247 struct sctp_association *asoc; 4248 struct sctp_stream_out *outs; 4249 struct sctp_tmit_chunk *chk, *nchk; 4250 struct sctp_stream_queue_pending *sp, *nsp; 4251 int i; 4252 4253 if (stcb == NULL) { 4254 return; 4255 } 4256 asoc = &stcb->asoc; 4257 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4258 /* already being freed */ 4259 return; 4260 } 4261 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4262 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4263 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4264 return; 4265 } 4266 /* now through all the gunk freeing chunks */ 4267 /* sent queue SHOULD be empty */ 4268 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4269 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4270 asoc->sent_queue_cnt--; 4271 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4272 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4273 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4274 #ifdef INVARIANTS 4275 } else { 4276 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4277 #endif 4278 } 4279 } 4280 if (chk->data != NULL) { 4281 sctp_free_bufspace(stcb, asoc, chk, 1); 4282 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4283 error, chk, so_locked); 4284 if (chk->data) { 4285 sctp_m_freem(chk->data); 4286 chk->data = NULL; 4287 } 4288 } 4289 sctp_free_a_chunk(stcb, chk, so_locked); 4290 /* sa_ignore FREED_MEMORY */ 4291 } 4292 /* pending send queue 
SHOULD be empty */ 4293 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4294 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4295 asoc->send_queue_cnt--; 4296 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4297 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4298 #ifdef INVARIANTS 4299 } else { 4300 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4301 #endif 4302 } 4303 if (chk->data != NULL) { 4304 sctp_free_bufspace(stcb, asoc, chk, 1); 4305 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4306 error, chk, so_locked); 4307 if (chk->data) { 4308 sctp_m_freem(chk->data); 4309 chk->data = NULL; 4310 } 4311 } 4312 sctp_free_a_chunk(stcb, chk, so_locked); 4313 /* sa_ignore FREED_MEMORY */ 4314 } 4315 for (i = 0; i < asoc->streamoutcnt; i++) { 4316 /* For each stream */ 4317 outs = &asoc->strmout[i]; 4318 /* clean up any sends there */ 4319 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4320 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4321 TAILQ_REMOVE(&outs->outqueue, sp, next); 4322 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp); 4323 sctp_free_spbufspace(stcb, asoc, sp); 4324 if (sp->data) { 4325 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4326 error, (void *)sp, so_locked); 4327 if (sp->data) { 4328 sctp_m_freem(sp->data); 4329 sp->data = NULL; 4330 sp->tail_mbuf = NULL; 4331 sp->length = 0; 4332 } 4333 } 4334 if (sp->net) { 4335 sctp_free_remote_addr(sp->net); 4336 sp->net = NULL; 4337 } 4338 /* Free the chunk */ 4339 sctp_free_a_strmoq(stcb, sp, so_locked); 4340 /* sa_ignore FREED_MEMORY */ 4341 } 4342 } 4343 } 4344 4345 void 4346 sctp_abort_notification(struct sctp_tcb *stcb, bool from_peer, bool timeout, 4347 uint16_t error, struct sctp_abort_chunk *abort, 4348 int so_locked) 4349 { 4350 if (stcb == NULL) { 4351 return; 4352 } 4353 SCTP_TCB_LOCK_ASSERT(stcb); 4354 4355 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4356 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4357 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4358 sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_WAS_ABORTED); 4359 } 4360 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4361 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4362 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4363 return; 4364 } 4365 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4366 /* Tell them we lost the asoc */ 4367 sctp_report_all_outbound(stcb, error, so_locked); 4368 if (from_peer) { 4369 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4370 } else { 4371 if (timeout) { 4372 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_TIMEDOUT, stcb, error, abort, so_locked); 4373 } else { 4374 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4375 } 4376 } 4377 } 4378 4379 void 4380 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4381 struct mbuf *m, int iphlen, 4382 struct sockaddr *src, struct sockaddr *dst, 4383 struct sctphdr *sh, struct mbuf *op_err, 4384 uint8_t mflowtype, uint32_t mflowid, 4385 uint32_t vrf_id, uint16_t port) 4386 { 4387 struct sctp_gen_error_cause *cause; 4388 uint32_t vtag; 4389 uint16_t cause_code; 4390 4391 if (stcb != NULL) { 4392 vtag = stcb->asoc.peer_vtag; 4393 vrf_id = stcb->asoc.vrf_id; 4394 if (op_err != NULL) { 4395 /* Read the cause code from the error cause. 
*/ 4396 cause = mtod(op_err, struct sctp_gen_error_cause *); 4397 cause_code = ntohs(cause->code); 4398 } else { 4399 cause_code = 0; 4400 } 4401 } else { 4402 vtag = 0; 4403 } 4404 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4405 mflowtype, mflowid, inp->fibnum, 4406 vrf_id, port); 4407 if (stcb != NULL) { 4408 /* We have a TCB to abort, send notification too */ 4409 sctp_abort_notification(stcb, false, false, cause_code, NULL, SCTP_SO_NOT_LOCKED); 4410 /* Ok, now lets free it */ 4411 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4412 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4413 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4414 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4415 } 4416 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4417 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4418 } 4419 } 4420 #ifdef SCTP_ASOCLOG_OF_TSNS 4421 void 4422 sctp_print_out_track_log(struct sctp_tcb *stcb) 4423 { 4424 #ifdef NOSIY_PRINTS 4425 int i; 4426 4427 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4428 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4429 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4430 SCTP_PRINTF("None rcvd\n"); 4431 goto none_in; 4432 } 4433 if (stcb->asoc.tsn_in_wrapped) { 4434 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4435 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4436 stcb->asoc.in_tsnlog[i].tsn, 4437 stcb->asoc.in_tsnlog[i].strm, 4438 stcb->asoc.in_tsnlog[i].seq, 4439 stcb->asoc.in_tsnlog[i].flgs, 4440 stcb->asoc.in_tsnlog[i].sz); 4441 } 4442 } 4443 if (stcb->asoc.tsn_in_at) { 4444 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4445 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4446 stcb->asoc.in_tsnlog[i].tsn, 4447 stcb->asoc.in_tsnlog[i].strm, 4448 stcb->asoc.in_tsnlog[i].seq, 4449 stcb->asoc.in_tsnlog[i].flgs, 4450 stcb->asoc.in_tsnlog[i].sz); 4451 } 4452 } 4453 none_in: 4454 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4455 if ((stcb->asoc.tsn_out_at == 0) && 4456 (stcb->asoc.tsn_out_wrapped == 0)) { 4457 SCTP_PRINTF("None sent\n"); 4458 } 4459 if (stcb->asoc.tsn_out_wrapped) { 4460 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4461 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4462 stcb->asoc.out_tsnlog[i].tsn, 4463 stcb->asoc.out_tsnlog[i].strm, 4464 stcb->asoc.out_tsnlog[i].seq, 4465 stcb->asoc.out_tsnlog[i].flgs, 4466 stcb->asoc.out_tsnlog[i].sz); 4467 } 4468 } 4469 if (stcb->asoc.tsn_out_at) { 4470 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4471 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4472 stcb->asoc.out_tsnlog[i].tsn, 4473 stcb->asoc.out_tsnlog[i].strm, 4474 stcb->asoc.out_tsnlog[i].seq, 4475 stcb->asoc.out_tsnlog[i].flgs, 4476 stcb->asoc.out_tsnlog[i].sz); 4477 } 4478 } 4479 #endif 4480 } 4481 #endif 4482 4483 void 4484 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4485 struct mbuf *op_err, bool timedout, int so_locked) 4486 { 4487 struct sctp_gen_error_cause *cause; 4488 uint16_t cause_code; 4489 4490 if (stcb == NULL) { 4491 /* Got to have a TCB */ 4492 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4493 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4494 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4495 SCTP_CALLED_DIRECTLY_NOCMPSET); 4496 } 4497 } 4498 return; 4499 } 4500 if (op_err != NULL) { 4501 /* Read the cause code from the error cause. 
*/ 4502 cause = mtod(op_err, struct sctp_gen_error_cause *); 4503 cause_code = ntohs(cause->code); 4504 } else { 4505 cause_code = 0; 4506 } 4507 /* notify the peer */ 4508 sctp_send_abort_tcb(stcb, op_err, so_locked); 4509 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4510 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4511 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4512 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4513 } 4514 /* notify the ulp */ 4515 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4516 sctp_abort_notification(stcb, false, timedout, cause_code, NULL, so_locked); 4517 } 4518 /* now free the asoc */ 4519 #ifdef SCTP_ASOCLOG_OF_TSNS 4520 sctp_print_out_track_log(stcb); 4521 #endif 4522 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4523 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4524 } 4525 4526 void 4527 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4528 struct sockaddr *src, struct sockaddr *dst, 4529 struct sctphdr *sh, struct sctp_inpcb *inp, 4530 struct mbuf *cause, 4531 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4532 uint32_t vrf_id, uint16_t port) 4533 { 4534 struct sctp_chunkhdr *ch, chunk_buf; 4535 unsigned int chk_length; 4536 int contains_init_chunk; 4537 4538 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4539 /* Generate a TO address for future reference */ 4540 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4541 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4542 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4543 SCTP_CALLED_DIRECTLY_NOCMPSET); 4544 } 4545 } 4546 contains_init_chunk = 0; 4547 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4548 sizeof(*ch), (uint8_t *)&chunk_buf); 4549 while (ch != NULL) { 4550 chk_length = ntohs(ch->chunk_length); 4551 if (chk_length < sizeof(*ch)) { 4552 /* break to abort land */ 4553 break; 4554 } 4555 switch (ch->chunk_type) { 4556 case SCTP_INIT: 4557 contains_init_chunk = 1; 4558 break; 4559 case SCTP_PACKET_DROPPED: 4560 /* we don't respond to pkt-dropped */ 4561 return; 4562 case SCTP_ABORT_ASSOCIATION: 4563 /* we don't respond with an ABORT to an ABORT */ 4564 return; 4565 case SCTP_SHUTDOWN_COMPLETE: 4566 /* 4567 * we ignore it since we are not waiting for it and 4568 * peer is gone 4569 */ 4570 return; 4571 case SCTP_SHUTDOWN_ACK: 4572 sctp_send_shutdown_complete2(src, dst, sh, 4573 mflowtype, mflowid, fibnum, 4574 vrf_id, port); 4575 return; 4576 default: 4577 break; 4578 } 4579 offset += SCTP_SIZE32(chk_length); 4580 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4581 sizeof(*ch), (uint8_t *)&chunk_buf); 4582 } 4583 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4584 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4585 (contains_init_chunk == 0))) { 4586 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4587 mflowtype, mflowid, fibnum, 4588 vrf_id, port); 4589 } 4590 } 4591 4592 /* 4593 * check the inbound datagram to make sure there is not an abort inside it, 4594 * if there is return 1, else return 0. 
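 * As a side effect, when an INIT or INIT-ACK chunk is seen, *vtag is
 * updated with the initiate tag carried in that chunk.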
4595 */ 4596 int 4597 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtag) 4598 { 4599 struct sctp_chunkhdr *ch; 4600 struct sctp_init_chunk *init_chk, chunk_buf; 4601 int offset; 4602 unsigned int chk_length; 4603 4604 offset = iphlen + sizeof(struct sctphdr); 4605 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4606 (uint8_t *)&chunk_buf); 4607 while (ch != NULL) { 4608 chk_length = ntohs(ch->chunk_length); 4609 if (chk_length < sizeof(*ch)) { 4610 /* packet is probably corrupt */ 4611 break; 4612 } 4613 /* we seem to be ok, is it an abort? */ 4614 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4615 /* yep, tell them */ 4616 return (1); 4617 } 4618 if ((ch->chunk_type == SCTP_INITIATION) || 4619 (ch->chunk_type == SCTP_INITIATION_ACK)) { 4620 /* need to update the Vtag */ 4621 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4622 offset, sizeof(struct sctp_init_chunk), (uint8_t *)&chunk_buf); 4623 if (init_chk != NULL) { 4624 *vtag = ntohl(init_chk->init.initiate_tag); 4625 } 4626 } 4627 /* Nope, move to the next chunk */ 4628 offset += SCTP_SIZE32(chk_length); 4629 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4630 sizeof(*ch), (uint8_t *)&chunk_buf); 4631 } 4632 return (0); 4633 } 4634 4635 /* 4636 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4637 * set (i.e. it's 0) so, create this function to compare link local scopes 4638 */ 4639 #ifdef INET6 4640 uint32_t 4641 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4642 { 4643 struct sockaddr_in6 a, b; 4644 4645 /* save copies */ 4646 a = *addr1; 4647 b = *addr2; 4648 4649 if (a.sin6_scope_id == 0) 4650 if (sa6_recoverscope(&a)) { 4651 /* can't get scope, so can't match */ 4652 return (0); 4653 } 4654 if (b.sin6_scope_id == 0) 4655 if (sa6_recoverscope(&b)) { 4656 /* can't get scope, so can't match */ 4657 return (0); 4658 } 4659 if (a.sin6_scope_id != b.sin6_scope_id) 4660 return (0); 4661 4662 return (1); 4663 } 4664 4665 /* 4666 * returns a sockaddr_in6 with embedded scope recovered and removed 4667 */ 4668 struct sockaddr_in6 * 4669 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4670 { 4671 /* check and strip embedded scope junk */ 4672 if (addr->sin6_family == AF_INET6) { 4673 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4674 if (addr->sin6_scope_id == 0) { 4675 *store = *addr; 4676 if (!sa6_recoverscope(store)) { 4677 /* use the recovered scope */ 4678 addr = store; 4679 } 4680 } else { 4681 /* else, return the original "to" addr */ 4682 in6_clearscope(&addr->sin6_addr); 4683 } 4684 } 4685 } 4686 return (addr); 4687 } 4688 #endif 4689 4690 /* 4691 * are the two addresses the same? 
 * currently a "scopeless" check; returns 1
 * if same, 0 if not
 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{

	/* must be valid */
	if (sa1 == NULL || sa2 == NULL)
		return (0);

	/* must be the same family */
	if (sa1->sa_family != sa2->sa_family)
		return (0);

	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			/* IPv6 addresses */
			struct sockaddr_in6 *sin6_1, *sin6_2;

			sin6_1 = (struct sockaddr_in6 *)sa1;
			sin6_2 = (struct sockaddr_in6 *)sa2;
			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
			    sin6_2));
		}
#endif
#ifdef INET
	case AF_INET:
		{
			/* IPv4 addresses */
			struct sockaddr_in *sin_1, *sin_2;

			sin_1 = (struct sockaddr_in *)sa1;
			sin_2 = (struct sockaddr_in *)sa2;
			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
		}
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}

void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
			break;
		}
#endif
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;
			unsigned char *p;

			sin = (struct sockaddr_in *)sa;
			p = (unsigned char *)&sin->sin_addr;
			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
			break;
		}
#endif
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}

void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = SOCK_IO_RECV_LOCK(old_so, waitflags);
	if (error) {
		/*
		 * Gak, can't get the I/O lock, we have a problem. Data will
		 * be left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, it's a
		 * screwed up app that does a peeloff OR an accept while
		 * reading from the main socket... actually it's only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
		if (control->stcb == stcb) {
			/* remove it, we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ?
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4820 } 4821 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4822 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4823 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4824 } 4825 m = SCTP_BUF_NEXT(m); 4826 } 4827 } 4828 } 4829 SCTP_INP_READ_UNLOCK(old_inp); 4830 /* Remove the recv-lock on the old socket */ 4831 SOCK_IO_RECV_UNLOCK(old_so); 4832 /* Now we move them over to the new socket buffer */ 4833 SCTP_INP_READ_LOCK(new_inp); 4834 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4835 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4836 m = control->data; 4837 while (m) { 4838 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4839 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4840 } 4841 sctp_sballoc(stcb, &new_so->so_rcv, m); 4842 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4843 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4844 } 4845 m = SCTP_BUF_NEXT(m); 4846 } 4847 } 4848 SCTP_INP_READ_UNLOCK(new_inp); 4849 } 4850 4851 void 4852 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4853 struct sctp_tcb *stcb, 4854 int so_locked 4855 SCTP_UNUSED 4856 ) 4857 { 4858 if ((inp != NULL) && 4859 (inp->sctp_socket != NULL) && 4860 (((inp->sctp_flags & (SCTP_PCB_FLAGS_TCPTYPE | SCTP_PCB_FLAGS_IN_TCPPOOL)) == 0) || 4861 !SCTP_IS_LISTENING(inp))) { 4862 sctp_sorwakeup(inp, inp->sctp_socket); 4863 } 4864 } 4865 4866 void 4867 sctp_add_to_readq(struct sctp_inpcb *inp, 4868 struct sctp_tcb *stcb, 4869 struct sctp_queued_to_read *control, 4870 struct sockbuf *sb, 4871 int end, 4872 int inp_read_lock_held, 4873 int so_locked) 4874 { 4875 /* 4876 * Here we must place the control on the end of the socket read 4877 * queue AND increment sb_cc so that select will work properly on 4878 * read. 4879 */ 4880 struct mbuf *m, *prev = NULL; 4881 4882 if (inp == NULL) { 4883 /* Gak, TSNH!! */ 4884 #ifdef INVARIANTS 4885 panic("Gak, inp NULL on add_to_readq"); 4886 #endif 4887 return; 4888 } 4889 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4890 SCTP_INP_READ_LOCK(inp); 4891 } 4892 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4893 if (!control->on_strm_q) { 4894 sctp_free_remote_addr(control->whoFrom); 4895 if (control->data) { 4896 sctp_m_freem(control->data); 4897 control->data = NULL; 4898 } 4899 sctp_free_a_readq(stcb, control); 4900 } 4901 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4902 SCTP_INP_READ_UNLOCK(inp); 4903 } 4904 return; 4905 } 4906 if ((control->spec_flags & M_NOTIFICATION) == 0) { 4907 atomic_add_int(&inp->total_recvs, 1); 4908 if (!control->do_not_ref_stcb) { 4909 atomic_add_int(&stcb->total_recvs, 1); 4910 } 4911 } 4912 m = control->data; 4913 control->held_length = 0; 4914 control->length = 0; 4915 while (m != NULL) { 4916 if (SCTP_BUF_LEN(m) == 0) { 4917 /* Skip mbufs with NO length */ 4918 if (prev == NULL) { 4919 /* First one */ 4920 control->data = sctp_m_free(m); 4921 m = control->data; 4922 } else { 4923 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4924 m = SCTP_BUF_NEXT(prev); 4925 } 4926 if (m == NULL) { 4927 control->tail_mbuf = prev; 4928 } 4929 continue; 4930 } 4931 prev = m; 4932 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4933 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4934 } 4935 sctp_sballoc(stcb, sb, m); 4936 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4937 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4938 } 4939 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4940 m = SCTP_BUF_NEXT(m); 4941 } 4942 if (prev != NULL) { 4943 control->tail_mbuf = prev; 4944 } else { 4945 /* Everything got collapsed out?? */ 4946 if (!control->on_strm_q) { 4947 sctp_free_remote_addr(control->whoFrom); 4948 sctp_free_a_readq(stcb, control); 4949 } 4950 if (inp_read_lock_held == 0) 4951 SCTP_INP_READ_UNLOCK(inp); 4952 return; 4953 } 4954 if (end) { 4955 control->end_added = 1; 4956 } 4957 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4958 control->on_read_q = 1; 4959 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4960 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4961 } 4962 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4963 SCTP_INP_READ_UNLOCK(inp); 4964 } 4965 } 4966 4967 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4968 *************ALTERNATE ROUTING CODE 4969 */ 4970 4971 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4972 *************ALTERNATE ROUTING CODE 4973 */ 4974 4975 struct mbuf * 4976 sctp_generate_cause(uint16_t code, char *info) 4977 { 4978 struct mbuf *m; 4979 struct sctp_gen_error_cause *cause; 4980 size_t info_len; 4981 uint16_t len; 4982 4983 if ((code == 0) || (info == NULL)) { 4984 return (NULL); 4985 } 4986 info_len = strlen(info); 4987 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4988 return (NULL); 4989 } 4990 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4991 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4992 if (m != NULL) { 4993 SCTP_BUF_LEN(m) = len; 4994 cause = mtod(m, struct sctp_gen_error_cause *); 4995 cause->code = htons(code); 4996 cause->length = htons(len); 4997 memcpy(cause->info, info, info_len); 4998 } 4999 return (m); 5000 } 5001 5002 struct mbuf * 5003 sctp_generate_no_user_data_cause(uint32_t tsn) 5004 { 5005 struct mbuf *m; 5006 struct sctp_error_no_user_data *no_user_data_cause; 5007 uint16_t len; 5008 5009 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 5010 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 5011 if (m != NULL) { 5012 SCTP_BUF_LEN(m) = len; 5013 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 5014 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 5015 no_user_data_cause->cause.length = htons(len); 5016 no_user_data_cause->tsn = htonl(tsn); 5017 } 5018 return (m); 5019 } 5020 5021 void 5022 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 5023 struct sctp_tmit_chunk *tp1, int chk_cnt) 5024 { 5025 if (tp1->data == NULL) { 5026 return; 5027 } 5028 atomic_subtract_int(&asoc->chunks_on_out_queue, chk_cnt); 5029 #ifdef SCTP_MBCNT_LOGGING 5030 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 5031 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 5032 asoc->total_output_queue_size, 5033 tp1->book_size, 5034 0, 5035 tp1->mbcnt); 5036 } 5037 #endif 5038 if (asoc->total_output_queue_size >= tp1->book_size) { 5039 atomic_subtract_int(&asoc->total_output_queue_size, tp1->book_size); 5040 } else { 5041 asoc->total_output_queue_size = 0; 5042 } 5043 if ((stcb->sctp_socket != NULL) && 5044 (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 5045 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 5046 if 
(stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			atomic_subtract_int(&((stcb)->sctp_socket->so_snd.sb_cc), tp1->book_size);
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;
		}
	}
}

int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint32_t mid;
	uint16_t sid;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	SCTP_TCB_LOCK_ASSERT(stcb);

	sid = tp1->rec.data.sid;
	mid = tp1->rec.data.mid;
	if (sent || ((tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0)) {
		stcb->asoc.abandoned_sent[0]++;
		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	} else {
		stcb->asoc.abandoned_unsent[0]++;
		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	}
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed, we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * It's a begin or middle piece; we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.sid != sid) ||
			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
				break;
			}
			/*
			 * Save to chk in case we have some on the stream
			 * out queue. If so, and we have an un-transmitted
			 * one, we don't have to fudge the TSN.
5144 */ 5145 chk = tp1; 5146 ret_sz += tp1->book_size; 5147 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5148 if (sent) { 5149 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5150 } else { 5151 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5152 } 5153 if (tp1->data) { 5154 sctp_m_freem(tp1->data); 5155 tp1->data = NULL; 5156 } 5157 /* No flight involved here book the size to 0 */ 5158 tp1->book_size = 0; 5159 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5160 foundeom = 1; 5161 } 5162 do_wakeup_routine = 1; 5163 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5164 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5165 /* 5166 * on to the sent queue so we can wait for it to be 5167 * passed by. 5168 */ 5169 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5170 sctp_next); 5171 stcb->asoc.send_queue_cnt--; 5172 stcb->asoc.sent_queue_cnt++; 5173 } 5174 } 5175 if (foundeom == 0) { 5176 /* 5177 * Still no eom found. That means there is stuff left on the 5178 * stream out queue.. yuck. 5179 */ 5180 strq = &stcb->asoc.strmout[sid]; 5181 sp = TAILQ_FIRST(&strq->outqueue); 5182 if (sp != NULL) { 5183 sp->discard_rest = 1; 5184 /* 5185 * We may need to put a chunk on the queue that 5186 * holds the TSN that would have been sent with the 5187 * LAST bit. 5188 */ 5189 if (chk == NULL) { 5190 /* Yep, we have to */ 5191 sctp_alloc_a_chunk(stcb, chk); 5192 if (chk == NULL) { 5193 /* 5194 * we are hosed. All we can do is 5195 * nothing.. which will cause an 5196 * abort if the peer is paying 5197 * attention. 5198 */ 5199 goto oh_well; 5200 } 5201 memset(chk, 0, sizeof(*chk)); 5202 chk->rec.data.rcv_flags = 0; 5203 chk->sent = SCTP_FORWARD_TSN_SKIP; 5204 chk->asoc = &stcb->asoc; 5205 if (stcb->asoc.idata_supported == 0) { 5206 if (sp->sinfo_flags & SCTP_UNORDERED) { 5207 chk->rec.data.mid = 0; 5208 } else { 5209 chk->rec.data.mid = strq->next_mid_ordered; 5210 } 5211 } else { 5212 if (sp->sinfo_flags & SCTP_UNORDERED) { 5213 chk->rec.data.mid = strq->next_mid_unordered; 5214 } else { 5215 chk->rec.data.mid = strq->next_mid_ordered; 5216 } 5217 } 5218 chk->rec.data.sid = sp->sid; 5219 chk->rec.data.ppid = sp->ppid; 5220 chk->rec.data.context = sp->context; 5221 chk->flags = sp->act_flags; 5222 chk->whoTo = NULL; 5223 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5224 strq->chunks_on_queues++; 5225 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5226 stcb->asoc.sent_queue_cnt++; 5227 stcb->asoc.pr_sctp_cnt++; 5228 } 5229 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5230 if (sp->sinfo_flags & SCTP_UNORDERED) { 5231 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5232 } 5233 if (stcb->asoc.idata_supported == 0) { 5234 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5235 strq->next_mid_ordered++; 5236 } 5237 } else { 5238 if (sp->sinfo_flags & SCTP_UNORDERED) { 5239 strq->next_mid_unordered++; 5240 } else { 5241 strq->next_mid_ordered++; 5242 } 5243 } 5244 oh_well: 5245 if (sp->data) { 5246 /* 5247 * Pull any data to free up the SB and allow 5248 * sender to "add more" while we will throw 5249 * away :-) 5250 */ 5251 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5252 ret_sz += sp->length; 5253 do_wakeup_routine = 1; 5254 sp->some_taken = 1; 5255 sctp_m_freem(sp->data); 5256 sp->data = NULL; 5257 sp->tail_mbuf = NULL; 5258 sp->length = 0; 5259 } 5260 } 5261 } 5262 if (do_wakeup_routine) { 5263 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5264 } 5265 return (ret_sz); 5266 } 5267 5268 /* 5269 * checks to see if the given address, 
sa, is one that is currently known by 5270 * the kernel note: can't distinguish the same address on multiple interfaces 5271 * and doesn't handle multiple addresses with different zone/scope id's note: 5272 * ifa_ifwithaddr() compares the entire sockaddr struct 5273 */ 5274 struct sctp_ifa * 5275 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5276 int holds_lock) 5277 { 5278 struct sctp_laddr *laddr; 5279 5280 if (holds_lock == 0) { 5281 SCTP_INP_RLOCK(inp); 5282 } 5283 5284 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5285 if (laddr->ifa == NULL) 5286 continue; 5287 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5288 continue; 5289 #ifdef INET 5290 if (addr->sa_family == AF_INET) { 5291 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5292 laddr->ifa->address.sin.sin_addr.s_addr) { 5293 /* found him. */ 5294 break; 5295 } 5296 } 5297 #endif 5298 #ifdef INET6 5299 if (addr->sa_family == AF_INET6) { 5300 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5301 &laddr->ifa->address.sin6)) { 5302 /* found him. */ 5303 break; 5304 } 5305 } 5306 #endif 5307 } 5308 if (holds_lock == 0) { 5309 SCTP_INP_RUNLOCK(inp); 5310 } 5311 if (laddr != NULL) { 5312 return (laddr->ifa); 5313 } else { 5314 return (NULL); 5315 } 5316 } 5317 5318 uint32_t 5319 sctp_get_ifa_hash_val(struct sockaddr *addr) 5320 { 5321 switch (addr->sa_family) { 5322 #ifdef INET 5323 case AF_INET: 5324 { 5325 struct sockaddr_in *sin; 5326 5327 sin = (struct sockaddr_in *)addr; 5328 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5329 } 5330 #endif 5331 #ifdef INET6 5332 case AF_INET6: 5333 { 5334 struct sockaddr_in6 *sin6; 5335 uint32_t hash_of_addr; 5336 5337 sin6 = (struct sockaddr_in6 *)addr; 5338 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5339 sin6->sin6_addr.s6_addr32[1] + 5340 sin6->sin6_addr.s6_addr32[2] + 5341 sin6->sin6_addr.s6_addr32[3]); 5342 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5343 return (hash_of_addr); 5344 } 5345 #endif 5346 default: 5347 break; 5348 } 5349 return (0); 5350 } 5351 5352 struct sctp_ifa * 5353 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5354 { 5355 struct sctp_ifa *sctp_ifap; 5356 struct sctp_vrf *vrf; 5357 struct sctp_ifalist *hash_head; 5358 uint32_t hash_of_addr; 5359 5360 if (holds_lock == 0) { 5361 SCTP_IPI_ADDR_RLOCK(); 5362 } else { 5363 SCTP_IPI_ADDR_LOCK_ASSERT(); 5364 } 5365 5366 vrf = sctp_find_vrf(vrf_id); 5367 if (vrf == NULL) { 5368 if (holds_lock == 0) 5369 SCTP_IPI_ADDR_RUNLOCK(); 5370 return (NULL); 5371 } 5372 5373 hash_of_addr = sctp_get_ifa_hash_val(addr); 5374 5375 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5376 if (hash_head == NULL) { 5377 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5378 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5379 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5380 sctp_print_address(addr); 5381 SCTP_PRINTF("No such bucket for address\n"); 5382 if (holds_lock == 0) 5383 SCTP_IPI_ADDR_RUNLOCK(); 5384 5385 return (NULL); 5386 } 5387 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5388 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5389 continue; 5390 #ifdef INET 5391 if (addr->sa_family == AF_INET) { 5392 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5393 sctp_ifap->address.sin.sin_addr.s_addr) { 5394 /* found him. 
*/ 5395 break; 5396 } 5397 } 5398 #endif 5399 #ifdef INET6 5400 if (addr->sa_family == AF_INET6) { 5401 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5402 &sctp_ifap->address.sin6)) { 5403 /* found him. */ 5404 break; 5405 } 5406 } 5407 #endif 5408 } 5409 if (holds_lock == 0) 5410 SCTP_IPI_ADDR_RUNLOCK(); 5411 return (sctp_ifap); 5412 } 5413 5414 static void 5415 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5416 uint32_t rwnd_req) 5417 { 5418 /* User pulled some data, do we need a rwnd update? */ 5419 struct epoch_tracker et; 5420 int r_unlocked = 0; 5421 uint32_t dif, rwnd; 5422 struct socket *so = NULL; 5423 5424 if (stcb == NULL) 5425 return; 5426 5427 atomic_add_int(&stcb->asoc.refcnt, 1); 5428 5429 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5430 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5431 /* Pre-check If we are freeing no update */ 5432 goto no_lock; 5433 } 5434 SCTP_INP_INCR_REF(stcb->sctp_ep); 5435 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5436 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5437 goto out; 5438 } 5439 so = stcb->sctp_socket; 5440 if (so == NULL) { 5441 goto out; 5442 } 5443 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5444 /* Have you have freed enough to look */ 5445 *freed_so_far = 0; 5446 /* Yep, its worth a look and the lock overhead */ 5447 5448 /* Figure out what the rwnd would be */ 5449 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5450 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5451 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5452 } else { 5453 dif = 0; 5454 } 5455 if (dif >= rwnd_req) { 5456 if (hold_rlock) { 5457 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5458 r_unlocked = 1; 5459 } 5460 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5461 /* 5462 * One last check before we allow the guy possibly 5463 * to get in. There is a race, where the guy has not 5464 * reached the gate. In that case 5465 */ 5466 goto out; 5467 } 5468 SCTP_TCB_LOCK(stcb); 5469 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5470 /* No reports here */ 5471 SCTP_TCB_UNLOCK(stcb); 5472 goto out; 5473 } 5474 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5475 NET_EPOCH_ENTER(et); 5476 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5477 5478 sctp_chunk_output(stcb->sctp_ep, stcb, 5479 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5480 /* make sure no timer is running */ 5481 NET_EPOCH_EXIT(et); 5482 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5483 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5484 SCTP_TCB_UNLOCK(stcb); 5485 } else { 5486 /* Update how much we have pending */ 5487 stcb->freed_by_sorcv_sincelast = dif; 5488 } 5489 out: 5490 if (so && r_unlocked && hold_rlock) { 5491 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5492 } 5493 5494 SCTP_INP_DECR_REF(stcb->sctp_ep); 5495 no_lock: 5496 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5497 return; 5498 } 5499 5500 int 5501 sctp_sorecvmsg(struct socket *so, 5502 struct uio *uio, 5503 struct mbuf **mp, 5504 struct sockaddr *from, 5505 int fromlen, 5506 int *msg_flags, 5507 struct sctp_sndrcvinfo *sinfo, 5508 int filling_sinfo) 5509 { 5510 /* 5511 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5512 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5513 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
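	 * MSG_OOB - not supported; rejected below with EOPNOTSUPP.
	 * MSG_DONTWAIT/MSG_NBIO (or a non-blocking socket) - disables the
	 * blocking waits below.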
5514 * On the way out we may send out any combination of: 5515 * MSG_NOTIFICATION MSG_EOR 5516 * 5517 */ 5518 struct sctp_inpcb *inp = NULL; 5519 ssize_t my_len = 0; 5520 ssize_t cp_len = 0; 5521 int error = 0; 5522 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5523 struct mbuf *m = NULL; 5524 struct sctp_tcb *stcb = NULL; 5525 int wakeup_read_socket = 0; 5526 int freecnt_applied = 0; 5527 int out_flags = 0, in_flags = 0; 5528 int block_allowed = 1; 5529 uint32_t freed_so_far = 0; 5530 ssize_t copied_so_far = 0; 5531 int in_eeor_mode = 0; 5532 int no_rcv_needed = 0; 5533 uint32_t rwnd_req = 0; 5534 int hold_sblock = 0; 5535 int hold_rlock = 0; 5536 ssize_t slen = 0; 5537 uint32_t held_length = 0; 5538 int sockbuf_lock = 0; 5539 5540 if (uio == NULL) { 5541 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5542 return (EINVAL); 5543 } 5544 5545 if (msg_flags) { 5546 in_flags = *msg_flags; 5547 if (in_flags & MSG_PEEK) 5548 SCTP_STAT_INCR(sctps_read_peeks); 5549 } else { 5550 in_flags = 0; 5551 } 5552 slen = uio->uio_resid; 5553 5554 /* Pull in and set up our int flags */ 5555 if (in_flags & MSG_OOB) { 5556 /* Out of band's NOT supported */ 5557 return (EOPNOTSUPP); 5558 } 5559 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5560 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5561 return (EINVAL); 5562 } 5563 if ((in_flags & (MSG_DONTWAIT 5564 | MSG_NBIO 5565 )) || 5566 SCTP_SO_IS_NBIO(so)) { 5567 block_allowed = 0; 5568 } 5569 /* setup the endpoint */ 5570 inp = (struct sctp_inpcb *)so->so_pcb; 5571 if (inp == NULL) { 5572 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5573 return (EFAULT); 5574 } 5575 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5576 /* Must be at least a MTU's worth */ 5577 if (rwnd_req < SCTP_MIN_RWND) 5578 rwnd_req = SCTP_MIN_RWND; 5579 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5580 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5581 sctp_misc_ints(SCTP_SORECV_ENTER, 5582 rwnd_req, in_eeor_mode, SCTP_SBAVAIL(&so->so_rcv), (uint32_t)uio->uio_resid); 5583 } 5584 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5585 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5586 rwnd_req, block_allowed, SCTP_SBAVAIL(&so->so_rcv), (uint32_t)uio->uio_resid); 5587 } 5588 5589 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(in_flags)); 5590 if (error) { 5591 goto release_unlocked; 5592 } 5593 sockbuf_lock = 1; 5594 restart: 5595 5596 restart_nosblocks: 5597 if (hold_sblock == 0) { 5598 SOCKBUF_LOCK(&so->so_rcv); 5599 hold_sblock = 1; 5600 } 5601 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5602 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5603 goto out; 5604 } 5605 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && SCTP_SBAVAIL(&so->so_rcv) == 0) { 5606 if (so->so_error) { 5607 error = so->so_error; 5608 if ((in_flags & MSG_PEEK) == 0) 5609 so->so_error = 0; 5610 goto out; 5611 } else { 5612 if (SCTP_SBAVAIL(&so->so_rcv) == 0) { 5613 /* indicate EOF */ 5614 error = 0; 5615 goto out; 5616 } 5617 } 5618 } 5619 if (SCTP_SBAVAIL(&so->so_rcv) <= held_length) { 5620 if (so->so_error) { 5621 error = so->so_error; 5622 if ((in_flags & MSG_PEEK) == 0) { 5623 so->so_error = 0; 5624 } 5625 goto out; 5626 } 5627 if ((SCTP_SBAVAIL(&so->so_rcv) == 0) && 5628 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5629 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5630 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5631 /* 5632 * 
For active open side clear flags for 5633 * re-use passive open is blocked by 5634 * connect. 5635 */ 5636 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5637 /* 5638 * You were aborted, passive side 5639 * always hits here 5640 */ 5641 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5642 error = ECONNRESET; 5643 } 5644 so->so_state &= ~(SS_ISCONNECTING | 5645 SS_ISDISCONNECTING | 5646 SS_ISCONFIRMING | 5647 SS_ISCONNECTED); 5648 if (error == 0) { 5649 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5650 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5651 error = ENOTCONN; 5652 } 5653 } 5654 goto out; 5655 } 5656 } 5657 if (block_allowed) { 5658 error = sbwait(so, SO_RCV); 5659 if (error) { 5660 goto out; 5661 } 5662 held_length = 0; 5663 goto restart_nosblocks; 5664 } else { 5665 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5666 error = EWOULDBLOCK; 5667 goto out; 5668 } 5669 } 5670 if (hold_sblock == 1) { 5671 SOCKBUF_UNLOCK(&so->so_rcv); 5672 hold_sblock = 0; 5673 } 5674 /* we possibly have data we can read */ 5675 /* sa_ignore FREED_MEMORY */ 5676 control = TAILQ_FIRST(&inp->read_queue); 5677 if (control == NULL) { 5678 /* 5679 * This could be happening since the appender did the 5680 * increment but as not yet did the tailq insert onto the 5681 * read_queue 5682 */ 5683 if (hold_rlock == 0) { 5684 SCTP_INP_READ_LOCK(inp); 5685 } 5686 control = TAILQ_FIRST(&inp->read_queue); 5687 if ((control == NULL) && (SCTP_SBAVAIL(&so->so_rcv) > 0)) { 5688 #ifdef INVARIANTS 5689 panic("Huh, its non zero and nothing on control?"); 5690 #endif 5691 SCTP_SB_CLEAR(so->so_rcv); 5692 } 5693 SCTP_INP_READ_UNLOCK(inp); 5694 hold_rlock = 0; 5695 goto restart; 5696 } 5697 5698 if ((control->length == 0) && 5699 (control->do_not_ref_stcb)) { 5700 /* 5701 * Clean up code for freeing assoc that left behind a 5702 * pdapi.. maybe a peer in EEOR that just closed after 5703 * sending and never indicated a EOR. 5704 */ 5705 if (hold_rlock == 0) { 5706 hold_rlock = 1; 5707 SCTP_INP_READ_LOCK(inp); 5708 } 5709 control->held_length = 0; 5710 if (control->data) { 5711 /* Hmm there is data here .. fix */ 5712 struct mbuf *m_tmp; 5713 int cnt = 0; 5714 5715 m_tmp = control->data; 5716 while (m_tmp) { 5717 cnt += SCTP_BUF_LEN(m_tmp); 5718 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5719 control->tail_mbuf = m_tmp; 5720 control->end_added = 1; 5721 } 5722 m_tmp = SCTP_BUF_NEXT(m_tmp); 5723 } 5724 control->length = cnt; 5725 } else { 5726 /* remove it */ 5727 TAILQ_REMOVE(&inp->read_queue, control, next); 5728 /* Add back any hidden data */ 5729 sctp_free_remote_addr(control->whoFrom); 5730 sctp_free_a_readq(stcb, control); 5731 } 5732 if (hold_rlock) { 5733 hold_rlock = 0; 5734 SCTP_INP_READ_UNLOCK(inp); 5735 } 5736 goto restart; 5737 } 5738 if ((control->length == 0) && 5739 (control->end_added == 1)) { 5740 /* 5741 * Do we also need to check for (control->pdapi_aborted == 5742 * 1)? 5743 */ 5744 if (hold_rlock == 0) { 5745 hold_rlock = 1; 5746 SCTP_INP_READ_LOCK(inp); 5747 } 5748 TAILQ_REMOVE(&inp->read_queue, control, next); 5749 if (control->data) { 5750 #ifdef INVARIANTS 5751 panic("control->data not null but control->length == 0"); 5752 #else 5753 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5754 sctp_m_freem(control->data); 5755 control->data = NULL; 5756 #endif 5757 } 5758 if (control->aux_data) { 5759 sctp_m_free(control->aux_data); 5760 control->aux_data = NULL; 5761 } 5762 #ifdef INVARIANTS 5763 if (control->on_strm_q) { 5764 panic("About to free ctl:%p so:%p and its in %d", 5765 control, so, control->on_strm_q); 5766 } 5767 #endif 5768 sctp_free_remote_addr(control->whoFrom); 5769 sctp_free_a_readq(stcb, control); 5770 if (hold_rlock) { 5771 hold_rlock = 0; 5772 SCTP_INP_READ_UNLOCK(inp); 5773 } 5774 goto restart; 5775 } 5776 if (control->length == 0) { 5777 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5778 (filling_sinfo)) { 5779 /* find a more suitable one then this */ 5780 ctl = TAILQ_NEXT(control, next); 5781 while (ctl) { 5782 if ((ctl->stcb != control->stcb) && (ctl->length) && 5783 (ctl->some_taken || 5784 (ctl->spec_flags & M_NOTIFICATION) || 5785 ((ctl->do_not_ref_stcb == 0) && 5786 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5787 ) { 5788 /*- 5789 * If we have a different TCB next, and there is data 5790 * present. If we have already taken some (pdapi), OR we can 5791 * ref the tcb and no delivery as started on this stream, we 5792 * take it. Note we allow a notification on a different 5793 * assoc to be delivered.. 5794 */ 5795 control = ctl; 5796 goto found_one; 5797 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5798 (ctl->length) && 5799 ((ctl->some_taken) || 5800 ((ctl->do_not_ref_stcb == 0) && 5801 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5802 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5803 /*- 5804 * If we have the same tcb, and there is data present, and we 5805 * have the strm interleave feature present. Then if we have 5806 * taken some (pdapi) or we can refer to tht tcb AND we have 5807 * not started a delivery for this stream, we can take it. 5808 * Note we do NOT allow a notification on the same assoc to 5809 * be delivered. 5810 */ 5811 control = ctl; 5812 goto found_one; 5813 } 5814 ctl = TAILQ_NEXT(ctl, next); 5815 } 5816 } 5817 /* 5818 * if we reach here, not suitable replacement is available 5819 * <or> fragment interleave is NOT on. So stuff the sb_cc 5820 * into the our held count, and its time to sleep again. 5821 */ 5822 held_length = SCTP_SBAVAIL(&so->so_rcv); 5823 control->held_length = SCTP_SBAVAIL(&so->so_rcv); 5824 goto restart; 5825 } 5826 /* Clear the held length since there is something to read */ 5827 control->held_length = 0; 5828 found_one: 5829 /* 5830 * If we reach here, control has a some data for us to read off. 5831 * Note that stcb COULD be NULL. 5832 */ 5833 if (hold_rlock == 0) { 5834 hold_rlock = 1; 5835 SCTP_INP_READ_LOCK(inp); 5836 } 5837 control->some_taken++; 5838 stcb = control->stcb; 5839 if (stcb) { 5840 if ((control->do_not_ref_stcb == 0) && 5841 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5842 if (freecnt_applied == 0) 5843 stcb = NULL; 5844 } else if (control->do_not_ref_stcb == 0) { 5845 /* you can't free it on me please */ 5846 /* 5847 * The lock on the socket buffer protects us so the 5848 * free code will stop. 
But since we used the 5849 * socketbuf lock and the sender uses the tcb_lock 5850 * to increment, we need to use the atomic add to 5851 * the refcnt 5852 */ 5853 if (freecnt_applied) { 5854 #ifdef INVARIANTS 5855 panic("refcnt already incremented"); 5856 #else 5857 SCTP_PRINTF("refcnt already incremented?\n"); 5858 #endif 5859 } else { 5860 atomic_add_int(&stcb->asoc.refcnt, 1); 5861 freecnt_applied = 1; 5862 } 5863 /* 5864 * Setup to remember how much we have not yet told 5865 * the peer our rwnd has opened up. Note we grab the 5866 * value from the tcb from last time. Note too that 5867 * sack sending clears this when a sack is sent, 5868 * which is fine. Once we hit the rwnd_req, we then 5869 * will go to the sctp_user_rcvd() that will not 5870 * lock until it KNOWs it MUST send a WUP-SACK. 5871 */ 5872 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5873 stcb->freed_by_sorcv_sincelast = 0; 5874 } 5875 } 5876 if (stcb && 5877 ((control->spec_flags & M_NOTIFICATION) == 0) && 5878 control->do_not_ref_stcb == 0) { 5879 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5880 } 5881 5882 /* First lets get off the sinfo and sockaddr info */ 5883 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5884 sinfo->sinfo_stream = control->sinfo_stream; 5885 sinfo->sinfo_ssn = (uint16_t)control->mid; 5886 sinfo->sinfo_flags = control->sinfo_flags; 5887 sinfo->sinfo_ppid = control->sinfo_ppid; 5888 sinfo->sinfo_context = control->sinfo_context; 5889 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5890 sinfo->sinfo_tsn = control->sinfo_tsn; 5891 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5892 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5893 nxt = TAILQ_NEXT(control, next); 5894 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5895 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5896 struct sctp_extrcvinfo *s_extra; 5897 5898 s_extra = (struct sctp_extrcvinfo *)sinfo; 5899 if ((nxt) && 5900 (nxt->length)) { 5901 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5902 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5903 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5904 } 5905 if (nxt->spec_flags & M_NOTIFICATION) { 5906 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5907 } 5908 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5909 s_extra->serinfo_next_length = nxt->length; 5910 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5911 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5912 if (nxt->tail_mbuf != NULL) { 5913 if (nxt->end_added) { 5914 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5915 } 5916 } 5917 } else { 5918 /* 5919 * we explicitly 0 this, since the memcpy 5920 * got some other things beyond the older 5921 * sinfo_ that is on the control's structure 5922 * :-D 5923 */ 5924 nxt = NULL; 5925 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5926 s_extra->serinfo_next_aid = 0; 5927 s_extra->serinfo_next_length = 0; 5928 s_extra->serinfo_next_ppid = 0; 5929 s_extra->serinfo_next_stream = 0; 5930 } 5931 } 5932 /* 5933 * update off the real current cum-ack, if we have an stcb. 5934 */ 5935 if ((control->do_not_ref_stcb == 0) && stcb) 5936 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5937 /* 5938 * mask off the high bits, we keep the actual chunk bits in 5939 * there. 
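 * The upper byte of sinfo_flags holds the DATA chunk flag bits
 * (e.g. SCTP_DATA_UNORDERED); only the low byte is reported to the
 * application, with SCTP_UNORDERED re-added below when the chunk
 * arrived unordered.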
5940 */ 5941 sinfo->sinfo_flags &= 0x00ff; 5942 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5943 sinfo->sinfo_flags |= SCTP_UNORDERED; 5944 } 5945 } 5946 #ifdef SCTP_ASOCLOG_OF_TSNS 5947 { 5948 int index, newindex; 5949 struct sctp_pcbtsn_rlog *entry; 5950 5951 do { 5952 index = inp->readlog_index; 5953 newindex = index + 1; 5954 if (newindex >= SCTP_READ_LOG_SIZE) { 5955 newindex = 0; 5956 } 5957 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5958 entry = &inp->readlog[index]; 5959 entry->vtag = control->sinfo_assoc_id; 5960 entry->strm = control->sinfo_stream; 5961 entry->seq = (uint16_t)control->mid; 5962 entry->sz = control->length; 5963 entry->flgs = control->sinfo_flags; 5964 } 5965 #endif 5966 if ((fromlen > 0) && (from != NULL)) { 5967 union sctp_sockstore store; 5968 size_t len; 5969 5970 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5971 #ifdef INET6 5972 case AF_INET6: 5973 len = sizeof(struct sockaddr_in6); 5974 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5975 store.sin6.sin6_port = control->port_from; 5976 break; 5977 #endif 5978 #ifdef INET 5979 case AF_INET: 5980 #ifdef INET6 5981 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5982 len = sizeof(struct sockaddr_in6); 5983 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5984 &store.sin6); 5985 store.sin6.sin6_port = control->port_from; 5986 } else { 5987 len = sizeof(struct sockaddr_in); 5988 store.sin = control->whoFrom->ro._l_addr.sin; 5989 store.sin.sin_port = control->port_from; 5990 } 5991 #else 5992 len = sizeof(struct sockaddr_in); 5993 store.sin = control->whoFrom->ro._l_addr.sin; 5994 store.sin.sin_port = control->port_from; 5995 #endif 5996 break; 5997 #endif 5998 default: 5999 len = 0; 6000 break; 6001 } 6002 memcpy(from, &store, min((size_t)fromlen, len)); 6003 #ifdef INET6 6004 { 6005 struct sockaddr_in6 lsa6, *from6; 6006 6007 from6 = (struct sockaddr_in6 *)from; 6008 sctp_recover_scope_mac(from6, (&lsa6)); 6009 } 6010 #endif 6011 } 6012 if (hold_rlock) { 6013 SCTP_INP_READ_UNLOCK(inp); 6014 hold_rlock = 0; 6015 } 6016 if (hold_sblock) { 6017 SOCKBUF_UNLOCK(&so->so_rcv); 6018 hold_sblock = 0; 6019 } 6020 /* now copy out what data we can */ 6021 if (mp == NULL) { 6022 /* copy out each mbuf in the chain up to length */ 6023 get_more_data: 6024 m = control->data; 6025 while (m) { 6026 /* Move out all we can */ 6027 cp_len = uio->uio_resid; 6028 my_len = SCTP_BUF_LEN(m); 6029 if (cp_len > my_len) { 6030 /* not enough in this buf */ 6031 cp_len = my_len; 6032 } 6033 if (hold_rlock) { 6034 SCTP_INP_READ_UNLOCK(inp); 6035 hold_rlock = 0; 6036 } 6037 if (cp_len > 0) 6038 error = uiomove(mtod(m, char *), (int)cp_len, uio); 6039 /* re-read */ 6040 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 6041 goto release; 6042 } 6043 6044 if ((control->do_not_ref_stcb == 0) && stcb && 6045 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 6046 no_rcv_needed = 1; 6047 } 6048 if (error) { 6049 /* error we are out of here */ 6050 goto release; 6051 } 6052 SCTP_INP_READ_LOCK(inp); 6053 hold_rlock = 1; 6054 if (cp_len == SCTP_BUF_LEN(m)) { 6055 if ((SCTP_BUF_NEXT(m) == NULL) && 6056 (control->end_added)) { 6057 out_flags |= MSG_EOR; 6058 if ((control->do_not_ref_stcb == 0) && 6059 (control->stcb != NULL) && 6060 ((control->spec_flags & M_NOTIFICATION) == 0)) 6061 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6062 } 6063 if (control->spec_flags & M_NOTIFICATION) { 6064 out_flags |= MSG_NOTIFICATION; 6065 } 6066 /* we ate up the mbuf */ 
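/*
 * The whole mbuf was copied to the caller. With MSG_PEEK it stays
 * on the read queue and we just advance to the next one; otherwise
 * it is freed and the socket-buffer accounting below is adjusted.
 */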
6067 if (in_flags & MSG_PEEK) { 6068 /* just looking */ 6069 m = SCTP_BUF_NEXT(m); 6070 copied_so_far += cp_len; 6071 } else { 6072 /* dispose of the mbuf */ 6073 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6074 sctp_sblog(&so->so_rcv, 6075 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6076 } 6077 sctp_sbfree(control, stcb, &so->so_rcv, m); 6078 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6079 sctp_sblog(&so->so_rcv, 6080 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6081 } 6082 copied_so_far += cp_len; 6083 freed_so_far += (uint32_t)cp_len; 6084 freed_so_far += MSIZE; 6085 atomic_subtract_int(&control->length, (int)cp_len); 6086 control->data = sctp_m_free(m); 6087 m = control->data; 6088 /* 6089 * been through it all, must hold sb 6090 * lock ok to null tail 6091 */ 6092 if (control->data == NULL) { 6093 #ifdef INVARIANTS 6094 if ((control->end_added == 0) || 6095 (TAILQ_NEXT(control, next) == NULL)) { 6096 /* 6097 * If the end is not 6098 * added, OR the 6099 * next is NOT null 6100 * we MUST have the 6101 * lock. 6102 */ 6103 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6104 panic("Hmm we don't own the lock?"); 6105 } 6106 } 6107 #endif 6108 control->tail_mbuf = NULL; 6109 #ifdef INVARIANTS 6110 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6111 panic("end_added, nothing left and no MSG_EOR"); 6112 } 6113 #endif 6114 } 6115 } 6116 } else { 6117 /* Do we need to trim the mbuf? */ 6118 if (control->spec_flags & M_NOTIFICATION) { 6119 out_flags |= MSG_NOTIFICATION; 6120 } 6121 if ((in_flags & MSG_PEEK) == 0) { 6122 SCTP_BUF_RESV_UF(m, cp_len); 6123 SCTP_BUF_LEN(m) -= (int)cp_len; 6124 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6125 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6126 } 6127 atomic_subtract_int(&so->so_rcv.sb_cc, (int)cp_len); 6128 if ((control->do_not_ref_stcb == 0) && 6129 stcb) { 6130 atomic_subtract_int(&stcb->asoc.sb_cc, (int)cp_len); 6131 } 6132 copied_so_far += cp_len; 6133 freed_so_far += (uint32_t)cp_len; 6134 freed_so_far += MSIZE; 6135 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6136 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6137 SCTP_LOG_SBRESULT, 0); 6138 } 6139 atomic_subtract_int(&control->length, (int)cp_len); 6140 } else { 6141 copied_so_far += cp_len; 6142 } 6143 } 6144 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6145 break; 6146 } 6147 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6148 (control->do_not_ref_stcb == 0) && 6149 (freed_so_far >= rwnd_req)) { 6150 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6151 } 6152 } /* end while(m) */ 6153 /* 6154 * At this point we have looked at it all and we either have 6155 * a MSG_EOR/or read all the user wants... <OR> 6156 * control->length == 0. 6157 */ 6158 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6159 /* we are done with this control */ 6160 if (control->length == 0) { 6161 if (control->data) { 6162 #ifdef INVARIANTS 6163 panic("control->data not null at read eor?"); 6164 #else 6165 SCTP_PRINTF("Strange, data left in the control buffer .. 
invariants would panic?\n"); 6166 sctp_m_freem(control->data); 6167 control->data = NULL; 6168 #endif 6169 } 6170 done_with_control: 6171 if (hold_rlock == 0) { 6172 SCTP_INP_READ_LOCK(inp); 6173 hold_rlock = 1; 6174 } 6175 TAILQ_REMOVE(&inp->read_queue, control, next); 6176 /* Add back any hidden data */ 6177 if (control->held_length) { 6178 held_length = 0; 6179 control->held_length = 0; 6180 wakeup_read_socket = 1; 6181 } 6182 if (control->aux_data) { 6183 sctp_m_free(control->aux_data); 6184 control->aux_data = NULL; 6185 } 6186 no_rcv_needed = control->do_not_ref_stcb; 6187 sctp_free_remote_addr(control->whoFrom); 6188 control->data = NULL; 6189 #ifdef INVARIANTS 6190 if (control->on_strm_q) { 6191 panic("About to free ctl:%p so:%p and its in %d", 6192 control, so, control->on_strm_q); 6193 } 6194 #endif 6195 sctp_free_a_readq(stcb, control); 6196 control = NULL; 6197 if ((freed_so_far >= rwnd_req) && 6198 (no_rcv_needed == 0)) 6199 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6200 6201 } else { 6202 /* 6203 * The user did not read all of this 6204 * message, turn off the returned MSG_EOR 6205 * since we are leaving more behind on the 6206 * control to read. 6207 */ 6208 #ifdef INVARIANTS 6209 if (control->end_added && 6210 (control->data == NULL) && 6211 (control->tail_mbuf == NULL)) { 6212 panic("Gak, control->length is corrupt?"); 6213 } 6214 #endif 6215 no_rcv_needed = control->do_not_ref_stcb; 6216 out_flags &= ~MSG_EOR; 6217 } 6218 } 6219 if (out_flags & MSG_EOR) { 6220 goto release; 6221 } 6222 if ((uio->uio_resid == 0) || 6223 ((in_eeor_mode) && 6224 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6225 goto release; 6226 } 6227 /* 6228 * If I hit here the receiver wants more and this message is 6229 * NOT done (pd-api). So two questions. Can we block? if not 6230 * we are done. Did the user NOT set MSG_WAITALL? 6231 */ 6232 if (block_allowed == 0) { 6233 goto release; 6234 } 6235 /* 6236 * We need to wait for more data a few things: - We don't 6237 * release the I/O lock so we don't get someone else 6238 * reading. - We must be sure to account for the case where 6239 * what is added is NOT to our control when we wakeup. 6240 */ 6241 6242 /* 6243 * Do we need to tell the transport a rwnd update might be 6244 * needed before we go to sleep? 
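 * sctp_user_rcvd() only sends a window-update SACK once the
 * receive window has opened by at least rwnd_req since the last
 * report; otherwise it just remembers how much has been freed.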
6245 */ 6246 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6247 ((freed_so_far >= rwnd_req) && 6248 (control->do_not_ref_stcb == 0) && 6249 (no_rcv_needed == 0))) { 6250 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6251 } 6252 wait_some_more: 6253 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6254 goto release; 6255 } 6256 6257 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6258 goto release; 6259 6260 if (hold_rlock == 1) { 6261 SCTP_INP_READ_UNLOCK(inp); 6262 hold_rlock = 0; 6263 } 6264 if (hold_sblock == 0) { 6265 SOCKBUF_LOCK(&so->so_rcv); 6266 hold_sblock = 1; 6267 } 6268 if ((copied_so_far) && (control->length == 0) && 6269 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6270 goto release; 6271 } 6272 if (SCTP_SBAVAIL(&so->so_rcv) <= control->held_length) { 6273 error = sbwait(so, SO_RCV); 6274 if (error) { 6275 goto release; 6276 } 6277 control->held_length = 0; 6278 } 6279 if (hold_sblock) { 6280 SOCKBUF_UNLOCK(&so->so_rcv); 6281 hold_sblock = 0; 6282 } 6283 if (control->length == 0) { 6284 /* still nothing here */ 6285 if (control->end_added == 1) { 6286 /* he aborted, or is done i.e.did a shutdown */ 6287 out_flags |= MSG_EOR; 6288 if (control->pdapi_aborted) { 6289 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6290 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6291 6292 out_flags |= MSG_TRUNC; 6293 } else { 6294 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6295 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6296 } 6297 goto done_with_control; 6298 } 6299 if (SCTP_SBAVAIL(&so->so_rcv) > held_length) { 6300 control->held_length = SCTP_SBAVAIL(&so->so_rcv); 6301 held_length = 0; 6302 } 6303 goto wait_some_more; 6304 } else if (control->data == NULL) { 6305 /* 6306 * we must re-sync since data is probably being 6307 * added 6308 */ 6309 SCTP_INP_READ_LOCK(inp); 6310 if ((control->length > 0) && (control->data == NULL)) { 6311 /* 6312 * big trouble.. we have the lock and its 6313 * corrupt? 6314 */ 6315 #ifdef INVARIANTS 6316 panic("Impossible data==NULL length !=0"); 6317 #endif 6318 out_flags |= MSG_EOR; 6319 out_flags |= MSG_TRUNC; 6320 control->length = 0; 6321 SCTP_INP_READ_UNLOCK(inp); 6322 goto done_with_control; 6323 } 6324 SCTP_INP_READ_UNLOCK(inp); 6325 /* We will fall around to get more data */ 6326 } 6327 goto get_more_data; 6328 } else { 6329 /*- 6330 * Give caller back the mbuf chain, 6331 * store in uio_resid the length 6332 */ 6333 wakeup_read_socket = 0; 6334 if ((control->end_added == 0) || 6335 (TAILQ_NEXT(control, next) == NULL)) { 6336 /* Need to get rlock */ 6337 if (hold_rlock == 0) { 6338 SCTP_INP_READ_LOCK(inp); 6339 hold_rlock = 1; 6340 } 6341 } 6342 if (control->end_added) { 6343 out_flags |= MSG_EOR; 6344 if ((control->do_not_ref_stcb == 0) && 6345 (control->stcb != NULL) && 6346 ((control->spec_flags & M_NOTIFICATION) == 0)) 6347 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6348 } 6349 if (control->spec_flags & M_NOTIFICATION) { 6350 out_flags |= MSG_NOTIFICATION; 6351 } 6352 uio->uio_resid = control->length; 6353 *mp = control->data; 6354 m = control->data; 6355 while (m) { 6356 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6357 sctp_sblog(&so->so_rcv, 6358 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6359 } 6360 sctp_sbfree(control, stcb, &so->so_rcv, m); 6361 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6362 freed_so_far += MSIZE; 6363 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6364 sctp_sblog(&so->so_rcv, 6365 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6366 } 6367 m = SCTP_BUF_NEXT(m); 6368 } 6369 control->data = control->tail_mbuf = NULL; 6370 control->length = 0; 6371 if (out_flags & MSG_EOR) { 6372 /* Done with this control */ 6373 goto done_with_control; 6374 } 6375 } 6376 release: 6377 if (hold_rlock == 1) { 6378 SCTP_INP_READ_UNLOCK(inp); 6379 hold_rlock = 0; 6380 } 6381 if (hold_sblock == 1) { 6382 SOCKBUF_UNLOCK(&so->so_rcv); 6383 hold_sblock = 0; 6384 } 6385 6386 SOCK_IO_RECV_UNLOCK(so); 6387 sockbuf_lock = 0; 6388 6389 release_unlocked: 6390 if (hold_sblock) { 6391 SOCKBUF_UNLOCK(&so->so_rcv); 6392 hold_sblock = 0; 6393 } 6394 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6395 if ((freed_so_far >= rwnd_req) && 6396 (control && (control->do_not_ref_stcb == 0)) && 6397 (no_rcv_needed == 0)) 6398 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6399 } 6400 out: 6401 if (msg_flags) { 6402 *msg_flags = out_flags; 6403 } 6404 if (((out_flags & MSG_EOR) == 0) && 6405 ((in_flags & MSG_PEEK) == 0) && 6406 (sinfo) && 6407 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6408 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6409 struct sctp_extrcvinfo *s_extra; 6410 6411 s_extra = (struct sctp_extrcvinfo *)sinfo; 6412 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6413 } 6414 if (hold_rlock == 1) { 6415 SCTP_INP_READ_UNLOCK(inp); 6416 } 6417 if (hold_sblock) { 6418 SOCKBUF_UNLOCK(&so->so_rcv); 6419 } 6420 if (sockbuf_lock) { 6421 SOCK_IO_RECV_UNLOCK(so); 6422 } 6423 6424 if (freecnt_applied) { 6425 /* 6426 * The lock on the socket buffer protects us so the free 6427 * code will stop. But since we used the socketbuf lock and 6428 * the sender uses the tcb_lock to increment, we need to use 6429 * the atomic add to the refcnt. 6430 */ 6431 if (stcb == NULL) { 6432 #ifdef INVARIANTS 6433 panic("stcb for refcnt has gone NULL?"); 6434 goto stage_left; 6435 #else 6436 goto stage_left; 6437 #endif 6438 } 6439 /* Save the value back for next time */ 6440 stcb->freed_by_sorcv_sincelast = freed_so_far; 6441 atomic_subtract_int(&stcb->asoc.refcnt, 1); 6442 } 6443 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6444 if (stcb) { 6445 sctp_misc_ints(SCTP_SORECV_DONE, 6446 freed_so_far, 6447 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6448 stcb->asoc.my_rwnd, 6449 SCTP_SBAVAIL(&so->so_rcv)); 6450 } else { 6451 sctp_misc_ints(SCTP_SORECV_DONE, 6452 freed_so_far, 6453 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6454 0, 6455 SCTP_SBAVAIL(&so->so_rcv)); 6456 } 6457 } 6458 stage_left: 6459 if (wakeup_read_socket) { 6460 sctp_sorwakeup(inp, so); 6461 } 6462 return (error); 6463 } 6464 6465 #ifdef SCTP_MBUF_LOGGING 6466 struct mbuf * 6467 sctp_m_free(struct mbuf *m) 6468 { 6469 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6470 sctp_log_mb(m, SCTP_MBUF_IFREE); 6471 } 6472 return (m_free(m)); 6473 } 6474 6475 void 6476 sctp_m_freem(struct mbuf *mb) 6477 { 6478 while (mb != NULL) 6479 mb = sctp_m_free(mb); 6480 } 6481 6482 #endif 6483 6484 int 6485 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6486 { 6487 /* 6488 * Given a local address. 
For all associations that holds the 6489 * address, request a peer-set-primary. 6490 */ 6491 struct sctp_ifa *ifa; 6492 struct sctp_laddr *wi; 6493 6494 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6495 if (ifa == NULL) { 6496 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6497 return (EADDRNOTAVAIL); 6498 } 6499 /* 6500 * Now that we have the ifa we must awaken the iterator with this 6501 * message. 6502 */ 6503 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6504 if (wi == NULL) { 6505 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6506 return (ENOMEM); 6507 } 6508 /* Now incr the count and int wi structure */ 6509 SCTP_INCR_LADDR_COUNT(); 6510 memset(wi, 0, sizeof(*wi)); 6511 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6512 wi->ifa = ifa; 6513 wi->action = SCTP_SET_PRIM_ADDR; 6514 atomic_add_int(&ifa->refcount, 1); 6515 6516 /* Now add it to the work queue */ 6517 SCTP_WQ_ADDR_LOCK(); 6518 /* 6519 * Should this really be a tailq? As it is we will process the 6520 * newest first :-0 6521 */ 6522 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6523 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6524 (struct sctp_inpcb *)NULL, 6525 (struct sctp_tcb *)NULL, 6526 (struct sctp_nets *)NULL); 6527 SCTP_WQ_ADDR_UNLOCK(); 6528 return (0); 6529 } 6530 6531 int 6532 sctp_soreceive(struct socket *so, 6533 struct sockaddr **psa, 6534 struct uio *uio, 6535 struct mbuf **mp0, 6536 struct mbuf **controlp, 6537 int *flagsp) 6538 { 6539 int error, fromlen; 6540 uint8_t sockbuf[256]; 6541 struct sockaddr *from; 6542 struct sctp_extrcvinfo sinfo; 6543 int filling_sinfo = 1; 6544 int flags; 6545 struct sctp_inpcb *inp; 6546 6547 inp = (struct sctp_inpcb *)so->so_pcb; 6548 /* pickup the assoc we are reading from */ 6549 if (inp == NULL) { 6550 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6551 return (EINVAL); 6552 } 6553 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6554 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6555 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6556 (controlp == NULL)) { 6557 /* user does not want the sndrcv ctl */ 6558 filling_sinfo = 0; 6559 } 6560 if (psa) { 6561 from = (struct sockaddr *)sockbuf; 6562 fromlen = sizeof(sockbuf); 6563 from->sa_len = 0; 6564 } else { 6565 from = NULL; 6566 fromlen = 0; 6567 } 6568 6569 if (filling_sinfo) { 6570 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6571 } 6572 if (flagsp != NULL) { 6573 flags = *flagsp; 6574 } else { 6575 flags = 0; 6576 } 6577 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6578 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6579 if (flagsp != NULL) { 6580 *flagsp = flags; 6581 } 6582 if (controlp != NULL) { 6583 /* copy back the sinfo in a CMSG format */ 6584 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6585 *controlp = sctp_build_ctl_nchunk(inp, 6586 (struct sctp_sndrcvinfo *)&sinfo); 6587 } else { 6588 *controlp = NULL; 6589 } 6590 } 6591 if (psa) { 6592 /* copy back the address info */ 6593 if (from && from->sa_len) { 6594 *psa = sodupsockaddr(from, M_NOWAIT); 6595 } else { 6596 *psa = NULL; 6597 } 6598 } 6599 return (error); 6600 } 6601 6602 int 6603 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6604 int totaddr, int *error) 6605 { 6606 int added = 0; 6607 int i; 6608 struct sctp_inpcb *inp; 6609 struct sockaddr *sa; 6610 size_t incr = 0; 6611 #ifdef INET 6612 struct sockaddr_in *sin; 6613 #endif 6614 #ifdef 
INET6 6615 struct sockaddr_in6 *sin6; 6616 #endif 6617 6618 sa = addr; 6619 inp = stcb->sctp_ep; 6620 *error = 0; 6621 for (i = 0; i < totaddr; i++) { 6622 switch (sa->sa_family) { 6623 #ifdef INET 6624 case AF_INET: 6625 incr = sizeof(struct sockaddr_in); 6626 sin = (struct sockaddr_in *)sa; 6627 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6628 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6629 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6630 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6631 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6632 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6633 *error = EINVAL; 6634 goto out_now; 6635 } 6636 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6637 SCTP_DONOT_SETSCOPE, 6638 SCTP_ADDR_IS_CONFIRMED)) { 6639 /* assoc gone no un-lock */ 6640 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6641 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6642 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6643 *error = ENOBUFS; 6644 goto out_now; 6645 } 6646 added++; 6647 break; 6648 #endif 6649 #ifdef INET6 6650 case AF_INET6: 6651 incr = sizeof(struct sockaddr_in6); 6652 sin6 = (struct sockaddr_in6 *)sa; 6653 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6654 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6655 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6656 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6657 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6658 *error = EINVAL; 6659 goto out_now; 6660 } 6661 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6662 SCTP_DONOT_SETSCOPE, 6663 SCTP_ADDR_IS_CONFIRMED)) { 6664 /* assoc gone no un-lock */ 6665 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6666 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6667 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6668 *error = ENOBUFS; 6669 goto out_now; 6670 } 6671 added++; 6672 break; 6673 #endif 6674 default: 6675 break; 6676 } 6677 sa = (struct sockaddr *)((caddr_t)sa + incr); 6678 } 6679 out_now: 6680 return (added); 6681 } 6682 6683 int 6684 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6685 unsigned int totaddr, 6686 unsigned int *num_v4, unsigned int *num_v6, 6687 unsigned int limit) 6688 { 6689 struct sockaddr *sa; 6690 struct sctp_tcb *stcb; 6691 unsigned int incr, at, i; 6692 6693 at = 0; 6694 sa = addr; 6695 *num_v6 = *num_v4 = 0; 6696 /* account and validate addresses */ 6697 if (totaddr == 0) { 6698 return (EINVAL); 6699 } 6700 for (i = 0; i < totaddr; i++) { 6701 if (at + sizeof(struct sockaddr) > limit) { 6702 return (EINVAL); 6703 } 6704 switch (sa->sa_family) { 6705 #ifdef INET 6706 case AF_INET: 6707 incr = (unsigned int)sizeof(struct sockaddr_in); 6708 if (sa->sa_len != incr) { 6709 return (EINVAL); 6710 } 6711 (*num_v4) += 1; 6712 break; 6713 #endif 6714 #ifdef INET6 6715 case AF_INET6: 6716 { 6717 struct sockaddr_in6 *sin6; 6718 6719 incr = (unsigned int)sizeof(struct sockaddr_in6); 6720 if (sa->sa_len != incr) { 6721 return (EINVAL); 6722 } 6723 sin6 = (struct sockaddr_in6 *)sa; 6724 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6725 /* Must be non-mapped for connectx */ 6726 return (EINVAL); 6727 } 6728 (*num_v6) += 1; 6729 break; 6730 } 6731 #endif 6732 default: 6733 return (EINVAL); 6734 } 6735 if ((at + incr) > limit) { 6736 return (EINVAL); 6737 } 6738 SCTP_INP_INCR_REF(inp); 6739 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6740 if (stcb != NULL) { 6741 SCTP_TCB_UNLOCK(stcb); 6742 return (EALREADY); 6743 } else { 6744 
SCTP_INP_DECR_REF(inp); 6745 } 6746 at += incr; 6747 sa = (struct sockaddr *)((caddr_t)sa + incr); 6748 } 6749 return (0); 6750 } 6751 6752 /* 6753 * sctp_bindx(ADD) for one address. 6754 * assumes all arguments are valid/checked by caller. 6755 */ 6756 void 6757 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6758 struct sockaddr *sa, uint32_t vrf_id, int *error, 6759 void *p) 6760 { 6761 #if defined(INET) && defined(INET6) 6762 struct sockaddr_in sin; 6763 #endif 6764 #ifdef INET6 6765 struct sockaddr_in6 *sin6; 6766 #endif 6767 #ifdef INET 6768 struct sockaddr_in *sinp; 6769 #endif 6770 struct sockaddr *addr_to_use; 6771 struct sctp_inpcb *lep; 6772 uint16_t port; 6773 6774 /* see if we're bound all already! */ 6775 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6776 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6777 *error = EINVAL; 6778 return; 6779 } 6780 switch (sa->sa_family) { 6781 #ifdef INET6 6782 case AF_INET6: 6783 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6784 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6785 *error = EINVAL; 6786 return; 6787 } 6788 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6789 /* can only bind v6 on PF_INET6 sockets */ 6790 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6791 *error = EINVAL; 6792 return; 6793 } 6794 sin6 = (struct sockaddr_in6 *)sa; 6795 port = sin6->sin6_port; 6796 #ifdef INET 6797 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6798 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6799 SCTP_IPV6_V6ONLY(inp)) { 6800 /* can't bind v4-mapped on PF_INET sockets */ 6801 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6802 *error = EINVAL; 6803 return; 6804 } 6805 in6_sin6_2_sin(&sin, sin6); 6806 addr_to_use = (struct sockaddr *)&sin; 6807 } else { 6808 addr_to_use = sa; 6809 } 6810 #else 6811 addr_to_use = sa; 6812 #endif 6813 break; 6814 #endif 6815 #ifdef INET 6816 case AF_INET: 6817 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6818 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6819 *error = EINVAL; 6820 return; 6821 } 6822 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6823 SCTP_IPV6_V6ONLY(inp)) { 6824 /* can't bind v4 on PF_INET sockets */ 6825 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6826 *error = EINVAL; 6827 return; 6828 } 6829 sinp = (struct sockaddr_in *)sa; 6830 port = sinp->sin_port; 6831 addr_to_use = sa; 6832 break; 6833 #endif 6834 default: 6835 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6836 *error = EINVAL; 6837 return; 6838 } 6839 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6840 if (p == NULL) { 6841 /* Can't get proc for Net/Open BSD */ 6842 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6843 *error = EINVAL; 6844 return; 6845 } 6846 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6847 return; 6848 } 6849 /* Validate the incoming port. */ 6850 if ((port != 0) && (port != inp->sctp_lport)) { 6851 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6852 *error = EINVAL; 6853 return; 6854 } 6855 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6856 if (lep == NULL) { 6857 /* add the address */ 6858 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6859 SCTP_ADD_IP_ADDRESS, vrf_id); 6860 } else { 6861 if (lep != inp) { 6862 *error = EADDRINUSE; 6863 } 6864 SCTP_INP_DECR_REF(lep); 6865 } 6866 } 6867 6868 /* 6869 * sctp_bindx(DELETE) for one address. 
6870 * assumes all arguments are valid/checked by caller. 6871 */ 6872 void 6873 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6874 struct sockaddr *sa, uint32_t vrf_id, int *error) 6875 { 6876 struct sockaddr *addr_to_use; 6877 #if defined(INET) && defined(INET6) 6878 struct sockaddr_in6 *sin6; 6879 struct sockaddr_in sin; 6880 #endif 6881 6882 /* see if we're bound all already! */ 6883 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6884 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6885 *error = EINVAL; 6886 return; 6887 } 6888 switch (sa->sa_family) { 6889 #ifdef INET6 6890 case AF_INET6: 6891 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6892 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6893 *error = EINVAL; 6894 return; 6895 } 6896 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6897 /* can only bind v6 on PF_INET6 sockets */ 6898 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6899 *error = EINVAL; 6900 return; 6901 } 6902 #ifdef INET 6903 sin6 = (struct sockaddr_in6 *)sa; 6904 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6905 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6906 SCTP_IPV6_V6ONLY(inp)) { 6907 /* can't bind mapped-v4 on PF_INET sockets */ 6908 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6909 *error = EINVAL; 6910 return; 6911 } 6912 in6_sin6_2_sin(&sin, sin6); 6913 addr_to_use = (struct sockaddr *)&sin; 6914 } else { 6915 addr_to_use = sa; 6916 } 6917 #else 6918 addr_to_use = sa; 6919 #endif 6920 break; 6921 #endif 6922 #ifdef INET 6923 case AF_INET: 6924 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6925 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6926 *error = EINVAL; 6927 return; 6928 } 6929 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6930 SCTP_IPV6_V6ONLY(inp)) { 6931 /* can't bind v4 on PF_INET sockets */ 6932 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6933 *error = EINVAL; 6934 return; 6935 } 6936 addr_to_use = sa; 6937 break; 6938 #endif 6939 default: 6940 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6941 *error = EINVAL; 6942 return; 6943 } 6944 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6945 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6946 vrf_id); 6947 } 6948 6949 /* 6950 * returns the valid local address count for an assoc, taking into account 6951 * all scoping rules 6952 */ 6953 int 6954 sctp_local_addr_count(struct sctp_tcb *stcb) 6955 { 6956 int loopback_scope; 6957 #if defined(INET) 6958 int ipv4_local_scope, ipv4_addr_legal; 6959 #endif 6960 #if defined(INET6) 6961 int local_scope, site_scope, ipv6_addr_legal; 6962 #endif 6963 struct sctp_vrf *vrf; 6964 struct sctp_ifn *sctp_ifn; 6965 struct sctp_ifa *sctp_ifa; 6966 int count = 0; 6967 6968 /* Turn on all the appropriate scopes */ 6969 loopback_scope = stcb->asoc.scope.loopback_scope; 6970 #if defined(INET) 6971 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6972 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6973 #endif 6974 #if defined(INET6) 6975 local_scope = stcb->asoc.scope.local_scope; 6976 site_scope = stcb->asoc.scope.site_scope; 6977 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6978 #endif 6979 SCTP_IPI_ADDR_RLOCK(); 6980 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6981 if (vrf == NULL) { 6982 /* no vrf, no addresses */ 6983 SCTP_IPI_ADDR_RUNLOCK(); 6984 return (0); 6985 } 6986 6987 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6988 /* 6989 * bound all case: go through all ifns on the vrf 6990 */ 6991 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6992 if ((loopback_scope == 0) && 6993 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6994 continue; 6995 } 6996 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6997 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6998 continue; 6999 switch (sctp_ifa->address.sa.sa_family) { 7000 #ifdef INET 7001 case AF_INET: 7002 if (ipv4_addr_legal) { 7003 struct sockaddr_in *sin; 7004 7005 sin = &sctp_ifa->address.sin; 7006 if (sin->sin_addr.s_addr == 0) { 7007 /* 7008 * skip unspecified 7009 * addrs 7010 */ 7011 continue; 7012 } 7013 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 7014 &sin->sin_addr) != 0) { 7015 continue; 7016 } 7017 if ((ipv4_local_scope == 0) && 7018 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 7019 continue; 7020 } 7021 /* count this one */ 7022 count++; 7023 } else { 7024 continue; 7025 } 7026 break; 7027 #endif 7028 #ifdef INET6 7029 case AF_INET6: 7030 if (ipv6_addr_legal) { 7031 struct sockaddr_in6 *sin6; 7032 7033 sin6 = &sctp_ifa->address.sin6; 7034 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 7035 continue; 7036 } 7037 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 7038 &sin6->sin6_addr) != 0) { 7039 continue; 7040 } 7041 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 7042 if (local_scope == 0) 7043 continue; 7044 if (sin6->sin6_scope_id == 0) { 7045 if (sa6_recoverscope(sin6) != 0) 7046 /* 7047 * 7048 * bad 7049 * link 7050 * 7051 * local 7052 * 7053 * address 7054 */ 7055 continue; 7056 } 7057 } 7058 if ((site_scope == 0) && 7059 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7060 continue; 7061 } 7062 /* count this one */ 7063 count++; 7064 } 7065 break; 7066 #endif 7067 default: 7068 /* TSNH */ 7069 break; 7070 } 7071 } 7072 } 7073 } else { 7074 /* 7075 * subset bound case 7076 */ 7077 struct sctp_laddr *laddr; 7078 7079 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7080 sctp_nxt_addr) { 7081 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7082 continue; 7083 } 7084 /* count this one */ 7085 count++; 7086 } 7087 } 7088 SCTP_IPI_ADDR_RUNLOCK(); 7089 return (count); 7090 } 7091 7092 #if defined(SCTP_LOCAL_TRACE_BUF) 7093 7094 void 7095 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7096 { 7097 uint32_t saveindex, newindex; 7098 7099 do { 7100 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7101 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7102 newindex = 1; 7103 } else { 7104 newindex = saveindex + 1; 7105 } 7106 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7107 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7108 saveindex = 0; 7109 } 7110 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7111 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7112 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7113 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7114 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7115 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7116 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7117 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7118 } 7119 7120 #endif 7121 static bool 7122 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7123 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7124 { 7125 struct ip *iph; 7126 #ifdef INET6 7127 struct ip6_hdr *ip6; 7128 #endif 7129 struct mbuf *sp, *last; 7130 struct udphdr *uhdr; 7131 uint16_t port; 7132 7133 if ((m->m_flags & M_PKTHDR) == 0) { 7134 /* Can't handle one that is not a pkt hdr */ 7135 goto out; 7136 } 7137 /* Pull the src port */ 7138 iph = mtod(m, struct ip *); 7139 uhdr = (struct udphdr *)((caddr_t)iph + off); 7140 port = uhdr->uh_sport; 7141 /* 7142 * Split out the mbuf chain. Leave the IP header in m, place the 7143 * rest in the sp. 7144 */ 7145 sp = m_split(m, off, M_NOWAIT); 7146 if (sp == NULL) { 7147 /* Gak, drop packet, we can't do a split */ 7148 goto out; 7149 } 7150 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7151 /* Gak, packet can't have an SCTP header in it - too small */ 7152 m_freem(sp); 7153 goto out; 7154 } 7155 /* Now pull up the UDP header and SCTP header together */ 7156 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7157 if (sp == NULL) { 7158 /* Gak pullup failed */ 7159 goto out; 7160 } 7161 /* Trim out the UDP header */ 7162 m_adj(sp, sizeof(struct udphdr)); 7163 7164 /* Now reconstruct the mbuf chain */ 7165 for (last = m; last->m_next; last = last->m_next); 7166 last->m_next = sp; 7167 m->m_pkthdr.len += sp->m_pkthdr.len; 7168 /* 7169 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7170 * checksum and it was valid. Since CSUM_DATA_VALID == 7171 * CSUM_SCTP_VALID this would imply that the HW also verified the 7172 * SCTP checksum. Therefore, clear the bit. 
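 * UDP uses the Internet checksum while SCTP uses CRC32c, so a
 * hardware-verified UDP checksum says nothing about the SCTP
 * checksum; leaving the bit set would make the SCTP input path
 * skip its own verification.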
7173 */ 7174 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7175 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7176 m->m_pkthdr.len, 7177 if_name(m->m_pkthdr.rcvif), 7178 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7179 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7180 iph = mtod(m, struct ip *); 7181 switch (iph->ip_v) { 7182 #ifdef INET 7183 case IPVERSION: 7184 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7185 sctp_input_with_port(m, off, port); 7186 break; 7187 #endif 7188 #ifdef INET6 7189 case IPV6_VERSION >> 4: 7190 ip6 = mtod(m, struct ip6_hdr *); 7191 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7192 sctp6_input_with_port(&m, &off, port); 7193 break; 7194 #endif 7195 default: 7196 goto out; 7197 break; 7198 } 7199 return (true); 7200 out: 7201 m_freem(m); 7202 7203 return (true); 7204 } 7205 7206 #ifdef INET 7207 static void 7208 sctp_recv_icmp_tunneled_packet(udp_tun_icmp_param_t param) 7209 { 7210 struct icmp *icmp = param.icmp; 7211 struct ip *outer_ip, *inner_ip; 7212 struct sctphdr *sh; 7213 struct udphdr *udp; 7214 struct sctp_inpcb *inp; 7215 struct sctp_tcb *stcb; 7216 struct sctp_nets *net; 7217 struct sctp_init_chunk *ch; 7218 struct sockaddr_in src, dst; 7219 uint8_t type, code; 7220 7221 inner_ip = &icmp->icmp_ip; 7222 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7223 if (ntohs(outer_ip->ip_len) < 7224 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7225 return; 7226 } 7227 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7228 sh = (struct sctphdr *)(udp + 1); 7229 memset(&src, 0, sizeof(struct sockaddr_in)); 7230 src.sin_family = AF_INET; 7231 src.sin_len = sizeof(struct sockaddr_in); 7232 src.sin_port = sh->src_port; 7233 src.sin_addr = inner_ip->ip_src; 7234 memset(&dst, 0, sizeof(struct sockaddr_in)); 7235 dst.sin_family = AF_INET; 7236 dst.sin_len = sizeof(struct sockaddr_in); 7237 dst.sin_port = sh->dest_port; 7238 dst.sin_addr = inner_ip->ip_dst; 7239 /* 7240 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7241 * holds our local endpoint address. Thus we reverse the dst and the 7242 * src in the lookup. 7243 */ 7244 inp = NULL; 7245 net = NULL; 7246 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7247 (struct sockaddr *)&src, 7248 &inp, &net, 1, 7249 SCTP_DEFAULT_VRFID); 7250 if ((stcb != NULL) && 7251 (net != NULL) && 7252 (inp != NULL)) { 7253 /* Check the UDP port numbers */ 7254 if ((udp->uh_dport != net->port) || 7255 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7256 SCTP_TCB_UNLOCK(stcb); 7257 return; 7258 } 7259 /* Check the verification tag */ 7260 if (ntohl(sh->v_tag) != 0) { 7261 /* 7262 * This must be the verification tag used for 7263 * sending out packets. We don't consider packets 7264 * reflecting the verification tag. 7265 */ 7266 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7267 SCTP_TCB_UNLOCK(stcb); 7268 return; 7269 } 7270 } else { 7271 if (ntohs(outer_ip->ip_len) >= 7272 sizeof(struct ip) + 7273 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7274 /* 7275 * In this case we can check if we got an 7276 * INIT chunk and if the initiate tag 7277 * matches. 
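 * An INIT goes out with a zero verification tag in the common
 * header but carries our my_vtag in its initiate_tag field, so the
 * quoted chunk can still be tied to this association.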
7278 */ 7279 ch = (struct sctp_init_chunk *)(sh + 1); 7280 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7281 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7282 SCTP_TCB_UNLOCK(stcb); 7283 return; 7284 } 7285 } else { 7286 SCTP_TCB_UNLOCK(stcb); 7287 return; 7288 } 7289 } 7290 type = icmp->icmp_type; 7291 code = icmp->icmp_code; 7292 if ((type == ICMP_UNREACH) && 7293 (code == ICMP_UNREACH_PORT)) { 7294 code = ICMP_UNREACH_PROTOCOL; 7295 } 7296 sctp_notify(inp, stcb, net, type, code, 7297 ntohs(inner_ip->ip_len), 7298 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7299 } else { 7300 if ((stcb == NULL) && (inp != NULL)) { 7301 /* reduce ref-count */ 7302 SCTP_INP_WLOCK(inp); 7303 SCTP_INP_DECR_REF(inp); 7304 SCTP_INP_WUNLOCK(inp); 7305 } 7306 if (stcb) { 7307 SCTP_TCB_UNLOCK(stcb); 7308 } 7309 } 7310 return; 7311 } 7312 #endif 7313 7314 #ifdef INET6 7315 static void 7316 sctp_recv_icmp6_tunneled_packet(udp_tun_icmp_param_t param) 7317 { 7318 struct ip6ctlparam *ip6cp = param.ip6cp; 7319 struct sctp_inpcb *inp; 7320 struct sctp_tcb *stcb; 7321 struct sctp_nets *net; 7322 struct sctphdr sh; 7323 struct udphdr udp; 7324 struct sockaddr_in6 src, dst; 7325 uint8_t type, code; 7326 7327 /* 7328 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7329 */ 7330 if (ip6cp->ip6c_m == NULL) { 7331 return; 7332 } 7333 /* 7334 * Check if we can safely examine the ports and the verification tag 7335 * of the SCTP common header. 7336 */ 7337 if (ip6cp->ip6c_m->m_pkthdr.len < 7338 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7339 return; 7340 } 7341 /* Copy out the UDP header. */ 7342 memset(&udp, 0, sizeof(struct udphdr)); 7343 m_copydata(ip6cp->ip6c_m, 7344 ip6cp->ip6c_off, 7345 sizeof(struct udphdr), 7346 (caddr_t)&udp); 7347 /* Copy out the port numbers and the verification tag. */ 7348 memset(&sh, 0, sizeof(struct sctphdr)); 7349 m_copydata(ip6cp->ip6c_m, 7350 ip6cp->ip6c_off + sizeof(struct udphdr), 7351 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7352 (caddr_t)&sh); 7353 memset(&src, 0, sizeof(struct sockaddr_in6)); 7354 src.sin6_family = AF_INET6; 7355 src.sin6_len = sizeof(struct sockaddr_in6); 7356 src.sin6_port = sh.src_port; 7357 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7358 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7359 return; 7360 } 7361 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7362 dst.sin6_family = AF_INET6; 7363 dst.sin6_len = sizeof(struct sockaddr_in6); 7364 dst.sin6_port = sh.dest_port; 7365 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7366 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7367 return; 7368 } 7369 inp = NULL; 7370 net = NULL; 7371 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7372 (struct sockaddr *)&src, 7373 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7374 if ((stcb != NULL) && 7375 (net != NULL) && 7376 (inp != NULL)) { 7377 /* Check the UDP port numbers */ 7378 if ((udp.uh_dport != net->port) || 7379 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7380 SCTP_TCB_UNLOCK(stcb); 7381 return; 7382 } 7383 /* Check the verification tag */ 7384 if (ntohl(sh.v_tag) != 0) { 7385 /* 7386 * This must be the verification tag used for 7387 * sending out packets. We don't consider packets 7388 * reflecting the verification tag. 
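 * Only a quoted tag equal to peer_vtag (the tag this end places in
 * the packets it sends) is accepted here.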
7389 */ 7390 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7391 SCTP_TCB_UNLOCK(stcb); 7392 return; 7393 } 7394 } else { 7395 if (ip6cp->ip6c_m->m_pkthdr.len >= 7396 ip6cp->ip6c_off + sizeof(struct udphdr) + 7397 sizeof(struct sctphdr) + 7398 sizeof(struct sctp_chunkhdr) + 7399 offsetof(struct sctp_init, a_rwnd)) { 7400 /* 7401 * In this case we can check if we got an 7402 * INIT chunk and if the initiate tag 7403 * matches. 7404 */ 7405 uint32_t initiate_tag; 7406 uint8_t chunk_type; 7407 7408 m_copydata(ip6cp->ip6c_m, 7409 ip6cp->ip6c_off + 7410 sizeof(struct udphdr) + 7411 sizeof(struct sctphdr), 7412 sizeof(uint8_t), 7413 (caddr_t)&chunk_type); 7414 m_copydata(ip6cp->ip6c_m, 7415 ip6cp->ip6c_off + 7416 sizeof(struct udphdr) + 7417 sizeof(struct sctphdr) + 7418 sizeof(struct sctp_chunkhdr), 7419 sizeof(uint32_t), 7420 (caddr_t)&initiate_tag); 7421 if ((chunk_type != SCTP_INITIATION) || 7422 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7423 SCTP_TCB_UNLOCK(stcb); 7424 return; 7425 } 7426 } else { 7427 SCTP_TCB_UNLOCK(stcb); 7428 return; 7429 } 7430 } 7431 type = ip6cp->ip6c_icmp6->icmp6_type; 7432 code = ip6cp->ip6c_icmp6->icmp6_code; 7433 if ((type == ICMP6_DST_UNREACH) && 7434 (code == ICMP6_DST_UNREACH_NOPORT)) { 7435 type = ICMP6_PARAM_PROB; 7436 code = ICMP6_PARAMPROB_NEXTHEADER; 7437 } 7438 sctp6_notify(inp, stcb, net, type, code, 7439 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7440 } else { 7441 if ((stcb == NULL) && (inp != NULL)) { 7442 /* reduce inp's ref-count */ 7443 SCTP_INP_WLOCK(inp); 7444 SCTP_INP_DECR_REF(inp); 7445 SCTP_INP_WUNLOCK(inp); 7446 } 7447 if (stcb) { 7448 SCTP_TCB_UNLOCK(stcb); 7449 } 7450 } 7451 } 7452 #endif 7453 7454 void 7455 sctp_over_udp_stop(void) 7456 { 7457 /* 7458 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7459 * for writing! 7460 */ 7461 #ifdef INET 7462 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7463 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7464 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7465 } 7466 #endif 7467 #ifdef INET6 7468 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7469 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7470 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7471 } 7472 #endif 7473 } 7474 7475 int 7476 sctp_over_udp_start(void) 7477 { 7478 uint16_t port; 7479 int ret; 7480 #ifdef INET 7481 struct sockaddr_in sin; 7482 #endif 7483 #ifdef INET6 7484 struct sockaddr_in6 sin6; 7485 #endif 7486 /* 7487 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7488 * for writing! 7489 */ 7490 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7491 if (ntohs(port) == 0) { 7492 /* Must have a port set */ 7493 return (EINVAL); 7494 } 7495 #ifdef INET 7496 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7497 /* Already running -- must stop first */ 7498 return (EALREADY); 7499 } 7500 #endif 7501 #ifdef INET6 7502 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7503 /* Already running -- must stop first */ 7504 return (EALREADY); 7505 } 7506 #endif 7507 #ifdef INET 7508 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7509 SOCK_DGRAM, IPPROTO_UDP, 7510 curthread->td_ucred, curthread))) { 7511 sctp_over_udp_stop(); 7512 return (ret); 7513 } 7514 /* Call the special UDP hook. */ 7515 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7516 sctp_recv_udp_tunneled_packet, 7517 sctp_recv_icmp_tunneled_packet, 7518 NULL))) { 7519 sctp_over_udp_stop(); 7520 return (ret); 7521 } 7522 /* Ok, we have a socket, bind it to the port. 
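 * The IPv4 tunnel socket binds INADDR_ANY on the configured
 * tunneling port.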
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}

/* Record the path MTU for the given peer address and FIB in the TCP host cache. */
void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

/*
 * Look up the path MTU cached for the given peer address and FIB in the TCP
 * host cache.  Returns 0 if no entry exists or the address family is not
 * supported.
 */
uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}

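/*
 * Set the primary state of the association while preserving its substate
 * flags, e.g.
 *
 *	sctp_set_state(stcb, SCTP_STATE_SHUTDOWN_SENT);
 *
 * Entering one of the SHUTDOWN states clears SCTP_STATE_SHUTDOWN_PENDING.
 * When KDTRACE_HOOKS is enabled, the state-change probe fires whenever the
 * primary state actually changes (except for the initial EMPTY -> INUSE
 * transition).
 */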
void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	    (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

/*
 * Add substate flags to the association without changing its primary state.
 * When KDTRACE_HOOKS is enabled, the state-change probe fires when
 * SCTP_STATE_ABOUT_TO_BE_FREED or SCTP_STATE_SHUTDOWN_PENDING is newly set.
 */
void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}