/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = SCTP_SBAVAIL(sb);
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(SOCK_MTX(inp->sctp_socket));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_rcv));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_snd));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}

void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code the time to be positive and
 * know that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
	uint64_t temp;
	uint32_t ticks;

	if (hz == 1000) {
		ticks = msecs;
	} else {
		temp = (((uint64_t)msecs * hz) + 999) / 1000;
		if (temp > UINT32_MAX) {
			ticks = UINT32_MAX;
		} else {
			ticks = (uint32_t)temp;
		}
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t msecs;

	if (hz == 1000) {
		msecs = ticks;
	} else {
		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
		if (temp > UINT32_MAX) {
			msecs = UINT32_MAX;
		} else {
			msecs = (uint32_t)temp;
		}
	}
	return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
	uint64_t temp;
	uint32_t ticks;

	temp = (uint64_t)secs * hz;
	if (temp > UINT32_MAX) {
		ticks = UINT32_MAX;
	} else {
		ticks = (uint32_t)temp;
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t secs;

	temp = ((uint64_t)ticks + (hz - 1)) / hz;
	if (temp > UINT32_MAX) {
		secs = UINT32_MAX;
	} else {
		secs = (uint32_t)temp;
	}
	return (secs);
}

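/*
 * Illustrative worked example of the rounding above (editor's addition, not
 * from the original source): assuming hz = 250, sctp_msecs_to_ticks(1) =
 * (1 * 250 + 999) / 1000 = 1 tick, and sctp_ticks_to_msecs(1) =
 * (1 * 1000 + 249) / 250 = 4 msecs, so a positive time always corresponds to
 * a positive number of ticks, as the comment above the converters requires.
 */
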
/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};

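/*
 * Illustrative lookups against the table above (editor's addition, not from
 * the original source): sctp_get_prev_mtu(1500) returns 1492,
 * sctp_get_next_mtu(1500) returns 1536, and sctp_get_prev_mtu(60) returns 60,
 * since 60 is below the smallest table entry and already a multiple of 4.
 */
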
/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4 smaller
 * or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	val &= 0xfffffffc;
	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}

int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t initial_tsn, uint32_t vrf_id,
    uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->rcv_edmid = inp->rcv_edmid;
	asoc->snd_edmid = SCTP_EDMID_NONE;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	if (override_tag) {
		asoc->init_seq_number = initial_tsn;
	} else {
		asoc->init_seq_number = sctp_select_initial_TSN(&inp->sctp_ep);
	}
	asoc->asconf_seq_out = asoc->init_seq_number;
	asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->sending_seq = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->init_seq_number - 1;
	/* we are optimistic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->init_seq_number - 1;
	asoc->asconf_seq_in = asoc->init_seq_number - 1;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = 0;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

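	/*
	 * Editor's note (not from the original source): the module numbers
	 * copied above index the sctp_cc_functions[] and sctp_ss_functions[]
	 * vectors declared at the top of this file, so the endpoint defaults
	 * select the congestion control and stream scheduling callbacks used
	 * below (e.g. sctp_ss_init_stream() and sctp_ss_init()).
	 */
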
	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	SCTP_TCB_LOCK(stcb);
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc);
	SCTP_TCB_UNLOCK(stcb);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}

static void
sctp_iterator_work(struct sctp_iterator *it)
{
	struct epoch_tracker et;
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

	NET_EPOCH_ENTER(et);
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		NET_EPOCH_EXIT(et);
		return;
	}
select_a_new_ep:
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb != NULL) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_subtract_int(&it->stcb->asoc.refcnt, 1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_subtract_int(&it->stcb->asoc.refcnt, 1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		    ("%s: stcb %p does not belong to inp %p, but inp %p",
		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));
		SCTP_INP_RLOCK_ASSERT(it->inp);
		SCTP_TCB_LOCK_ASSERT(it->stcb);

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
		SCTP_INP_RLOCK_ASSERT(it->inp);
		SCTP_TCB_LOCK_ASSERT(it->stcb);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0) {
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
			SCTP_INP_RLOCK_ASSERT(it->inp);
			SCTP_TCB_LOCK_ASSERT(it->stcb);
		}

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}

static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL, /* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Freeing if we are stopping or put back on the
			 * addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				sctp_asconf_iterator_end(asc, 0);
			} else {
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */

void
sctp_timeout_handler(void *t)
{
	struct epoch_tracker et;
	struct timeval tv;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
	int type;
	int i, secret;
	bool did_output, released_asoc_reference;

	/*
	 * If inp, stcb or net are not NULL, then references to these were
	 * added when the timer was started, and must be released before
	 * this function returns.
	 */
	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	NET_EPOCH_ENTER(et);
	released_asoc_reference = false;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	KASSERT(tmr->self == NULL || tmr->self == tmr,
	    ("sctp_timeout_handler: tmr->self corrupted"));
	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
	    ("sctp_timeout_handler: invalid timer type %d", tmr->type));
	type = tmr->type;
	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
	    type, stcb, stcb->sctp_ep));
	tmr->stopped_from = 0xa001;
	if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to CLOSED association.\n",
		    type);
		goto out_decr;
	}
	tmr->stopped_from = 0xa002;
	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to not being active.\n",
		    type);
		goto out_decr;
	}

	tmr->stopped_from = 0xa003;
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/*
		 * Release reference so that association can be freed if
		 * necessary below. This is safe now that we have acquired
		 * the lock.
		 */
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		released_asoc_reference = true;
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d handler exiting due to CLOSED association.\n",
			    type);
			goto out;
		}
	} else if (inp != NULL) {
		SCTP_INP_WLOCK(inp);
	} else {
		SCTP_WQ_ADDR_LOCK();
	}

	/* Record in stopped_from which timeout occurred. */
	tmr->stopped_from = type;
	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_SEND:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		did_output = true;
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

1851 */ 1852 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1853 if (chk->whoTo != NULL) { 1854 break; 1855 } 1856 } 1857 if (chk != NULL) { 1858 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1859 } 1860 } 1861 break; 1862 case SCTP_TIMER_TYPE_INIT: 1863 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1864 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1865 type, inp, stcb, net)); 1866 SCTP_STAT_INCR(sctps_timoinit); 1867 stcb->asoc.timoinit++; 1868 if (sctp_t1init_timer(inp, stcb, net)) { 1869 /* no need to unlock on tcb its gone */ 1870 goto out_decr; 1871 } 1872 did_output = false; 1873 break; 1874 case SCTP_TIMER_TYPE_RECV: 1875 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1876 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1877 type, inp, stcb, net)); 1878 SCTP_STAT_INCR(sctps_timosack); 1879 stcb->asoc.timosack++; 1880 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1881 #ifdef SCTP_AUDITING_ENABLED 1882 sctp_auditing(4, inp, stcb, NULL); 1883 #endif 1884 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1885 did_output = true; 1886 break; 1887 case SCTP_TIMER_TYPE_SHUTDOWN: 1888 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1889 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1890 type, inp, stcb, net)); 1891 SCTP_STAT_INCR(sctps_timoshutdown); 1892 stcb->asoc.timoshutdown++; 1893 if (sctp_shutdown_timer(inp, stcb, net)) { 1894 /* no need to unlock on tcb its gone */ 1895 goto out_decr; 1896 } 1897 #ifdef SCTP_AUDITING_ENABLED 1898 sctp_auditing(4, inp, stcb, net); 1899 #endif 1900 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1901 did_output = true; 1902 break; 1903 case SCTP_TIMER_TYPE_HEARTBEAT: 1904 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1905 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1906 type, inp, stcb, net)); 1907 SCTP_STAT_INCR(sctps_timoheartbeat); 1908 stcb->asoc.timoheartbeat++; 1909 if (sctp_heartbeat_timer(inp, stcb, net)) { 1910 /* no need to unlock on tcb its gone */ 1911 goto out_decr; 1912 } 1913 #ifdef SCTP_AUDITING_ENABLED 1914 sctp_auditing(4, inp, stcb, net); 1915 #endif 1916 if ((net->dest_state & SCTP_ADDR_NOHB) == 0) { 1917 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1918 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1919 did_output = true; 1920 } else { 1921 did_output = false; 1922 } 1923 break; 1924 case SCTP_TIMER_TYPE_COOKIE: 1925 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1926 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1927 type, inp, stcb, net)); 1928 SCTP_STAT_INCR(sctps_timocookie); 1929 stcb->asoc.timocookie++; 1930 if (sctp_cookie_timer(inp, stcb, net)) { 1931 /* no need to unlock on tcb its gone */ 1932 goto out_decr; 1933 } 1934 #ifdef SCTP_AUDITING_ENABLED 1935 sctp_auditing(4, inp, stcb, net); 1936 #endif 1937 /* 1938 * We consider T3 and Cookie timer pretty much the same with 1939 * respect to where from in chunk_output. 
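 * Both therefore pass SCTP_OUTPUT_FROM_T3 as the reason to
 * sctp_chunk_output() below.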
1940 */ 1941 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1942 did_output = true; 1943 break; 1944 case SCTP_TIMER_TYPE_NEWCOOKIE: 1945 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1946 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1947 type, inp, stcb, net)); 1948 SCTP_STAT_INCR(sctps_timosecret); 1949 (void)SCTP_GETTIME_TIMEVAL(&tv); 1950 inp->sctp_ep.time_of_secret_change = (unsigned int)tv.tv_sec; 1951 inp->sctp_ep.last_secret_number = 1952 inp->sctp_ep.current_secret_number; 1953 inp->sctp_ep.current_secret_number++; 1954 if (inp->sctp_ep.current_secret_number >= 1955 SCTP_HOW_MANY_SECRETS) { 1956 inp->sctp_ep.current_secret_number = 0; 1957 } 1958 secret = (int)inp->sctp_ep.current_secret_number; 1959 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1960 inp->sctp_ep.secret_key[secret][i] = 1961 sctp_select_initial_TSN(&inp->sctp_ep); 1962 } 1963 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1964 did_output = false; 1965 break; 1966 case SCTP_TIMER_TYPE_PATHMTURAISE: 1967 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1968 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1969 type, inp, stcb, net)); 1970 SCTP_STAT_INCR(sctps_timopathmtu); 1971 sctp_pathmtu_timer(inp, stcb, net); 1972 did_output = false; 1973 break; 1974 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1975 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1976 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1977 type, inp, stcb, net)); 1978 if (sctp_shutdownack_timer(inp, stcb, net)) { 1979 /* no need to unlock on tcb its gone */ 1980 goto out_decr; 1981 } 1982 SCTP_STAT_INCR(sctps_timoshutdownack); 1983 stcb->asoc.timoshutdownack++; 1984 #ifdef SCTP_AUDITING_ENABLED 1985 sctp_auditing(4, inp, stcb, net); 1986 #endif 1987 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1988 did_output = true; 1989 break; 1990 case SCTP_TIMER_TYPE_ASCONF: 1991 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1992 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1993 type, inp, stcb, net)); 1994 SCTP_STAT_INCR(sctps_timoasconf); 1995 if (sctp_asconf_timer(inp, stcb, net)) { 1996 /* no need to unlock on tcb its gone */ 1997 goto out_decr; 1998 } 1999 #ifdef SCTP_AUDITING_ENABLED 2000 sctp_auditing(4, inp, stcb, net); 2001 #endif 2002 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 2003 did_output = true; 2004 break; 2005 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2006 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2007 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2008 type, inp, stcb, net)); 2009 SCTP_STAT_INCR(sctps_timoshutdownguard); 2010 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 2011 "Shutdown guard timer expired"); 2012 sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED); 2013 /* no need to unlock on tcb its gone */ 2014 goto out_decr; 2015 case SCTP_TIMER_TYPE_AUTOCLOSE: 2016 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2017 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2018 type, inp, stcb, net)); 2019 SCTP_STAT_INCR(sctps_timoautoclose); 2020 sctp_autoclose_timer(inp, stcb); 2021 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2022 did_output = true; 2023 break; 2024 case SCTP_TIMER_TYPE_STRRESET: 2025 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2026 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2027 type, inp, stcb, net)); 2028 SCTP_STAT_INCR(sctps_timostrmrst); 2029 if 
(sctp_strreset_timer(inp, stcb)) { 2030 /* No need to unlock the tcb; it's gone. */ 2031 goto out_decr; 2032 } 2033 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2034 did_output = true; 2035 break;
2036 case SCTP_TIMER_TYPE_INPKILL: 2037 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2038 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2039 type, inp, stcb, net)); 2040 SCTP_STAT_INCR(sctps_timoinpkill); 2041 /* 2042 * Special case: take away our own reference, since WE are 2043 * the killer. 2044 */ 2045 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2046 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2047 SCTP_INP_DECR_REF(inp); 2048 SCTP_INP_WUNLOCK(inp); 2049 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2050 SCTP_CALLED_FROM_INPKILL_TIMER); 2051 inp = NULL; 2052 goto out_decr;
2053 case SCTP_TIMER_TYPE_ASOCKILL: 2054 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2055 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2056 type, inp, stcb, net)); 2057 SCTP_STAT_INCR(sctps_timoassockill); 2058 /* Can we free it yet? */ 2059 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2060 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2061 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2062 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2063 /* 2064 * sctp_free_assoc() always unlocks (or destroys) the lock, 2065 * so prevent a duplicate unlock or an unlock of a freed mtx. 2066 */ 2067 stcb = NULL; 2068 goto out_decr;
2069 case SCTP_TIMER_TYPE_ADDR_WQ: 2070 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2071 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2072 type, inp, stcb, net)); 2073 sctp_handle_addr_wq(); 2074 did_output = true; 2075 break;
2076 case SCTP_TIMER_TYPE_PRIM_DELETED: 2077 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2078 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2079 type, inp, stcb, net)); 2080 SCTP_STAT_INCR(sctps_timodelprim); 2081 sctp_delete_prim_timer(inp, stcb); 2082 did_output = false; 2083 break;
2084 default: 2085 #ifdef INVARIANTS 2086 panic("Unknown timer type %d", type); 2087 #else 2088 goto out; 2089 #endif 2090 } 2091 #ifdef SCTP_AUDITING_ENABLED 2092 sctp_audit_log(0xF1, (uint8_t)type); 2093 if (inp != NULL) 2094 sctp_auditing(5, inp, stcb, net); 2095 #endif
2096 if (did_output && (stcb != NULL)) { 2097 /* 2098 * Now we need to clean up the control chunk chain if an 2099 * ECNE is on it. It must be marked as UNSENT again so the 2100 * next call will continue to send it until we get a CWR 2101 * that removes it. It is, however, unlikely that we will 2102 * find an ECN echo on the chain. 2103 */ 2104 sctp_fix_ecn_echo(&stcb->asoc); 2105 } 2106 out: 2107 if (stcb != NULL) { 2108 SCTP_TCB_UNLOCK(stcb); 2109 } else if (inp != NULL) { 2110 SCTP_INP_WUNLOCK(inp); 2111 } else { 2112 SCTP_WQ_ADDR_UNLOCK(); 2113 } 2114
2115 out_decr: 2116 /* These reference counts were incremented in sctp_timer_start(). */ 2117 if (inp != NULL) { 2118 SCTP_INP_DECR_REF(inp); 2119 } 2120 if ((stcb != NULL) && !released_asoc_reference) { 2121 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2122 } 2123 if (net != NULL) { 2124 sctp_free_remote_addr(net); 2125 } 2126 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2127 CURVNET_RESTORE(); 2128 NET_EPOCH_EXIT(et); 2129 } 2130
2131 /*- 2132 * The following table shows which parameters must be provided 2133 * when calling sctp_timer_start(). For parameters not being 2134 * provided, NULL must be used.
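 * These requirements are enforced by the argument checks in each case
 * below: a violation panics on kernels built with INVARIANTS and the
 * call simply returns without starting a timer otherwise.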
2135 * 2136 * |Name |inp |stcb|net | 2137 * |-----------------------------|----|----|----| 2138 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2139 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2140 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2141 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2142 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2143 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2144 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2145 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2146 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2147 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2148 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2149 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2150 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2151 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2152 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2153 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2154 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2155 * 2156 */ 2157 2158 void 2159 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2160 struct sctp_nets *net) 2161 { 2162 struct sctp_timer *tmr; 2163 uint32_t to_ticks; 2164 uint32_t rndval, jitter; 2165 2166 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2167 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2168 t_type, stcb, stcb->sctp_ep)); 2169 tmr = NULL; 2170 if (stcb != NULL) { 2171 SCTP_TCB_LOCK_ASSERT(stcb); 2172 } else if (inp != NULL) { 2173 SCTP_INP_WLOCK_ASSERT(inp); 2174 } else { 2175 SCTP_WQ_ADDR_LOCK_ASSERT(); 2176 } 2177 if (stcb != NULL) { 2178 /* 2179 * Don't restart timer on association that's about to be 2180 * killed. 2181 */ 2182 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2183 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2184 SCTPDBG(SCTP_DEBUG_TIMER2, 2185 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2186 t_type, inp, stcb, net); 2187 return; 2188 } 2189 /* Don't restart timer on net that's been removed. */ 2190 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2191 SCTPDBG(SCTP_DEBUG_TIMER2, 2192 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2193 t_type, inp, stcb, net); 2194 return; 2195 } 2196 } 2197 switch (t_type) { 2198 case SCTP_TIMER_TYPE_SEND: 2199 /* Here we use the RTO timer. */ 2200 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2201 #ifdef INVARIANTS 2202 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2203 t_type, inp, stcb, net); 2204 #else 2205 return; 2206 #endif 2207 } 2208 tmr = &net->rxt_timer; 2209 if (net->RTO == 0) { 2210 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2211 } else { 2212 to_ticks = sctp_msecs_to_ticks(net->RTO); 2213 } 2214 break; 2215 case SCTP_TIMER_TYPE_INIT: 2216 /* 2217 * Here we use the INIT timer default usually about 1 2218 * second. 2219 */ 2220 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2221 #ifdef INVARIANTS 2222 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2223 t_type, inp, stcb, net); 2224 #else 2225 return; 2226 #endif 2227 } 2228 tmr = &net->rxt_timer; 2229 if (net->RTO == 0) { 2230 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2231 } else { 2232 to_ticks = sctp_msecs_to_ticks(net->RTO); 2233 } 2234 break; 2235 case SCTP_TIMER_TYPE_RECV: 2236 /* 2237 * Here we use the Delayed-Ack timer value from the inp, 2238 * usually about 200ms. 
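 * The value is taken from stcb->asoc.delayed_ack (in milliseconds)
 * and converted with sctp_msecs_to_ticks() below.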
2239 */ 2240 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2241 #ifdef INVARIANTS 2242 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2243 t_type, inp, stcb, net); 2244 #else 2245 return; 2246 #endif 2247 } 2248 tmr = &stcb->asoc.dack_timer; 2249 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2250 break; 2251 case SCTP_TIMER_TYPE_SHUTDOWN: 2252 /* Here we use the RTO of the destination. */ 2253 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2254 #ifdef INVARIANTS 2255 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2256 t_type, inp, stcb, net); 2257 #else 2258 return; 2259 #endif 2260 } 2261 tmr = &net->rxt_timer; 2262 if (net->RTO == 0) { 2263 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2264 } else { 2265 to_ticks = sctp_msecs_to_ticks(net->RTO); 2266 } 2267 break; 2268 case SCTP_TIMER_TYPE_HEARTBEAT: 2269 /* 2270 * The net is used here so that we can add in the RTO. Even 2271 * though we use a different timer. We also add the HB timer 2272 * PLUS a random jitter. 2273 */ 2274 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2275 #ifdef INVARIANTS 2276 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2277 t_type, inp, stcb, net); 2278 #else 2279 return; 2280 #endif 2281 } 2282 if ((net->dest_state & SCTP_ADDR_NOHB) && 2283 ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) { 2284 SCTPDBG(SCTP_DEBUG_TIMER2, 2285 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2286 t_type, inp, stcb, net); 2287 return; 2288 } 2289 tmr = &net->hb_timer; 2290 if (net->RTO == 0) { 2291 to_ticks = stcb->asoc.initial_rto; 2292 } else { 2293 to_ticks = net->RTO; 2294 } 2295 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2296 jitter = rndval % to_ticks; 2297 if (to_ticks > 1) { 2298 to_ticks >>= 1; 2299 } 2300 if (jitter < (UINT32_MAX - to_ticks)) { 2301 to_ticks += jitter; 2302 } else { 2303 to_ticks = UINT32_MAX; 2304 } 2305 if (!((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2306 (net->dest_state & SCTP_ADDR_REACHABLE)) && 2307 ((net->dest_state & SCTP_ADDR_PF) == 0)) { 2308 if (net->heart_beat_delay < (UINT32_MAX - to_ticks)) { 2309 to_ticks += net->heart_beat_delay; 2310 } else { 2311 to_ticks = UINT32_MAX; 2312 } 2313 } 2314 /* 2315 * Now we must convert the to_ticks that are now in ms to 2316 * ticks. 2317 */ 2318 to_ticks = sctp_msecs_to_ticks(to_ticks); 2319 break; 2320 case SCTP_TIMER_TYPE_COOKIE: 2321 /* 2322 * Here we can use the RTO timer from the network since one 2323 * RTT was complete. If a retransmission happened then we 2324 * will be using the RTO initial value. 2325 */ 2326 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2327 #ifdef INVARIANTS 2328 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2329 t_type, inp, stcb, net); 2330 #else 2331 return; 2332 #endif 2333 } 2334 tmr = &net->rxt_timer; 2335 if (net->RTO == 0) { 2336 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2337 } else { 2338 to_ticks = sctp_msecs_to_ticks(net->RTO); 2339 } 2340 break; 2341 case SCTP_TIMER_TYPE_NEWCOOKIE: 2342 /* 2343 * Nothing needed but the endpoint here usually about 60 2344 * minutes. 
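 * When it expires, sctp_timeout_handler() rotates the endpoint's
 * secret keys used for cookie signatures and re-arms the timer.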
2345 */ 2346 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2347 #ifdef INVARIANTS 2348 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2349 t_type, inp, stcb, net); 2350 #else 2351 return; 2352 #endif 2353 } 2354 tmr = &inp->sctp_ep.signature_change; 2355 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2356 break; 2357 case SCTP_TIMER_TYPE_PATHMTURAISE: 2358 /* 2359 * Here we use the value found in the EP for PMTUD, usually 2360 * about 10 minutes. 2361 */ 2362 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2363 #ifdef INVARIANTS 2364 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2365 t_type, inp, stcb, net); 2366 #else 2367 return; 2368 #endif 2369 } 2370 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2371 SCTPDBG(SCTP_DEBUG_TIMER2, 2372 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2373 t_type, inp, stcb, net); 2374 return; 2375 } 2376 tmr = &net->pmtu_timer; 2377 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2378 break; 2379 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2380 /* Here we use the RTO of the destination. */ 2381 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2382 #ifdef INVARIANTS 2383 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2384 t_type, inp, stcb, net); 2385 #else 2386 return; 2387 #endif 2388 } 2389 tmr = &net->rxt_timer; 2390 if (net->RTO == 0) { 2391 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2392 } else { 2393 to_ticks = sctp_msecs_to_ticks(net->RTO); 2394 } 2395 break; 2396 case SCTP_TIMER_TYPE_ASCONF: 2397 /* 2398 * Here the timer comes from the stcb but its value is from 2399 * the net's RTO. 2400 */ 2401 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2402 #ifdef INVARIANTS 2403 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2404 t_type, inp, stcb, net); 2405 #else 2406 return; 2407 #endif 2408 } 2409 tmr = &stcb->asoc.asconf_timer; 2410 if (net->RTO == 0) { 2411 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2412 } else { 2413 to_ticks = sctp_msecs_to_ticks(net->RTO); 2414 } 2415 break; 2416 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2417 /* 2418 * Here we use the endpoints shutdown guard timer usually 2419 * about 3 minutes. 2420 */ 2421 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2422 #ifdef INVARIANTS 2423 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2424 t_type, inp, stcb, net); 2425 #else 2426 return; 2427 #endif 2428 } 2429 tmr = &stcb->asoc.shut_guard_timer; 2430 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2431 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2432 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2433 } else { 2434 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2435 } 2436 } else { 2437 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2438 } 2439 break; 2440 case SCTP_TIMER_TYPE_AUTOCLOSE: 2441 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2442 #ifdef INVARIANTS 2443 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2444 t_type, inp, stcb, net); 2445 #else 2446 return; 2447 #endif 2448 } 2449 tmr = &stcb->asoc.autoclose_timer; 2450 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2451 break; 2452 case SCTP_TIMER_TYPE_STRRESET: 2453 /* 2454 * Here the timer comes from the stcb but its value is from 2455 * the net's RTO. 
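 * Note that stcb->asoc.strreset_timer is shared with the ASOCKILL
 * timer; sctp_timer_stop() checks tmr->type so that a shared timer
 * is only stopped by its current user.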
2456 */ 2457 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2458 #ifdef INVARIANTS 2459 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2460 t_type, inp, stcb, net); 2461 #else 2462 return; 2463 #endif 2464 } 2465 tmr = &stcb->asoc.strreset_timer; 2466 if (net->RTO == 0) { 2467 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2468 } else { 2469 to_ticks = sctp_msecs_to_ticks(net->RTO); 2470 } 2471 break; 2472 case SCTP_TIMER_TYPE_INPKILL: 2473 /* 2474 * The inp is setup to die. We re-use the signature_change 2475 * timer since that has stopped and we are in the GONE 2476 * state. 2477 */ 2478 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2479 #ifdef INVARIANTS 2480 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2481 t_type, inp, stcb, net); 2482 #else 2483 return; 2484 #endif 2485 } 2486 tmr = &inp->sctp_ep.signature_change; 2487 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2488 break; 2489 case SCTP_TIMER_TYPE_ASOCKILL: 2490 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2491 #ifdef INVARIANTS 2492 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2493 t_type, inp, stcb, net); 2494 #else 2495 return; 2496 #endif 2497 } 2498 tmr = &stcb->asoc.strreset_timer; 2499 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2500 break; 2501 case SCTP_TIMER_TYPE_ADDR_WQ: 2502 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2503 #ifdef INVARIANTS 2504 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2505 t_type, inp, stcb, net); 2506 #else 2507 return; 2508 #endif 2509 } 2510 /* Only 1 tick away :-) */ 2511 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2512 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2513 break; 2514 case SCTP_TIMER_TYPE_PRIM_DELETED: 2515 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2516 #ifdef INVARIANTS 2517 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2518 t_type, inp, stcb, net); 2519 #else 2520 return; 2521 #endif 2522 } 2523 tmr = &stcb->asoc.delete_prim_timer; 2524 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2525 break; 2526 default: 2527 #ifdef INVARIANTS 2528 panic("Unknown timer type %d", t_type); 2529 #else 2530 return; 2531 #endif 2532 } 2533 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2534 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2535 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2536 /* 2537 * We do NOT allow you to have it already running. If it is, 2538 * we leave the current one up unchanged. 2539 */ 2540 SCTPDBG(SCTP_DEBUG_TIMER2, 2541 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2542 t_type, inp, stcb, net); 2543 return; 2544 } 2545 /* At this point we can proceed. */ 2546 if (t_type == SCTP_TIMER_TYPE_SEND) { 2547 stcb->asoc.num_send_timers_up++; 2548 } 2549 tmr->stopped_from = 0; 2550 tmr->type = t_type; 2551 tmr->ep = (void *)inp; 2552 tmr->tcb = (void *)stcb; 2553 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2554 tmr->net = NULL; 2555 } else { 2556 tmr->net = (void *)net; 2557 } 2558 tmr->self = (void *)tmr; 2559 tmr->vnet = (void *)curvnet; 2560 tmr->ticks = sctp_get_tick_count(); 2561 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2562 SCTPDBG(SCTP_DEBUG_TIMER2, 2563 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2564 t_type, to_ticks, inp, stcb, net); 2565 /* 2566 * If this is a newly scheduled callout, as opposed to a 2567 * rescheduled one, increment relevant reference counts. 
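 * These are the references that are released again either in
 * sctp_timeout_handler() when the timer fires, or in
 * sctp_timer_stop() when it is cancelled.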
2568 */ 2569 if (tmr->ep != NULL) { 2570 SCTP_INP_INCR_REF(inp); 2571 } 2572 if (tmr->tcb != NULL) { 2573 atomic_add_int(&stcb->asoc.refcnt, 1); 2574 } 2575 if (tmr->net != NULL) { 2576 atomic_add_int(&net->ref_count, 1); 2577 } 2578 } else { 2579 /* 2580 * This should not happen, since we checked for pending 2581 * above. 2582 */ 2583 SCTPDBG(SCTP_DEBUG_TIMER2, 2584 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2585 t_type, to_ticks, inp, stcb, net); 2586 } 2587 return; 2588 } 2589 2590 /*- 2591 * The following table shows which parameters must be provided 2592 * when calling sctp_timer_stop(). For parameters not being 2593 * provided, NULL must be used. 2594 * 2595 * |Name |inp |stcb|net | 2596 * |-----------------------------|----|----|----| 2597 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2598 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2599 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2600 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2601 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2602 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2603 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2604 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2605 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2606 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2607 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2608 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2609 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2610 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2611 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2612 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2613 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2614 * 2615 */ 2616 2617 void 2618 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2619 struct sctp_nets *net, uint32_t from) 2620 { 2621 struct sctp_timer *tmr; 2622 2623 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2624 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2625 t_type, stcb, stcb->sctp_ep)); 2626 if (stcb != NULL) { 2627 SCTP_TCB_LOCK_ASSERT(stcb); 2628 } else if (inp != NULL) { 2629 SCTP_INP_WLOCK_ASSERT(inp); 2630 } else { 2631 SCTP_WQ_ADDR_LOCK_ASSERT(); 2632 } 2633 tmr = NULL; 2634 switch (t_type) { 2635 case SCTP_TIMER_TYPE_SEND: 2636 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2637 #ifdef INVARIANTS 2638 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2639 t_type, inp, stcb, net); 2640 #else 2641 return; 2642 #endif 2643 } 2644 tmr = &net->rxt_timer; 2645 break; 2646 case SCTP_TIMER_TYPE_INIT: 2647 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2648 #ifdef INVARIANTS 2649 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2650 t_type, inp, stcb, net); 2651 #else 2652 return; 2653 #endif 2654 } 2655 tmr = &net->rxt_timer; 2656 break; 2657 case SCTP_TIMER_TYPE_RECV: 2658 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2659 #ifdef INVARIANTS 2660 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2661 t_type, inp, stcb, net); 2662 #else 2663 return; 2664 #endif 2665 } 2666 tmr = &stcb->asoc.dack_timer; 2667 break; 2668 case SCTP_TIMER_TYPE_SHUTDOWN: 2669 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2670 #ifdef INVARIANTS 2671 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2672 t_type, inp, stcb, net); 2673 #else 2674 return; 2675 #endif 2676 } 2677 tmr = &net->rxt_timer; 2678 break; 2679 case SCTP_TIMER_TYPE_HEARTBEAT: 2680 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2681 #ifdef INVARIANTS 2682 panic("sctp_timer_stop of type %d: inp = 
%p, stcb = %p, net = %p", 2683 t_type, inp, stcb, net); 2684 #else 2685 return; 2686 #endif 2687 } 2688 tmr = &net->hb_timer; 2689 break; 2690 case SCTP_TIMER_TYPE_COOKIE: 2691 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2692 #ifdef INVARIANTS 2693 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2694 t_type, inp, stcb, net); 2695 #else 2696 return; 2697 #endif 2698 } 2699 tmr = &net->rxt_timer; 2700 break; 2701 case SCTP_TIMER_TYPE_NEWCOOKIE: 2702 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2703 #ifdef INVARIANTS 2704 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2705 t_type, inp, stcb, net); 2706 #else 2707 return; 2708 #endif 2709 } 2710 tmr = &inp->sctp_ep.signature_change; 2711 break; 2712 case SCTP_TIMER_TYPE_PATHMTURAISE: 2713 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2714 #ifdef INVARIANTS 2715 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2716 t_type, inp, stcb, net); 2717 #else 2718 return; 2719 #endif 2720 } 2721 tmr = &net->pmtu_timer; 2722 break; 2723 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2724 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2725 #ifdef INVARIANTS 2726 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2727 t_type, inp, stcb, net); 2728 #else 2729 return; 2730 #endif 2731 } 2732 tmr = &net->rxt_timer; 2733 break; 2734 case SCTP_TIMER_TYPE_ASCONF: 2735 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2736 #ifdef INVARIANTS 2737 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2738 t_type, inp, stcb, net); 2739 #else 2740 return; 2741 #endif 2742 } 2743 tmr = &stcb->asoc.asconf_timer; 2744 break; 2745 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2746 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2747 #ifdef INVARIANTS 2748 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2749 t_type, inp, stcb, net); 2750 #else 2751 return; 2752 #endif 2753 } 2754 tmr = &stcb->asoc.shut_guard_timer; 2755 break; 2756 case SCTP_TIMER_TYPE_AUTOCLOSE: 2757 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2758 #ifdef INVARIANTS 2759 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2760 t_type, inp, stcb, net); 2761 #else 2762 return; 2763 #endif 2764 } 2765 tmr = &stcb->asoc.autoclose_timer; 2766 break; 2767 case SCTP_TIMER_TYPE_STRRESET: 2768 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2769 #ifdef INVARIANTS 2770 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2771 t_type, inp, stcb, net); 2772 #else 2773 return; 2774 #endif 2775 } 2776 tmr = &stcb->asoc.strreset_timer; 2777 break; 2778 case SCTP_TIMER_TYPE_INPKILL: 2779 /* 2780 * The inp is setup to die. We re-use the signature_change 2781 * timer since that has stopped and we are in the GONE 2782 * state. 
2783 */ 2784 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2785 #ifdef INVARIANTS 2786 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2787 t_type, inp, stcb, net); 2788 #else 2789 return; 2790 #endif 2791 } 2792 tmr = &inp->sctp_ep.signature_change; 2793 break; 2794 case SCTP_TIMER_TYPE_ASOCKILL: 2795 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2796 #ifdef INVARIANTS 2797 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2798 t_type, inp, stcb, net); 2799 #else 2800 return; 2801 #endif 2802 } 2803 tmr = &stcb->asoc.strreset_timer; 2804 break; 2805 case SCTP_TIMER_TYPE_ADDR_WQ: 2806 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2807 #ifdef INVARIANTS 2808 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2809 t_type, inp, stcb, net); 2810 #else 2811 return; 2812 #endif 2813 } 2814 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2815 break; 2816 case SCTP_TIMER_TYPE_PRIM_DELETED: 2817 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2818 #ifdef INVARIANTS 2819 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2820 t_type, inp, stcb, net); 2821 #else 2822 return; 2823 #endif 2824 } 2825 tmr = &stcb->asoc.delete_prim_timer; 2826 break; 2827 default: 2828 #ifdef INVARIANTS 2829 panic("Unknown timer type %d", t_type); 2830 #else 2831 return; 2832 #endif 2833 } 2834 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2835 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2836 (tmr->type != t_type)) { 2837 /* 2838 * Ok we have a timer that is under joint use. Cookie timer 2839 * per chance with the SEND timer. We therefore are NOT 2840 * running the timer that the caller wants stopped. So just 2841 * return. 2842 */ 2843 SCTPDBG(SCTP_DEBUG_TIMER2, 2844 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2845 t_type, inp, stcb, net); 2846 return; 2847 } 2848 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2849 stcb->asoc.num_send_timers_up--; 2850 if (stcb->asoc.num_send_timers_up < 0) { 2851 stcb->asoc.num_send_timers_up = 0; 2852 } 2853 } 2854 tmr->self = NULL; 2855 tmr->stopped_from = from; 2856 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2857 KASSERT(tmr->ep == inp, 2858 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2859 t_type, inp, tmr->ep)); 2860 KASSERT(tmr->tcb == stcb, 2861 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2862 t_type, stcb, tmr->tcb)); 2863 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2864 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2865 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2866 t_type, net, tmr->net)); 2867 SCTPDBG(SCTP_DEBUG_TIMER2, 2868 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2869 t_type, inp, stcb, net); 2870 /* 2871 * If the timer was actually stopped, decrement reference 2872 * counts that were incremented in sctp_timer_start(). 2873 */ 2874 if (tmr->ep != NULL) { 2875 tmr->ep = NULL; 2876 SCTP_INP_DECR_REF(inp); 2877 } 2878 if (tmr->tcb != NULL) { 2879 tmr->tcb = NULL; 2880 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2881 } 2882 if (tmr->net != NULL) { 2883 struct sctp_nets *tmr_net; 2884 2885 /* 2886 * Can't use net, since it doesn't work for 2887 * SCTP_TIMER_TYPE_ASCONF. 
2888 */ 2889 tmr_net = tmr->net; 2890 tmr->net = NULL; 2891 sctp_free_remote_addr(tmr_net); 2892 } 2893 } else { 2894 SCTPDBG(SCTP_DEBUG_TIMER2, 2895 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2896 t_type, inp, stcb, net); 2897 } 2898 return; 2899 } 2900 2901 uint32_t 2902 sctp_calculate_len(struct mbuf *m) 2903 { 2904 struct mbuf *at; 2905 uint32_t tlen; 2906 2907 tlen = 0; 2908 for (at = m; at != NULL; at = SCTP_BUF_NEXT(at)) { 2909 tlen += SCTP_BUF_LEN(at); 2910 } 2911 return (tlen); 2912 } 2913 2914 /* 2915 * Given an association and starting time of the current RTT period, update 2916 * RTO in number of msecs. net should point to the current network. 2917 * Return 1, if an RTO update was performed, return 0 if no update was 2918 * performed due to invalid starting point. 2919 */ 2920 2921 int 2922 sctp_calculate_rto(struct sctp_tcb *stcb, 2923 struct sctp_association *asoc, 2924 struct sctp_nets *net, 2925 struct timeval *old, 2926 int rtt_from_sack) 2927 { 2928 struct timeval now; 2929 uint64_t rtt_us; /* RTT in us */ 2930 int32_t rtt; /* RTT in ms */ 2931 uint32_t new_rto; 2932 int first_measure = 0; 2933 2934 /************************/ 2935 /* 1. calculate new RTT */ 2936 /************************/ 2937 /* get the current time */ 2938 if (stcb->asoc.use_precise_time) { 2939 (void)SCTP_GETPTIME_TIMEVAL(&now); 2940 } else { 2941 (void)SCTP_GETTIME_TIMEVAL(&now); 2942 } 2943 if ((old->tv_sec > now.tv_sec) || 2944 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2945 /* The starting point is in the future. */ 2946 return (0); 2947 } 2948 timevalsub(&now, old); 2949 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2950 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2951 /* The RTT is larger than a sane value. */ 2952 return (0); 2953 } 2954 /* store the current RTT in us */ 2955 net->rtt = rtt_us; 2956 /* compute rtt in ms */ 2957 rtt = (int32_t)(net->rtt / 1000); 2958 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2959 /* 2960 * Tell the CC module that a new update has just occurred 2961 * from a sack 2962 */ 2963 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2964 } 2965 /* 2966 * Do we need to determine the lan? We do this only on sacks i.e. 2967 * RTT being determined from data not non-data (HB/INIT->INITACK). 2968 */ 2969 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2970 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2971 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2972 net->lan_type = SCTP_LAN_INTERNET; 2973 } else { 2974 net->lan_type = SCTP_LAN_LOCAL; 2975 } 2976 } 2977 2978 /***************************/ 2979 /* 2. update RTTVAR & SRTT */ 2980 /***************************/ 2981 /*- 2982 * Compute the scaled average lastsa and the 2983 * scaled variance lastsv as described in van Jacobson 2984 * Paper "Congestion Avoidance and Control", Annex A. 
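 * As a sketch of the arithmetic below (with
 * delta = rtt - (lastsa >> SCTP_RTT_SHIFT), and assuming the usual
 * shift values of 3 and 2):
 *
 *   lastsa += delta;                                    scaled SRTT
 *   lastsv += |delta| - (lastsv >> SCTP_RTT_VAR_SHIFT); scaled RTTVAR
 *   RTO     = (lastsa >> SCTP_RTT_SHIFT) + lastsv;
 *
 * which amounts to srtt = 7/8 * srtt + 1/8 * rtt and
 * rttvar = 3/4 * rttvar + 1/4 * |delta|, with the resulting RTO
 * clamped to [minrto, maxrto] further down.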
2985 * 2986 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2987 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2988 */ 2989 if (net->RTO_measured) { 2990 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2991 net->lastsa += rtt; 2992 if (rtt < 0) { 2993 rtt = -rtt; 2994 } 2995 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 2996 net->lastsv += rtt; 2997 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2998 rto_logging(net, SCTP_LOG_RTTVAR); 2999 } 3000 } else { 3001 /* First RTO measurement */ 3002 net->RTO_measured = 1; 3003 first_measure = 1; 3004 net->lastsa = rtt << SCTP_RTT_SHIFT; 3005 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3006 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3007 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3008 } 3009 } 3010 if (net->lastsv == 0) { 3011 net->lastsv = SCTP_CLOCK_GRANULARITY; 3012 } 3013 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3014 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3015 (stcb->asoc.sat_network_lockout == 0)) { 3016 stcb->asoc.sat_network = 1; 3017 } else if ((!first_measure) && stcb->asoc.sat_network) { 3018 stcb->asoc.sat_network = 0; 3019 stcb->asoc.sat_network_lockout = 1; 3020 } 3021 /* bound it, per C6/C7 in Section 5.3.1 */ 3022 if (new_rto < stcb->asoc.minrto) { 3023 new_rto = stcb->asoc.minrto; 3024 } 3025 if (new_rto > stcb->asoc.maxrto) { 3026 new_rto = stcb->asoc.maxrto; 3027 } 3028 net->RTO = new_rto; 3029 return (1); 3030 } 3031 3032 /* 3033 * return a pointer to a contiguous piece of data from the given mbuf chain 3034 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3035 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 3036 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 3037 */ 3038 caddr_t 3039 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3040 { 3041 uint32_t count; 3042 uint8_t *ptr; 3043 3044 ptr = in_ptr; 3045 if ((off < 0) || (len <= 0)) 3046 return (NULL); 3047 3048 /* find the desired start location */ 3049 while ((m != NULL) && (off > 0)) { 3050 if (off < SCTP_BUF_LEN(m)) 3051 break; 3052 off -= SCTP_BUF_LEN(m); 3053 m = SCTP_BUF_NEXT(m); 3054 } 3055 if (m == NULL) 3056 return (NULL); 3057 3058 /* is the current mbuf large enough (eg. contiguous)? */ 3059 if ((SCTP_BUF_LEN(m) - off) >= len) { 3060 return (mtod(m, caddr_t)+off); 3061 } else { 3062 /* else, it spans more than one mbuf, so save a temp copy... */ 3063 while ((m != NULL) && (len > 0)) { 3064 count = min(SCTP_BUF_LEN(m) - off, len); 3065 memcpy(ptr, mtod(m, caddr_t)+off, count); 3066 len -= count; 3067 ptr += count; 3068 off = 0; 3069 m = SCTP_BUF_NEXT(m); 3070 } 3071 if ((m == NULL) && (len > 0)) 3072 return (NULL); 3073 else 3074 return ((caddr_t)in_ptr); 3075 } 3076 } 3077 3078 struct sctp_paramhdr * 3079 sctp_get_next_param(struct mbuf *m, 3080 int offset, 3081 struct sctp_paramhdr *pull, 3082 int pull_limit) 3083 { 3084 /* This just provides a typed signature to Peter's Pull routine */ 3085 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3086 (uint8_t *)pull)); 3087 } 3088 3089 struct mbuf * 3090 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3091 { 3092 struct mbuf *m_last; 3093 caddr_t dp; 3094 3095 if (padlen > 3) { 3096 return (NULL); 3097 } 3098 if (padlen <= M_TRAILINGSPACE(m)) { 3099 /* 3100 * The easy way. 
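 * The pad is at most three bytes (SCTP chunks are padded to a
 * four-byte boundary), so it normally fits in the trailing space of
 * the given mbuf and is simply zeroed in place.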
We hope the majority of the time we hit 3101 * here :) 3102 */ 3103 m_last = m; 3104 } else { 3105 /* Hard way we must grow the mbuf chain */ 3106 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3107 if (m_last == NULL) { 3108 return (NULL); 3109 } 3110 SCTP_BUF_LEN(m_last) = 0; 3111 SCTP_BUF_NEXT(m_last) = NULL; 3112 SCTP_BUF_NEXT(m) = m_last; 3113 } 3114 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3115 SCTP_BUF_LEN(m_last) += padlen; 3116 memset(dp, 0, padlen); 3117 return (m_last); 3118 } 3119 3120 struct mbuf * 3121 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3122 { 3123 /* find the last mbuf in chain and pad it */ 3124 struct mbuf *m_at; 3125 3126 if (last_mbuf != NULL) { 3127 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3128 } else { 3129 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3130 if (SCTP_BUF_NEXT(m_at) == NULL) { 3131 return (sctp_add_pad_tombuf(m_at, padval)); 3132 } 3133 } 3134 } 3135 return (NULL); 3136 } 3137 3138 static void 3139 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3140 uint16_t error, struct sctp_abort_chunk *abort, 3141 bool from_peer, bool timedout, int so_locked) 3142 { 3143 struct mbuf *m_notify; 3144 struct sctp_assoc_change *sac; 3145 struct sctp_queued_to_read *control; 3146 unsigned int notif_len; 3147 uint16_t abort_len; 3148 unsigned int i; 3149 3150 KASSERT(abort == NULL || from_peer, 3151 ("sctp_notify_assoc_change: ABORT chunk provided for local termination")); 3152 KASSERT(!from_peer || !timedout, 3153 ("sctp_notify_assoc_change: timeouts can only be local")); 3154 if (stcb == NULL) { 3155 return; 3156 } 3157 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3158 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3159 if (abort != NULL) { 3160 abort_len = ntohs(abort->ch.chunk_length); 3161 /* 3162 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3163 * contiguous. 3164 */ 3165 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3166 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3167 } 3168 } else { 3169 abort_len = 0; 3170 } 3171 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3172 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3173 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3174 notif_len += abort_len; 3175 } 3176 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3177 if (m_notify == NULL) { 3178 /* Retry with smaller value. 
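 * i.e. fall back to the bare sctp_assoc_change notification without
 * the optional sac_info bytes (supported features or ABORT cause).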
*/ 3179 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3180 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3181 if (m_notify == NULL) { 3182 goto set_error; 3183 } 3184 } 3185 SCTP_BUF_NEXT(m_notify) = NULL; 3186 sac = mtod(m_notify, struct sctp_assoc_change *); 3187 memset(sac, 0, notif_len); 3188 sac->sac_type = SCTP_ASSOC_CHANGE; 3189 sac->sac_flags = 0; 3190 sac->sac_length = sizeof(struct sctp_assoc_change); 3191 sac->sac_state = state; 3192 sac->sac_error = error; 3193 if (state == SCTP_CANT_STR_ASSOC) { 3194 sac->sac_outbound_streams = 0; 3195 sac->sac_inbound_streams = 0; 3196 } else { 3197 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3198 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3199 } 3200 sac->sac_assoc_id = sctp_get_associd(stcb); 3201 if (notif_len > sizeof(struct sctp_assoc_change)) { 3202 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3203 i = 0; 3204 if (stcb->asoc.prsctp_supported == 1) { 3205 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3206 } 3207 if (stcb->asoc.auth_supported == 1) { 3208 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3209 } 3210 if (stcb->asoc.asconf_supported == 1) { 3211 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3212 } 3213 if (stcb->asoc.idata_supported == 1) { 3214 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3215 } 3216 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3217 if (stcb->asoc.reconfig_supported == 1) { 3218 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3219 } 3220 sac->sac_length += i; 3221 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3222 memcpy(sac->sac_info, abort, abort_len); 3223 sac->sac_length += abort_len; 3224 } 3225 } 3226 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3227 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3228 0, 0, stcb->asoc.context, 0, 0, 0, 3229 m_notify); 3230 if (control != NULL) { 3231 control->length = SCTP_BUF_LEN(m_notify); 3232 control->spec_flags = M_NOTIFICATION; 3233 /* not that we need this */ 3234 control->tail_mbuf = m_notify; 3235 sctp_add_to_readq(stcb->sctp_ep, stcb, 3236 control, 3237 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3238 so_locked); 3239 } else { 3240 sctp_m_freem(m_notify); 3241 } 3242 } 3243 /* 3244 * For 1-to-1 style sockets, we send up and error when an ABORT 3245 * comes in. 
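 * A peer abort maps to ECONNREFUSED while still in COOKIE_WAIT and
 * to ECONNRESET afterwards; a local termination maps to ETIMEDOUT if
 * it timed out and to ECONNABORTED otherwise.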
3246 */ 3247 set_error: 3248 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3249 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3250 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3251 SOCK_LOCK(stcb->sctp_socket); 3252 if (from_peer) { 3253 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3254 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3255 stcb->sctp_socket->so_error = ECONNREFUSED; 3256 } else { 3257 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3258 stcb->sctp_socket->so_error = ECONNRESET; 3259 } 3260 } else { 3261 if (timedout) { 3262 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3263 stcb->sctp_socket->so_error = ETIMEDOUT; 3264 } else { 3265 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3266 stcb->sctp_socket->so_error = ECONNABORTED; 3267 } 3268 } 3269 SOCK_UNLOCK(stcb->sctp_socket); 3270 } 3271 /* Wake ANY sleepers */ 3272 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3273 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3274 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3275 socantrcvmore(stcb->sctp_socket); 3276 } 3277 sorwakeup(stcb->sctp_socket); 3278 sowwakeup(stcb->sctp_socket); 3279 } 3280 3281 static void 3282 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3283 struct sockaddr *sa, uint32_t error, int so_locked) 3284 { 3285 struct mbuf *m_notify; 3286 struct sctp_paddr_change *spc; 3287 struct sctp_queued_to_read *control; 3288 3289 if ((stcb == NULL) || 3290 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3291 /* event not enabled */ 3292 return; 3293 } 3294 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3295 if (m_notify == NULL) 3296 return; 3297 SCTP_BUF_LEN(m_notify) = 0; 3298 spc = mtod(m_notify, struct sctp_paddr_change *); 3299 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3300 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3301 spc->spc_flags = 0; 3302 spc->spc_length = sizeof(struct sctp_paddr_change); 3303 switch (sa->sa_family) { 3304 #ifdef INET 3305 case AF_INET: 3306 #ifdef INET6 3307 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3308 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3309 (struct sockaddr_in6 *)&spc->spc_aaddr); 3310 } else { 3311 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3312 } 3313 #else 3314 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3315 #endif 3316 break; 3317 #endif 3318 #ifdef INET6 3319 case AF_INET6: 3320 { 3321 struct sockaddr_in6 *sin6; 3322 3323 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3324 3325 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3326 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3327 if (sin6->sin6_scope_id == 0) { 3328 /* recover scope_id for user */ 3329 (void)sa6_recoverscope(sin6); 3330 } else { 3331 /* clear embedded scope_id for user */ 3332 in6_clearscope(&sin6->sin6_addr); 3333 } 3334 } 3335 break; 3336 } 3337 #endif 3338 default: 3339 /* TSNH */ 3340 break; 3341 } 3342 spc->spc_state = state; 3343 spc->spc_error = error; 3344 spc->spc_assoc_id = sctp_get_associd(stcb); 3345 3346 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3347 SCTP_BUF_NEXT(m_notify) = NULL; 3348 3349 /* append to socket */ 3350 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3351 0, 0, stcb->asoc.context, 0, 0, 0, 3352 m_notify); 3353 if 
(control == NULL) { 3354 /* no memory */ 3355 sctp_m_freem(m_notify); 3356 return; 3357 } 3358 control->length = SCTP_BUF_LEN(m_notify); 3359 control->spec_flags = M_NOTIFICATION; 3360 /* not that we need this */ 3361 control->tail_mbuf = m_notify; 3362 sctp_add_to_readq(stcb->sctp_ep, stcb, 3363 control, 3364 &stcb->sctp_socket->so_rcv, 1, 3365 SCTP_READ_LOCK_NOT_HELD, 3366 so_locked); 3367 } 3368 3369 static void 3370 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3371 struct sctp_tmit_chunk *chk, int so_locked) 3372 { 3373 struct mbuf *m_notify; 3374 struct sctp_send_failed *ssf; 3375 struct sctp_send_failed_event *ssfe; 3376 struct sctp_queued_to_read *control; 3377 struct sctp_chunkhdr *chkhdr; 3378 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3379 3380 if ((stcb == NULL) || 3381 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3382 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3383 /* event not enabled */ 3384 return; 3385 } 3386 3387 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3388 notifhdr_len = sizeof(struct sctp_send_failed_event); 3389 } else { 3390 notifhdr_len = sizeof(struct sctp_send_failed); 3391 } 3392 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3393 if (m_notify == NULL) 3394 /* no space left */ 3395 return; 3396 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3397 if (stcb->asoc.idata_supported) { 3398 chkhdr_len = sizeof(struct sctp_idata_chunk); 3399 } else { 3400 chkhdr_len = sizeof(struct sctp_data_chunk); 3401 } 3402 /* Use some defaults in case we can't access the chunk header */ 3403 if (chk->send_size >= chkhdr_len) { 3404 payload_len = chk->send_size - chkhdr_len; 3405 } else { 3406 payload_len = 0; 3407 } 3408 padding_len = 0; 3409 if (chk->data != NULL) { 3410 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3411 if (chkhdr != NULL) { 3412 chk_len = ntohs(chkhdr->chunk_length); 3413 if ((chk_len >= chkhdr_len) && 3414 (chk->send_size >= chk_len) && 3415 (chk->send_size - chk_len < 4)) { 3416 padding_len = chk->send_size - chk_len; 3417 payload_len = chk->send_size - chkhdr_len - padding_len; 3418 } 3419 } 3420 } 3421 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3422 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3423 memset(ssfe, 0, notifhdr_len); 3424 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3425 if (sent) { 3426 ssfe->ssfe_flags = SCTP_DATA_SENT; 3427 } else { 3428 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3429 } 3430 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3431 ssfe->ssfe_error = error; 3432 /* not exactly what the user sent in, but should be close :) */ 3433 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3434 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3435 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3436 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3437 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3438 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3439 } else { 3440 ssf = mtod(m_notify, struct sctp_send_failed *); 3441 memset(ssf, 0, notifhdr_len); 3442 ssf->ssf_type = SCTP_SEND_FAILED; 3443 if (sent) { 3444 ssf->ssf_flags = SCTP_DATA_SENT; 3445 } else { 3446 ssf->ssf_flags = SCTP_DATA_UNSENT; 3447 } 3448 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3449 ssf->ssf_error = error; 3450 /* not exactly what the user sent in, but should be close :) */ 3451 
ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3452 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3453 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3454 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3455 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3456 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3457 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3458 } 3459 if (chk->data != NULL) { 3460 /* Trim off the sctp chunk header (it should be there) */ 3461 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3462 m_adj(chk->data, chkhdr_len); 3463 m_adj(chk->data, -padding_len); 3464 sctp_mbuf_crush(chk->data); 3465 chk->send_size -= (chkhdr_len + padding_len); 3466 } 3467 } 3468 SCTP_BUF_NEXT(m_notify) = chk->data; 3469 /* Steal off the mbuf */ 3470 chk->data = NULL; 3471 /* 3472 * For this case, we check the actual socket buffer, since the assoc 3473 * is going away we don't want to overfill the socket buffer for a 3474 * non-reader 3475 */ 3476 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3477 sctp_m_freem(m_notify); 3478 return; 3479 } 3480 /* append to socket */ 3481 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3482 0, 0, stcb->asoc.context, 0, 0, 0, 3483 m_notify); 3484 if (control == NULL) { 3485 /* no memory */ 3486 sctp_m_freem(m_notify); 3487 return; 3488 } 3489 control->length = SCTP_BUF_LEN(m_notify); 3490 control->spec_flags = M_NOTIFICATION; 3491 /* not that we need this */ 3492 control->tail_mbuf = m_notify; 3493 sctp_add_to_readq(stcb->sctp_ep, stcb, 3494 control, 3495 &stcb->sctp_socket->so_rcv, 1, 3496 SCTP_READ_LOCK_NOT_HELD, 3497 so_locked); 3498 } 3499 3500 static void 3501 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3502 struct sctp_stream_queue_pending *sp, int so_locked) 3503 { 3504 struct mbuf *m_notify; 3505 struct sctp_send_failed *ssf; 3506 struct sctp_send_failed_event *ssfe; 3507 struct sctp_queued_to_read *control; 3508 int notifhdr_len; 3509 3510 if ((stcb == NULL) || 3511 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3512 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3513 /* event not enabled */ 3514 return; 3515 } 3516 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3517 notifhdr_len = sizeof(struct sctp_send_failed_event); 3518 } else { 3519 notifhdr_len = sizeof(struct sctp_send_failed); 3520 } 3521 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3522 if (m_notify == NULL) { 3523 /* no space left */ 3524 return; 3525 } 3526 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3527 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3528 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3529 memset(ssfe, 0, notifhdr_len); 3530 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3531 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3532 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3533 ssfe->ssfe_error = error; 3534 /* not exactly what the user sent in, but should be close :) */ 3535 ssfe->ssfe_info.snd_sid = sp->sid; 3536 if (sp->some_taken) { 3537 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3538 } else { 3539 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3540 } 3541 ssfe->ssfe_info.snd_ppid = sp->ppid; 3542 ssfe->ssfe_info.snd_context = sp->context; 3543 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3544 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3545 } else { 
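/*
 * Build the old-style SCTP_SEND_FAILED notification; RFC 6458
 * deprecates it in favor of SCTP_SEND_FAILED_EVENT, but it is
 * still provided for applications that only enable the old event.
 */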
3546 ssf = mtod(m_notify, struct sctp_send_failed *); 3547 memset(ssf, 0, notifhdr_len); 3548 ssf->ssf_type = SCTP_SEND_FAILED; 3549 ssf->ssf_flags = SCTP_DATA_UNSENT; 3550 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3551 ssf->ssf_error = error; 3552 /* not exactly what the user sent in, but should be close :) */ 3553 ssf->ssf_info.sinfo_stream = sp->sid; 3554 ssf->ssf_info.sinfo_ssn = 0; 3555 if (sp->some_taken) { 3556 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3557 } else { 3558 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3559 } 3560 ssf->ssf_info.sinfo_ppid = sp->ppid; 3561 ssf->ssf_info.sinfo_context = sp->context; 3562 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3563 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3564 } 3565 SCTP_BUF_NEXT(m_notify) = sp->data; 3566 3567 /* Steal off the mbuf */ 3568 sp->data = NULL; 3569 /* 3570 * For this case, we check the actual socket buffer, since the assoc 3571 * is going away we don't want to overfill the socket buffer for a 3572 * non-reader 3573 */ 3574 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3575 sctp_m_freem(m_notify); 3576 return; 3577 } 3578 /* append to socket */ 3579 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3580 0, 0, stcb->asoc.context, 0, 0, 0, 3581 m_notify); 3582 if (control == NULL) { 3583 /* no memory */ 3584 sctp_m_freem(m_notify); 3585 return; 3586 } 3587 control->length = SCTP_BUF_LEN(m_notify); 3588 control->spec_flags = M_NOTIFICATION; 3589 /* not that we need this */ 3590 control->tail_mbuf = m_notify; 3591 sctp_add_to_readq(stcb->sctp_ep, stcb, 3592 control, 3593 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3594 } 3595 3596 static void 3597 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3598 { 3599 struct mbuf *m_notify; 3600 struct sctp_adaptation_event *sai; 3601 struct sctp_queued_to_read *control; 3602 3603 if ((stcb == NULL) || 3604 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3605 /* event not enabled */ 3606 return; 3607 } 3608 3609 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3610 if (m_notify == NULL) 3611 /* no space left */ 3612 return; 3613 SCTP_BUF_LEN(m_notify) = 0; 3614 sai = mtod(m_notify, struct sctp_adaptation_event *); 3615 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3616 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3617 sai->sai_flags = 0; 3618 sai->sai_length = sizeof(struct sctp_adaptation_event); 3619 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3620 sai->sai_assoc_id = sctp_get_associd(stcb); 3621 3622 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3623 SCTP_BUF_NEXT(m_notify) = NULL; 3624 3625 /* append to socket */ 3626 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3627 0, 0, stcb->asoc.context, 0, 0, 0, 3628 m_notify); 3629 if (control == NULL) { 3630 /* no memory */ 3631 sctp_m_freem(m_notify); 3632 return; 3633 } 3634 control->length = SCTP_BUF_LEN(m_notify); 3635 control->spec_flags = M_NOTIFICATION; 3636 /* not that we need this */ 3637 control->tail_mbuf = m_notify; 3638 sctp_add_to_readq(stcb->sctp_ep, stcb, 3639 control, 3640 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3641 } 3642 3643 /* This always must be called with the read-queue LOCKED in the INP */ 3644 static void 3645 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3646 uint32_t val, int 
so_locked) 3647 { 3648 struct mbuf *m_notify; 3649 struct sctp_pdapi_event *pdapi; 3650 struct sctp_queued_to_read *control; 3651 struct sockbuf *sb; 3652 3653 if ((stcb == NULL) || 3654 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3655 /* event not enabled */ 3656 return; 3657 } 3658 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3659 return; 3660 } 3661 3662 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3663 if (m_notify == NULL) 3664 /* no space left */ 3665 return; 3666 SCTP_BUF_LEN(m_notify) = 0; 3667 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3668 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3669 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3670 pdapi->pdapi_flags = 0; 3671 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3672 pdapi->pdapi_indication = error; 3673 pdapi->pdapi_stream = (val >> 16); 3674 pdapi->pdapi_seq = (val & 0x0000ffff); 3675 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3676 3677 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3678 SCTP_BUF_NEXT(m_notify) = NULL; 3679 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3680 0, 0, stcb->asoc.context, 0, 0, 0, 3681 m_notify); 3682 if (control == NULL) { 3683 /* no memory */ 3684 sctp_m_freem(m_notify); 3685 return; 3686 } 3687 control->length = SCTP_BUF_LEN(m_notify); 3688 control->spec_flags = M_NOTIFICATION; 3689 /* not that we need this */ 3690 control->tail_mbuf = m_notify; 3691 sb = &stcb->sctp_socket->so_rcv; 3692 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3693 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3694 } 3695 sctp_sballoc(stcb, sb, m_notify); 3696 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3697 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3698 } 3699 control->end_added = 1; 3700 if (stcb->asoc.control_pdapi) 3701 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3702 else { 3703 /* we really should not see this case */ 3704 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3705 } 3706 if (stcb->sctp_ep && stcb->sctp_socket) { 3707 /* This should always be the case */ 3708 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3709 } 3710 } 3711 3712 static void 3713 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3714 { 3715 struct mbuf *m_notify; 3716 struct sctp_shutdown_event *sse; 3717 struct sctp_queued_to_read *control; 3718 3719 /* 3720 * For TCP model AND UDP connected sockets we will send an error up 3721 * when an SHUTDOWN completes 3722 */ 3723 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3724 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3725 /* mark socket closed for read/write and wakeup! 
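 * (only the send side is marked closed here: socantsendmore() disallows further sends and wakes up any waiting writer)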
*/ 3726 socantsendmore(stcb->sctp_socket); 3727 } 3728 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3729 /* event not enabled */ 3730 return; 3731 } 3732 3733 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3734 if (m_notify == NULL) 3735 /* no space left */ 3736 return; 3737 sse = mtod(m_notify, struct sctp_shutdown_event *); 3738 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3739 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3740 sse->sse_flags = 0; 3741 sse->sse_length = sizeof(struct sctp_shutdown_event); 3742 sse->sse_assoc_id = sctp_get_associd(stcb); 3743 3744 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3745 SCTP_BUF_NEXT(m_notify) = NULL; 3746 3747 /* append to socket */ 3748 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3749 0, 0, stcb->asoc.context, 0, 0, 0, 3750 m_notify); 3751 if (control == NULL) { 3752 /* no memory */ 3753 sctp_m_freem(m_notify); 3754 return; 3755 } 3756 control->length = SCTP_BUF_LEN(m_notify); 3757 control->spec_flags = M_NOTIFICATION; 3758 /* not that we need this */ 3759 control->tail_mbuf = m_notify; 3760 sctp_add_to_readq(stcb->sctp_ep, stcb, 3761 control, 3762 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3763 } 3764 3765 static void 3766 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3767 int so_locked) 3768 { 3769 struct mbuf *m_notify; 3770 struct sctp_sender_dry_event *event; 3771 struct sctp_queued_to_read *control; 3772 3773 if ((stcb == NULL) || 3774 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3775 /* event not enabled */ 3776 return; 3777 } 3778 3779 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3780 if (m_notify == NULL) { 3781 /* no space left */ 3782 return; 3783 } 3784 SCTP_BUF_LEN(m_notify) = 0; 3785 event = mtod(m_notify, struct sctp_sender_dry_event *); 3786 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3787 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3788 event->sender_dry_flags = 0; 3789 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3790 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3791 3792 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3793 SCTP_BUF_NEXT(m_notify) = NULL; 3794 3795 /* append to socket */ 3796 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3797 0, 0, stcb->asoc.context, 0, 0, 0, 3798 m_notify); 3799 if (control == NULL) { 3800 /* no memory */ 3801 sctp_m_freem(m_notify); 3802 return; 3803 } 3804 control->length = SCTP_BUF_LEN(m_notify); 3805 control->spec_flags = M_NOTIFICATION; 3806 /* not that we need this */ 3807 control->tail_mbuf = m_notify; 3808 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3809 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3810 } 3811 3812 void 3813 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3814 { 3815 struct mbuf *m_notify; 3816 struct sctp_queued_to_read *control; 3817 struct sctp_stream_change_event *stradd; 3818 3819 if ((stcb == NULL) || 3820 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3821 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3822 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3823 /* If the socket is gone we are out of here. 
*/ 3824 return; 3825 } 3826 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT)) { 3827 /* event not enabled */ 3828 return; 3829 } 3830 3831 if ((stcb->asoc.peer_req_out) && flag) { 3832 /* Peer made the request, don't tell the local user */ 3833 stcb->asoc.peer_req_out = 0; 3834 return; 3835 } 3836 stcb->asoc.peer_req_out = 0; 3837 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3838 if (m_notify == NULL) 3839 /* no space left */ 3840 return; 3841 SCTP_BUF_LEN(m_notify) = 0; 3842 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3843 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3844 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3845 stradd->strchange_flags = flag; 3846 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3847 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3848 stradd->strchange_instrms = numberin; 3849 stradd->strchange_outstrms = numberout; 3850 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3851 SCTP_BUF_NEXT(m_notify) = NULL; 3852 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3853 /* no space */ 3854 sctp_m_freem(m_notify); 3855 return; 3856 } 3857 /* append to socket */ 3858 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3859 0, 0, stcb->asoc.context, 0, 0, 0, 3860 m_notify); 3861 if (control == NULL) { 3862 /* no memory */ 3863 sctp_m_freem(m_notify); 3864 return; 3865 } 3866 control->length = SCTP_BUF_LEN(m_notify); 3867 control->spec_flags = M_NOTIFICATION; 3868 /* not that we need this */ 3869 control->tail_mbuf = m_notify; 3870 sctp_add_to_readq(stcb->sctp_ep, stcb, 3871 control, 3872 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3873 } 3874 3875 void 3876 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3877 { 3878 struct mbuf *m_notify; 3879 struct sctp_queued_to_read *control; 3880 struct sctp_assoc_reset_event *strasoc; 3881 3882 if ((stcb == NULL) || 3883 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3884 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3885 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3886 /* If the socket is gone we are out of here. 
*/ 3887 return; 3888 } 3889 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT)) { 3890 /* event not enabled */ 3891 return; 3892 } 3893 3894 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3895 if (m_notify == NULL) 3896 /* no space left */ 3897 return; 3898 SCTP_BUF_LEN(m_notify) = 0; 3899 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3900 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3901 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3902 strasoc->assocreset_flags = flag; 3903 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3904 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3905 strasoc->assocreset_local_tsn = sending_tsn; 3906 strasoc->assocreset_remote_tsn = recv_tsn; 3907 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3908 SCTP_BUF_NEXT(m_notify) = NULL; 3909 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3910 /* no space */ 3911 sctp_m_freem(m_notify); 3912 return; 3913 } 3914 /* append to socket */ 3915 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3916 0, 0, stcb->asoc.context, 0, 0, 0, 3917 m_notify); 3918 if (control == NULL) { 3919 /* no memory */ 3920 sctp_m_freem(m_notify); 3921 return; 3922 } 3923 control->length = SCTP_BUF_LEN(m_notify); 3924 control->spec_flags = M_NOTIFICATION; 3925 /* not that we need this */ 3926 control->tail_mbuf = m_notify; 3927 sctp_add_to_readq(stcb->sctp_ep, stcb, 3928 control, 3929 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3930 } 3931 3932 static void 3933 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3934 int number_entries, uint16_t *list, int flag) 3935 { 3936 struct mbuf *m_notify; 3937 struct sctp_queued_to_read *control; 3938 struct sctp_stream_reset_event *strreset; 3939 int len; 3940 3941 if ((stcb == NULL) || 3942 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3943 /* event not enabled */ 3944 return; 3945 } 3946 3947 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3948 if (m_notify == NULL) 3949 /* no space left */ 3950 return; 3951 SCTP_BUF_LEN(m_notify) = 0; 3952 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3953 if (len > M_TRAILINGSPACE(m_notify)) { 3954 /* never enough room */ 3955 sctp_m_freem(m_notify); 3956 return; 3957 } 3958 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3959 memset(strreset, 0, len); 3960 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3961 strreset->strreset_flags = flag; 3962 strreset->strreset_length = len; 3963 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3964 if (number_entries) { 3965 int i; 3966 3967 for (i = 0; i < number_entries; i++) { 3968 strreset->strreset_stream_list[i] = ntohs(list[i]); 3969 } 3970 } 3971 SCTP_BUF_LEN(m_notify) = len; 3972 SCTP_BUF_NEXT(m_notify) = NULL; 3973 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3974 /* no space */ 3975 sctp_m_freem(m_notify); 3976 return; 3977 } 3978 /* append to socket */ 3979 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3980 0, 0, stcb->asoc.context, 0, 0, 0, 3981 m_notify); 3982 if (control == NULL) { 3983 /* no memory */ 3984 sctp_m_freem(m_notify); 3985 return; 3986 } 3987 control->length = SCTP_BUF_LEN(m_notify); 3988 control->spec_flags = M_NOTIFICATION; 3989 /* not that we need this */ 3990 
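/*
 * Queue the notification on the association's read queue;
 * sctp_add_to_readq() charges it against the receive socket buffer
 * and wakes any sleeping reader.
 */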
control->tail_mbuf = m_notify; 3991 sctp_add_to_readq(stcb->sctp_ep, stcb, 3992 control, 3993 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3994 } 3995 3996 static void 3997 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3998 { 3999 struct mbuf *m_notify; 4000 struct sctp_remote_error *sre; 4001 struct sctp_queued_to_read *control; 4002 unsigned int notif_len; 4003 uint16_t chunk_len; 4004 4005 if ((stcb == NULL) || 4006 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 4007 return; 4008 } 4009 if (chunk != NULL) { 4010 chunk_len = ntohs(chunk->ch.chunk_length); 4011 /* 4012 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 4013 * contiguous. 4014 */ 4015 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 4016 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4017 } 4018 } else { 4019 chunk_len = 0; 4020 } 4021 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4022 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4023 if (m_notify == NULL) { 4024 /* Retry with smaller value. */ 4025 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4026 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4027 if (m_notify == NULL) { 4028 return; 4029 } 4030 } 4031 SCTP_BUF_NEXT(m_notify) = NULL; 4032 sre = mtod(m_notify, struct sctp_remote_error *); 4033 memset(sre, 0, notif_len); 4034 sre->sre_type = SCTP_REMOTE_ERROR; 4035 sre->sre_flags = 0; 4036 sre->sre_length = sizeof(struct sctp_remote_error); 4037 sre->sre_error = error; 4038 sre->sre_assoc_id = sctp_get_associd(stcb); 4039 if (notif_len > sizeof(struct sctp_remote_error)) { 4040 memcpy(sre->sre_data, chunk, chunk_len); 4041 sre->sre_length += chunk_len; 4042 } 4043 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4044 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4045 0, 0, stcb->asoc.context, 0, 0, 0, 4046 m_notify); 4047 if (control != NULL) { 4048 control->length = SCTP_BUF_LEN(m_notify); 4049 control->spec_flags = M_NOTIFICATION; 4050 /* not that we need this */ 4051 control->tail_mbuf = m_notify; 4052 sctp_add_to_readq(stcb->sctp_ep, stcb, 4053 control, 4054 &stcb->sctp_socket->so_rcv, 1, 4055 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4056 } else { 4057 sctp_m_freem(m_notify); 4058 } 4059 } 4060 4061 void 4062 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4063 uint32_t error, void *data, int so_locked) 4064 { 4065 if ((stcb == NULL) || 4066 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4067 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4068 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4069 /* If the socket is gone we are out of here */ 4070 return; 4071 } 4072 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4073 return; 4074 } 4075 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4076 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4077 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4078 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4079 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4080 /* Don't report these in front states */ 4081 return; 4082 } 4083 } 4084 switch (notification) { 4085 case SCTP_NOTIFY_ASSOC_UP: 4086 if (stcb->asoc.assoc_up_sent == 0) { 4087 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, false, false, so_locked); 4088 stcb->asoc.assoc_up_sent = 1; 4089 } 4090 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4091 
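/*
 * The peer supplied an adaptation layer indication that has not
 * been reported yet; pass it up now.
 */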
sctp_notify_adaptation_layer(stcb); 4092 } 4093 if (stcb->asoc.auth_supported == 0) { 4094 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4095 NULL, so_locked); 4096 } 4097 break; 4098 case SCTP_NOTIFY_ASSOC_DOWN: 4099 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, false, false, so_locked); 4100 break; 4101 case SCTP_NOTIFY_INTERFACE_DOWN: 4102 { 4103 struct sctp_nets *net; 4104 4105 net = (struct sctp_nets *)data; 4106 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4107 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4108 break; 4109 } 4110 case SCTP_NOTIFY_INTERFACE_UP: 4111 { 4112 struct sctp_nets *net; 4113 4114 net = (struct sctp_nets *)data; 4115 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4116 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4117 break; 4118 } 4119 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4120 { 4121 struct sctp_nets *net; 4122 4123 net = (struct sctp_nets *)data; 4124 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4125 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4126 break; 4127 } 4128 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4129 sctp_notify_send_failed2(stcb, error, 4130 (struct sctp_stream_queue_pending *)data, so_locked); 4131 break; 4132 case SCTP_NOTIFY_SENT_DG_FAIL: 4133 sctp_notify_send_failed(stcb, 1, error, 4134 (struct sctp_tmit_chunk *)data, so_locked); 4135 break; 4136 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4137 sctp_notify_send_failed(stcb, 0, error, 4138 (struct sctp_tmit_chunk *)data, so_locked); 4139 break; 4140 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4141 { 4142 uint32_t val; 4143 4144 val = *((uint32_t *)data); 4145 4146 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4147 break; 4148 } 4149 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4150 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4151 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4152 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, false, so_locked); 4153 } else { 4154 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, false, so_locked); 4155 } 4156 break; 4157 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4158 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4159 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4160 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, true, false, so_locked); 4161 } else { 4162 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, true, false, so_locked); 4163 } 4164 break; 4165 case SCTP_NOTIFY_ASSOC_TIMEDOUT: 4166 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4167 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4168 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, true, so_locked); 4169 } else { 4170 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, true, so_locked); 4171 } 4172 break; 4173 case SCTP_NOTIFY_ASSOC_RESTART: 4174 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, false, false, so_locked); 4175 if (stcb->asoc.auth_supported == 0) { 4176 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4177 NULL, so_locked); 4178 } 4179 break; 4180 case SCTP_NOTIFY_STR_RESET_SEND: 4181 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4182 break; 4183 case SCTP_NOTIFY_STR_RESET_RECV: 4184 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4185 break; 4186 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4187 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4188 
(SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4189 break; 4190 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4191 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4192 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4193 break; 4194 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4195 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4196 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4197 break; 4198 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4199 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4200 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4201 break; 4202 case SCTP_NOTIFY_ASCONF_ADD_IP: 4203 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4204 error, so_locked); 4205 break; 4206 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4207 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4208 error, so_locked); 4209 break; 4210 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4211 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4212 error, so_locked); 4213 break; 4214 case SCTP_NOTIFY_PEER_SHUTDOWN: 4215 sctp_notify_shutdown_event(stcb); 4216 break; 4217 case SCTP_NOTIFY_AUTH_NEW_KEY: 4218 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4219 (uint16_t)(uintptr_t)data, 4220 so_locked); 4221 break; 4222 case SCTP_NOTIFY_AUTH_FREE_KEY: 4223 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4224 (uint16_t)(uintptr_t)data, 4225 so_locked); 4226 break; 4227 case SCTP_NOTIFY_NO_PEER_AUTH: 4228 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4229 (uint16_t)(uintptr_t)data, 4230 so_locked); 4231 break; 4232 case SCTP_NOTIFY_SENDER_DRY: 4233 sctp_notify_sender_dry_event(stcb, so_locked); 4234 break; 4235 case SCTP_NOTIFY_REMOTE_ERROR: 4236 sctp_notify_remote_error(stcb, error, data); 4237 break; 4238 default: 4239 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4240 __func__, notification, notification); 4241 break; 4242 } /* end switch */ 4243 } 4244 4245 void 4246 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4247 { 4248 struct sctp_association *asoc; 4249 struct sctp_stream_out *outs; 4250 struct sctp_tmit_chunk *chk, *nchk; 4251 struct sctp_stream_queue_pending *sp, *nsp; 4252 int i; 4253 4254 if (stcb == NULL) { 4255 return; 4256 } 4257 asoc = &stcb->asoc; 4258 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4259 /* already being freed */ 4260 return; 4261 } 4262 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4263 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4264 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4265 return; 4266 } 4267 /* now through all the gunk freeing chunks */ 4268 /* sent queue SHOULD be empty */ 4269 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4270 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4271 asoc->sent_queue_cnt--; 4272 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4273 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4274 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4275 #ifdef INVARIANTS 4276 } else { 4277 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4278 #endif 4279 } 4280 } 4281 if (chk->data != NULL) { 4282 sctp_free_bufspace(stcb, asoc, chk, 1); 4283 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4284 error, chk, so_locked); 4285 if (chk->data) { 4286 sctp_m_freem(chk->data); 4287 chk->data = NULL; 4288 } 4289 } 4290 sctp_free_a_chunk(stcb, chk, so_locked); 4291 /* sa_ignore FREED_MEMORY */ 4292 } 4293 /* pending send queue 
SHOULD be empty */ 4294 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4295 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4296 asoc->send_queue_cnt--; 4297 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4298 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4299 #ifdef INVARIANTS 4300 } else { 4301 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4302 #endif 4303 } 4304 if (chk->data != NULL) { 4305 sctp_free_bufspace(stcb, asoc, chk, 1); 4306 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4307 error, chk, so_locked); 4308 if (chk->data) { 4309 sctp_m_freem(chk->data); 4310 chk->data = NULL; 4311 } 4312 } 4313 sctp_free_a_chunk(stcb, chk, so_locked); 4314 /* sa_ignore FREED_MEMORY */ 4315 } 4316 for (i = 0; i < asoc->streamoutcnt; i++) { 4317 /* For each stream */ 4318 outs = &asoc->strmout[i]; 4319 /* clean up any sends there */ 4320 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4321 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4322 TAILQ_REMOVE(&outs->outqueue, sp, next); 4323 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp); 4324 sctp_free_spbufspace(stcb, asoc, sp); 4325 if (sp->data) { 4326 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4327 error, (void *)sp, so_locked); 4328 if (sp->data) { 4329 sctp_m_freem(sp->data); 4330 sp->data = NULL; 4331 sp->tail_mbuf = NULL; 4332 sp->length = 0; 4333 } 4334 } 4335 if (sp->net) { 4336 sctp_free_remote_addr(sp->net); 4337 sp->net = NULL; 4338 } 4339 /* Free the chunk */ 4340 sctp_free_a_strmoq(stcb, sp, so_locked); 4341 /* sa_ignore FREED_MEMORY */ 4342 } 4343 } 4344 } 4345 4346 void 4347 sctp_abort_notification(struct sctp_tcb *stcb, bool from_peer, bool timeout, 4348 uint16_t error, struct sctp_abort_chunk *abort, 4349 int so_locked) 4350 { 4351 if (stcb == NULL) { 4352 return; 4353 } 4354 SCTP_TCB_LOCK_ASSERT(stcb); 4355 4356 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4357 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4358 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4359 sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_WAS_ABORTED); 4360 } 4361 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4362 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4363 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4364 return; 4365 } 4366 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4367 /* Tell them we lost the asoc */ 4368 sctp_report_all_outbound(stcb, error, so_locked); 4369 if (from_peer) { 4370 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4371 } else { 4372 if (timeout) { 4373 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_TIMEDOUT, stcb, error, abort, so_locked); 4374 } else { 4375 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4376 } 4377 } 4378 } 4379 4380 void 4381 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4382 struct mbuf *m, int iphlen, 4383 struct sockaddr *src, struct sockaddr *dst, 4384 struct sctphdr *sh, struct mbuf *op_err, 4385 uint8_t mflowtype, uint32_t mflowid, 4386 uint32_t vrf_id, uint16_t port) 4387 { 4388 struct sctp_gen_error_cause *cause; 4389 uint32_t vtag; 4390 uint16_t cause_code; 4391 4392 if (stcb != NULL) { 4393 vtag = stcb->asoc.peer_vtag; 4394 vrf_id = stcb->asoc.vrf_id; 4395 if (op_err != NULL) { 4396 /* Read the cause code from the error cause. 
*/ 4397 cause = mtod(op_err, struct sctp_gen_error_cause *); 4398 cause_code = ntohs(cause->code); 4399 } else { 4400 cause_code = 0; 4401 } 4402 } else { 4403 vtag = 0; 4404 } 4405 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4406 mflowtype, mflowid, inp->fibnum, 4407 vrf_id, port); 4408 if (stcb != NULL) { 4409 /* We have a TCB to abort, send notification too */ 4410 sctp_abort_notification(stcb, false, false, cause_code, NULL, SCTP_SO_NOT_LOCKED); 4411 /* Ok, now lets free it */ 4412 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4413 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4414 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4415 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4416 } 4417 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4418 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4419 } 4420 } 4421 #ifdef SCTP_ASOCLOG_OF_TSNS 4422 void 4423 sctp_print_out_track_log(struct sctp_tcb *stcb) 4424 { 4425 #ifdef NOSIY_PRINTS 4426 int i; 4427 4428 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4429 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4430 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4431 SCTP_PRINTF("None rcvd\n"); 4432 goto none_in; 4433 } 4434 if (stcb->asoc.tsn_in_wrapped) { 4435 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4436 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4437 stcb->asoc.in_tsnlog[i].tsn, 4438 stcb->asoc.in_tsnlog[i].strm, 4439 stcb->asoc.in_tsnlog[i].seq, 4440 stcb->asoc.in_tsnlog[i].flgs, 4441 stcb->asoc.in_tsnlog[i].sz); 4442 } 4443 } 4444 if (stcb->asoc.tsn_in_at) { 4445 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4446 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4447 stcb->asoc.in_tsnlog[i].tsn, 4448 stcb->asoc.in_tsnlog[i].strm, 4449 stcb->asoc.in_tsnlog[i].seq, 4450 stcb->asoc.in_tsnlog[i].flgs, 4451 stcb->asoc.in_tsnlog[i].sz); 4452 } 4453 } 4454 none_in: 4455 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4456 if ((stcb->asoc.tsn_out_at == 0) && 4457 (stcb->asoc.tsn_out_wrapped == 0)) { 4458 SCTP_PRINTF("None sent\n"); 4459 } 4460 if (stcb->asoc.tsn_out_wrapped) { 4461 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4462 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4463 stcb->asoc.out_tsnlog[i].tsn, 4464 stcb->asoc.out_tsnlog[i].strm, 4465 stcb->asoc.out_tsnlog[i].seq, 4466 stcb->asoc.out_tsnlog[i].flgs, 4467 stcb->asoc.out_tsnlog[i].sz); 4468 } 4469 } 4470 if (stcb->asoc.tsn_out_at) { 4471 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4472 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4473 stcb->asoc.out_tsnlog[i].tsn, 4474 stcb->asoc.out_tsnlog[i].strm, 4475 stcb->asoc.out_tsnlog[i].seq, 4476 stcb->asoc.out_tsnlog[i].flgs, 4477 stcb->asoc.out_tsnlog[i].sz); 4478 } 4479 } 4480 #endif 4481 } 4482 #endif 4483 4484 void 4485 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4486 struct mbuf *op_err, bool timedout, int so_locked) 4487 { 4488 struct sctp_gen_error_cause *cause; 4489 uint16_t cause_code; 4490 4491 if (stcb == NULL) { 4492 /* Got to have a TCB */ 4493 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4494 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4495 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4496 SCTP_CALLED_DIRECTLY_NOCMPSET); 4497 } 4498 } 4499 return; 4500 } 4501 if (op_err != NULL) { 4502 /* Read the cause code from the error cause. 
*/ 4503 cause = mtod(op_err, struct sctp_gen_error_cause *); 4504 cause_code = ntohs(cause->code); 4505 } else { 4506 cause_code = 0; 4507 } 4508 /* notify the peer */ 4509 sctp_send_abort_tcb(stcb, op_err, so_locked); 4510 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4511 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4512 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4513 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4514 } 4515 /* notify the ulp */ 4516 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4517 sctp_abort_notification(stcb, false, timedout, cause_code, NULL, so_locked); 4518 } 4519 /* now free the asoc */ 4520 #ifdef SCTP_ASOCLOG_OF_TSNS 4521 sctp_print_out_track_log(stcb); 4522 #endif 4523 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4524 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4525 } 4526 4527 void 4528 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4529 struct sockaddr *src, struct sockaddr *dst, 4530 struct sctphdr *sh, struct sctp_inpcb *inp, 4531 struct mbuf *cause, 4532 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4533 uint32_t vrf_id, uint16_t port) 4534 { 4535 struct sctp_chunkhdr *ch, chunk_buf; 4536 unsigned int chk_length; 4537 int contains_init_chunk; 4538 4539 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4540 /* Generate a TO address for future reference */ 4541 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4542 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4543 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4544 SCTP_CALLED_DIRECTLY_NOCMPSET); 4545 } 4546 } 4547 contains_init_chunk = 0; 4548 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4549 sizeof(*ch), (uint8_t *)&chunk_buf); 4550 while (ch != NULL) { 4551 chk_length = ntohs(ch->chunk_length); 4552 if (chk_length < sizeof(*ch)) { 4553 /* break to abort land */ 4554 break; 4555 } 4556 switch (ch->chunk_type) { 4557 case SCTP_INIT: 4558 contains_init_chunk = 1; 4559 break; 4560 case SCTP_PACKET_DROPPED: 4561 /* we don't respond to pkt-dropped */ 4562 return; 4563 case SCTP_ABORT_ASSOCIATION: 4564 /* we don't respond with an ABORT to an ABORT */ 4565 return; 4566 case SCTP_SHUTDOWN_COMPLETE: 4567 /* 4568 * we ignore it since we are not waiting for it and 4569 * peer is gone 4570 */ 4571 return; 4572 case SCTP_SHUTDOWN_ACK: 4573 sctp_send_shutdown_complete2(src, dst, sh, 4574 mflowtype, mflowid, fibnum, 4575 vrf_id, port); 4576 return; 4577 default: 4578 break; 4579 } 4580 offset += SCTP_SIZE32(chk_length); 4581 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4582 sizeof(*ch), (uint8_t *)&chunk_buf); 4583 } 4584 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4585 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4586 (contains_init_chunk == 0))) { 4587 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4588 mflowtype, mflowid, fibnum, 4589 vrf_id, port); 4590 } 4591 } 4592 4593 /* 4594 * check the inbound datagram to make sure there is not an abort inside it, 4595 * if there is return 1, else return 0. 
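 * While scanning, the initiate tag of any INIT or INIT-ACK chunk encountered
 * is also copied into *vtag for the caller's use.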
4596 */ 4597 int 4598 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtag) 4599 { 4600 struct sctp_chunkhdr *ch; 4601 struct sctp_init_chunk *init_chk, chunk_buf; 4602 int offset; 4603 unsigned int chk_length; 4604 4605 offset = iphlen + sizeof(struct sctphdr); 4606 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4607 (uint8_t *)&chunk_buf); 4608 while (ch != NULL) { 4609 chk_length = ntohs(ch->chunk_length); 4610 if (chk_length < sizeof(*ch)) { 4611 /* packet is probably corrupt */ 4612 break; 4613 } 4614 /* we seem to be ok, is it an abort? */ 4615 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4616 /* yep, tell them */ 4617 return (1); 4618 } 4619 if ((ch->chunk_type == SCTP_INITIATION) || 4620 (ch->chunk_type == SCTP_INITIATION_ACK)) { 4621 /* need to update the Vtag */ 4622 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4623 offset, sizeof(struct sctp_init_chunk), (uint8_t *)&chunk_buf); 4624 if (init_chk != NULL) { 4625 *vtag = ntohl(init_chk->init.initiate_tag); 4626 } 4627 } 4628 /* Nope, move to the next chunk */ 4629 offset += SCTP_SIZE32(chk_length); 4630 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4631 sizeof(*ch), (uint8_t *)&chunk_buf); 4632 } 4633 return (0); 4634 } 4635 4636 /* 4637 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4638 * set (i.e. it's 0) so, create this function to compare link local scopes 4639 */ 4640 #ifdef INET6 4641 uint32_t 4642 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4643 { 4644 struct sockaddr_in6 a, b; 4645 4646 /* save copies */ 4647 a = *addr1; 4648 b = *addr2; 4649 4650 if (a.sin6_scope_id == 0) 4651 if (sa6_recoverscope(&a)) { 4652 /* can't get scope, so can't match */ 4653 return (0); 4654 } 4655 if (b.sin6_scope_id == 0) 4656 if (sa6_recoverscope(&b)) { 4657 /* can't get scope, so can't match */ 4658 return (0); 4659 } 4660 if (a.sin6_scope_id != b.sin6_scope_id) 4661 return (0); 4662 4663 return (1); 4664 } 4665 4666 /* 4667 * returns a sockaddr_in6 with embedded scope recovered and removed 4668 */ 4669 struct sockaddr_in6 * 4670 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4671 { 4672 /* check and strip embedded scope junk */ 4673 if (addr->sin6_family == AF_INET6) { 4674 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4675 if (addr->sin6_scope_id == 0) { 4676 *store = *addr; 4677 if (!sa6_recoverscope(store)) { 4678 /* use the recovered scope */ 4679 addr = store; 4680 } 4681 } else { 4682 /* else, return the original "to" addr */ 4683 in6_clearscope(&addr->sin6_addr); 4684 } 4685 } 4686 } 4687 return (addr); 4688 } 4689 #endif 4690 4691 /* 4692 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4693 * if same, 0 if not 4694 */ 4695 int 4696 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4697 { 4698 4699 /* must be valid */ 4700 if (sa1 == NULL || sa2 == NULL) 4701 return (0); 4702 4703 /* must be the same family */ 4704 if (sa1->sa_family != sa2->sa_family) 4705 return (0); 4706 4707 switch (sa1->sa_family) { 4708 #ifdef INET6 4709 case AF_INET6: 4710 { 4711 /* IPv6 addresses */ 4712 struct sockaddr_in6 *sin6_1, *sin6_2; 4713 4714 sin6_1 = (struct sockaddr_in6 *)sa1; 4715 sin6_2 = (struct sockaddr_in6 *)sa2; 4716 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4717 sin6_2)); 4718 } 4719 #endif 4720 #ifdef INET 4721 case AF_INET: 4722 { 4723 /* IPv4 addresses */ 4724 struct sockaddr_in *sin_1, *sin_2; 4725 4726 sin_1 = (struct sockaddr_in *)sa1; 4727 sin_2 = (struct sockaddr_in *)sa2; 4728 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4729 } 4730 #endif 4731 default: 4732 /* we don't do these... */ 4733 return (0); 4734 } 4735 } 4736 4737 void 4738 sctp_print_address(struct sockaddr *sa) 4739 { 4740 #ifdef INET6 4741 char ip6buf[INET6_ADDRSTRLEN]; 4742 #endif 4743 4744 switch (sa->sa_family) { 4745 #ifdef INET6 4746 case AF_INET6: 4747 { 4748 struct sockaddr_in6 *sin6; 4749 4750 sin6 = (struct sockaddr_in6 *)sa; 4751 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4752 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4753 ntohs(sin6->sin6_port), 4754 sin6->sin6_scope_id); 4755 break; 4756 } 4757 #endif 4758 #ifdef INET 4759 case AF_INET: 4760 { 4761 struct sockaddr_in *sin; 4762 unsigned char *p; 4763 4764 sin = (struct sockaddr_in *)sa; 4765 p = (unsigned char *)&sin->sin_addr; 4766 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4767 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4768 break; 4769 } 4770 #endif 4771 default: 4772 SCTP_PRINTF("?\n"); 4773 break; 4774 } 4775 } 4776 4777 void 4778 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4779 struct sctp_inpcb *new_inp, 4780 struct sctp_tcb *stcb, 4781 int waitflags) 4782 { 4783 /* 4784 * go through our old INP and pull off any control structures that 4785 * belong to stcb and move then to the new inp. 4786 */ 4787 struct socket *old_so, *new_so; 4788 struct sctp_queued_to_read *control, *nctl; 4789 struct sctp_readhead tmp_queue; 4790 struct mbuf *m; 4791 int error = 0; 4792 4793 old_so = old_inp->sctp_socket; 4794 new_so = new_inp->sctp_socket; 4795 TAILQ_INIT(&tmp_queue); 4796 error = SOCK_IO_RECV_LOCK(old_so, waitflags); 4797 if (error) { 4798 /* 4799 * Gak, can't get I/O lock, we have a problem. data will be 4800 * left stranded.. and we don't dare look at it since the 4801 * other thread may be reading something. Oh well, its a 4802 * screwed up app that does a peeloff OR a accept while 4803 * reading from the main socket... actually its only the 4804 * peeloff() case, since I think read will fail on a 4805 * listening socket.. 4806 */ 4807 return; 4808 } 4809 /* lock the socket buffers */ 4810 SCTP_INP_READ_LOCK(old_inp); 4811 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4812 /* Pull off all for out target stcb */ 4813 if (control->stcb == stcb) { 4814 /* remove it we want it */ 4815 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4816 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4817 m = control->data; 4818 while (m) { 4819 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4820 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4821 } 4822 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4823 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4824 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4825 } 4826 m = SCTP_BUF_NEXT(m); 4827 } 4828 } 4829 } 4830 SCTP_INP_READ_UNLOCK(old_inp); 4831 /* Remove the recv-lock on the old socket */ 4832 SOCK_IO_RECV_UNLOCK(old_so); 4833 /* Now we move them over to the new socket buffer */ 4834 SCTP_INP_READ_LOCK(new_inp); 4835 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4836 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4837 m = control->data; 4838 while (m) { 4839 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4840 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4841 } 4842 sctp_sballoc(stcb, &new_so->so_rcv, m); 4843 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4844 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4845 } 4846 m = SCTP_BUF_NEXT(m); 4847 } 4848 } 4849 SCTP_INP_READ_UNLOCK(new_inp); 4850 } 4851 4852 void 4853 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4854 struct sctp_tcb *stcb, 4855 int so_locked 4856 SCTP_UNUSED 4857 ) 4858 { 4859 if ((inp != NULL) && 4860 (inp->sctp_socket != NULL) && 4861 (((inp->sctp_flags & (SCTP_PCB_FLAGS_TCPTYPE | SCTP_PCB_FLAGS_IN_TCPPOOL)) == 0) || 4862 !SCTP_IS_LISTENING(inp))) { 4863 sctp_sorwakeup(inp, inp->sctp_socket); 4864 } 4865 } 4866 4867 void 4868 sctp_add_to_readq(struct sctp_inpcb *inp, 4869 struct sctp_tcb *stcb, 4870 struct sctp_queued_to_read *control, 4871 struct sockbuf *sb, 4872 int end, 4873 int inp_read_lock_held, 4874 int so_locked) 4875 { 4876 /* 4877 * Here we must place the control on the end of the socket read 4878 * queue AND increment sb_cc so that select will work properly on 4879 * read. 4880 */ 4881 struct mbuf *m, *prev = NULL; 4882 4883 if (inp == NULL) { 4884 /* Gak, TSNH!! */ 4885 #ifdef INVARIANTS 4886 panic("Gak, inp NULL on add_to_readq"); 4887 #endif 4888 return; 4889 } 4890 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4891 SCTP_INP_READ_LOCK(inp); 4892 } 4893 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4894 if (!control->on_strm_q) { 4895 sctp_free_remote_addr(control->whoFrom); 4896 if (control->data) { 4897 sctp_m_freem(control->data); 4898 control->data = NULL; 4899 } 4900 sctp_free_a_readq(stcb, control); 4901 } 4902 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4903 SCTP_INP_READ_UNLOCK(inp); 4904 } 4905 return; 4906 } 4907 if ((control->spec_flags & M_NOTIFICATION) == 0) { 4908 atomic_add_int(&inp->total_recvs, 1); 4909 if (!control->do_not_ref_stcb) { 4910 atomic_add_int(&stcb->total_recvs, 1); 4911 } 4912 } 4913 m = control->data; 4914 control->held_length = 0; 4915 control->length = 0; 4916 while (m != NULL) { 4917 if (SCTP_BUF_LEN(m) == 0) { 4918 /* Skip mbufs with NO length */ 4919 if (prev == NULL) { 4920 /* First one */ 4921 control->data = sctp_m_free(m); 4922 m = control->data; 4923 } else { 4924 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4925 m = SCTP_BUF_NEXT(prev); 4926 } 4927 if (m == NULL) { 4928 control->tail_mbuf = prev; 4929 } 4930 continue; 4931 } 4932 prev = m; 4933 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4934 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4935 } 4936 sctp_sballoc(stcb, sb, m); 4937 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4938 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4939 } 4940 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4941 m = SCTP_BUF_NEXT(m); 4942 } 4943 if (prev != NULL) { 4944 control->tail_mbuf = prev; 4945 } else { 4946 /* Everything got collapsed out?? */ 4947 if (!control->on_strm_q) { 4948 sctp_free_remote_addr(control->whoFrom); 4949 sctp_free_a_readq(stcb, control); 4950 } 4951 if (inp_read_lock_held == 0) 4952 SCTP_INP_READ_UNLOCK(inp); 4953 return; 4954 } 4955 if (end) { 4956 control->end_added = 1; 4957 } 4958 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4959 control->on_read_q = 1; 4960 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4961 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4962 } 4963 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4964 SCTP_INP_READ_UNLOCK(inp); 4965 } 4966 } 4967 4968 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4969 *************ALTERNATE ROUTING CODE 4970 */ 4971 4972 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4973 *************ALTERNATE ROUTING CODE 4974 */ 4975 4976 struct mbuf * 4977 sctp_generate_cause(uint16_t code, char *info) 4978 { 4979 struct mbuf *m; 4980 struct sctp_gen_error_cause *cause; 4981 size_t info_len; 4982 uint16_t len; 4983 4984 if ((code == 0) || (info == NULL)) { 4985 return (NULL); 4986 } 4987 info_len = strlen(info); 4988 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4989 return (NULL); 4990 } 4991 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4992 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4993 if (m != NULL) { 4994 SCTP_BUF_LEN(m) = len; 4995 cause = mtod(m, struct sctp_gen_error_cause *); 4996 cause->code = htons(code); 4997 cause->length = htons(len); 4998 memcpy(cause->info, info, info_len); 4999 } 5000 return (m); 5001 } 5002 5003 struct mbuf * 5004 sctp_generate_no_user_data_cause(uint32_t tsn) 5005 { 5006 struct mbuf *m; 5007 struct sctp_error_no_user_data *no_user_data_cause; 5008 uint16_t len; 5009 5010 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 5011 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 5012 if (m != NULL) { 5013 SCTP_BUF_LEN(m) = len; 5014 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 5015 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 5016 no_user_data_cause->cause.length = htons(len); 5017 no_user_data_cause->tsn = htonl(tsn); 5018 } 5019 return (m); 5020 } 5021 5022 void 5023 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 5024 struct sctp_tmit_chunk *tp1, int chk_cnt) 5025 { 5026 if (tp1->data == NULL) { 5027 return; 5028 } 5029 atomic_subtract_int(&asoc->chunks_on_out_queue, chk_cnt); 5030 #ifdef SCTP_MBCNT_LOGGING 5031 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 5032 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 5033 asoc->total_output_queue_size, 5034 tp1->book_size, 5035 0, 5036 tp1->mbcnt); 5037 } 5038 #endif 5039 if (asoc->total_output_queue_size >= tp1->book_size) { 5040 atomic_subtract_int(&asoc->total_output_queue_size, tp1->book_size); 5041 } else { 5042 asoc->total_output_queue_size = 0; 5043 } 5044 if ((stcb->sctp_socket != NULL) && 5045 (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 5046 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 5047 if 
(stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 5048 atomic_subtract_int(&((stcb)->sctp_socket->so_snd.sb_cc), tp1->book_size); 5049 } else { 5050 stcb->sctp_socket->so_snd.sb_cc = 0; 5051 } 5052 } 5053 } 5054 5055 int 5056 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5057 uint8_t sent, int so_locked) 5058 { 5059 struct sctp_stream_out *strq; 5060 struct sctp_tmit_chunk *chk = NULL, *tp2; 5061 struct sctp_stream_queue_pending *sp; 5062 uint32_t mid; 5063 uint16_t sid; 5064 uint8_t foundeom = 0; 5065 int ret_sz = 0; 5066 int notdone; 5067 int do_wakeup_routine = 0; 5068 5069 SCTP_TCB_LOCK_ASSERT(stcb); 5070 5071 sid = tp1->rec.data.sid; 5072 mid = tp1->rec.data.mid; 5073 if (sent || ((tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0)) { 5074 stcb->asoc.abandoned_sent[0]++; 5075 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5076 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5077 #if defined(SCTP_DETAILED_STR_STATS) 5078 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5079 #endif 5080 } else { 5081 stcb->asoc.abandoned_unsent[0]++; 5082 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5083 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5084 #if defined(SCTP_DETAILED_STR_STATS) 5085 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5086 #endif 5087 } 5088 do { 5089 ret_sz += tp1->book_size; 5090 if (tp1->data != NULL) { 5091 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5092 sctp_flight_size_decrease(tp1); 5093 sctp_total_flight_decrease(stcb, tp1); 5094 } 5095 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5096 stcb->asoc.peers_rwnd += tp1->send_size; 5097 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5098 if (sent) { 5099 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5100 } else { 5101 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5102 } 5103 if (tp1->data) { 5104 sctp_m_freem(tp1->data); 5105 tp1->data = NULL; 5106 } 5107 do_wakeup_routine = 1; 5108 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5109 stcb->asoc.sent_queue_cnt_removeable--; 5110 } 5111 } 5112 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5113 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5114 SCTP_DATA_NOT_FRAG) { 5115 /* not frag'ed, we are done */ 5116 notdone = 0; 5117 foundeom = 1; 5118 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5119 /* end of frag, we are done */ 5120 notdone = 0; 5121 foundeom = 1; 5122 } else { 5123 /* 5124 * It's a begin or middle piece, we must mark all of 5125 * it 5126 */ 5127 notdone = 1; 5128 tp1 = TAILQ_NEXT(tp1, sctp_next); 5129 } 5130 } while (tp1 && notdone); 5131 if (foundeom == 0) { 5132 /* 5133 * The multi-part message was scattered across the send and 5134 * sent queue. 5135 */ 5136 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5137 if ((tp1->rec.data.sid != sid) || 5138 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5139 break; 5140 } 5141 /* 5142 * save to chk in case we have some on stream out 5143 * queue. If so and we have an un-transmitted one we 5144 * don't have to fudge the TSN.
5145 */ 5146 chk = tp1; 5147 ret_sz += tp1->book_size; 5148 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5149 if (sent) { 5150 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5151 } else { 5152 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5153 } 5154 if (tp1->data) { 5155 sctp_m_freem(tp1->data); 5156 tp1->data = NULL; 5157 } 5158 /* No flight involved here book the size to 0 */ 5159 tp1->book_size = 0; 5160 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5161 foundeom = 1; 5162 } 5163 do_wakeup_routine = 1; 5164 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5165 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5166 /* 5167 * on to the sent queue so we can wait for it to be 5168 * passed by. 5169 */ 5170 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5171 sctp_next); 5172 stcb->asoc.send_queue_cnt--; 5173 stcb->asoc.sent_queue_cnt++; 5174 } 5175 } 5176 if (foundeom == 0) { 5177 /* 5178 * Still no eom found. That means there is stuff left on the 5179 * stream out queue.. yuck. 5180 */ 5181 strq = &stcb->asoc.strmout[sid]; 5182 sp = TAILQ_FIRST(&strq->outqueue); 5183 if (sp != NULL) { 5184 sp->discard_rest = 1; 5185 /* 5186 * We may need to put a chunk on the queue that 5187 * holds the TSN that would have been sent with the 5188 * LAST bit. 5189 */ 5190 if (chk == NULL) { 5191 /* Yep, we have to */ 5192 sctp_alloc_a_chunk(stcb, chk); 5193 if (chk == NULL) { 5194 /* 5195 * we are hosed. All we can do is 5196 * nothing.. which will cause an 5197 * abort if the peer is paying 5198 * attention. 5199 */ 5200 goto oh_well; 5201 } 5202 memset(chk, 0, sizeof(*chk)); 5203 chk->rec.data.rcv_flags = 0; 5204 chk->sent = SCTP_FORWARD_TSN_SKIP; 5205 chk->asoc = &stcb->asoc; 5206 if (stcb->asoc.idata_supported == 0) { 5207 if (sp->sinfo_flags & SCTP_UNORDERED) { 5208 chk->rec.data.mid = 0; 5209 } else { 5210 chk->rec.data.mid = strq->next_mid_ordered; 5211 } 5212 } else { 5213 if (sp->sinfo_flags & SCTP_UNORDERED) { 5214 chk->rec.data.mid = strq->next_mid_unordered; 5215 } else { 5216 chk->rec.data.mid = strq->next_mid_ordered; 5217 } 5218 } 5219 chk->rec.data.sid = sp->sid; 5220 chk->rec.data.ppid = sp->ppid; 5221 chk->rec.data.context = sp->context; 5222 chk->flags = sp->act_flags; 5223 chk->whoTo = NULL; 5224 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5225 strq->chunks_on_queues++; 5226 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5227 stcb->asoc.sent_queue_cnt++; 5228 stcb->asoc.pr_sctp_cnt++; 5229 } 5230 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5231 if (sp->sinfo_flags & SCTP_UNORDERED) { 5232 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5233 } 5234 if (stcb->asoc.idata_supported == 0) { 5235 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5236 strq->next_mid_ordered++; 5237 } 5238 } else { 5239 if (sp->sinfo_flags & SCTP_UNORDERED) { 5240 strq->next_mid_unordered++; 5241 } else { 5242 strq->next_mid_ordered++; 5243 } 5244 } 5245 oh_well: 5246 if (sp->data) { 5247 /* 5248 * Pull any data to free up the SB and allow 5249 * sender to "add more" while we will throw 5250 * away :-) 5251 */ 5252 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5253 ret_sz += sp->length; 5254 do_wakeup_routine = 1; 5255 sp->some_taken = 1; 5256 sctp_m_freem(sp->data); 5257 sp->data = NULL; 5258 sp->tail_mbuf = NULL; 5259 sp->length = 0; 5260 } 5261 } 5262 } 5263 if (do_wakeup_routine) { 5264 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5265 } 5266 return (ret_sz); 5267 } 5268 5269 /* 5270 * checks to see if the given address, 
sa, is one that is currently known by 5271 * the kernel note: can't distinguish the same address on multiple interfaces 5272 * and doesn't handle multiple addresses with different zone/scope id's note: 5273 * ifa_ifwithaddr() compares the entire sockaddr struct 5274 */ 5275 struct sctp_ifa * 5276 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5277 int holds_lock) 5278 { 5279 struct sctp_laddr *laddr; 5280 5281 if (holds_lock == 0) { 5282 SCTP_INP_RLOCK(inp); 5283 } 5284 5285 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5286 if (laddr->ifa == NULL) 5287 continue; 5288 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5289 continue; 5290 #ifdef INET 5291 if (addr->sa_family == AF_INET) { 5292 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5293 laddr->ifa->address.sin.sin_addr.s_addr) { 5294 /* found him. */ 5295 break; 5296 } 5297 } 5298 #endif 5299 #ifdef INET6 5300 if (addr->sa_family == AF_INET6) { 5301 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5302 &laddr->ifa->address.sin6)) { 5303 /* found him. */ 5304 break; 5305 } 5306 } 5307 #endif 5308 } 5309 if (holds_lock == 0) { 5310 SCTP_INP_RUNLOCK(inp); 5311 } 5312 if (laddr != NULL) { 5313 return (laddr->ifa); 5314 } else { 5315 return (NULL); 5316 } 5317 } 5318 5319 uint32_t 5320 sctp_get_ifa_hash_val(struct sockaddr *addr) 5321 { 5322 switch (addr->sa_family) { 5323 #ifdef INET 5324 case AF_INET: 5325 { 5326 struct sockaddr_in *sin; 5327 5328 sin = (struct sockaddr_in *)addr; 5329 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5330 } 5331 #endif 5332 #ifdef INET6 5333 case AF_INET6: 5334 { 5335 struct sockaddr_in6 *sin6; 5336 uint32_t hash_of_addr; 5337 5338 sin6 = (struct sockaddr_in6 *)addr; 5339 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5340 sin6->sin6_addr.s6_addr32[1] + 5341 sin6->sin6_addr.s6_addr32[2] + 5342 sin6->sin6_addr.s6_addr32[3]); 5343 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5344 return (hash_of_addr); 5345 } 5346 #endif 5347 default: 5348 break; 5349 } 5350 return (0); 5351 } 5352 5353 struct sctp_ifa * 5354 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5355 { 5356 struct sctp_ifa *sctp_ifap; 5357 struct sctp_vrf *vrf; 5358 struct sctp_ifalist *hash_head; 5359 uint32_t hash_of_addr; 5360 5361 if (holds_lock == 0) { 5362 SCTP_IPI_ADDR_RLOCK(); 5363 } else { 5364 SCTP_IPI_ADDR_LOCK_ASSERT(); 5365 } 5366 5367 vrf = sctp_find_vrf(vrf_id); 5368 if (vrf == NULL) { 5369 if (holds_lock == 0) 5370 SCTP_IPI_ADDR_RUNLOCK(); 5371 return (NULL); 5372 } 5373 5374 hash_of_addr = sctp_get_ifa_hash_val(addr); 5375 5376 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5377 if (hash_head == NULL) { 5378 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5379 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5380 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5381 sctp_print_address(addr); 5382 SCTP_PRINTF("No such bucket for address\n"); 5383 if (holds_lock == 0) 5384 SCTP_IPI_ADDR_RUNLOCK(); 5385 5386 return (NULL); 5387 } 5388 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5389 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5390 continue; 5391 #ifdef INET 5392 if (addr->sa_family == AF_INET) { 5393 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5394 sctp_ifap->address.sin.sin_addr.s_addr) { 5395 /* found him. 
*/ 5396 break; 5397 } 5398 } 5399 #endif 5400 #ifdef INET6 5401 if (addr->sa_family == AF_INET6) { 5402 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5403 &sctp_ifap->address.sin6)) { 5404 /* found him. */ 5405 break; 5406 } 5407 } 5408 #endif 5409 } 5410 if (holds_lock == 0) 5411 SCTP_IPI_ADDR_RUNLOCK(); 5412 return (sctp_ifap); 5413 } 5414 5415 static void 5416 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5417 uint32_t rwnd_req) 5418 { 5419 /* User pulled some data, do we need a rwnd update? */ 5420 struct epoch_tracker et; 5421 int r_unlocked = 0; 5422 uint32_t dif, rwnd; 5423 struct socket *so = NULL; 5424 5425 if (stcb == NULL) 5426 return; 5427 5428 atomic_add_int(&stcb->asoc.refcnt, 1); 5429 5430 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5431 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5432 /* Pre-check If we are freeing no update */ 5433 goto no_lock; 5434 } 5435 SCTP_INP_INCR_REF(stcb->sctp_ep); 5436 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5437 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5438 goto out; 5439 } 5440 so = stcb->sctp_socket; 5441 if (so == NULL) { 5442 goto out; 5443 } 5444 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5445 /* Have you have freed enough to look */ 5446 *freed_so_far = 0; 5447 /* Yep, its worth a look and the lock overhead */ 5448 5449 /* Figure out what the rwnd would be */ 5450 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5451 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5452 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5453 } else { 5454 dif = 0; 5455 } 5456 if (dif >= rwnd_req) { 5457 if (hold_rlock) { 5458 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5459 r_unlocked = 1; 5460 } 5461 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5462 /* 5463 * One last check before we allow the guy possibly 5464 * to get in. There is a race, where the guy has not 5465 * reached the gate. In that case 5466 */ 5467 goto out; 5468 } 5469 SCTP_TCB_LOCK(stcb); 5470 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5471 /* No reports here */ 5472 SCTP_TCB_UNLOCK(stcb); 5473 goto out; 5474 } 5475 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5476 NET_EPOCH_ENTER(et); 5477 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5478 5479 sctp_chunk_output(stcb->sctp_ep, stcb, 5480 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5481 /* make sure no timer is running */ 5482 NET_EPOCH_EXIT(et); 5483 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5484 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5485 SCTP_TCB_UNLOCK(stcb); 5486 } else { 5487 /* Update how much we have pending */ 5488 stcb->freed_by_sorcv_sincelast = dif; 5489 } 5490 out: 5491 if (so && r_unlocked && hold_rlock) { 5492 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5493 } 5494 5495 SCTP_INP_DECR_REF(stcb->sctp_ep); 5496 no_lock: 5497 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5498 return; 5499 } 5500 5501 int 5502 sctp_sorecvmsg(struct socket *so, 5503 struct uio *uio, 5504 struct mbuf **mp, 5505 struct sockaddr *from, 5506 int fromlen, 5507 int *msg_flags, 5508 struct sctp_sndrcvinfo *sinfo, 5509 int filling_sinfo) 5510 { 5511 /* 5512 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5513 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5514 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
5515 * On the way out we may send out any combination of: 5516 * MSG_NOTIFICATION MSG_EOR 5517 * 5518 */ 5519 struct sctp_inpcb *inp = NULL; 5520 ssize_t my_len = 0; 5521 ssize_t cp_len = 0; 5522 int error = 0; 5523 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5524 struct mbuf *m = NULL; 5525 struct sctp_tcb *stcb = NULL; 5526 int wakeup_read_socket = 0; 5527 int freecnt_applied = 0; 5528 int out_flags = 0, in_flags = 0; 5529 int block_allowed = 1; 5530 uint32_t freed_so_far = 0; 5531 ssize_t copied_so_far = 0; 5532 int in_eeor_mode = 0; 5533 int no_rcv_needed = 0; 5534 uint32_t rwnd_req = 0; 5535 int hold_sblock = 0; 5536 int hold_rlock = 0; 5537 ssize_t slen = 0; 5538 uint32_t held_length = 0; 5539 int sockbuf_lock = 0; 5540 5541 if (uio == NULL) { 5542 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5543 return (EINVAL); 5544 } 5545 5546 if (msg_flags) { 5547 in_flags = *msg_flags; 5548 if (in_flags & MSG_PEEK) 5549 SCTP_STAT_INCR(sctps_read_peeks); 5550 } else { 5551 in_flags = 0; 5552 } 5553 slen = uio->uio_resid; 5554 5555 /* Pull in and set up our int flags */ 5556 if (in_flags & MSG_OOB) { 5557 /* Out of band's NOT supported */ 5558 return (EOPNOTSUPP); 5559 } 5560 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5561 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5562 return (EINVAL); 5563 } 5564 if ((in_flags & (MSG_DONTWAIT 5565 | MSG_NBIO 5566 )) || 5567 SCTP_SO_IS_NBIO(so)) { 5568 block_allowed = 0; 5569 } 5570 /* setup the endpoint */ 5571 inp = (struct sctp_inpcb *)so->so_pcb; 5572 if (inp == NULL) { 5573 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5574 return (EFAULT); 5575 } 5576 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5577 /* Must be at least a MTU's worth */ 5578 if (rwnd_req < SCTP_MIN_RWND) 5579 rwnd_req = SCTP_MIN_RWND; 5580 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5581 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5582 sctp_misc_ints(SCTP_SORECV_ENTER, 5583 rwnd_req, in_eeor_mode, SCTP_SBAVAIL(&so->so_rcv), (uint32_t)uio->uio_resid); 5584 } 5585 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5586 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5587 rwnd_req, block_allowed, SCTP_SBAVAIL(&so->so_rcv), (uint32_t)uio->uio_resid); 5588 } 5589 5590 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(in_flags)); 5591 if (error) { 5592 goto release_unlocked; 5593 } 5594 sockbuf_lock = 1; 5595 restart: 5596 5597 restart_nosblocks: 5598 if (hold_sblock == 0) { 5599 SOCKBUF_LOCK(&so->so_rcv); 5600 hold_sblock = 1; 5601 } 5602 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5603 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5604 goto out; 5605 } 5606 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && SCTP_SBAVAIL(&so->so_rcv) == 0) { 5607 if (so->so_error) { 5608 error = so->so_error; 5609 if ((in_flags & MSG_PEEK) == 0) 5610 so->so_error = 0; 5611 goto out; 5612 } else { 5613 if (SCTP_SBAVAIL(&so->so_rcv) == 0) { 5614 /* indicate EOF */ 5615 error = 0; 5616 goto out; 5617 } 5618 } 5619 } 5620 if (SCTP_SBAVAIL(&so->so_rcv) <= held_length) { 5621 if (so->so_error) { 5622 error = so->so_error; 5623 if ((in_flags & MSG_PEEK) == 0) { 5624 so->so_error = 0; 5625 } 5626 goto out; 5627 } 5628 if ((SCTP_SBAVAIL(&so->so_rcv) == 0) && 5629 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5630 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5631 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5632 /* 5633 * 
For active open side clear flags for 5634 * re-use passive open is blocked by 5635 * connect. 5636 */ 5637 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5638 /* 5639 * You were aborted, passive side 5640 * always hits here 5641 */ 5642 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5643 error = ECONNRESET; 5644 } 5645 so->so_state &= ~(SS_ISCONNECTING | 5646 SS_ISDISCONNECTING | 5647 SS_ISCONFIRMING | 5648 SS_ISCONNECTED); 5649 if (error == 0) { 5650 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5651 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5652 error = ENOTCONN; 5653 } 5654 } 5655 goto out; 5656 } 5657 } 5658 if (block_allowed) { 5659 error = sbwait(so, SO_RCV); 5660 if (error) { 5661 goto out; 5662 } 5663 held_length = 0; 5664 goto restart_nosblocks; 5665 } else { 5666 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5667 error = EWOULDBLOCK; 5668 goto out; 5669 } 5670 } 5671 if (hold_sblock == 1) { 5672 SOCKBUF_UNLOCK(&so->so_rcv); 5673 hold_sblock = 0; 5674 } 5675 /* we possibly have data we can read */ 5676 /* sa_ignore FREED_MEMORY */ 5677 control = TAILQ_FIRST(&inp->read_queue); 5678 if (control == NULL) { 5679 /* 5680 * This could be happening since the appender did the 5681 * increment but as not yet did the tailq insert onto the 5682 * read_queue 5683 */ 5684 if (hold_rlock == 0) { 5685 SCTP_INP_READ_LOCK(inp); 5686 } 5687 control = TAILQ_FIRST(&inp->read_queue); 5688 if ((control == NULL) && (SCTP_SBAVAIL(&so->so_rcv) > 0)) { 5689 #ifdef INVARIANTS 5690 panic("Huh, its non zero and nothing on control?"); 5691 #endif 5692 SCTP_SB_CLEAR(so->so_rcv); 5693 } 5694 SCTP_INP_READ_UNLOCK(inp); 5695 hold_rlock = 0; 5696 goto restart; 5697 } 5698 5699 if ((control->length == 0) && 5700 (control->do_not_ref_stcb)) { 5701 /* 5702 * Clean up code for freeing assoc that left behind a 5703 * pdapi.. maybe a peer in EEOR that just closed after 5704 * sending and never indicated a EOR. 5705 */ 5706 if (hold_rlock == 0) { 5707 hold_rlock = 1; 5708 SCTP_INP_READ_LOCK(inp); 5709 } 5710 control->held_length = 0; 5711 if (control->data) { 5712 /* Hmm there is data here .. fix */ 5713 struct mbuf *m_tmp; 5714 int cnt = 0; 5715 5716 m_tmp = control->data; 5717 while (m_tmp) { 5718 cnt += SCTP_BUF_LEN(m_tmp); 5719 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5720 control->tail_mbuf = m_tmp; 5721 control->end_added = 1; 5722 } 5723 m_tmp = SCTP_BUF_NEXT(m_tmp); 5724 } 5725 control->length = cnt; 5726 } else { 5727 /* remove it */ 5728 TAILQ_REMOVE(&inp->read_queue, control, next); 5729 /* Add back any hidden data */ 5730 sctp_free_remote_addr(control->whoFrom); 5731 sctp_free_a_readq(stcb, control); 5732 } 5733 if (hold_rlock) { 5734 hold_rlock = 0; 5735 SCTP_INP_READ_UNLOCK(inp); 5736 } 5737 goto restart; 5738 } 5739 if ((control->length == 0) && 5740 (control->end_added == 1)) { 5741 /* 5742 * Do we also need to check for (control->pdapi_aborted == 5743 * 1)? 5744 */ 5745 if (hold_rlock == 0) { 5746 hold_rlock = 1; 5747 SCTP_INP_READ_LOCK(inp); 5748 } 5749 TAILQ_REMOVE(&inp->read_queue, control, next); 5750 if (control->data) { 5751 #ifdef INVARIANTS 5752 panic("control->data not null but control->length == 0"); 5753 #else 5754 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5755 sctp_m_freem(control->data); 5756 control->data = NULL; 5757 #endif 5758 } 5759 if (control->aux_data) { 5760 sctp_m_free(control->aux_data); 5761 control->aux_data = NULL; 5762 } 5763 #ifdef INVARIANTS 5764 if (control->on_strm_q) { 5765 panic("About to free ctl:%p so:%p and its in %d", 5766 control, so, control->on_strm_q); 5767 } 5768 #endif 5769 sctp_free_remote_addr(control->whoFrom); 5770 sctp_free_a_readq(stcb, control); 5771 if (hold_rlock) { 5772 hold_rlock = 0; 5773 SCTP_INP_READ_UNLOCK(inp); 5774 } 5775 goto restart; 5776 } 5777 if (control->length == 0) { 5778 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5779 (filling_sinfo)) { 5780 /* find a more suitable one then this */ 5781 ctl = TAILQ_NEXT(control, next); 5782 while (ctl) { 5783 if ((ctl->stcb != control->stcb) && (ctl->length) && 5784 (ctl->some_taken || 5785 (ctl->spec_flags & M_NOTIFICATION) || 5786 ((ctl->do_not_ref_stcb == 0) && 5787 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5788 ) { 5789 /*- 5790 * If we have a different TCB next, and there is data 5791 * present. If we have already taken some (pdapi), OR we can 5792 * ref the tcb and no delivery as started on this stream, we 5793 * take it. Note we allow a notification on a different 5794 * assoc to be delivered.. 5795 */ 5796 control = ctl; 5797 goto found_one; 5798 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5799 (ctl->length) && 5800 ((ctl->some_taken) || 5801 ((ctl->do_not_ref_stcb == 0) && 5802 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5803 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5804 /*- 5805 * If we have the same tcb, and there is data present, and we 5806 * have the strm interleave feature present. Then if we have 5807 * taken some (pdapi) or we can refer to tht tcb AND we have 5808 * not started a delivery for this stream, we can take it. 5809 * Note we do NOT allow a notification on the same assoc to 5810 * be delivered. 5811 */ 5812 control = ctl; 5813 goto found_one; 5814 } 5815 ctl = TAILQ_NEXT(ctl, next); 5816 } 5817 } 5818 /* 5819 * if we reach here, not suitable replacement is available 5820 * <or> fragment interleave is NOT on. So stuff the sb_cc 5821 * into the our held count, and its time to sleep again. 5822 */ 5823 held_length = SCTP_SBAVAIL(&so->so_rcv); 5824 control->held_length = SCTP_SBAVAIL(&so->so_rcv); 5825 goto restart; 5826 } 5827 /* Clear the held length since there is something to read */ 5828 control->held_length = 0; 5829 found_one: 5830 /* 5831 * If we reach here, control has a some data for us to read off. 5832 * Note that stcb COULD be NULL. 5833 */ 5834 if (hold_rlock == 0) { 5835 hold_rlock = 1; 5836 SCTP_INP_READ_LOCK(inp); 5837 } 5838 control->some_taken++; 5839 stcb = control->stcb; 5840 if (stcb) { 5841 if ((control->do_not_ref_stcb == 0) && 5842 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5843 if (freecnt_applied == 0) 5844 stcb = NULL; 5845 } else if (control->do_not_ref_stcb == 0) { 5846 /* you can't free it on me please */ 5847 /* 5848 * The lock on the socket buffer protects us so the 5849 * free code will stop. 
But since we used the 5850 * socketbuf lock and the sender uses the tcb_lock 5851 * to increment, we need to use the atomic add to 5852 * the refcnt 5853 */ 5854 if (freecnt_applied) { 5855 #ifdef INVARIANTS 5856 panic("refcnt already incremented"); 5857 #else 5858 SCTP_PRINTF("refcnt already incremented?\n"); 5859 #endif 5860 } else { 5861 atomic_add_int(&stcb->asoc.refcnt, 1); 5862 freecnt_applied = 1; 5863 } 5864 /* 5865 * Setup to remember how much we have not yet told 5866 * the peer our rwnd has opened up. Note we grab the 5867 * value from the tcb from last time. Note too that 5868 * sack sending clears this when a sack is sent, 5869 * which is fine. Once we hit the rwnd_req, we then 5870 * will go to the sctp_user_rcvd() that will not 5871 * lock until it KNOWs it MUST send a WUP-SACK. 5872 */ 5873 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5874 stcb->freed_by_sorcv_sincelast = 0; 5875 } 5876 } 5877 if (stcb && 5878 ((control->spec_flags & M_NOTIFICATION) == 0) && 5879 control->do_not_ref_stcb == 0) { 5880 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5881 } 5882 5883 /* First lets get off the sinfo and sockaddr info */ 5884 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5885 sinfo->sinfo_stream = control->sinfo_stream; 5886 sinfo->sinfo_ssn = (uint16_t)control->mid; 5887 sinfo->sinfo_flags = control->sinfo_flags; 5888 sinfo->sinfo_ppid = control->sinfo_ppid; 5889 sinfo->sinfo_context = control->sinfo_context; 5890 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5891 sinfo->sinfo_tsn = control->sinfo_tsn; 5892 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5893 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5894 nxt = TAILQ_NEXT(control, next); 5895 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5896 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5897 struct sctp_extrcvinfo *s_extra; 5898 5899 s_extra = (struct sctp_extrcvinfo *)sinfo; 5900 if ((nxt) && 5901 (nxt->length)) { 5902 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5903 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5904 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5905 } 5906 if (nxt->spec_flags & M_NOTIFICATION) { 5907 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5908 } 5909 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5910 s_extra->serinfo_next_length = nxt->length; 5911 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5912 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5913 if (nxt->tail_mbuf != NULL) { 5914 if (nxt->end_added) { 5915 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5916 } 5917 } 5918 } else { 5919 /* 5920 * we explicitly 0 this, since the memcpy 5921 * got some other things beyond the older 5922 * sinfo_ that is on the control's structure 5923 * :-D 5924 */ 5925 nxt = NULL; 5926 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5927 s_extra->serinfo_next_aid = 0; 5928 s_extra->serinfo_next_length = 0; 5929 s_extra->serinfo_next_ppid = 0; 5930 s_extra->serinfo_next_stream = 0; 5931 } 5932 } 5933 /* 5934 * update off the real current cum-ack, if we have an stcb. 5935 */ 5936 if ((control->do_not_ref_stcb == 0) && stcb) 5937 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5938 /* 5939 * mask off the high bits, we keep the actual chunk bits in 5940 * there. 
5941 */ 5942 sinfo->sinfo_flags &= 0x00ff; 5943 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5944 sinfo->sinfo_flags |= SCTP_UNORDERED; 5945 } 5946 } 5947 #ifdef SCTP_ASOCLOG_OF_TSNS 5948 { 5949 int index, newindex; 5950 struct sctp_pcbtsn_rlog *entry; 5951 5952 do { 5953 index = inp->readlog_index; 5954 newindex = index + 1; 5955 if (newindex >= SCTP_READ_LOG_SIZE) { 5956 newindex = 0; 5957 } 5958 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5959 entry = &inp->readlog[index]; 5960 entry->vtag = control->sinfo_assoc_id; 5961 entry->strm = control->sinfo_stream; 5962 entry->seq = (uint16_t)control->mid; 5963 entry->sz = control->length; 5964 entry->flgs = control->sinfo_flags; 5965 } 5966 #endif 5967 if ((fromlen > 0) && (from != NULL)) { 5968 union sctp_sockstore store; 5969 size_t len; 5970 5971 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5972 #ifdef INET6 5973 case AF_INET6: 5974 len = sizeof(struct sockaddr_in6); 5975 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5976 store.sin6.sin6_port = control->port_from; 5977 break; 5978 #endif 5979 #ifdef INET 5980 case AF_INET: 5981 #ifdef INET6 5982 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5983 len = sizeof(struct sockaddr_in6); 5984 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5985 &store.sin6); 5986 store.sin6.sin6_port = control->port_from; 5987 } else { 5988 len = sizeof(struct sockaddr_in); 5989 store.sin = control->whoFrom->ro._l_addr.sin; 5990 store.sin.sin_port = control->port_from; 5991 } 5992 #else 5993 len = sizeof(struct sockaddr_in); 5994 store.sin = control->whoFrom->ro._l_addr.sin; 5995 store.sin.sin_port = control->port_from; 5996 #endif 5997 break; 5998 #endif 5999 default: 6000 len = 0; 6001 break; 6002 } 6003 memcpy(from, &store, min((size_t)fromlen, len)); 6004 #ifdef INET6 6005 { 6006 struct sockaddr_in6 lsa6, *from6; 6007 6008 from6 = (struct sockaddr_in6 *)from; 6009 sctp_recover_scope_mac(from6, (&lsa6)); 6010 } 6011 #endif 6012 } 6013 if (hold_rlock) { 6014 SCTP_INP_READ_UNLOCK(inp); 6015 hold_rlock = 0; 6016 } 6017 if (hold_sblock) { 6018 SOCKBUF_UNLOCK(&so->so_rcv); 6019 hold_sblock = 0; 6020 } 6021 /* now copy out what data we can */ 6022 if (mp == NULL) { 6023 /* copy out each mbuf in the chain up to length */ 6024 get_more_data: 6025 m = control->data; 6026 while (m) { 6027 /* Move out all we can */ 6028 cp_len = uio->uio_resid; 6029 my_len = SCTP_BUF_LEN(m); 6030 if (cp_len > my_len) { 6031 /* not enough in this buf */ 6032 cp_len = my_len; 6033 } 6034 if (hold_rlock) { 6035 SCTP_INP_READ_UNLOCK(inp); 6036 hold_rlock = 0; 6037 } 6038 if (cp_len > 0) 6039 error = uiomove(mtod(m, char *), (int)cp_len, uio); 6040 /* re-read */ 6041 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 6042 goto release; 6043 } 6044 6045 if ((control->do_not_ref_stcb == 0) && stcb && 6046 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 6047 no_rcv_needed = 1; 6048 } 6049 if (error) { 6050 /* error we are out of here */ 6051 goto release; 6052 } 6053 SCTP_INP_READ_LOCK(inp); 6054 hold_rlock = 1; 6055 if (cp_len == SCTP_BUF_LEN(m)) { 6056 if ((SCTP_BUF_NEXT(m) == NULL) && 6057 (control->end_added)) { 6058 out_flags |= MSG_EOR; 6059 if ((control->do_not_ref_stcb == 0) && 6060 (control->stcb != NULL) && 6061 ((control->spec_flags & M_NOTIFICATION) == 0)) 6062 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6063 } 6064 if (control->spec_flags & M_NOTIFICATION) { 6065 out_flags |= MSG_NOTIFICATION; 6066 } 6067 /* we ate up the mbuf */ 
6068 if (in_flags & MSG_PEEK) { 6069 /* just looking */ 6070 m = SCTP_BUF_NEXT(m); 6071 copied_so_far += cp_len; 6072 } else { 6073 /* dispose of the mbuf */ 6074 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6075 sctp_sblog(&so->so_rcv, 6076 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6077 } 6078 sctp_sbfree(control, stcb, &so->so_rcv, m); 6079 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6080 sctp_sblog(&so->so_rcv, 6081 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6082 } 6083 copied_so_far += cp_len; 6084 freed_so_far += (uint32_t)cp_len; 6085 freed_so_far += MSIZE; 6086 atomic_subtract_int(&control->length, (int)cp_len); 6087 control->data = sctp_m_free(m); 6088 m = control->data; 6089 /* 6090 * been through it all, must hold sb 6091 * lock ok to null tail 6092 */ 6093 if (control->data == NULL) { 6094 #ifdef INVARIANTS 6095 if ((control->end_added == 0) || 6096 (TAILQ_NEXT(control, next) == NULL)) { 6097 /* 6098 * If the end is not 6099 * added, OR the 6100 * next is NOT null 6101 * we MUST have the 6102 * lock. 6103 */ 6104 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6105 panic("Hmm we don't own the lock?"); 6106 } 6107 } 6108 #endif 6109 control->tail_mbuf = NULL; 6110 #ifdef INVARIANTS 6111 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6112 panic("end_added, nothing left and no MSG_EOR"); 6113 } 6114 #endif 6115 } 6116 } 6117 } else { 6118 /* Do we need to trim the mbuf? */ 6119 if (control->spec_flags & M_NOTIFICATION) { 6120 out_flags |= MSG_NOTIFICATION; 6121 } 6122 if ((in_flags & MSG_PEEK) == 0) { 6123 SCTP_BUF_RESV_UF(m, cp_len); 6124 SCTP_BUF_LEN(m) -= (int)cp_len; 6125 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6126 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6127 } 6128 atomic_subtract_int(&so->so_rcv.sb_cc, (int)cp_len); 6129 if ((control->do_not_ref_stcb == 0) && 6130 stcb) { 6131 atomic_subtract_int(&stcb->asoc.sb_cc, (int)cp_len); 6132 } 6133 copied_so_far += cp_len; 6134 freed_so_far += (uint32_t)cp_len; 6135 freed_so_far += MSIZE; 6136 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6137 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6138 SCTP_LOG_SBRESULT, 0); 6139 } 6140 atomic_subtract_int(&control->length, (int)cp_len); 6141 } else { 6142 copied_so_far += cp_len; 6143 } 6144 } 6145 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6146 break; 6147 } 6148 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6149 (control->do_not_ref_stcb == 0) && 6150 (freed_so_far >= rwnd_req)) { 6151 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6152 } 6153 } /* end while(m) */ 6154 /* 6155 * At this point we have looked at it all and we either have 6156 * a MSG_EOR/or read all the user wants... <OR> 6157 * control->length == 0. 6158 */ 6159 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6160 /* we are done with this control */ 6161 if (control->length == 0) { 6162 if (control->data) { 6163 #ifdef INVARIANTS 6164 panic("control->data not null at read eor?"); 6165 #else 6166 SCTP_PRINTF("Strange, data left in the control buffer .. 
invariants would panic?\n"); 6167 sctp_m_freem(control->data); 6168 control->data = NULL; 6169 #endif 6170 } 6171 done_with_control: 6172 if (hold_rlock == 0) { 6173 SCTP_INP_READ_LOCK(inp); 6174 hold_rlock = 1; 6175 } 6176 TAILQ_REMOVE(&inp->read_queue, control, next); 6177 /* Add back any hidden data */ 6178 if (control->held_length) { 6179 held_length = 0; 6180 control->held_length = 0; 6181 wakeup_read_socket = 1; 6182 } 6183 if (control->aux_data) { 6184 sctp_m_free(control->aux_data); 6185 control->aux_data = NULL; 6186 } 6187 no_rcv_needed = control->do_not_ref_stcb; 6188 sctp_free_remote_addr(control->whoFrom); 6189 control->data = NULL; 6190 #ifdef INVARIANTS 6191 if (control->on_strm_q) { 6192 panic("About to free ctl:%p so:%p and its in %d", 6193 control, so, control->on_strm_q); 6194 } 6195 #endif 6196 sctp_free_a_readq(stcb, control); 6197 control = NULL; 6198 if ((freed_so_far >= rwnd_req) && 6199 (no_rcv_needed == 0)) 6200 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6201 6202 } else { 6203 /* 6204 * The user did not read all of this 6205 * message, turn off the returned MSG_EOR 6206 * since we are leaving more behind on the 6207 * control to read. 6208 */ 6209 #ifdef INVARIANTS 6210 if (control->end_added && 6211 (control->data == NULL) && 6212 (control->tail_mbuf == NULL)) { 6213 panic("Gak, control->length is corrupt?"); 6214 } 6215 #endif 6216 no_rcv_needed = control->do_not_ref_stcb; 6217 out_flags &= ~MSG_EOR; 6218 } 6219 } 6220 if (out_flags & MSG_EOR) { 6221 goto release; 6222 } 6223 if ((uio->uio_resid == 0) || 6224 ((in_eeor_mode) && 6225 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6226 goto release; 6227 } 6228 /* 6229 * If I hit here the receiver wants more and this message is 6230 * NOT done (pd-api). So two questions. Can we block? if not 6231 * we are done. Did the user NOT set MSG_WAITALL? 6232 */ 6233 if (block_allowed == 0) { 6234 goto release; 6235 } 6236 /* 6237 * We need to wait for more data a few things: - We don't 6238 * release the I/O lock so we don't get someone else 6239 * reading. - We must be sure to account for the case where 6240 * what is added is NOT to our control when we wakeup. 6241 */ 6242 6243 /* 6244 * Do we need to tell the transport a rwnd update might be 6245 * needed before we go to sleep? 
6246 */ 6247 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6248 ((freed_so_far >= rwnd_req) && 6249 (control->do_not_ref_stcb == 0) && 6250 (no_rcv_needed == 0))) { 6251 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6252 } 6253 wait_some_more: 6254 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6255 goto release; 6256 } 6257 6258 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6259 goto release; 6260 6261 if (hold_rlock == 1) { 6262 SCTP_INP_READ_UNLOCK(inp); 6263 hold_rlock = 0; 6264 } 6265 if (hold_sblock == 0) { 6266 SOCKBUF_LOCK(&so->so_rcv); 6267 hold_sblock = 1; 6268 } 6269 if ((copied_so_far) && (control->length == 0) && 6270 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6271 goto release; 6272 } 6273 if (SCTP_SBAVAIL(&so->so_rcv) <= control->held_length) { 6274 error = sbwait(so, SO_RCV); 6275 if (error) { 6276 goto release; 6277 } 6278 control->held_length = 0; 6279 } 6280 if (hold_sblock) { 6281 SOCKBUF_UNLOCK(&so->so_rcv); 6282 hold_sblock = 0; 6283 } 6284 if (control->length == 0) { 6285 /* still nothing here */ 6286 if (control->end_added == 1) { 6287 /* he aborted, or is done i.e.did a shutdown */ 6288 out_flags |= MSG_EOR; 6289 if (control->pdapi_aborted) { 6290 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6291 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6292 6293 out_flags |= MSG_TRUNC; 6294 } else { 6295 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6296 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6297 } 6298 goto done_with_control; 6299 } 6300 if (SCTP_SBAVAIL(&so->so_rcv) > held_length) { 6301 control->held_length = SCTP_SBAVAIL(&so->so_rcv); 6302 held_length = 0; 6303 } 6304 goto wait_some_more; 6305 } else if (control->data == NULL) { 6306 /* 6307 * we must re-sync since data is probably being 6308 * added 6309 */ 6310 SCTP_INP_READ_LOCK(inp); 6311 if ((control->length > 0) && (control->data == NULL)) { 6312 /* 6313 * big trouble.. we have the lock and its 6314 * corrupt? 6315 */ 6316 #ifdef INVARIANTS 6317 panic("Impossible data==NULL length !=0"); 6318 #endif 6319 out_flags |= MSG_EOR; 6320 out_flags |= MSG_TRUNC; 6321 control->length = 0; 6322 SCTP_INP_READ_UNLOCK(inp); 6323 goto done_with_control; 6324 } 6325 SCTP_INP_READ_UNLOCK(inp); 6326 /* We will fall around to get more data */ 6327 } 6328 goto get_more_data; 6329 } else { 6330 /*- 6331 * Give caller back the mbuf chain, 6332 * store in uio_resid the length 6333 */ 6334 wakeup_read_socket = 0; 6335 if ((control->end_added == 0) || 6336 (TAILQ_NEXT(control, next) == NULL)) { 6337 /* Need to get rlock */ 6338 if (hold_rlock == 0) { 6339 SCTP_INP_READ_LOCK(inp); 6340 hold_rlock = 1; 6341 } 6342 } 6343 if (control->end_added) { 6344 out_flags |= MSG_EOR; 6345 if ((control->do_not_ref_stcb == 0) && 6346 (control->stcb != NULL) && 6347 ((control->spec_flags & M_NOTIFICATION) == 0)) 6348 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6349 } 6350 if (control->spec_flags & M_NOTIFICATION) { 6351 out_flags |= MSG_NOTIFICATION; 6352 } 6353 uio->uio_resid = control->length; 6354 *mp = control->data; 6355 m = control->data; 6356 while (m) { 6357 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6358 sctp_sblog(&so->so_rcv, 6359 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6360 } 6361 sctp_sbfree(control, stcb, &so->so_rcv, m); 6362 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6363 freed_so_far += MSIZE; 6364 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6365 sctp_sblog(&so->so_rcv, 6366 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6367 } 6368 m = SCTP_BUF_NEXT(m); 6369 } 6370 control->data = control->tail_mbuf = NULL; 6371 control->length = 0; 6372 if (out_flags & MSG_EOR) { 6373 /* Done with this control */ 6374 goto done_with_control; 6375 } 6376 } 6377 release: 6378 if (hold_rlock == 1) { 6379 SCTP_INP_READ_UNLOCK(inp); 6380 hold_rlock = 0; 6381 } 6382 if (hold_sblock == 1) { 6383 SOCKBUF_UNLOCK(&so->so_rcv); 6384 hold_sblock = 0; 6385 } 6386 6387 SOCK_IO_RECV_UNLOCK(so); 6388 sockbuf_lock = 0; 6389 6390 release_unlocked: 6391 if (hold_sblock) { 6392 SOCKBUF_UNLOCK(&so->so_rcv); 6393 hold_sblock = 0; 6394 } 6395 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6396 if ((freed_so_far >= rwnd_req) && 6397 (control && (control->do_not_ref_stcb == 0)) && 6398 (no_rcv_needed == 0)) 6399 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6400 } 6401 out: 6402 if (msg_flags) { 6403 *msg_flags = out_flags; 6404 } 6405 if (((out_flags & MSG_EOR) == 0) && 6406 ((in_flags & MSG_PEEK) == 0) && 6407 (sinfo) && 6408 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6409 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6410 struct sctp_extrcvinfo *s_extra; 6411 6412 s_extra = (struct sctp_extrcvinfo *)sinfo; 6413 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6414 } 6415 if (hold_rlock == 1) { 6416 SCTP_INP_READ_UNLOCK(inp); 6417 } 6418 if (hold_sblock) { 6419 SOCKBUF_UNLOCK(&so->so_rcv); 6420 } 6421 if (sockbuf_lock) { 6422 SOCK_IO_RECV_UNLOCK(so); 6423 } 6424 6425 if (freecnt_applied) { 6426 /* 6427 * The lock on the socket buffer protects us so the free 6428 * code will stop. But since we used the socketbuf lock and 6429 * the sender uses the tcb_lock to increment, we need to use 6430 * the atomic add to the refcnt. 6431 */ 6432 if (stcb == NULL) { 6433 #ifdef INVARIANTS 6434 panic("stcb for refcnt has gone NULL?"); 6435 goto stage_left; 6436 #else 6437 goto stage_left; 6438 #endif 6439 } 6440 /* Save the value back for next time */ 6441 stcb->freed_by_sorcv_sincelast = freed_so_far; 6442 atomic_subtract_int(&stcb->asoc.refcnt, 1); 6443 } 6444 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6445 if (stcb) { 6446 sctp_misc_ints(SCTP_SORECV_DONE, 6447 freed_so_far, 6448 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6449 stcb->asoc.my_rwnd, 6450 SCTP_SBAVAIL(&so->so_rcv)); 6451 } else { 6452 sctp_misc_ints(SCTP_SORECV_DONE, 6453 freed_so_far, 6454 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6455 0, 6456 SCTP_SBAVAIL(&so->so_rcv)); 6457 } 6458 } 6459 stage_left: 6460 if (wakeup_read_socket) { 6461 sctp_sorwakeup(inp, so); 6462 } 6463 return (error); 6464 } 6465 6466 #ifdef SCTP_MBUF_LOGGING 6467 struct mbuf * 6468 sctp_m_free(struct mbuf *m) 6469 { 6470 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6471 sctp_log_mb(m, SCTP_MBUF_IFREE); 6472 } 6473 return (m_free(m)); 6474 } 6475 6476 void 6477 sctp_m_freem(struct mbuf *mb) 6478 { 6479 while (mb != NULL) 6480 mb = sctp_m_free(mb); 6481 } 6482 6483 #endif 6484 6485 int 6486 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6487 { 6488 /* 6489 * Given a local address. 
For all associations that holds the 6490 * address, request a peer-set-primary. 6491 */ 6492 struct sctp_ifa *ifa; 6493 struct sctp_laddr *wi; 6494 6495 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6496 if (ifa == NULL) { 6497 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6498 return (EADDRNOTAVAIL); 6499 } 6500 /* 6501 * Now that we have the ifa we must awaken the iterator with this 6502 * message. 6503 */ 6504 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6505 if (wi == NULL) { 6506 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6507 return (ENOMEM); 6508 } 6509 /* Now incr the count and int wi structure */ 6510 SCTP_INCR_LADDR_COUNT(); 6511 memset(wi, 0, sizeof(*wi)); 6512 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6513 wi->ifa = ifa; 6514 wi->action = SCTP_SET_PRIM_ADDR; 6515 atomic_add_int(&ifa->refcount, 1); 6516 6517 /* Now add it to the work queue */ 6518 SCTP_WQ_ADDR_LOCK(); 6519 /* 6520 * Should this really be a tailq? As it is we will process the 6521 * newest first :-0 6522 */ 6523 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6524 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6525 (struct sctp_inpcb *)NULL, 6526 (struct sctp_tcb *)NULL, 6527 (struct sctp_nets *)NULL); 6528 SCTP_WQ_ADDR_UNLOCK(); 6529 return (0); 6530 } 6531 6532 int 6533 sctp_soreceive(struct socket *so, 6534 struct sockaddr **psa, 6535 struct uio *uio, 6536 struct mbuf **mp0, 6537 struct mbuf **controlp, 6538 int *flagsp) 6539 { 6540 int error, fromlen; 6541 uint8_t sockbuf[256]; 6542 struct sockaddr *from; 6543 struct sctp_extrcvinfo sinfo; 6544 int filling_sinfo = 1; 6545 int flags; 6546 struct sctp_inpcb *inp; 6547 6548 inp = (struct sctp_inpcb *)so->so_pcb; 6549 /* pickup the assoc we are reading from */ 6550 if (inp == NULL) { 6551 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6552 return (EINVAL); 6553 } 6554 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6555 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6556 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6557 (controlp == NULL)) { 6558 /* user does not want the sndrcv ctl */ 6559 filling_sinfo = 0; 6560 } 6561 if (psa) { 6562 from = (struct sockaddr *)sockbuf; 6563 fromlen = sizeof(sockbuf); 6564 from->sa_len = 0; 6565 } else { 6566 from = NULL; 6567 fromlen = 0; 6568 } 6569 6570 if (filling_sinfo) { 6571 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6572 } 6573 if (flagsp != NULL) { 6574 flags = *flagsp; 6575 } else { 6576 flags = 0; 6577 } 6578 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6579 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6580 if (flagsp != NULL) { 6581 *flagsp = flags; 6582 } 6583 if (controlp != NULL) { 6584 /* copy back the sinfo in a CMSG format */ 6585 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6586 *controlp = sctp_build_ctl_nchunk(inp, 6587 (struct sctp_sndrcvinfo *)&sinfo); 6588 } else { 6589 *controlp = NULL; 6590 } 6591 } 6592 if (psa) { 6593 /* copy back the address info */ 6594 if (from && from->sa_len) { 6595 *psa = sodupsockaddr(from, M_NOWAIT); 6596 } else { 6597 *psa = NULL; 6598 } 6599 } 6600 return (error); 6601 } 6602 6603 int 6604 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6605 int totaddr, int *error) 6606 { 6607 int added = 0; 6608 int i; 6609 struct sctp_inpcb *inp; 6610 struct sockaddr *sa; 6611 size_t incr = 0; 6612 #ifdef INET 6613 struct sockaddr_in *sin; 6614 #endif 6615 #ifdef 
INET6 6616 struct sockaddr_in6 *sin6; 6617 #endif 6618 6619 sa = addr; 6620 inp = stcb->sctp_ep; 6621 *error = 0; 6622 for (i = 0; i < totaddr; i++) { 6623 switch (sa->sa_family) { 6624 #ifdef INET 6625 case AF_INET: 6626 incr = sizeof(struct sockaddr_in); 6627 sin = (struct sockaddr_in *)sa; 6628 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6629 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6630 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6631 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6632 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6633 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6634 *error = EINVAL; 6635 goto out_now; 6636 } 6637 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6638 SCTP_DONOT_SETSCOPE, 6639 SCTP_ADDR_IS_CONFIRMED)) { 6640 /* assoc gone no un-lock */ 6641 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6642 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6643 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6644 *error = ENOBUFS; 6645 goto out_now; 6646 } 6647 added++; 6648 break; 6649 #endif 6650 #ifdef INET6 6651 case AF_INET6: 6652 incr = sizeof(struct sockaddr_in6); 6653 sin6 = (struct sockaddr_in6 *)sa; 6654 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6655 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6656 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6657 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6658 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6659 *error = EINVAL; 6660 goto out_now; 6661 } 6662 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6663 SCTP_DONOT_SETSCOPE, 6664 SCTP_ADDR_IS_CONFIRMED)) { 6665 /* assoc gone no un-lock */ 6666 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6667 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6668 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6669 *error = ENOBUFS; 6670 goto out_now; 6671 } 6672 added++; 6673 break; 6674 #endif 6675 default: 6676 break; 6677 } 6678 sa = (struct sockaddr *)((caddr_t)sa + incr); 6679 } 6680 out_now: 6681 return (added); 6682 } 6683 6684 int 6685 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6686 unsigned int totaddr, 6687 unsigned int *num_v4, unsigned int *num_v6, 6688 unsigned int limit) 6689 { 6690 struct sockaddr *sa; 6691 struct sctp_tcb *stcb; 6692 unsigned int incr, at, i; 6693 6694 at = 0; 6695 sa = addr; 6696 *num_v6 = *num_v4 = 0; 6697 /* account and validate addresses */ 6698 if (totaddr == 0) { 6699 return (EINVAL); 6700 } 6701 for (i = 0; i < totaddr; i++) { 6702 if (at + sizeof(struct sockaddr) > limit) { 6703 return (EINVAL); 6704 } 6705 switch (sa->sa_family) { 6706 #ifdef INET 6707 case AF_INET: 6708 incr = (unsigned int)sizeof(struct sockaddr_in); 6709 if (sa->sa_len != incr) { 6710 return (EINVAL); 6711 } 6712 (*num_v4) += 1; 6713 break; 6714 #endif 6715 #ifdef INET6 6716 case AF_INET6: 6717 { 6718 struct sockaddr_in6 *sin6; 6719 6720 incr = (unsigned int)sizeof(struct sockaddr_in6); 6721 if (sa->sa_len != incr) { 6722 return (EINVAL); 6723 } 6724 sin6 = (struct sockaddr_in6 *)sa; 6725 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6726 /* Must be non-mapped for connectx */ 6727 return (EINVAL); 6728 } 6729 (*num_v6) += 1; 6730 break; 6731 } 6732 #endif 6733 default: 6734 return (EINVAL); 6735 } 6736 if ((at + incr) > limit) { 6737 return (EINVAL); 6738 } 6739 SCTP_INP_INCR_REF(inp); 6740 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6741 if (stcb != NULL) { 6742 SCTP_TCB_UNLOCK(stcb); 6743 return (EALREADY); 6744 } else { 6745 
SCTP_INP_DECR_REF(inp); 6746 } 6747 at += incr; 6748 sa = (struct sockaddr *)((caddr_t)sa + incr); 6749 } 6750 return (0); 6751 } 6752 6753 /* 6754 * sctp_bindx(ADD) for one address. 6755 * assumes all arguments are valid/checked by caller. 6756 */ 6757 void 6758 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6759 struct sockaddr *sa, uint32_t vrf_id, int *error, 6760 void *p) 6761 { 6762 #if defined(INET) && defined(INET6) 6763 struct sockaddr_in sin; 6764 #endif 6765 #ifdef INET6 6766 struct sockaddr_in6 *sin6; 6767 #endif 6768 #ifdef INET 6769 struct sockaddr_in *sinp; 6770 #endif 6771 struct sockaddr *addr_to_use; 6772 struct sctp_inpcb *lep; 6773 uint16_t port; 6774 6775 /* see if we're bound all already! */ 6776 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6777 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6778 *error = EINVAL; 6779 return; 6780 } 6781 switch (sa->sa_family) { 6782 #ifdef INET6 6783 case AF_INET6: 6784 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6785 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6786 *error = EINVAL; 6787 return; 6788 } 6789 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6790 /* can only bind v6 on PF_INET6 sockets */ 6791 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6792 *error = EINVAL; 6793 return; 6794 } 6795 sin6 = (struct sockaddr_in6 *)sa; 6796 port = sin6->sin6_port; 6797 #ifdef INET 6798 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6799 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6800 SCTP_IPV6_V6ONLY(inp)) { 6801 /* can't bind v4-mapped on PF_INET sockets */ 6802 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6803 *error = EINVAL; 6804 return; 6805 } 6806 in6_sin6_2_sin(&sin, sin6); 6807 addr_to_use = (struct sockaddr *)&sin; 6808 } else { 6809 addr_to_use = sa; 6810 } 6811 #else 6812 addr_to_use = sa; 6813 #endif 6814 break; 6815 #endif 6816 #ifdef INET 6817 case AF_INET: 6818 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6819 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6820 *error = EINVAL; 6821 return; 6822 } 6823 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6824 SCTP_IPV6_V6ONLY(inp)) { 6825 /* can't bind v4 on PF_INET sockets */ 6826 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6827 *error = EINVAL; 6828 return; 6829 } 6830 sinp = (struct sockaddr_in *)sa; 6831 port = sinp->sin_port; 6832 addr_to_use = sa; 6833 break; 6834 #endif 6835 default: 6836 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6837 *error = EINVAL; 6838 return; 6839 } 6840 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6841 if (p == NULL) { 6842 /* Can't get proc for Net/Open BSD */ 6843 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6844 *error = EINVAL; 6845 return; 6846 } 6847 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6848 return; 6849 } 6850 /* Validate the incoming port. */ 6851 if ((port != 0) && (port != inp->sctp_lport)) { 6852 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6853 *error = EINVAL; 6854 return; 6855 } 6856 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6857 if (lep == NULL) { 6858 /* add the address */ 6859 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6860 SCTP_ADD_IP_ADDRESS, vrf_id); 6861 } else { 6862 if (lep != inp) { 6863 *error = EADDRINUSE; 6864 } 6865 SCTP_INP_DECR_REF(lep); 6866 } 6867 } 6868 6869 /* 6870 * sctp_bindx(DELETE) for one address. 
6871 * assumes all arguments are valid/checked by caller. 6872 */ 6873 void 6874 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6875 struct sockaddr *sa, uint32_t vrf_id, int *error) 6876 { 6877 struct sockaddr *addr_to_use; 6878 #if defined(INET) && defined(INET6) 6879 struct sockaddr_in6 *sin6; 6880 struct sockaddr_in sin; 6881 #endif 6882 6883 /* see if we're bound all already! */ 6884 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6885 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6886 *error = EINVAL; 6887 return; 6888 } 6889 switch (sa->sa_family) { 6890 #ifdef INET6 6891 case AF_INET6: 6892 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6893 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6894 *error = EINVAL; 6895 return; 6896 } 6897 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6898 /* can only bind v6 on PF_INET6 sockets */ 6899 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6900 *error = EINVAL; 6901 return; 6902 } 6903 #ifdef INET 6904 sin6 = (struct sockaddr_in6 *)sa; 6905 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6906 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6907 SCTP_IPV6_V6ONLY(inp)) { 6908 /* can't bind mapped-v4 on PF_INET sockets */ 6909 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6910 *error = EINVAL; 6911 return; 6912 } 6913 in6_sin6_2_sin(&sin, sin6); 6914 addr_to_use = (struct sockaddr *)&sin; 6915 } else { 6916 addr_to_use = sa; 6917 } 6918 #else 6919 addr_to_use = sa; 6920 #endif 6921 break; 6922 #endif 6923 #ifdef INET 6924 case AF_INET: 6925 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6926 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6927 *error = EINVAL; 6928 return; 6929 } 6930 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6931 SCTP_IPV6_V6ONLY(inp)) { 6932 /* can't bind v4 on PF_INET sockets */ 6933 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6934 *error = EINVAL; 6935 return; 6936 } 6937 addr_to_use = sa; 6938 break; 6939 #endif 6940 default: 6941 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6942 *error = EINVAL; 6943 return; 6944 } 6945 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6946 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6947 vrf_id); 6948 } 6949 6950 /* 6951 * returns the valid local address count for an assoc, taking into account 6952 * all scoping rules 6953 */ 6954 int 6955 sctp_local_addr_count(struct sctp_tcb *stcb) 6956 { 6957 int loopback_scope; 6958 #if defined(INET) 6959 int ipv4_local_scope, ipv4_addr_legal; 6960 #endif 6961 #if defined(INET6) 6962 int local_scope, site_scope, ipv6_addr_legal; 6963 #endif 6964 struct sctp_vrf *vrf; 6965 struct sctp_ifn *sctp_ifn; 6966 struct sctp_ifa *sctp_ifa; 6967 int count = 0; 6968 6969 /* Turn on all the appropriate scopes */ 6970 loopback_scope = stcb->asoc.scope.loopback_scope; 6971 #if defined(INET) 6972 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6973 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6974 #endif 6975 #if defined(INET6) 6976 local_scope = stcb->asoc.scope.local_scope; 6977 site_scope = stcb->asoc.scope.site_scope; 6978 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6979 #endif 6980 SCTP_IPI_ADDR_RLOCK(); 6981 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6982 if (vrf == NULL) { 6983 /* no vrf, no addresses */ 6984 SCTP_IPI_ADDR_RUNLOCK(); 6985 return (0); 6986 } 6987 6988 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6989 /* 6990 * bound all case: go through all ifns on the vrf 6991 */ 6992 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6993 if ((loopback_scope == 0) && 6994 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6995 continue; 6996 } 6997 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6998 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6999 continue; 7000 switch (sctp_ifa->address.sa.sa_family) { 7001 #ifdef INET 7002 case AF_INET: 7003 if (ipv4_addr_legal) { 7004 struct sockaddr_in *sin; 7005 7006 sin = &sctp_ifa->address.sin; 7007 if (sin->sin_addr.s_addr == 0) { 7008 /* 7009 * skip unspecified 7010 * addrs 7011 */ 7012 continue; 7013 } 7014 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 7015 &sin->sin_addr) != 0) { 7016 continue; 7017 } 7018 if ((ipv4_local_scope == 0) && 7019 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 7020 continue; 7021 } 7022 /* count this one */ 7023 count++; 7024 } else { 7025 continue; 7026 } 7027 break; 7028 #endif 7029 #ifdef INET6 7030 case AF_INET6: 7031 if (ipv6_addr_legal) { 7032 struct sockaddr_in6 *sin6; 7033 7034 sin6 = &sctp_ifa->address.sin6; 7035 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 7036 continue; 7037 } 7038 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 7039 &sin6->sin6_addr) != 0) { 7040 continue; 7041 } 7042 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 7043 if (local_scope == 0) 7044 continue; 7045 if (sin6->sin6_scope_id == 0) { 7046 if (sa6_recoverscope(sin6) != 0) 7047 /* 7048 * 7049 * bad 7050 * link 7051 * 7052 * local 7053 * 7054 * address 7055 */ 7056 continue; 7057 } 7058 } 7059 if ((site_scope == 0) && 7060 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7061 continue; 7062 } 7063 /* count this one */ 7064 count++; 7065 } 7066 break; 7067 #endif 7068 default: 7069 /* TSNH */ 7070 break; 7071 } 7072 } 7073 } 7074 } else { 7075 /* 7076 * subset bound case 7077 */ 7078 struct sctp_laddr *laddr; 7079 7080 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7081 sctp_nxt_addr) { 7082 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7083 continue; 7084 } 7085 /* count this one */ 7086 count++; 7087 } 7088 } 7089 SCTP_IPI_ADDR_RUNLOCK(); 7090 return (count); 7091 } 7092 7093 #if defined(SCTP_LOCAL_TRACE_BUF) 7094 7095 void 7096 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7097 { 7098 uint32_t saveindex, newindex; 7099 7100 do { 7101 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7102 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7103 newindex = 1; 7104 } else { 7105 newindex = saveindex + 1; 7106 } 7107 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7108 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7109 saveindex = 0; 7110 } 7111 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7112 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7113 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7114 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7115 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7116 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7117 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7118 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7119 } 7120 7121 #endif 7122 static bool 7123 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7124 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7125 { 7126 struct ip *iph; 7127 #ifdef INET6 7128 struct ip6_hdr *ip6; 7129 #endif 7130 struct mbuf *sp, *last; 7131 struct udphdr *uhdr; 7132 uint16_t port; 7133 7134 if ((m->m_flags & M_PKTHDR) == 0) { 7135 /* Can't handle one that is not a pkt hdr */ 7136 goto out; 7137 } 7138 /* Pull the src port */ 7139 iph = mtod(m, struct ip *); 7140 uhdr = (struct udphdr *)((caddr_t)iph + off); 7141 port = uhdr->uh_sport; 7142 /* 7143 * Split out the mbuf chain. Leave the IP header in m, place the 7144 * rest in the sp. 7145 */ 7146 sp = m_split(m, off, M_NOWAIT); 7147 if (sp == NULL) { 7148 /* Gak, drop packet, we can't do a split */ 7149 goto out; 7150 } 7151 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7152 /* Gak, packet can't have an SCTP header in it - too small */ 7153 m_freem(sp); 7154 goto out; 7155 } 7156 /* Now pull up the UDP header and SCTP header together */ 7157 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7158 if (sp == NULL) { 7159 /* Gak pullup failed */ 7160 goto out; 7161 } 7162 /* Trim out the UDP header */ 7163 m_adj(sp, sizeof(struct udphdr)); 7164 7165 /* Now reconstruct the mbuf chain */ 7166 for (last = m; last->m_next; last = last->m_next); 7167 last->m_next = sp; 7168 m->m_pkthdr.len += sp->m_pkthdr.len; 7169 /* 7170 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7171 * checksum and it was valid. Since CSUM_DATA_VALID == 7172 * CSUM_SCTP_VALID this would imply that the HW also verified the 7173 * SCTP checksum. Therefore, clear the bit. 
7174 */ 7175 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7176 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7177 m->m_pkthdr.len, 7178 if_name(m->m_pkthdr.rcvif), 7179 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7180 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7181 iph = mtod(m, struct ip *); 7182 switch (iph->ip_v) { 7183 #ifdef INET 7184 case IPVERSION: 7185 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7186 sctp_input_with_port(m, off, port); 7187 break; 7188 #endif 7189 #ifdef INET6 7190 case IPV6_VERSION >> 4: 7191 ip6 = mtod(m, struct ip6_hdr *); 7192 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7193 sctp6_input_with_port(&m, &off, port); 7194 break; 7195 #endif 7196 default: 7197 goto out; 7198 break; 7199 } 7200 return (true); 7201 out: 7202 m_freem(m); 7203 7204 return (true); 7205 } 7206 7207 #ifdef INET 7208 static void 7209 sctp_recv_icmp_tunneled_packet(udp_tun_icmp_param_t param) 7210 { 7211 struct icmp *icmp = param.icmp; 7212 struct ip *outer_ip, *inner_ip; 7213 struct sctphdr *sh; 7214 struct udphdr *udp; 7215 struct sctp_inpcb *inp; 7216 struct sctp_tcb *stcb; 7217 struct sctp_nets *net; 7218 struct sctp_init_chunk *ch; 7219 struct sockaddr_in src, dst; 7220 uint8_t type, code; 7221 7222 inner_ip = &icmp->icmp_ip; 7223 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7224 if (ntohs(outer_ip->ip_len) < 7225 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7226 return; 7227 } 7228 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7229 sh = (struct sctphdr *)(udp + 1); 7230 memset(&src, 0, sizeof(struct sockaddr_in)); 7231 src.sin_family = AF_INET; 7232 src.sin_len = sizeof(struct sockaddr_in); 7233 src.sin_port = sh->src_port; 7234 src.sin_addr = inner_ip->ip_src; 7235 memset(&dst, 0, sizeof(struct sockaddr_in)); 7236 dst.sin_family = AF_INET; 7237 dst.sin_len = sizeof(struct sockaddr_in); 7238 dst.sin_port = sh->dest_port; 7239 dst.sin_addr = inner_ip->ip_dst; 7240 /* 7241 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7242 * holds our local endpoint address. Thus we reverse the dst and the 7243 * src in the lookup. 7244 */ 7245 inp = NULL; 7246 net = NULL; 7247 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7248 (struct sockaddr *)&src, 7249 &inp, &net, 1, 7250 SCTP_DEFAULT_VRFID); 7251 if ((stcb != NULL) && 7252 (net != NULL) && 7253 (inp != NULL)) { 7254 /* Check the UDP port numbers */ 7255 if ((udp->uh_dport != net->port) || 7256 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7257 SCTP_TCB_UNLOCK(stcb); 7258 return; 7259 } 7260 /* Check the verification tag */ 7261 if (ntohl(sh->v_tag) != 0) { 7262 /* 7263 * This must be the verification tag used for 7264 * sending out packets. We don't consider packets 7265 * reflecting the verification tag. 7266 */ 7267 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7268 SCTP_TCB_UNLOCK(stcb); 7269 return; 7270 } 7271 } else { 7272 if (ntohs(outer_ip->ip_len) >= 7273 sizeof(struct ip) + 7274 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7275 /* 7276 * In this case we can check if we got an 7277 * INIT chunk and if the initiate tag 7278 * matches. 
7279 */ 7280 ch = (struct sctp_init_chunk *)(sh + 1); 7281 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7282 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7283 SCTP_TCB_UNLOCK(stcb); 7284 return; 7285 } 7286 } else { 7287 SCTP_TCB_UNLOCK(stcb); 7288 return; 7289 } 7290 } 7291 type = icmp->icmp_type; 7292 code = icmp->icmp_code; 7293 if ((type == ICMP_UNREACH) && 7294 (code == ICMP_UNREACH_PORT)) { 7295 code = ICMP_UNREACH_PROTOCOL; 7296 } 7297 sctp_notify(inp, stcb, net, type, code, 7298 ntohs(inner_ip->ip_len), 7299 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7300 } else { 7301 if ((stcb == NULL) && (inp != NULL)) { 7302 /* reduce ref-count */ 7303 SCTP_INP_WLOCK(inp); 7304 SCTP_INP_DECR_REF(inp); 7305 SCTP_INP_WUNLOCK(inp); 7306 } 7307 if (stcb) { 7308 SCTP_TCB_UNLOCK(stcb); 7309 } 7310 } 7311 return; 7312 } 7313 #endif 7314 7315 #ifdef INET6 7316 static void 7317 sctp_recv_icmp6_tunneled_packet(udp_tun_icmp_param_t param) 7318 { 7319 struct ip6ctlparam *ip6cp = param.ip6cp; 7320 struct sctp_inpcb *inp; 7321 struct sctp_tcb *stcb; 7322 struct sctp_nets *net; 7323 struct sctphdr sh; 7324 struct udphdr udp; 7325 struct sockaddr_in6 src, dst; 7326 uint8_t type, code; 7327 7328 /* 7329 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7330 */ 7331 if (ip6cp->ip6c_m == NULL) { 7332 return; 7333 } 7334 /* 7335 * Check if we can safely examine the ports and the verification tag 7336 * of the SCTP common header. 7337 */ 7338 if (ip6cp->ip6c_m->m_pkthdr.len < 7339 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7340 return; 7341 } 7342 /* Copy out the UDP header. */ 7343 memset(&udp, 0, sizeof(struct udphdr)); 7344 m_copydata(ip6cp->ip6c_m, 7345 ip6cp->ip6c_off, 7346 sizeof(struct udphdr), 7347 (caddr_t)&udp); 7348 /* Copy out the port numbers and the verification tag. */ 7349 memset(&sh, 0, sizeof(struct sctphdr)); 7350 m_copydata(ip6cp->ip6c_m, 7351 ip6cp->ip6c_off + sizeof(struct udphdr), 7352 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7353 (caddr_t)&sh); 7354 memset(&src, 0, sizeof(struct sockaddr_in6)); 7355 src.sin6_family = AF_INET6; 7356 src.sin6_len = sizeof(struct sockaddr_in6); 7357 src.sin6_port = sh.src_port; 7358 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7359 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7360 return; 7361 } 7362 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7363 dst.sin6_family = AF_INET6; 7364 dst.sin6_len = sizeof(struct sockaddr_in6); 7365 dst.sin6_port = sh.dest_port; 7366 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7367 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7368 return; 7369 } 7370 inp = NULL; 7371 net = NULL; 7372 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7373 (struct sockaddr *)&src, 7374 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7375 if ((stcb != NULL) && 7376 (net != NULL) && 7377 (inp != NULL)) { 7378 /* Check the UDP port numbers */ 7379 if ((udp.uh_dport != net->port) || 7380 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7381 SCTP_TCB_UNLOCK(stcb); 7382 return; 7383 } 7384 /* Check the verification tag */ 7385 if (ntohl(sh.v_tag) != 0) { 7386 /* 7387 * This must be the verification tag used for 7388 * sending out packets. We don't consider packets 7389 * reflecting the verification tag. 
7390 */ 7391 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7392 SCTP_TCB_UNLOCK(stcb); 7393 return; 7394 } 7395 } else { 7396 if (ip6cp->ip6c_m->m_pkthdr.len >= 7397 ip6cp->ip6c_off + sizeof(struct udphdr) + 7398 sizeof(struct sctphdr) + 7399 sizeof(struct sctp_chunkhdr) + 7400 offsetof(struct sctp_init, a_rwnd)) { 7401 /* 7402 * In this case we can check if we got an 7403 * INIT chunk and if the initiate tag 7404 * matches. 7405 */ 7406 uint32_t initiate_tag; 7407 uint8_t chunk_type; 7408 7409 m_copydata(ip6cp->ip6c_m, 7410 ip6cp->ip6c_off + 7411 sizeof(struct udphdr) + 7412 sizeof(struct sctphdr), 7413 sizeof(uint8_t), 7414 (caddr_t)&chunk_type); 7415 m_copydata(ip6cp->ip6c_m, 7416 ip6cp->ip6c_off + 7417 sizeof(struct udphdr) + 7418 sizeof(struct sctphdr) + 7419 sizeof(struct sctp_chunkhdr), 7420 sizeof(uint32_t), 7421 (caddr_t)&initiate_tag); 7422 if ((chunk_type != SCTP_INITIATION) || 7423 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7424 SCTP_TCB_UNLOCK(stcb); 7425 return; 7426 } 7427 } else { 7428 SCTP_TCB_UNLOCK(stcb); 7429 return; 7430 } 7431 } 7432 type = ip6cp->ip6c_icmp6->icmp6_type; 7433 code = ip6cp->ip6c_icmp6->icmp6_code; 7434 if ((type == ICMP6_DST_UNREACH) && 7435 (code == ICMP6_DST_UNREACH_NOPORT)) { 7436 type = ICMP6_PARAM_PROB; 7437 code = ICMP6_PARAMPROB_NEXTHEADER; 7438 } 7439 sctp6_notify(inp, stcb, net, type, code, 7440 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7441 } else { 7442 if ((stcb == NULL) && (inp != NULL)) { 7443 /* reduce inp's ref-count */ 7444 SCTP_INP_WLOCK(inp); 7445 SCTP_INP_DECR_REF(inp); 7446 SCTP_INP_WUNLOCK(inp); 7447 } 7448 if (stcb) { 7449 SCTP_TCB_UNLOCK(stcb); 7450 } 7451 } 7452 } 7453 #endif 7454 7455 void 7456 sctp_over_udp_stop(void) 7457 { 7458 /* 7459 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7460 * for writing! 7461 */ 7462 #ifdef INET 7463 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7464 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7465 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7466 } 7467 #endif 7468 #ifdef INET6 7469 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7470 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7471 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7472 } 7473 #endif 7474 } 7475 7476 int 7477 sctp_over_udp_start(void) 7478 { 7479 uint16_t port; 7480 int ret; 7481 #ifdef INET 7482 struct sockaddr_in sin; 7483 #endif 7484 #ifdef INET6 7485 struct sockaddr_in6 sin6; 7486 #endif 7487 /* 7488 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7489 * for writing! 7490 */ 7491 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7492 if (ntohs(port) == 0) { 7493 /* Must have a port set */ 7494 return (EINVAL); 7495 } 7496 #ifdef INET 7497 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7498 /* Already running -- must stop first */ 7499 return (EALREADY); 7500 } 7501 #endif 7502 #ifdef INET6 7503 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7504 /* Already running -- must stop first */ 7505 return (EALREADY); 7506 } 7507 #endif 7508 #ifdef INET 7509 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7510 SOCK_DGRAM, IPPROTO_UDP, 7511 curthread->td_ucred, curthread))) { 7512 sctp_over_udp_stop(); 7513 return (ret); 7514 } 7515 /* Call the special UDP hook. */ 7516 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7517 sctp_recv_udp_tunneled_packet, 7518 sctp_recv_icmp_tunneled_packet, 7519 NULL))) { 7520 sctp_over_udp_stop(); 7521 return (ret); 7522 } 7523 /* Ok, we have a socket, bind it to the port. 
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}

void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}

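/*
 * Illustrative sketch (not part of the original source): the two helpers
 * above store and retrieve a path MTU in the TCP host cache, keyed by
 * peer address and FIB number, so a discovered value can be reused
 * later.  A caller wanting an effective MTU for a destination could
 * combine a cached value with other known limits through sctp_min_mtu(),
 * which treats zero as "unknown":
 *
 *	uint32_t eff_mtu;
 *
 *	eff_mtu = sctp_min_mtu(if_mtu, route_mtu,
 *	    sctp_hc_get_mtu(&net->ro._l_addr, fibnum));
 *	if (eff_mtu == 0)
 *		eff_mtu = SCTP_DEFAULT_MTU;
 *
 * Here if_mtu, route_mtu, net and fibnum are placeholders; see the real
 * callers of these helpers for the exact sources of those values.
 */
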
void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	    (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
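
/*
 * Usage sketch (illustrative, not from the original file): the
 * association state word in stcb->asoc.state packs one primary state
 * (the bits covered by SCTP_STATE_MASK) together with substate flag
 * bits.  sctp_set_state() replaces only the primary state and, when
 * entering one of the SHUTDOWN states, clears the SHUTDOWN_PENDING
 * substate; sctp_add_substate() ORs in additional flag bits.  A typical
 * shutdown sequence might therefore look like:
 *
 *	sctp_add_substate(stcb, SCTP_STATE_SHUTDOWN_PENDING);
 *	...			(wait for outstanding data to be acked)
 *	sctp_set_state(stcb, SCTP_STATE_SHUTDOWN_SENT);
 *
 * Under KDTRACE_HOOKS both helpers fire the state__change probe for
 * transitions of interest, so state machine activity can be observed
 * with DTrace without modifying the code.
 */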