/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.sb.stcb = stcb;
    sctp_clog.x.sb.so_sbcc = SCTP_SBAVAIL(sb);
    if (stcb)
        sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
    else
        sctp_clog.x.sb.stcb_sbcc = 0;
    sctp_clog.x.sb.incr = incr;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_SB,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.close.inp = (void *)inp;
    sctp_clog.x.close.sctp_flags = inp->sctp_flags;
    if (stcb) {
        sctp_clog.x.close.stcb = (void *)stcb;
        sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
    } else {
        sctp_clog.x.close.stcb = 0;
        sctp_clog.x.close.state = 0;
    }
    sctp_clog.x.close.loc = loc;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_CLOSE,
        0,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.rto.net = (void *)net;
    sctp_clog.x.rto.rtt = net->rtt / 1000;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RTT,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.strlog.stcb = stcb;
    sctp_clog.x.strlog.n_tsn = tsn;
    sctp_clog.x.strlog.n_sseq = sseq;
    sctp_clog.x.strlog.e_tsn = 0;
    sctp_clog.x.strlog.e_sseq = 0;
    sctp_clog.x.strlog.strm = stream;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_STRM,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.nagle.stcb = (void *)stcb;
    sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
    sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
    sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
    sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_NAGLE,
        action,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.sack.cumack = cumack;
    sctp_clog.x.sack.oldcumack = old_cumack;
    sctp_clog.x.sack.tsn = tsn;
    sctp_clog.x.sack.numGaps = gaps;
    sctp_clog.x.sack.numDups = dups;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_SACK,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.map.base = map;
    sctp_clog.x.map.cum = cum;
    sctp_clog.x.map.high = high;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MAP,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.fr.largest_tsn = biggest_tsn;
    sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
    sctp_clog.x.fr.tsn = tsn;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_FR,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.mb.mp = m;
    sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
    sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
    sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
    if (SCTP_BUF_IS_EXTENDED(m)) {
        sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
        sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
    } else {
        sctp_clog.x.mb.ext = 0;
        sctp_clog.x.mb.refcnt = 0;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MBUF,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
    struct mbuf *mat;

    for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
        sctp_log_mb(mat, from);
    }
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    if (control == NULL) {
        SCTP_PRINTF("Gak log of NULL?\n");
        return;
    }
    sctp_clog.x.strlog.stcb = control->stcb;
    sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
    sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
    sctp_clog.x.strlog.strm = control->sinfo_stream;
    if (poschk != NULL) {
        sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
        sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
    } else {
        sctp_clog.x.strlog.e_tsn = 0;
        sctp_clog.x.strlog.e_sseq = 0;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_STRM,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.cwnd.net = net;
    if (stcb->asoc.send_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_send = 255;
    else
        sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
    if (stcb->asoc.stream_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_str = 255;
    else
        sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

    if (net) {
        sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
        sctp_clog.x.cwnd.inflight = net->flight_size;
        sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
        sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
        sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
    }
    if (SCTP_CWNDLOG_PRESEND == from) {
        sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
    }
    sctp_clog.x.cwnd.cwnd_augment = augment;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_CWND,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    if (inp) {
        sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
    } else {
        sctp_clog.x.lock.sock = (void *)NULL;
    }
    sctp_clog.x.lock.inp = (void *)inp;
    if (stcb) {
        sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
    } else {
        sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
    }
    if (inp) {
        sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
        sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
    } else {
        sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
    }
    sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
    if (inp && (inp->sctp_socket)) {
        sctp_clog.x.lock.sock_lock = mtx_owned(SOCK_MTX(inp->sctp_socket));
        sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_rcv));
        sctp_clog.x.lock.socksndbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_snd));
    } else {
        sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_LOCK_EVENT,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.cwnd.net = net;
    sctp_clog.x.cwnd.cwnd_new_value = error;
    sctp_clog.x.cwnd.inflight = net->flight_size;
    sctp_clog.x.cwnd.cwnd_augment = burst;
    if (stcb->asoc.send_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_send = 255;
    else
        sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
    if (stcb->asoc.stream_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_str = 255;
    else
        sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MAXBURST,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.rwnd.rwnd = peers_rwnd;
    sctp_clog.x.rwnd.send_size = snd_size;
    sctp_clog.x.rwnd.overhead = overhead;
    sctp_clog.x.rwnd.new_rwnd = 0;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RWND,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.rwnd.rwnd = peers_rwnd;
    sctp_clog.x.rwnd.send_size = flight_size;
    sctp_clog.x.rwnd.overhead = overhead;
    sctp_clog.x.rwnd.new_rwnd = a_rwndval;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RWND,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.mbcnt.total_queue_size = total_oq;
    sctp_clog.x.mbcnt.size_change = book;
    sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
    sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MBCNT,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_MISC_EVENT,
        from,
        a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.wake.stcb = (void *)stcb;
    sctp_clog.x.wake.wake_cnt = wake_cnt;
    sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
    sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
    sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

    if (stcb->asoc.stream_queue_cnt < 0xff)
        sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
    else
        sctp_clog.x.wake.stream_qcnt = 0xff;

    if (stcb->asoc.chunks_on_out_queue < 0xff)
        sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
    else
        sctp_clog.x.wake.chunks_on_oque = 0xff;

    sctp_clog.x.wake.sctpflags = 0;
    /* set in the deferred mode stuff */
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
        sctp_clog.x.wake.sctpflags |= 1;
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
        sctp_clog.x.wake.sctpflags |= 2;
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
        sctp_clog.x.wake.sctpflags |= 4;
    /* what about the sb */
    if (stcb->sctp_socket) {
        struct socket *so = stcb->sctp_socket;

        sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
    } else {
        sctp_clog.x.wake.sbflags = 0xff;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_WAKE,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
    sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
    sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
    sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
    sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
    sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
    sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_BLOCK,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
    /* May need to fix this if ktrdump does not work */
    return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static
void
sctp_print_audit_report(void)
{
    int i;
    int cnt;

    cnt = 0;
    for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
        if ((sctp_audit_data[i][0] == 0xe0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if (sctp_audit_data[i][0] == 0xf0) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if ((sctp_audit_data[i][0] == 0xc0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            SCTP_PRINTF("\n");
            cnt = 0;
        }
        SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
            (uint32_t)sctp_audit_data[i][1]);
        cnt++;
        if ((cnt % 14) == 0)
            SCTP_PRINTF("\n");
    }
    for (i = 0; i < sctp_audit_indx; i++) {
        if ((sctp_audit_data[i][0] == 0xe0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if (sctp_audit_data[i][0] == 0xf0) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if ((sctp_audit_data[i][0] == 0xc0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            SCTP_PRINTF("\n");
            cnt = 0;
        }
        SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
            (uint32_t)sctp_audit_data[i][1]);
        cnt++;
        if ((cnt % 14) == 0)
            SCTP_PRINTF("\n");
    }
    SCTP_PRINTF("\n");
}

void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    int resend_cnt, tot_out, rep, tot_book_cnt;
    struct sctp_nets *lnet;
    struct sctp_tmit_chunk *chk;

    sctp_audit_data[sctp_audit_indx][0] = 0xAA;
    sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
    if (inp == NULL) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0x01;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        return;
    }
    if (stcb == NULL) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0x02;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        return;
    }
    sctp_audit_data[sctp_audit_indx][0] = 0xA1;
    sctp_audit_data[sctp_audit_indx][1] =
        (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
    rep = 0;
    tot_book_cnt = 0;
    resend_cnt = tot_out = 0;
    TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
        if (chk->sent == SCTP_DATAGRAM_RESEND) {
            resend_cnt++;
        } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
            tot_out += chk->book_size;
            tot_book_cnt++;
        }
    }
    if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA1;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
            resend_cnt, stcb->asoc.sent_queue_retran_cnt);
        rep = 1;
        stcb->asoc.sent_queue_retran_cnt = resend_cnt;
        sctp_audit_data[sctp_audit_indx][0] = 0xA2;
        sctp_audit_data[sctp_audit_indx][1] =
            (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
    }
    if (tot_out != stcb->asoc.total_flight) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA2;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
            (int)stcb->asoc.total_flight);
        stcb->asoc.total_flight = tot_out;
    }
    if (tot_book_cnt != stcb->asoc.total_flight_count) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA5;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

        stcb->asoc.total_flight_count = tot_book_cnt;
    }
    tot_out = 0;
    TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
        tot_out += lnet->flight_size;
    }
    if (tot_out != stcb->asoc.total_flight) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA3;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("real flight:%d net total was %d\n",
            stcb->asoc.total_flight, tot_out);
        /* now corrective action */
        TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
            tot_out = 0;
            TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
                if ((chk->whoTo == lnet) &&
                    (chk->sent < SCTP_DATAGRAM_RESEND)) {
                    tot_out += chk->book_size;
                }
            }
            if (lnet->flight_size != tot_out) {
                SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
                    (void *)lnet, lnet->flight_size,
                    tot_out);
                lnet->flight_size = tot_out;
            }
        }
    }
    if (rep) {
        sctp_print_audit_report();
    }
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

    sctp_audit_data[sctp_audit_indx][0] = ev;
    sctp_audit_data[sctp_audit_indx][1] = fd;
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code the time to be positive and
 * know that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
    uint64_t temp;
    uint32_t ticks;

    if (hz == 1000) {
        ticks = msecs;
    } else {
        temp = (((uint64_t)msecs * hz) + 999) / 1000;
        if (temp > UINT32_MAX) {
            ticks = UINT32_MAX;
        } else {
            ticks = (uint32_t)temp;
        }
    }
    return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
    uint64_t temp;
    uint32_t msecs;

    if (hz == 1000) {
        msecs = ticks;
    } else {
        temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
        if (temp > UINT32_MAX) {
            msecs = UINT32_MAX;
        } else {
            msecs = (uint32_t)temp;
        }
    }
    return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
    uint64_t temp;
    uint32_t ticks;

    temp = (uint64_t)secs * hz;
    if (temp > UINT32_MAX) {
        ticks = UINT32_MAX;
    } else {
        ticks = (uint32_t)temp;
    }
    return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
    uint64_t temp;
    uint32_t secs;

    temp = ((uint64_t)ticks + (hz - 1)) / hz;
    if (temp > UINT32_MAX) {
        secs = UINT32_MAX;
    } else {
        secs = (uint32_t)temp;
    }
    return (secs);
}

/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
    struct sctp_inpcb *inp;
    struct sctp_nets *net;

    inp = stcb->sctp_ep;

    sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
    sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
    sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
    sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
    TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
        sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
        sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
    }
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
    struct sctp_inpcb *inp;
    struct sctp_nets *net;

    inp = stcb->sctp_ep;
    sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
    sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
    if (stop_assoc_kill_timer) {
        sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
    }
    sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
    sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
    sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
    /* Mobility adaptation */
    sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
    TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
        sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
        sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
        sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
        sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
        sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
        sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
    }
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
    68,
    296,
    508,
    512,
    544,
    576,
    1004,
    1492,
    1500,
    1536,
    2000,
    2048,
    4352,
    4464,
    8168,
    17912,
    32000,
    65532
};

/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
    uint32_t i;

    val &= 0xfffffffc;
    if (val <= sctp_mtu_sizes[0]) {
        return (val);
    }
    for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
        if (val <= sctp_mtu_sizes[i]) {
            break;
        }
    }
    KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
        ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
    return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4
 * smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
    /* select another MTU that is just bigger than this one */
    uint32_t i;

    val &= 0xfffffffc;
    for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
        if (val < sctp_mtu_sizes[i]) {
            KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
                ("sctp_mtu_sizes[%u] not a multiple of 4", i));
            return (sctp_mtu_sizes[i]);
        }
    }
    return (val);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
    /*
     * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
     * our counter. The result becomes our good random numbers and we
     * then setup to give these out. Note that we do no locking to
     * protect this. This is ok, since if competing folks call this we
     * will get more gobbled gook in the random store which is what we
     * want. There is a danger that two callers will use the same random
     * numbers, but that's ok too since that is random as well :->
     */
    m->store_at = 0;
    (void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
        sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
        sizeof(m->random_counter), (uint8_t *)m->random_store);
    m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
    /*
     * A true implementation should use a random selection process to
     * get the initial stream sequence number, using RFC 1750 as a good
     * guideline.
     */
    uint32_t x, *xp;
    uint8_t *p;
    int store_at, new_store;

    if (inp->initial_sequence_debug != 0) {
        uint32_t ret;

        ret = inp->initial_sequence_debug;
        inp->initial_sequence_debug++;
        return (ret);
    }
retry:
    store_at = inp->store_at;
    new_store = store_at + sizeof(uint32_t);
    if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
        new_store = 0;
    }
    if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
        goto retry;
    }
    if (new_store == 0) {
        /* Refill the random store */
        sctp_fill_random_store(inp);
    }
    p = &inp->random_store[store_at];
    xp = (uint32_t *)p;
    x = *xp;
    return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
    uint32_t x;
    struct timeval now;

    if (check) {
        (void)SCTP_GETTIME_TIMEVAL(&now);
    }
    for (;;) {
        x = sctp_select_initial_TSN(&inp->sctp_ep);
        if (x == 0) {
            /* we never use 0 */
            continue;
        }
        if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
            break;
        }
    }
    return (x);
}

int32_t
sctp_map_assoc_state(int kernel_state)
{
    int32_t user_state;

    if (kernel_state & SCTP_STATE_WAS_ABORTED) {
        user_state = SCTP_CLOSED;
    } else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
        user_state = SCTP_SHUTDOWN_PENDING;
    } else {
        switch (kernel_state & SCTP_STATE_MASK) {
        case SCTP_STATE_EMPTY:
            user_state = SCTP_CLOSED;
            break;
        case SCTP_STATE_INUSE:
            user_state = SCTP_CLOSED;
            break;
        case SCTP_STATE_COOKIE_WAIT:
            user_state = SCTP_COOKIE_WAIT;
            break;
        case SCTP_STATE_COOKIE_ECHOED:
            user_state = SCTP_COOKIE_ECHOED;
            break;
        case SCTP_STATE_OPEN:
            user_state = SCTP_ESTABLISHED;
            break;
        case SCTP_STATE_SHUTDOWN_SENT:
            user_state = SCTP_SHUTDOWN_SENT;
            break;
        case SCTP_STATE_SHUTDOWN_RECEIVED:
            user_state = SCTP_SHUTDOWN_RECEIVED;
            break;
        case SCTP_STATE_SHUTDOWN_ACK_SENT:
            user_state = SCTP_SHUTDOWN_ACK_SENT;
            break;
        default:
            user_state = SCTP_CLOSED;
            break;
        }
    }
    return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t initial_tsn, uint32_t vrf_id,
    uint16_t o_strms)
{
    struct sctp_association *asoc;

    /*
     * Anything set to zero is taken care of by the allocation routine's
     * bzero.
     */

    /*
     * Up front, select what scoping to apply on addresses I tell my peer.
     * Not sure what to do with these right now, we will need to come up
     * with a way to set them. We may need to pass them through from the
     * caller in the sctp_aloc_assoc() function.
     */
    int i;
#if defined(SCTP_DETAILED_STR_STATS)
    int j;
#endif

    asoc = &stcb->asoc;
    /* init all variables to a known value. */
    SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
    asoc->max_burst = inp->sctp_ep.max_burst;
    asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
    asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
    asoc->cookie_life = inp->sctp_ep.def_cookie_life;
    asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
    asoc->ecn_supported = inp->ecn_supported;
    asoc->prsctp_supported = inp->prsctp_supported;
    asoc->auth_supported = inp->auth_supported;
    asoc->asconf_supported = inp->asconf_supported;
    asoc->reconfig_supported = inp->reconfig_supported;
    asoc->nrsack_supported = inp->nrsack_supported;
    asoc->pktdrop_supported = inp->pktdrop_supported;
    asoc->idata_supported = inp->idata_supported;
    asoc->rcv_edmid = inp->rcv_edmid;
    asoc->snd_edmid = SCTP_EDMID_NONE;
    asoc->sctp_cmt_pf = (uint8_t)0;
    asoc->sctp_frag_point = inp->sctp_frag_point;
    asoc->sctp_features = inp->sctp_features;
    asoc->default_dscp = inp->sctp_ep.default_dscp;
    asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
    if (inp->sctp_ep.default_flowlabel) {
        asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
    } else {
        if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
            asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
            asoc->default_flowlabel &= 0x000fffff;
            asoc->default_flowlabel |= 0x80000000;
        } else {
            asoc->default_flowlabel = 0;
        }
    }
#endif
    asoc->sb_send_resv = 0;
    if (override_tag) {
        asoc->my_vtag = override_tag;
    } else {
        asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
    }
    /* Get the nonce tags */
    asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
    asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
    asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
    asoc->tsn_in_at = 0;
    asoc->tsn_out_at = 0;
    asoc->tsn_in_wrapped = 0;
    asoc->tsn_out_wrapped = 0;
    asoc->cumack_log_at = 0;
    asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
    asoc->fs_index = 0;
#endif
    asoc->refcnt = 0;
    asoc->assoc_up_sent = 0;
    if (override_tag) {
        asoc->init_seq_number = initial_tsn;
    } else {
        asoc->init_seq_number = sctp_select_initial_TSN(&inp->sctp_ep);
    }
    asoc->asconf_seq_out = asoc->init_seq_number;
    asoc->str_reset_seq_out = asoc->init_seq_number;
    asoc->sending_seq = asoc->init_seq_number;
    asoc->asconf_seq_out_acked = asoc->init_seq_number - 1;
    /* we are optimistic here */
    asoc->peer_supports_nat = 0;
    asoc->sent_queue_retran_cnt = 0;

    /* for CMT */
    asoc->last_net_cmt_send_started = NULL;

    asoc->last_acked_seq = asoc->init_seq_number - 1;
    asoc->advanced_peer_ack_point = asoc->init_seq_number - 1;
    asoc->asconf_seq_in = asoc->init_seq_number - 1;

    /* here we are different, we hold the next one we expect */
    asoc->str_reset_seq_in = asoc->init_seq_number;

    asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
    asoc->initial_rto = inp->sctp_ep.initial_rto;

    asoc->default_mtu = inp->sctp_ep.default_mtu;
    asoc->max_init_times = inp->sctp_ep.max_init_times;
    asoc->max_send_times = inp->sctp_ep.max_send_times;
    asoc->def_net_failure = inp->sctp_ep.def_net_failure;
    asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
    asoc->free_chunk_cnt = 0;

    asoc->iam_blocking = 0;
    asoc->context = inp->sctp_context;
    asoc->local_strreset_support = inp->local_strreset_support;
    asoc->def_send = inp->def_send;
    asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
    asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
    asoc->pr_sctp_cnt = 0;
    asoc->total_output_queue_size = 0;

    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
        asoc->scope.ipv6_addr_legal = 1;
        if (SCTP_IPV6_V6ONLY(inp) == 0) {
            asoc->scope.ipv4_addr_legal = 1;
        } else {
            asoc->scope.ipv4_addr_legal = 0;
        }
    } else {
        asoc->scope.ipv6_addr_legal = 0;
        asoc->scope.ipv4_addr_legal = 1;
    }

    asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
    asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

    asoc->smallest_mtu = 0;
    asoc->minrto = inp->sctp_ep.sctp_minrto;
    asoc->maxrto = inp->sctp_ep.sctp_maxrto;

    asoc->stream_locked_on = 0;
    asoc->ecn_echo_cnt_onq = 0;
    asoc->stream_locked = 0;

    asoc->send_sack = 1;

    LIST_INIT(&asoc->sctp_restricted_addrs);

    TAILQ_INIT(&asoc->nets);
    TAILQ_INIT(&asoc->pending_reply_queue);
    TAILQ_INIT(&asoc->asconf_ack_sent);
    /* Setup to fill the hb random cache at first HB */
    asoc->hb_random_idx = 4;

    asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

    stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
    stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

    stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
    stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

    /*
     * Now the stream parameters, here we allocate space for all streams
     * that we request by default.
     */
    asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
        o_strms;
    SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
        asoc->streamoutcnt * sizeof(struct sctp_stream_out),
        SCTP_M_STRMO);
    if (asoc->strmout == NULL) {
        /* big trouble no memory */
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    SCTP_TCB_LOCK(stcb);
    for (i = 0; i < asoc->streamoutcnt; i++) {
        /*
         * inbound side must be set to 0xffff, also NOTE when we get
         * the INIT-ACK back (for INIT sender) we MUST reduce the
         * count (streamoutcnt) but first check if we sent to any of
         * the upper streams that were dropped (if some were). Those
         * that were dropped must be notified to the upper layer as
         * failed to send.
         */
        TAILQ_INIT(&asoc->strmout[i].outqueue);
        asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
        asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
        for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
            asoc->strmout[i].abandoned_sent[j] = 0;
            asoc->strmout[i].abandoned_unsent[j] = 0;
        }
#else
        asoc->strmout[i].abandoned_sent[0] = 0;
        asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
        asoc->strmout[i].next_mid_ordered = 0;
        asoc->strmout[i].next_mid_unordered = 0;
        asoc->strmout[i].sid = i;
        asoc->strmout[i].last_msg_incomplete = 0;
        asoc->strmout[i].state = SCTP_STREAM_OPENING;
    }
    asoc->ss_functions.sctp_ss_init(stcb, asoc);
    SCTP_TCB_UNLOCK(stcb);

    /* Now the mapping array */
    asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
    SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
        SCTP_M_MAP);
    if (asoc->mapping_array == NULL) {
        SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    memset(asoc->mapping_array, 0, asoc->mapping_array_size);
    SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
        SCTP_M_MAP);
    if (asoc->nr_mapping_array == NULL) {
        SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
        SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

    /* Now the init of the other outqueues */
    TAILQ_INIT(&asoc->free_chunks);
    TAILQ_INIT(&asoc->control_send_queue);
    TAILQ_INIT(&asoc->asconf_send_queue);
    TAILQ_INIT(&asoc->send_queue);
    TAILQ_INIT(&asoc->sent_queue);
    TAILQ_INIT(&asoc->resetHead);
    asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
    TAILQ_INIT(&asoc->asconf_queue);
    /* authentication fields */
    asoc->authinfo.random = NULL;
    asoc->authinfo.active_keyid = 0;
    asoc->authinfo.assoc_key = NULL;
    asoc->authinfo.assoc_keyid = 0;
    asoc->authinfo.recv_key = NULL;
    asoc->authinfo.recv_keyid = 0;
    LIST_INIT(&asoc->shared_keys);
    asoc->marked_retrans = 0;
    asoc->port = inp->sctp_ep.port;
    asoc->timoinit = 0;
    asoc->timodata = 0;
    asoc->timosack = 0;
    asoc->timoshutdown = 0;
    asoc->timoheartbeat = 0;
    asoc->timocookie = 0;
    asoc->timoshutdownack = 0;
    (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
    asoc->discontinuity_time = asoc->start_time;
    for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
        asoc->abandoned_unsent[i] = 0;
        asoc->abandoned_sent[i] = 0;
    }
    /*
     * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
     * freed later when the association is freed.
     */
    return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
    unsigned int i, limit;

    SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
        asoc->mapping_array_size,
        asoc->mapping_array_base_tsn,
        asoc->cumulative_tsn,
        asoc->highest_tsn_inside_map,
        asoc->highest_tsn_inside_nr_map);
    for (limit = asoc->mapping_array_size; limit > 1; limit--) {
        if (asoc->mapping_array[limit - 1] != 0) {
            break;
        }
    }
    SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
    for (i = 0; i < limit; i++) {
        SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
    }
    if (limit % 16)
        SCTP_PRINTF("\n");
    for (limit = asoc->mapping_array_size; limit > 1; limit--) {
        if (asoc->nr_mapping_array[limit - 1]) {
            break;
        }
    }
    SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
    for (i = 0; i < limit; i++) {
        SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
    }
    if (limit % 16)
        SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
    /* mapping array needs to grow */
    uint8_t *new_array1, *new_array2;
    uint32_t new_size;

    new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
    SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
    SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
    if ((new_array1 == NULL) || (new_array2 == NULL)) {
        /* can't get more, forget it */
        SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
        if (new_array1) {
            SCTP_FREE(new_array1, SCTP_M_MAP);
        }
        if (new_array2) {
            SCTP_FREE(new_array2, SCTP_M_MAP);
        }
        return (-1);
    }
    memset(new_array1, 0, new_size);
    memset(new_array2, 0, new_size);
    memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
    memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
    SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
    SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
    asoc->mapping_array = new_array1;
    asoc->nr_mapping_array = new_array2;
    asoc->mapping_array_size = new_size;
    return (0);
}

static void
sctp_iterator_work(struct sctp_iterator *it)
{
    struct epoch_tracker et;
    struct sctp_inpcb *tinp;
    int iteration_count = 0;
    int inp_skip = 0;
    int first_in = 1;

    NET_EPOCH_ENTER(et);
    SCTP_INP_INFO_RLOCK();
    SCTP_ITERATOR_LOCK();
    sctp_it_ctl.cur_it = it;
    if (it->inp) {
        SCTP_INP_RLOCK(it->inp);
        SCTP_INP_DECR_REF(it->inp);
    }
    if (it->inp == NULL) {
        /* iterator is complete */
done_with_iterator:
        sctp_it_ctl.cur_it = NULL;
        SCTP_ITERATOR_UNLOCK();
        SCTP_INP_INFO_RUNLOCK();
        if (it->function_atend != NULL) {
            (*it->function_atend) (it->pointer, it->val);
        }
        SCTP_FREE(it, SCTP_M_ITER);
        NET_EPOCH_EXIT(et);
        return;
    }
select_a_new_ep:
    if (first_in) {
        first_in = 0;
    } else {
        SCTP_INP_RLOCK(it->inp);
    }
    while (((it->pcb_flags) &&
        ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
        ((it->pcb_features) &&
        ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
        /* endpoint flags or features don't match, so keep looking */
        if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
            SCTP_INP_RUNLOCK(it->inp);
            goto done_with_iterator;
        }
        tinp = it->inp;
        it->inp = LIST_NEXT(it->inp, sctp_list);
        it->stcb = NULL;
        SCTP_INP_RUNLOCK(tinp);
        if (it->inp == NULL) {
            goto done_with_iterator;
        }
        SCTP_INP_RLOCK(it->inp);
    }
    /* now go through each assoc which is in the desired state */
    if (it->done_current_ep == 0) {
        if (it->function_inp != NULL)
            inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
        it->done_current_ep = 1;
    }
    if (it->stcb == NULL) {
        /* run the per instance function */
        it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
    }
    if ((inp_skip) || it->stcb == NULL) {
        if (it->function_inp_end != NULL) {
            inp_skip = (*it->function_inp_end) (it->inp,
                it->pointer,
                it->val);
        }
        SCTP_INP_RUNLOCK(it->inp);
        goto no_stcb;
    }
    while (it->stcb != NULL) {
        SCTP_TCB_LOCK(it->stcb);
        if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
            /* not in the right state... keep looking */
            SCTP_TCB_UNLOCK(it->stcb);
            goto next_assoc;
        }
        /* see if we have limited out the iterator loop */
        iteration_count++;
        if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
            /* Pause to let others grab the lock */
            atomic_add_int(&it->stcb->asoc.refcnt, 1);
            SCTP_TCB_UNLOCK(it->stcb);
            SCTP_INP_INCR_REF(it->inp);
            SCTP_INP_RUNLOCK(it->inp);
            SCTP_ITERATOR_UNLOCK();
            SCTP_INP_INFO_RUNLOCK();
            SCTP_INP_INFO_RLOCK();
            SCTP_ITERATOR_LOCK();
            if (sctp_it_ctl.iterator_flags) {
                /* We won't be staying here */
                SCTP_INP_DECR_REF(it->inp);
                atomic_subtract_int(&it->stcb->asoc.refcnt, 1);
                if (sctp_it_ctl.iterator_flags &
                    SCTP_ITERATOR_STOP_CUR_IT) {
                    sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
                    goto done_with_iterator;
                }
                if (sctp_it_ctl.iterator_flags &
                    SCTP_ITERATOR_STOP_CUR_INP) {
                    sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
                    goto no_stcb;
                }
                /* If we reach here huh? */
                SCTP_PRINTF("Unknown it ctl flag %x\n",
                    sctp_it_ctl.iterator_flags);
                sctp_it_ctl.iterator_flags = 0;
            }
            SCTP_INP_RLOCK(it->inp);
            SCTP_INP_DECR_REF(it->inp);
            SCTP_TCB_LOCK(it->stcb);
            atomic_subtract_int(&it->stcb->asoc.refcnt, 1);
            iteration_count = 0;
        }
        KASSERT(it->inp == it->stcb->sctp_ep,
            ("%s: stcb %p does not belong to inp %p, but inp %p",
            __func__, it->stcb, it->inp, it->stcb->sctp_ep));
        SCTP_INP_RLOCK_ASSERT(it->inp);
        SCTP_TCB_LOCK_ASSERT(it->stcb);

        /* run function on this one */
        (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
        SCTP_INP_RLOCK_ASSERT(it->inp);
        SCTP_TCB_LOCK_ASSERT(it->stcb);

        /*
         * we lie here, it really needs to have its own type but
         * first I must verify that this won't affect things :-0
         */
        if (it->no_chunk_output == 0) {
            sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
            SCTP_INP_RLOCK_ASSERT(it->inp);
            SCTP_TCB_LOCK_ASSERT(it->stcb);
        }

        SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
        it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
        if (it->stcb == NULL) {
            /* Run last function */
            if (it->function_inp_end != NULL) {
                inp_skip = (*it->function_inp_end) (it->inp,
                    it->pointer,
                    it->val);
            }
        }
    }
    SCTP_INP_RUNLOCK(it->inp);
no_stcb:
    /* done with all assocs on this endpoint, move on to next endpoint */
    it->done_current_ep = 0;
    if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
        it->inp = NULL;
    } else {
        it->inp = LIST_NEXT(it->inp, sctp_list);
    }
    it->stcb = NULL;
    if (it->inp == NULL) {
        goto done_with_iterator;
    }
    goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
    struct sctp_iterator *it;

    /* This function is called with the WQ lock in place */
    sctp_it_ctl.iterator_running = 1;
    while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
        /* now lets work on this one */
        TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
        SCTP_IPI_ITERATOR_WQ_UNLOCK();
        CURVNET_SET(it->vn);
        sctp_iterator_work(it);
        CURVNET_RESTORE();
        SCTP_IPI_ITERATOR_WQ_LOCK();
        /* sa_ignore FREED_MEMORY */
    }
    sctp_it_ctl.iterator_running = 0;
    return;
}

static void
sctp_handle_addr_wq(void)
{
    /* deal with the ADDR wq from the rtsock calls */
    struct sctp_laddr *wi, *nwi;
    struct sctp_asconf_iterator *asc;

    SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
        sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
    if (asc == NULL) {
        /* Try later, no memory */
        sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
            (struct sctp_inpcb *)NULL,
            (struct sctp_tcb *)NULL,
            (struct sctp_nets *)NULL);
        return;
    }
    LIST_INIT(&asc->list_of_work);
    asc->cnt = 0;

    LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
        LIST_REMOVE(wi, sctp_nxt_addr);
        LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
        asc->cnt++;
    }

    if (asc->cnt == 0) {
        SCTP_FREE(asc, SCTP_M_ASC_IT);
    } else {
        int ret;

        ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
            sctp_asconf_iterator_stcb,
            NULL, /* No ep end for boundall */
            SCTP_PCB_FLAGS_BOUNDALL,
            SCTP_PCB_ANY_FEATURES,
            SCTP_ASOC_ANY_STATE,
            (void *)asc, 0,
            sctp_asconf_iterator_end,
            NULL, 0);
        if (ret) {
            SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
            /*
             * Freeing if we are stopping or put back on the
             * addr_wq.
             */
            if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
                sctp_asconf_iterator_end(asc, 0);
            } else {
                LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
                    LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
                }
                SCTP_FREE(asc, SCTP_M_ASC_IT);
            }
        }
    }
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */

void
sctp_timeout_handler(void *t)
{
    struct epoch_tracker et;
    struct timeval tv;
    struct sctp_inpcb *inp;
    struct sctp_tcb *stcb;
    struct sctp_nets *net;
    struct sctp_timer *tmr;
    struct mbuf *op_err;
    int type;
    int i, secret;
    bool did_output, released_asoc_reference;

    /*
     * If inp, stcb or net are not NULL, then references to these were
     * added when the timer was started, and must be released before
     * this function returns.
     */
    tmr = (struct sctp_timer *)t;
    inp = (struct sctp_inpcb *)tmr->ep;
    stcb = (struct sctp_tcb *)tmr->tcb;
    net = (struct sctp_nets *)tmr->net;
    CURVNET_SET((struct vnet *)tmr->vnet);
    NET_EPOCH_ENTER(et);
    released_asoc_reference = false;

#ifdef SCTP_AUDITING_ENABLED
    sctp_audit_log(0xF0, (uint8_t)tmr->type);
    sctp_auditing(3, inp, stcb, net);
#endif

    /* sanity checks... */
    KASSERT(tmr->self == NULL || tmr->self == tmr,
        ("sctp_timeout_handler: tmr->self corrupted"));
    KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
        ("sctp_timeout_handler: invalid timer type %d", tmr->type));
    type = tmr->type;
    KASSERT(stcb == NULL || stcb->sctp_ep == inp,
        ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
        type, stcb, stcb->sctp_ep));
    tmr->stopped_from = 0xa001;
    if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
        SCTPDBG(SCTP_DEBUG_TIMER2,
            "Timer type %d handler exiting due to CLOSED association.\n",
            type);
        goto out_decr;
    }
    tmr->stopped_from = 0xa002;
    SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
    if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
        SCTPDBG(SCTP_DEBUG_TIMER2,
            "Timer type %d handler exiting due to not being active.\n",
            type);
        goto out_decr;
    }

    tmr->stopped_from = 0xa003;
    if (stcb) {
        SCTP_TCB_LOCK(stcb);
        /*
         * Release reference so that association can be freed if
         * necessary below. This is safe now that we have acquired
         * the lock.
         */
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
        released_asoc_reference = true;
        if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
            ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
            (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
            SCTPDBG(SCTP_DEBUG_TIMER2,
                "Timer type %d handler exiting due to CLOSED association.\n",
                type);
            goto out;
        }
    } else if (inp != NULL) {
        SCTP_INP_WLOCK(inp);
    } else {
        SCTP_WQ_ADDR_LOCK();
    }

    /* Record in stopped_from which timeout occurred. */
    tmr->stopped_from = type;
    /* mark as being serviced now */
    if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
        /*
         * Callout has been rescheduled.
         */
        goto out;
    }
    if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
        /*
         * Not active, so no action.
         */
        goto out;
    }
    SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

    /* call the handler for the appropriate timer type */
    switch (type) {
    case SCTP_TIMER_TYPE_SEND:
        KASSERT(inp != NULL && stcb != NULL && net != NULL,
            ("timeout of type %d: inp = %p, stcb = %p, net = %p",
            type, inp, stcb, net));
        SCTP_STAT_INCR(sctps_timodata);
        stcb->asoc.timodata++;
        stcb->asoc.num_send_timers_up--;
        if (stcb->asoc.num_send_timers_up < 0) {
            stcb->asoc.num_send_timers_up = 0;
        }
        SCTP_TCB_LOCK_ASSERT(stcb);
        if (sctp_t3rxt_timer(inp, stcb, net)) {
            /* no need to unlock on tcb its gone */

            goto out_decr;
        }
        SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
        did_output = true;
        if ((stcb->asoc.num_send_timers_up == 0) &&
            (stcb->asoc.sent_queue_cnt > 0)) {
            struct sctp_tmit_chunk *chk;

            /*
             * Safeguard. If there are chunks on the sent queue
             * somewhere but no timers are running, something is
             * wrong... so we start a timer on the first chunk
             * on the sent queue, on whatever net it is sent to.
1848 */ 1849 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1850 if (chk->whoTo != NULL) { 1851 break; 1852 } 1853 } 1854 if (chk != NULL) { 1855 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1856 } 1857 } 1858 break; 1859 case SCTP_TIMER_TYPE_INIT: 1860 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1861 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1862 type, inp, stcb, net)); 1863 SCTP_STAT_INCR(sctps_timoinit); 1864 stcb->asoc.timoinit++; 1865 if (sctp_t1init_timer(inp, stcb, net)) { 1866 /* no need to unlock on tcb its gone */ 1867 goto out_decr; 1868 } 1869 did_output = false; 1870 break; 1871 case SCTP_TIMER_TYPE_RECV: 1872 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1873 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1874 type, inp, stcb, net)); 1875 SCTP_STAT_INCR(sctps_timosack); 1876 stcb->asoc.timosack++; 1877 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1878 #ifdef SCTP_AUDITING_ENABLED 1879 sctp_auditing(4, inp, stcb, NULL); 1880 #endif 1881 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1882 did_output = true; 1883 break; 1884 case SCTP_TIMER_TYPE_SHUTDOWN: 1885 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1886 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1887 type, inp, stcb, net)); 1888 SCTP_STAT_INCR(sctps_timoshutdown); 1889 stcb->asoc.timoshutdown++; 1890 if (sctp_shutdown_timer(inp, stcb, net)) { 1891 /* no need to unlock on tcb its gone */ 1892 goto out_decr; 1893 } 1894 #ifdef SCTP_AUDITING_ENABLED 1895 sctp_auditing(4, inp, stcb, net); 1896 #endif 1897 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1898 did_output = true; 1899 break; 1900 case SCTP_TIMER_TYPE_HEARTBEAT: 1901 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1902 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1903 type, inp, stcb, net)); 1904 SCTP_STAT_INCR(sctps_timoheartbeat); 1905 stcb->asoc.timoheartbeat++; 1906 if (sctp_heartbeat_timer(inp, stcb, net)) { 1907 /* no need to unlock on tcb its gone */ 1908 goto out_decr; 1909 } 1910 #ifdef SCTP_AUDITING_ENABLED 1911 sctp_auditing(4, inp, stcb, net); 1912 #endif 1913 if ((net->dest_state & SCTP_ADDR_NOHB) == 0) { 1914 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1915 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1916 did_output = true; 1917 } else { 1918 did_output = false; 1919 } 1920 break; 1921 case SCTP_TIMER_TYPE_COOKIE: 1922 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1923 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1924 type, inp, stcb, net)); 1925 SCTP_STAT_INCR(sctps_timocookie); 1926 stcb->asoc.timocookie++; 1927 if (sctp_cookie_timer(inp, stcb, net)) { 1928 /* no need to unlock on tcb its gone */ 1929 goto out_decr; 1930 } 1931 #ifdef SCTP_AUDITING_ENABLED 1932 sctp_auditing(4, inp, stcb, net); 1933 #endif 1934 /* 1935 * We consider T3 and Cookie timer pretty much the same with 1936 * respect to where from in chunk_output. 
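 * (Both the T3 and the COOKIE case pass SCTP_OUTPUT_FROM_T3, as the call
 * just below shows.)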
1937 */ 1938 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1939 did_output = true; 1940 break; 1941 case SCTP_TIMER_TYPE_NEWCOOKIE: 1942 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1943 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1944 type, inp, stcb, net)); 1945 SCTP_STAT_INCR(sctps_timosecret); 1946 (void)SCTP_GETTIME_TIMEVAL(&tv); 1947 inp->sctp_ep.time_of_secret_change = (unsigned int)tv.tv_sec; 1948 inp->sctp_ep.last_secret_number = 1949 inp->sctp_ep.current_secret_number; 1950 inp->sctp_ep.current_secret_number++; 1951 if (inp->sctp_ep.current_secret_number >= 1952 SCTP_HOW_MANY_SECRETS) { 1953 inp->sctp_ep.current_secret_number = 0; 1954 } 1955 secret = (int)inp->sctp_ep.current_secret_number; 1956 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1957 inp->sctp_ep.secret_key[secret][i] = 1958 sctp_select_initial_TSN(&inp->sctp_ep); 1959 } 1960 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1961 did_output = false; 1962 break; 1963 case SCTP_TIMER_TYPE_PATHMTURAISE: 1964 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1965 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1966 type, inp, stcb, net)); 1967 SCTP_STAT_INCR(sctps_timopathmtu); 1968 sctp_pathmtu_timer(inp, stcb, net); 1969 did_output = false; 1970 break; 1971 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1972 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1973 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1974 type, inp, stcb, net)); 1975 if (sctp_shutdownack_timer(inp, stcb, net)) { 1976 /* no need to unlock on tcb its gone */ 1977 goto out_decr; 1978 } 1979 SCTP_STAT_INCR(sctps_timoshutdownack); 1980 stcb->asoc.timoshutdownack++; 1981 #ifdef SCTP_AUDITING_ENABLED 1982 sctp_auditing(4, inp, stcb, net); 1983 #endif 1984 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1985 did_output = true; 1986 break; 1987 case SCTP_TIMER_TYPE_ASCONF: 1988 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1989 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1990 type, inp, stcb, net)); 1991 SCTP_STAT_INCR(sctps_timoasconf); 1992 if (sctp_asconf_timer(inp, stcb, net)) { 1993 /* no need to unlock on tcb its gone */ 1994 goto out_decr; 1995 } 1996 #ifdef SCTP_AUDITING_ENABLED 1997 sctp_auditing(4, inp, stcb, net); 1998 #endif 1999 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 2000 did_output = true; 2001 break; 2002 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2003 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2004 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2005 type, inp, stcb, net)); 2006 SCTP_STAT_INCR(sctps_timoshutdownguard); 2007 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 2008 "Shutdown guard timer expired"); 2009 sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED); 2010 /* no need to unlock on tcb its gone */ 2011 goto out_decr; 2012 case SCTP_TIMER_TYPE_AUTOCLOSE: 2013 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2014 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2015 type, inp, stcb, net)); 2016 SCTP_STAT_INCR(sctps_timoautoclose); 2017 sctp_autoclose_timer(inp, stcb); 2018 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2019 did_output = true; 2020 break; 2021 case SCTP_TIMER_TYPE_STRRESET: 2022 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2023 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2024 type, inp, stcb, net)); 2025 SCTP_STAT_INCR(sctps_timostrmrst); 2026 if 
(sctp_strreset_timer(inp, stcb)) { 2027 /* no need to unlock on tcb its gone */ 2028 goto out_decr; 2029 } 2030 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2031 did_output = true; 2032 break; 2033 case SCTP_TIMER_TYPE_INPKILL: 2034 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2035 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2036 type, inp, stcb, net)); 2037 SCTP_STAT_INCR(sctps_timoinpkill); 2038 /* 2039 * special case, take away our increment since WE are the 2040 * killer 2041 */ 2042 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2043 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2044 SCTP_INP_DECR_REF(inp); 2045 SCTP_INP_WUNLOCK(inp); 2046 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2047 SCTP_CALLED_FROM_INPKILL_TIMER); 2048 inp = NULL; 2049 goto out_decr; 2050 case SCTP_TIMER_TYPE_ASOCKILL: 2051 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2052 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2053 type, inp, stcb, net)); 2054 SCTP_STAT_INCR(sctps_timoassockill); 2055 /* Can we free it yet? */ 2056 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2057 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2058 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2059 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2060 /* 2061 * free asoc, always unlocks (or destroys), so prevent a 2062 * duplicate unlock or an unlock of a freed mtx :-0 2063 */ 2064 stcb = NULL; 2065 goto out_decr; 2066 case SCTP_TIMER_TYPE_ADDR_WQ: 2067 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2068 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2069 type, inp, stcb, net)); 2070 sctp_handle_addr_wq(); 2071 did_output = true; 2072 break; 2073 case SCTP_TIMER_TYPE_PRIM_DELETED: 2074 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2075 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2076 type, inp, stcb, net)); 2077 SCTP_STAT_INCR(sctps_timodelprim); 2078 sctp_delete_prim_timer(inp, stcb); 2079 did_output = false; 2080 break; 2081 default: 2082 #ifdef INVARIANTS 2083 panic("Unknown timer type %d", type); 2084 #else 2085 goto out; 2086 #endif 2087 } 2088 #ifdef SCTP_AUDITING_ENABLED 2089 sctp_audit_log(0xF1, (uint8_t)type); 2090 if (inp != NULL) 2091 sctp_auditing(5, inp, stcb, net); 2092 #endif 2093 if (did_output && (stcb != NULL)) { 2094 /* 2095 * Now we need to clean up the control chunk chain if an 2096 * ECNE is on it. It must be marked as UNSENT again so the next 2097 * call will continue to send it until such time that we get 2098 * a CWR, to remove it. It is, however, less likely that we 2099 * will find an ECN echo on the chain. 2100 */ 2101 sctp_fix_ecn_echo(&stcb->asoc); 2102 } 2103 out: 2104 if (stcb != NULL) { 2105 SCTP_TCB_UNLOCK(stcb); 2106 } else if (inp != NULL) { 2107 SCTP_INP_WUNLOCK(inp); 2108 } else { 2109 SCTP_WQ_ADDR_UNLOCK(); 2110 } 2111 2112 out_decr: 2113 /* These reference counts were incremented in sctp_timer_start(). */ 2114 if (inp != NULL) { 2115 SCTP_INP_DECR_REF(inp); 2116 } 2117 if ((stcb != NULL) && !released_asoc_reference) { 2118 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2119 } 2120 if (net != NULL) { 2121 sctp_free_remote_addr(net); 2122 } 2123 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2124 CURVNET_RESTORE(); 2125 NET_EPOCH_EXIT(et); 2126 } 2127 2128 /*- 2129 * The following table shows which parameters must be provided 2130 * when calling sctp_timer_start(). For parameters not being 2131 * provided, NULL must be used.
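 *
 * As an illustrative sketch (not a call copied verbatim from a caller in
 * this file), arming the retransmission timer for a destination looks like
 * sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net), while the address
 * work-queue timer takes no pointers at all:
 * sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, NULL, NULL, NULL).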
2132 * 2133 * |Name |inp |stcb|net | 2134 * |-----------------------------|----|----|----| 2135 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2136 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2137 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2138 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2139 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2140 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2141 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2142 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2143 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2144 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2145 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2146 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2147 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2148 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2149 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2150 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2151 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2152 * 2153 */ 2154 2155 void 2156 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2157 struct sctp_nets *net) 2158 { 2159 struct sctp_timer *tmr; 2160 uint32_t to_ticks; 2161 uint32_t rndval, jitter; 2162 2163 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2164 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2165 t_type, stcb, stcb->sctp_ep)); 2166 tmr = NULL; 2167 if (stcb != NULL) { 2168 SCTP_TCB_LOCK_ASSERT(stcb); 2169 } else if (inp != NULL) { 2170 SCTP_INP_WLOCK_ASSERT(inp); 2171 } else { 2172 SCTP_WQ_ADDR_LOCK_ASSERT(); 2173 } 2174 if (stcb != NULL) { 2175 /* 2176 * Don't restart timer on association that's about to be 2177 * killed. 2178 */ 2179 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2180 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2181 SCTPDBG(SCTP_DEBUG_TIMER2, 2182 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2183 t_type, inp, stcb, net); 2184 return; 2185 } 2186 /* Don't restart timer on net that's been removed. */ 2187 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2188 SCTPDBG(SCTP_DEBUG_TIMER2, 2189 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2190 t_type, inp, stcb, net); 2191 return; 2192 } 2193 } 2194 switch (t_type) { 2195 case SCTP_TIMER_TYPE_SEND: 2196 /* Here we use the RTO timer. */ 2197 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2198 #ifdef INVARIANTS 2199 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2200 t_type, inp, stcb, net); 2201 #else 2202 return; 2203 #endif 2204 } 2205 tmr = &net->rxt_timer; 2206 if (net->RTO == 0) { 2207 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2208 } else { 2209 to_ticks = sctp_msecs_to_ticks(net->RTO); 2210 } 2211 break; 2212 case SCTP_TIMER_TYPE_INIT: 2213 /* 2214 * Here we use the INIT timer default usually about 1 2215 * second. 2216 */ 2217 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2218 #ifdef INVARIANTS 2219 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2220 t_type, inp, stcb, net); 2221 #else 2222 return; 2223 #endif 2224 } 2225 tmr = &net->rxt_timer; 2226 if (net->RTO == 0) { 2227 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2228 } else { 2229 to_ticks = sctp_msecs_to_ticks(net->RTO); 2230 } 2231 break; 2232 case SCTP_TIMER_TYPE_RECV: 2233 /* 2234 * Here we use the Delayed-Ack timer value from the inp, 2235 * usually about 200ms. 
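 * As a rough illustration, assuming the common hz == 1000 case, a
 * delayed_ack of 200 ms makes sctp_msecs_to_ticks() below return 200 ticks.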
2236 */ 2237 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2238 #ifdef INVARIANTS 2239 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2240 t_type, inp, stcb, net); 2241 #else 2242 return; 2243 #endif 2244 } 2245 tmr = &stcb->asoc.dack_timer; 2246 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2247 break; 2248 case SCTP_TIMER_TYPE_SHUTDOWN: 2249 /* Here we use the RTO of the destination. */ 2250 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2251 #ifdef INVARIANTS 2252 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2253 t_type, inp, stcb, net); 2254 #else 2255 return; 2256 #endif 2257 } 2258 tmr = &net->rxt_timer; 2259 if (net->RTO == 0) { 2260 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2261 } else { 2262 to_ticks = sctp_msecs_to_ticks(net->RTO); 2263 } 2264 break; 2265 case SCTP_TIMER_TYPE_HEARTBEAT: 2266 /* 2267 * The net is used here so that we can add in the RTO. Even 2268 * though we use a different timer. We also add the HB timer 2269 * PLUS a random jitter. 2270 */ 2271 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2272 #ifdef INVARIANTS 2273 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2274 t_type, inp, stcb, net); 2275 #else 2276 return; 2277 #endif 2278 } 2279 if ((net->dest_state & SCTP_ADDR_NOHB) && 2280 ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) { 2281 SCTPDBG(SCTP_DEBUG_TIMER2, 2282 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2283 t_type, inp, stcb, net); 2284 return; 2285 } 2286 tmr = &net->hb_timer; 2287 if (net->RTO == 0) { 2288 to_ticks = stcb->asoc.initial_rto; 2289 } else { 2290 to_ticks = net->RTO; 2291 } 2292 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2293 jitter = rndval % to_ticks; 2294 if (to_ticks > 1) { 2295 to_ticks >>= 1; 2296 } 2297 if (jitter < (UINT32_MAX - to_ticks)) { 2298 to_ticks += jitter; 2299 } else { 2300 to_ticks = UINT32_MAX; 2301 } 2302 if (!((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2303 (net->dest_state & SCTP_ADDR_REACHABLE)) && 2304 ((net->dest_state & SCTP_ADDR_PF) == 0)) { 2305 if (net->heart_beat_delay < (UINT32_MAX - to_ticks)) { 2306 to_ticks += net->heart_beat_delay; 2307 } else { 2308 to_ticks = UINT32_MAX; 2309 } 2310 } 2311 /* 2312 * Now we must convert the to_ticks that are now in ms to 2313 * ticks. 2314 */ 2315 to_ticks = sctp_msecs_to_ticks(to_ticks); 2316 break; 2317 case SCTP_TIMER_TYPE_COOKIE: 2318 /* 2319 * Here we can use the RTO timer from the network since one 2320 * RTT was complete. If a retransmission happened then we 2321 * will be using the RTO initial value. 2322 */ 2323 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2324 #ifdef INVARIANTS 2325 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2326 t_type, inp, stcb, net); 2327 #else 2328 return; 2329 #endif 2330 } 2331 tmr = &net->rxt_timer; 2332 if (net->RTO == 0) { 2333 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2334 } else { 2335 to_ticks = sctp_msecs_to_ticks(net->RTO); 2336 } 2337 break; 2338 case SCTP_TIMER_TYPE_NEWCOOKIE: 2339 /* 2340 * Nothing needed but the endpoint here usually about 60 2341 * minutes. 
2342 */ 2343 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2344 #ifdef INVARIANTS 2345 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2346 t_type, inp, stcb, net); 2347 #else 2348 return; 2349 #endif 2350 } 2351 tmr = &inp->sctp_ep.signature_change; 2352 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2353 break; 2354 case SCTP_TIMER_TYPE_PATHMTURAISE: 2355 /* 2356 * Here we use the value found in the EP for PMTUD, usually 2357 * about 10 minutes. 2358 */ 2359 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2360 #ifdef INVARIANTS 2361 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2362 t_type, inp, stcb, net); 2363 #else 2364 return; 2365 #endif 2366 } 2367 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2368 SCTPDBG(SCTP_DEBUG_TIMER2, 2369 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2370 t_type, inp, stcb, net); 2371 return; 2372 } 2373 tmr = &net->pmtu_timer; 2374 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2375 break; 2376 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2377 /* Here we use the RTO of the destination. */ 2378 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2379 #ifdef INVARIANTS 2380 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2381 t_type, inp, stcb, net); 2382 #else 2383 return; 2384 #endif 2385 } 2386 tmr = &net->rxt_timer; 2387 if (net->RTO == 0) { 2388 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2389 } else { 2390 to_ticks = sctp_msecs_to_ticks(net->RTO); 2391 } 2392 break; 2393 case SCTP_TIMER_TYPE_ASCONF: 2394 /* 2395 * Here the timer comes from the stcb but its value is from 2396 * the net's RTO. 2397 */ 2398 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2399 #ifdef INVARIANTS 2400 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2401 t_type, inp, stcb, net); 2402 #else 2403 return; 2404 #endif 2405 } 2406 tmr = &stcb->asoc.asconf_timer; 2407 if (net->RTO == 0) { 2408 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2409 } else { 2410 to_ticks = sctp_msecs_to_ticks(net->RTO); 2411 } 2412 break; 2413 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2414 /* 2415 * Here we use the endpoints shutdown guard timer usually 2416 * about 3 minutes. 2417 */ 2418 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2419 #ifdef INVARIANTS 2420 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2421 t_type, inp, stcb, net); 2422 #else 2423 return; 2424 #endif 2425 } 2426 tmr = &stcb->asoc.shut_guard_timer; 2427 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2428 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2429 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2430 } else { 2431 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2432 } 2433 } else { 2434 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2435 } 2436 break; 2437 case SCTP_TIMER_TYPE_AUTOCLOSE: 2438 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2439 #ifdef INVARIANTS 2440 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2441 t_type, inp, stcb, net); 2442 #else 2443 return; 2444 #endif 2445 } 2446 tmr = &stcb->asoc.autoclose_timer; 2447 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2448 break; 2449 case SCTP_TIMER_TYPE_STRRESET: 2450 /* 2451 * Here the timer comes from the stcb but its value is from 2452 * the net's RTO. 
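 * Note that when the callout is armed further down, tmr->net is deliberately
 * left NULL for this timer type (see the SCTP_TIMER_TYPE_STRRESET special
 * case before SCTP_OS_TIMER_START), even though the net's RTO is used here.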
2453 */ 2454 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2455 #ifdef INVARIANTS 2456 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2457 t_type, inp, stcb, net); 2458 #else 2459 return; 2460 #endif 2461 } 2462 tmr = &stcb->asoc.strreset_timer; 2463 if (net->RTO == 0) { 2464 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2465 } else { 2466 to_ticks = sctp_msecs_to_ticks(net->RTO); 2467 } 2468 break; 2469 case SCTP_TIMER_TYPE_INPKILL: 2470 /* 2471 * The inp is setup to die. We re-use the signature_change 2472 * timer since that has stopped and we are in the GONE 2473 * state. 2474 */ 2475 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2476 #ifdef INVARIANTS 2477 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2478 t_type, inp, stcb, net); 2479 #else 2480 return; 2481 #endif 2482 } 2483 tmr = &inp->sctp_ep.signature_change; 2484 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2485 break; 2486 case SCTP_TIMER_TYPE_ASOCKILL: 2487 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2488 #ifdef INVARIANTS 2489 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2490 t_type, inp, stcb, net); 2491 #else 2492 return; 2493 #endif 2494 } 2495 tmr = &stcb->asoc.strreset_timer; 2496 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2497 break; 2498 case SCTP_TIMER_TYPE_ADDR_WQ: 2499 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2500 #ifdef INVARIANTS 2501 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2502 t_type, inp, stcb, net); 2503 #else 2504 return; 2505 #endif 2506 } 2507 /* Only 1 tick away :-) */ 2508 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2509 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2510 break; 2511 case SCTP_TIMER_TYPE_PRIM_DELETED: 2512 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2513 #ifdef INVARIANTS 2514 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2515 t_type, inp, stcb, net); 2516 #else 2517 return; 2518 #endif 2519 } 2520 tmr = &stcb->asoc.delete_prim_timer; 2521 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2522 break; 2523 default: 2524 #ifdef INVARIANTS 2525 panic("Unknown timer type %d", t_type); 2526 #else 2527 return; 2528 #endif 2529 } 2530 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2531 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2532 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2533 /* 2534 * We do NOT allow you to have it already running. If it is, 2535 * we leave the current one up unchanged. 2536 */ 2537 SCTPDBG(SCTP_DEBUG_TIMER2, 2538 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2539 t_type, inp, stcb, net); 2540 return; 2541 } 2542 /* At this point we can proceed. */ 2543 if (t_type == SCTP_TIMER_TYPE_SEND) { 2544 stcb->asoc.num_send_timers_up++; 2545 } 2546 tmr->stopped_from = 0; 2547 tmr->type = t_type; 2548 tmr->ep = (void *)inp; 2549 tmr->tcb = (void *)stcb; 2550 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2551 tmr->net = NULL; 2552 } else { 2553 tmr->net = (void *)net; 2554 } 2555 tmr->self = (void *)tmr; 2556 tmr->vnet = (void *)curvnet; 2557 tmr->ticks = sctp_get_tick_count(); 2558 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2559 SCTPDBG(SCTP_DEBUG_TIMER2, 2560 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2561 t_type, to_ticks, inp, stcb, net); 2562 /* 2563 * If this is a newly scheduled callout, as opposed to a 2564 * rescheduled one, increment relevant reference counts. 
2565 */ 2566 if (tmr->ep != NULL) { 2567 SCTP_INP_INCR_REF(inp); 2568 } 2569 if (tmr->tcb != NULL) { 2570 atomic_add_int(&stcb->asoc.refcnt, 1); 2571 } 2572 if (tmr->net != NULL) { 2573 atomic_add_int(&net->ref_count, 1); 2574 } 2575 } else { 2576 /* 2577 * This should not happen, since we checked for pending 2578 * above. 2579 */ 2580 SCTPDBG(SCTP_DEBUG_TIMER2, 2581 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2582 t_type, to_ticks, inp, stcb, net); 2583 } 2584 return; 2585 } 2586 2587 /*- 2588 * The following table shows which parameters must be provided 2589 * when calling sctp_timer_stop(). For parameters not being 2590 * provided, NULL must be used. 2591 * 2592 * |Name |inp |stcb|net | 2593 * |-----------------------------|----|----|----| 2594 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2595 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2596 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2597 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2598 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2599 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2600 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2601 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2602 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2603 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2604 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2605 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2606 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2607 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2608 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2609 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2610 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2611 * 2612 */ 2613 2614 void 2615 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2616 struct sctp_nets *net, uint32_t from) 2617 { 2618 struct sctp_timer *tmr; 2619 2620 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2621 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2622 t_type, stcb, stcb->sctp_ep)); 2623 if (stcb != NULL) { 2624 SCTP_TCB_LOCK_ASSERT(stcb); 2625 } else if (inp != NULL) { 2626 SCTP_INP_WLOCK_ASSERT(inp); 2627 } else { 2628 SCTP_WQ_ADDR_LOCK_ASSERT(); 2629 } 2630 tmr = NULL; 2631 switch (t_type) { 2632 case SCTP_TIMER_TYPE_SEND: 2633 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2634 #ifdef INVARIANTS 2635 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2636 t_type, inp, stcb, net); 2637 #else 2638 return; 2639 #endif 2640 } 2641 tmr = &net->rxt_timer; 2642 break; 2643 case SCTP_TIMER_TYPE_INIT: 2644 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2645 #ifdef INVARIANTS 2646 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2647 t_type, inp, stcb, net); 2648 #else 2649 return; 2650 #endif 2651 } 2652 tmr = &net->rxt_timer; 2653 break; 2654 case SCTP_TIMER_TYPE_RECV: 2655 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2656 #ifdef INVARIANTS 2657 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2658 t_type, inp, stcb, net); 2659 #else 2660 return; 2661 #endif 2662 } 2663 tmr = &stcb->asoc.dack_timer; 2664 break; 2665 case SCTP_TIMER_TYPE_SHUTDOWN: 2666 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2667 #ifdef INVARIANTS 2668 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2669 t_type, inp, stcb, net); 2670 #else 2671 return; 2672 #endif 2673 } 2674 tmr = &net->rxt_timer; 2675 break; 2676 case SCTP_TIMER_TYPE_HEARTBEAT: 2677 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2678 #ifdef INVARIANTS 2679 panic("sctp_timer_stop of type %d: inp = 
%p, stcb = %p, net = %p", 2680 t_type, inp, stcb, net); 2681 #else 2682 return; 2683 #endif 2684 } 2685 tmr = &net->hb_timer; 2686 break; 2687 case SCTP_TIMER_TYPE_COOKIE: 2688 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2689 #ifdef INVARIANTS 2690 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2691 t_type, inp, stcb, net); 2692 #else 2693 return; 2694 #endif 2695 } 2696 tmr = &net->rxt_timer; 2697 break; 2698 case SCTP_TIMER_TYPE_NEWCOOKIE: 2699 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2700 #ifdef INVARIANTS 2701 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2702 t_type, inp, stcb, net); 2703 #else 2704 return; 2705 #endif 2706 } 2707 tmr = &inp->sctp_ep.signature_change; 2708 break; 2709 case SCTP_TIMER_TYPE_PATHMTURAISE: 2710 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2711 #ifdef INVARIANTS 2712 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2713 t_type, inp, stcb, net); 2714 #else 2715 return; 2716 #endif 2717 } 2718 tmr = &net->pmtu_timer; 2719 break; 2720 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2721 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2722 #ifdef INVARIANTS 2723 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2724 t_type, inp, stcb, net); 2725 #else 2726 return; 2727 #endif 2728 } 2729 tmr = &net->rxt_timer; 2730 break; 2731 case SCTP_TIMER_TYPE_ASCONF: 2732 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2733 #ifdef INVARIANTS 2734 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2735 t_type, inp, stcb, net); 2736 #else 2737 return; 2738 #endif 2739 } 2740 tmr = &stcb->asoc.asconf_timer; 2741 break; 2742 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2743 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2744 #ifdef INVARIANTS 2745 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2746 t_type, inp, stcb, net); 2747 #else 2748 return; 2749 #endif 2750 } 2751 tmr = &stcb->asoc.shut_guard_timer; 2752 break; 2753 case SCTP_TIMER_TYPE_AUTOCLOSE: 2754 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2755 #ifdef INVARIANTS 2756 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2757 t_type, inp, stcb, net); 2758 #else 2759 return; 2760 #endif 2761 } 2762 tmr = &stcb->asoc.autoclose_timer; 2763 break; 2764 case SCTP_TIMER_TYPE_STRRESET: 2765 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2766 #ifdef INVARIANTS 2767 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2768 t_type, inp, stcb, net); 2769 #else 2770 return; 2771 #endif 2772 } 2773 tmr = &stcb->asoc.strreset_timer; 2774 break; 2775 case SCTP_TIMER_TYPE_INPKILL: 2776 /* 2777 * The inp is setup to die. We re-use the signature_change 2778 * timer since that has stopped and we are in the GONE 2779 * state. 
2780 */ 2781 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2782 #ifdef INVARIANTS 2783 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2784 t_type, inp, stcb, net); 2785 #else 2786 return; 2787 #endif 2788 } 2789 tmr = &inp->sctp_ep.signature_change; 2790 break; 2791 case SCTP_TIMER_TYPE_ASOCKILL: 2792 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2793 #ifdef INVARIANTS 2794 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2795 t_type, inp, stcb, net); 2796 #else 2797 return; 2798 #endif 2799 } 2800 tmr = &stcb->asoc.strreset_timer; 2801 break; 2802 case SCTP_TIMER_TYPE_ADDR_WQ: 2803 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2804 #ifdef INVARIANTS 2805 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2806 t_type, inp, stcb, net); 2807 #else 2808 return; 2809 #endif 2810 } 2811 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2812 break; 2813 case SCTP_TIMER_TYPE_PRIM_DELETED: 2814 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2815 #ifdef INVARIANTS 2816 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2817 t_type, inp, stcb, net); 2818 #else 2819 return; 2820 #endif 2821 } 2822 tmr = &stcb->asoc.delete_prim_timer; 2823 break; 2824 default: 2825 #ifdef INVARIANTS 2826 panic("Unknown timer type %d", t_type); 2827 #else 2828 return; 2829 #endif 2830 } 2831 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2832 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2833 (tmr->type != t_type)) { 2834 /* 2835 * Ok we have a timer that is under joint use. Cookie timer 2836 * per chance with the SEND timer. We therefore are NOT 2837 * running the timer that the caller wants stopped. So just 2838 * return. 2839 */ 2840 SCTPDBG(SCTP_DEBUG_TIMER2, 2841 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2842 t_type, inp, stcb, net); 2843 return; 2844 } 2845 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2846 stcb->asoc.num_send_timers_up--; 2847 if (stcb->asoc.num_send_timers_up < 0) { 2848 stcb->asoc.num_send_timers_up = 0; 2849 } 2850 } 2851 tmr->self = NULL; 2852 tmr->stopped_from = from; 2853 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2854 KASSERT(tmr->ep == inp, 2855 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2856 t_type, inp, tmr->ep)); 2857 KASSERT(tmr->tcb == stcb, 2858 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2859 t_type, stcb, tmr->tcb)); 2860 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2861 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2862 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2863 t_type, net, tmr->net)); 2864 SCTPDBG(SCTP_DEBUG_TIMER2, 2865 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2866 t_type, inp, stcb, net); 2867 /* 2868 * If the timer was actually stopped, decrement reference 2869 * counts that were incremented in sctp_timer_start(). 2870 */ 2871 if (tmr->ep != NULL) { 2872 tmr->ep = NULL; 2873 SCTP_INP_DECR_REF(inp); 2874 } 2875 if (tmr->tcb != NULL) { 2876 tmr->tcb = NULL; 2877 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2878 } 2879 if (tmr->net != NULL) { 2880 struct sctp_nets *tmr_net; 2881 2882 /* 2883 * Can't use net, since it doesn't work for 2884 * SCTP_TIMER_TYPE_ASCONF. 
2885 */ 2886 tmr_net = tmr->net; 2887 tmr->net = NULL; 2888 sctp_free_remote_addr(tmr_net); 2889 } 2890 } else { 2891 SCTPDBG(SCTP_DEBUG_TIMER2, 2892 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2893 t_type, inp, stcb, net); 2894 } 2895 return; 2896 } 2897 2898 uint32_t 2899 sctp_calculate_len(struct mbuf *m) 2900 { 2901 struct mbuf *at; 2902 uint32_t tlen; 2903 2904 tlen = 0; 2905 for (at = m; at != NULL; at = SCTP_BUF_NEXT(at)) { 2906 tlen += SCTP_BUF_LEN(at); 2907 } 2908 return (tlen); 2909 } 2910 2911 /* 2912 * Given an association and starting time of the current RTT period, update 2913 * RTO in number of msecs. net should point to the current network. 2914 * Return 1, if an RTO update was performed, return 0 if no update was 2915 * performed due to invalid starting point. 2916 */ 2917 2918 int 2919 sctp_calculate_rto(struct sctp_tcb *stcb, 2920 struct sctp_association *asoc, 2921 struct sctp_nets *net, 2922 struct timeval *old, 2923 int rtt_from_sack) 2924 { 2925 struct timeval now; 2926 uint64_t rtt_us; /* RTT in us */ 2927 int32_t rtt; /* RTT in ms */ 2928 uint32_t new_rto; 2929 int first_measure = 0; 2930 2931 /************************/ 2932 /* 1. calculate new RTT */ 2933 /************************/ 2934 /* get the current time */ 2935 if (stcb->asoc.use_precise_time) { 2936 (void)SCTP_GETPTIME_TIMEVAL(&now); 2937 } else { 2938 (void)SCTP_GETTIME_TIMEVAL(&now); 2939 } 2940 if ((old->tv_sec > now.tv_sec) || 2941 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2942 /* The starting point is in the future. */ 2943 return (0); 2944 } 2945 timevalsub(&now, old); 2946 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2947 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2948 /* The RTT is larger than a sane value. */ 2949 return (0); 2950 } 2951 /* store the current RTT in us */ 2952 net->rtt = rtt_us; 2953 /* compute rtt in ms */ 2954 rtt = (int32_t)(net->rtt / 1000); 2955 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2956 /* 2957 * Tell the CC module that a new update has just occurred 2958 * from a sack 2959 */ 2960 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2961 } 2962 /* 2963 * Do we need to determine the lan? We do this only on sacks i.e. 2964 * RTT being determined from data not non-data (HB/INIT->INITACK). 2965 */ 2966 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2967 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2968 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2969 net->lan_type = SCTP_LAN_INTERNET; 2970 } else { 2971 net->lan_type = SCTP_LAN_LOCAL; 2972 } 2973 } 2974 2975 /***************************/ 2976 /* 2. update RTTVAR & SRTT */ 2977 /***************************/ 2978 /*- 2979 * Compute the scaled average lastsa and the 2980 * scaled variance lastsv as described in van Jacobson 2981 * Paper "Congestion Avoidance and Control", Annex A. 
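 *
 * Assuming the usual shift values SCTP_RTT_SHIFT == 3 and
 * SCTP_RTT_VAR_SHIFT == 2, the update below amounts to
 *   SRTT   <- SRTT   + (RTT - SRTT) / 8
 *   RTTVAR <- RTTVAR + (|RTT - SRTT| - RTTVAR) / 4
 * with the new RTO computed further down as SRTT + 4 * RTTVAR.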
2982 * 2983 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2984 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2985 */ 2986 if (net->RTO_measured) { 2987 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2988 net->lastsa += rtt; 2989 if (rtt < 0) { 2990 rtt = -rtt; 2991 } 2992 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 2993 net->lastsv += rtt; 2994 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2995 rto_logging(net, SCTP_LOG_RTTVAR); 2996 } 2997 } else { 2998 /* First RTO measurement */ 2999 net->RTO_measured = 1; 3000 first_measure = 1; 3001 net->lastsa = rtt << SCTP_RTT_SHIFT; 3002 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3003 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3004 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3005 } 3006 } 3007 if (net->lastsv == 0) { 3008 net->lastsv = SCTP_CLOCK_GRANULARITY; 3009 } 3010 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3011 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3012 (stcb->asoc.sat_network_lockout == 0)) { 3013 stcb->asoc.sat_network = 1; 3014 } else if ((!first_measure) && stcb->asoc.sat_network) { 3015 stcb->asoc.sat_network = 0; 3016 stcb->asoc.sat_network_lockout = 1; 3017 } 3018 /* bound it, per C6/C7 in Section 5.3.1 */ 3019 if (new_rto < stcb->asoc.minrto) { 3020 new_rto = stcb->asoc.minrto; 3021 } 3022 if (new_rto > stcb->asoc.maxrto) { 3023 new_rto = stcb->asoc.maxrto; 3024 } 3025 net->RTO = new_rto; 3026 return (1); 3027 } 3028 3029 /* 3030 * return a pointer to a contiguous piece of data from the given mbuf chain 3031 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3032 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 3033 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 3034 */ 3035 caddr_t 3036 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3037 { 3038 uint32_t count; 3039 uint8_t *ptr; 3040 3041 ptr = in_ptr; 3042 if ((off < 0) || (len <= 0)) 3043 return (NULL); 3044 3045 /* find the desired start location */ 3046 while ((m != NULL) && (off > 0)) { 3047 if (off < SCTP_BUF_LEN(m)) 3048 break; 3049 off -= SCTP_BUF_LEN(m); 3050 m = SCTP_BUF_NEXT(m); 3051 } 3052 if (m == NULL) 3053 return (NULL); 3054 3055 /* is the current mbuf large enough (eg. contiguous)? */ 3056 if ((SCTP_BUF_LEN(m) - off) >= len) { 3057 return (mtod(m, caddr_t)+off); 3058 } else { 3059 /* else, it spans more than one mbuf, so save a temp copy... */ 3060 while ((m != NULL) && (len > 0)) { 3061 count = min(SCTP_BUF_LEN(m) - off, len); 3062 memcpy(ptr, mtod(m, caddr_t)+off, count); 3063 len -= count; 3064 ptr += count; 3065 off = 0; 3066 m = SCTP_BUF_NEXT(m); 3067 } 3068 if ((m == NULL) && (len > 0)) 3069 return (NULL); 3070 else 3071 return ((caddr_t)in_ptr); 3072 } 3073 } 3074 3075 struct sctp_paramhdr * 3076 sctp_get_next_param(struct mbuf *m, 3077 int offset, 3078 struct sctp_paramhdr *pull, 3079 int pull_limit) 3080 { 3081 /* This just provides a typed signature to Peter's Pull routine */ 3082 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3083 (uint8_t *)pull)); 3084 } 3085 3086 struct mbuf * 3087 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3088 { 3089 struct mbuf *m_last; 3090 caddr_t dp; 3091 3092 if (padlen > 3) { 3093 return (NULL); 3094 } 3095 if (padlen <= M_TRAILINGSPACE(m)) { 3096 /* 3097 * The easy way. 
We hope the majority of the time we hit 3098 * here :) 3099 */ 3100 m_last = m; 3101 } else { 3102 /* Hard way we must grow the mbuf chain */ 3103 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3104 if (m_last == NULL) { 3105 return (NULL); 3106 } 3107 SCTP_BUF_LEN(m_last) = 0; 3108 SCTP_BUF_NEXT(m_last) = NULL; 3109 SCTP_BUF_NEXT(m) = m_last; 3110 } 3111 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3112 SCTP_BUF_LEN(m_last) += padlen; 3113 memset(dp, 0, padlen); 3114 return (m_last); 3115 } 3116 3117 struct mbuf * 3118 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3119 { 3120 /* find the last mbuf in chain and pad it */ 3121 struct mbuf *m_at; 3122 3123 if (last_mbuf != NULL) { 3124 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3125 } else { 3126 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3127 if (SCTP_BUF_NEXT(m_at) == NULL) { 3128 return (sctp_add_pad_tombuf(m_at, padval)); 3129 } 3130 } 3131 } 3132 return (NULL); 3133 } 3134 3135 static void 3136 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3137 uint16_t error, struct sctp_abort_chunk *abort, 3138 bool from_peer, bool timedout, int so_locked) 3139 { 3140 struct mbuf *m_notify; 3141 struct sctp_assoc_change *sac; 3142 struct sctp_queued_to_read *control; 3143 struct sctp_inpcb *inp; 3144 unsigned int notif_len; 3145 unsigned int i; 3146 uint16_t abort_len; 3147 3148 KASSERT(abort == NULL || from_peer, 3149 ("sctp_notify_assoc_change: ABORT chunk provided for local termination")); 3150 KASSERT(!from_peer || !timedout, 3151 ("sctp_notify_assoc_change: timeouts can only be local")); 3152 KASSERT(stcb != NULL, ("stcb == NULL")); 3153 SCTP_TCB_LOCK_ASSERT(stcb); 3154 inp = stcb->sctp_ep; 3155 SCTP_INP_READ_LOCK_ASSERT(inp); 3156 3157 if (sctp_stcb_is_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3158 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3159 if (abort != NULL) { 3160 abort_len = ntohs(abort->ch.chunk_length); 3161 /* 3162 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3163 * contiguous. 3164 */ 3165 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3166 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3167 } 3168 } else { 3169 abort_len = 0; 3170 } 3171 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3172 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3173 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3174 notif_len += abort_len; 3175 } 3176 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3177 if (m_notify == NULL) { 3178 /* Retry with smaller value. 
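 * That is, fall back to a bare struct sctp_assoc_change without the optional
 * supported-features bytes or the ABORT payload.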
*/ 3179 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3180 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3181 if (m_notify == NULL) { 3182 goto set_error; 3183 } 3184 } 3185 SCTP_BUF_NEXT(m_notify) = NULL; 3186 sac = mtod(m_notify, struct sctp_assoc_change *); 3187 memset(sac, 0, notif_len); 3188 sac->sac_type = SCTP_ASSOC_CHANGE; 3189 sac->sac_flags = 0; 3190 sac->sac_length = sizeof(struct sctp_assoc_change); 3191 sac->sac_state = state; 3192 sac->sac_error = error; 3193 if (state == SCTP_CANT_STR_ASSOC) { 3194 sac->sac_outbound_streams = 0; 3195 sac->sac_inbound_streams = 0; 3196 } else { 3197 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3198 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3199 } 3200 sac->sac_assoc_id = sctp_get_associd(stcb); 3201 if (notif_len > sizeof(struct sctp_assoc_change)) { 3202 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3203 i = 0; 3204 if (stcb->asoc.prsctp_supported == 1) { 3205 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3206 } 3207 if (stcb->asoc.auth_supported == 1) { 3208 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3209 } 3210 if (stcb->asoc.asconf_supported == 1) { 3211 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3212 } 3213 if (stcb->asoc.idata_supported == 1) { 3214 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3215 } 3216 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3217 if (stcb->asoc.reconfig_supported == 1) { 3218 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3219 } 3220 sac->sac_length += i; 3221 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3222 memcpy(sac->sac_info, abort, abort_len); 3223 sac->sac_length += abort_len; 3224 } 3225 } 3226 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3227 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3228 0, 0, stcb->asoc.context, 0, 0, 0, 3229 m_notify); 3230 if (control != NULL) { 3231 control->length = SCTP_BUF_LEN(m_notify); 3232 control->spec_flags = M_NOTIFICATION; 3233 /* not that we need this */ 3234 control->tail_mbuf = m_notify; 3235 sctp_add_to_readq(inp, stcb, control, 3236 &stcb->sctp_socket->so_rcv, 1, 3237 SCTP_READ_LOCK_HELD, so_locked); 3238 } else { 3239 sctp_m_freem(m_notify); 3240 } 3241 } 3242 /* 3243 * For 1-to-1 style sockets, we send up and error when an ABORT 3244 * comes in. 
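 * ECONNREFUSED is used for a peer ABORT that arrives while still in
 * COOKIE_WAIT, ECONNRESET for a later peer ABORT, and ETIMEDOUT or
 * ECONNABORTED for locally generated terminations, as the code below shows.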
3245 */ 3246 set_error: 3247 if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3248 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3249 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3250 SOCK_LOCK(stcb->sctp_socket); 3251 if (from_peer) { 3252 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3253 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3254 stcb->sctp_socket->so_error = ECONNREFUSED; 3255 } else { 3256 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3257 stcb->sctp_socket->so_error = ECONNRESET; 3258 } 3259 } else { 3260 if (timedout) { 3261 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3262 stcb->sctp_socket->so_error = ETIMEDOUT; 3263 } else { 3264 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3265 stcb->sctp_socket->so_error = ECONNABORTED; 3266 } 3267 } 3268 SOCK_UNLOCK(stcb->sctp_socket); 3269 } 3270 /* Wake ANY sleepers */ 3271 if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3272 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3273 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3274 socantrcvmore(stcb->sctp_socket); 3275 } 3276 sorwakeup(stcb->sctp_socket); 3277 sowwakeup(stcb->sctp_socket); 3278 } 3279 3280 static void 3281 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3282 struct sockaddr *sa, uint32_t error, int so_locked) 3283 { 3284 struct mbuf *m_notify; 3285 struct sctp_paddr_change *spc; 3286 struct sctp_queued_to_read *control; 3287 3288 KASSERT(stcb != NULL, ("stcb == NULL")); 3289 SCTP_TCB_LOCK_ASSERT(stcb); 3290 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 3291 3292 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3293 /* event not enabled */ 3294 return; 3295 } 3296 3297 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3298 if (m_notify == NULL) 3299 return; 3300 SCTP_BUF_LEN(m_notify) = 0; 3301 spc = mtod(m_notify, struct sctp_paddr_change *); 3302 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3303 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3304 spc->spc_flags = 0; 3305 spc->spc_length = sizeof(struct sctp_paddr_change); 3306 switch (sa->sa_family) { 3307 #ifdef INET 3308 case AF_INET: 3309 #ifdef INET6 3310 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3311 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3312 (struct sockaddr_in6 *)&spc->spc_aaddr); 3313 } else { 3314 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3315 } 3316 #else 3317 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3318 #endif 3319 break; 3320 #endif 3321 #ifdef INET6 3322 case AF_INET6: 3323 { 3324 struct sockaddr_in6 *sin6; 3325 3326 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3327 3328 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3329 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3330 if (sin6->sin6_scope_id == 0) { 3331 /* recover scope_id for user */ 3332 (void)sa6_recoverscope(sin6); 3333 } else { 3334 /* clear embedded scope_id for user */ 3335 in6_clearscope(&sin6->sin6_addr); 3336 } 3337 } 3338 break; 3339 } 3340 #endif 3341 default: 3342 /* TSNH */ 3343 break; 3344 } 3345 spc->spc_state = state; 3346 spc->spc_error = error; 3347 spc->spc_assoc_id = sctp_get_associd(stcb); 3348 3349 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3350 SCTP_BUF_NEXT(m_notify) = NULL; 3351 3352 /* append to socket */ 3353 control = sctp_build_readq_entry(stcb, 
stcb->asoc.primary_destination, 3354 0, 0, stcb->asoc.context, 0, 0, 0, 3355 m_notify); 3356 if (control == NULL) { 3357 /* no memory */ 3358 sctp_m_freem(m_notify); 3359 return; 3360 } 3361 control->length = SCTP_BUF_LEN(m_notify); 3362 control->spec_flags = M_NOTIFICATION; 3363 /* not that we need this */ 3364 control->tail_mbuf = m_notify; 3365 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3366 &stcb->sctp_socket->so_rcv, 1, 3367 SCTP_READ_LOCK_HELD, so_locked); 3368 } 3369 3370 static void 3371 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3372 struct sctp_tmit_chunk *chk, int so_locked) 3373 { 3374 struct mbuf *m_notify; 3375 struct sctp_send_failed *ssf; 3376 struct sctp_send_failed_event *ssfe; 3377 struct sctp_queued_to_read *control; 3378 struct sctp_chunkhdr *chkhdr; 3379 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3380 3381 KASSERT(stcb != NULL, ("stcb == NULL")); 3382 SCTP_TCB_LOCK_ASSERT(stcb); 3383 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 3384 3385 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3386 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3387 /* event not enabled */ 3388 return; 3389 } 3390 3391 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3392 notifhdr_len = sizeof(struct sctp_send_failed_event); 3393 } else { 3394 notifhdr_len = sizeof(struct sctp_send_failed); 3395 } 3396 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3397 if (m_notify == NULL) 3398 /* no space left */ 3399 return; 3400 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3401 if (stcb->asoc.idata_supported) { 3402 chkhdr_len = sizeof(struct sctp_idata_chunk); 3403 } else { 3404 chkhdr_len = sizeof(struct sctp_data_chunk); 3405 } 3406 /* Use some defaults in case we can't access the chunk header */ 3407 if (chk->send_size >= chkhdr_len) { 3408 payload_len = chk->send_size - chkhdr_len; 3409 } else { 3410 payload_len = 0; 3411 } 3412 padding_len = 0; 3413 if (chk->data != NULL) { 3414 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3415 if (chkhdr != NULL) { 3416 chk_len = ntohs(chkhdr->chunk_length); 3417 if ((chk_len >= chkhdr_len) && 3418 (chk->send_size >= chk_len) && 3419 (chk->send_size - chk_len < 4)) { 3420 padding_len = chk->send_size - chk_len; 3421 payload_len = chk->send_size - chkhdr_len - padding_len; 3422 } 3423 } 3424 } 3425 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3426 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3427 memset(ssfe, 0, notifhdr_len); 3428 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3429 if (sent) { 3430 ssfe->ssfe_flags = SCTP_DATA_SENT; 3431 } else { 3432 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3433 } 3434 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3435 ssfe->ssfe_error = error; 3436 /* not exactly what the user sent in, but should be close :) */ 3437 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3438 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3439 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3440 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3441 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3442 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3443 } else { 3444 ssf = mtod(m_notify, struct sctp_send_failed *); 3445 memset(ssf, 0, notifhdr_len); 3446 ssf->ssf_type = SCTP_SEND_FAILED; 3447 if (sent) { 3448 ssf->ssf_flags = SCTP_DATA_SENT; 3449 } else { 3450 ssf->ssf_flags = SCTP_DATA_UNSENT; 
3451 } 3452 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3453 ssf->ssf_error = error; 3454 /* not exactly what the user sent in, but should be close :) */ 3455 ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3456 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3457 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3458 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3459 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3460 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3461 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3462 } 3463 if (chk->data != NULL) { 3464 /* Trim off the sctp chunk header (it should be there) */ 3465 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3466 m_adj(chk->data, chkhdr_len); 3467 m_adj(chk->data, -padding_len); 3468 sctp_mbuf_crush(chk->data); 3469 chk->send_size -= (chkhdr_len + padding_len); 3470 } 3471 } 3472 SCTP_BUF_NEXT(m_notify) = chk->data; 3473 /* Steal off the mbuf */ 3474 chk->data = NULL; 3475 /* 3476 * For this case, we check the actual socket buffer, since the assoc 3477 * is going away we don't want to overfill the socket buffer for a 3478 * non-reader 3479 */ 3480 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3481 sctp_m_freem(m_notify); 3482 return; 3483 } 3484 /* append to socket */ 3485 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3486 0, 0, stcb->asoc.context, 0, 0, 0, 3487 m_notify); 3488 if (control == NULL) { 3489 /* no memory */ 3490 sctp_m_freem(m_notify); 3491 return; 3492 } 3493 control->length = SCTP_BUF_LEN(m_notify); 3494 control->spec_flags = M_NOTIFICATION; 3495 /* not that we need this */ 3496 control->tail_mbuf = m_notify; 3497 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3498 &stcb->sctp_socket->so_rcv, 1, 3499 SCTP_READ_LOCK_HELD, so_locked); 3500 } 3501 3502 static void 3503 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3504 struct sctp_stream_queue_pending *sp, int so_locked) 3505 { 3506 struct mbuf *m_notify; 3507 struct sctp_send_failed *ssf; 3508 struct sctp_send_failed_event *ssfe; 3509 struct sctp_queued_to_read *control; 3510 int notifhdr_len; 3511 3512 KASSERT(stcb != NULL, ("stcb == NULL")); 3513 SCTP_TCB_LOCK_ASSERT(stcb); 3514 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 3515 3516 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3517 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3518 /* event not enabled */ 3519 return; 3520 } 3521 3522 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3523 notifhdr_len = sizeof(struct sctp_send_failed_event); 3524 } else { 3525 notifhdr_len = sizeof(struct sctp_send_failed); 3526 } 3527 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3528 if (m_notify == NULL) { 3529 /* no space left */ 3530 return; 3531 } 3532 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3533 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3534 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3535 memset(ssfe, 0, notifhdr_len); 3536 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3537 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3538 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3539 ssfe->ssfe_error = error; 3540 /* not exactly what the user sent in, but should be close :) */ 3541 ssfe->ssfe_info.snd_sid = sp->sid; 3542 if (sp->some_taken) { 3543 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3544 } else { 3545 
ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3546 } 3547 ssfe->ssfe_info.snd_ppid = sp->ppid; 3548 ssfe->ssfe_info.snd_context = sp->context; 3549 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3550 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3551 } else { 3552 ssf = mtod(m_notify, struct sctp_send_failed *); 3553 memset(ssf, 0, notifhdr_len); 3554 ssf->ssf_type = SCTP_SEND_FAILED; 3555 ssf->ssf_flags = SCTP_DATA_UNSENT; 3556 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3557 ssf->ssf_error = error; 3558 /* not exactly what the user sent in, but should be close :) */ 3559 ssf->ssf_info.sinfo_stream = sp->sid; 3560 ssf->ssf_info.sinfo_ssn = 0; 3561 if (sp->some_taken) { 3562 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3563 } else { 3564 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3565 } 3566 ssf->ssf_info.sinfo_ppid = sp->ppid; 3567 ssf->ssf_info.sinfo_context = sp->context; 3568 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3569 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3570 } 3571 SCTP_BUF_NEXT(m_notify) = sp->data; 3572 3573 /* Steal off the mbuf */ 3574 sp->data = NULL; 3575 /* 3576 * For this case, we check the actual socket buffer, since the assoc 3577 * is going away we don't want to overfill the socket buffer for a 3578 * non-reader 3579 */ 3580 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3581 sctp_m_freem(m_notify); 3582 return; 3583 } 3584 /* append to socket */ 3585 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3586 0, 0, stcb->asoc.context, 0, 0, 0, 3587 m_notify); 3588 if (control == NULL) { 3589 /* no memory */ 3590 sctp_m_freem(m_notify); 3591 return; 3592 } 3593 control->length = SCTP_BUF_LEN(m_notify); 3594 control->spec_flags = M_NOTIFICATION; 3595 /* not that we need this */ 3596 control->tail_mbuf = m_notify; 3597 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3598 &stcb->sctp_socket->so_rcv, 1, 3599 SCTP_READ_LOCK_HELD, so_locked); 3600 } 3601 3602 static void 3603 sctp_notify_adaptation_layer(struct sctp_tcb *stcb, int so_locked) 3604 { 3605 struct mbuf *m_notify; 3606 struct sctp_adaptation_event *sai; 3607 struct sctp_queued_to_read *control; 3608 3609 KASSERT(stcb != NULL, ("stcb == NULL")); 3610 SCTP_TCB_LOCK_ASSERT(stcb); 3611 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 3612 3613 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3614 /* event not enabled */ 3615 return; 3616 } 3617 3618 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3619 if (m_notify == NULL) 3620 /* no space left */ 3621 return; 3622 SCTP_BUF_LEN(m_notify) = 0; 3623 sai = mtod(m_notify, struct sctp_adaptation_event *); 3624 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3625 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3626 sai->sai_flags = 0; 3627 sai->sai_length = sizeof(struct sctp_adaptation_event); 3628 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3629 sai->sai_assoc_id = sctp_get_associd(stcb); 3630 3631 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3632 SCTP_BUF_NEXT(m_notify) = NULL; 3633 3634 /* append to socket */ 3635 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3636 0, 0, stcb->asoc.context, 0, 0, 0, 3637 m_notify); 3638 if (control == NULL) { 3639 /* no memory */ 3640 sctp_m_freem(m_notify); 3641 return; 3642 } 3643 control->length = SCTP_BUF_LEN(m_notify); 3644 control->spec_flags = M_NOTIFICATION; 3645 /* not that we need this */ 3646 
control->tail_mbuf = m_notify; 3647 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3648 &stcb->sctp_socket->so_rcv, 1, 3649 SCTP_READ_LOCK_HELD, so_locked); 3650 } 3651 3652 static void 3653 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3654 struct sctp_queued_to_read *aborted_control, 3655 int so_locked) 3656 { 3657 struct mbuf *m_notify; 3658 struct sctp_pdapi_event *pdapi; 3659 struct sctp_queued_to_read *control; 3660 struct sockbuf *sb; 3661 3662 KASSERT(aborted_control != NULL, ("aborted_control is NULL")); 3663 KASSERT(stcb != NULL, ("stcb == NULL")); 3664 SCTP_TCB_LOCK_ASSERT(stcb); 3665 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 3666 3667 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3668 /* event not enabled */ 3669 return; 3670 } 3671 3672 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3673 if (m_notify == NULL) 3674 /* no space left */ 3675 return; 3676 SCTP_BUF_LEN(m_notify) = 0; 3677 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3678 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3679 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3680 pdapi->pdapi_flags = 0; 3681 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3682 pdapi->pdapi_indication = error; 3683 pdapi->pdapi_stream = aborted_control->sinfo_stream; 3684 pdapi->pdapi_seq = (uint16_t)aborted_control->mid; 3685 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3686 3687 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3688 SCTP_BUF_NEXT(m_notify) = NULL; 3689 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3690 0, 0, stcb->asoc.context, 0, 0, 0, 3691 m_notify); 3692 if (control == NULL) { 3693 /* no memory */ 3694 sctp_m_freem(m_notify); 3695 return; 3696 } 3697 control->length = SCTP_BUF_LEN(m_notify); 3698 control->spec_flags = M_NOTIFICATION; 3699 /* not that we need this */ 3700 control->tail_mbuf = m_notify; 3701 sb = &stcb->sctp_socket->so_rcv; 3702 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3703 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3704 } 3705 sctp_sballoc(stcb, sb, m_notify); 3706 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3707 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3708 } 3709 control->end_added = 1; 3710 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, aborted_control, control, next); 3711 if (stcb->sctp_ep && stcb->sctp_socket) { 3712 /* This should always be the case */ 3713 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3714 } 3715 } 3716 3717 static void 3718 sctp_notify_shutdown_event(struct sctp_tcb *stcb, int so_locked) 3719 { 3720 struct mbuf *m_notify; 3721 struct sctp_shutdown_event *sse; 3722 struct sctp_queued_to_read *control; 3723 3724 KASSERT(stcb != NULL, ("stcb == NULL")); 3725 SCTP_TCB_LOCK_ASSERT(stcb); 3726 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 3727 3728 /* 3729 * For TCP model AND UDP connected sockets we will send an error up 3730 * when an SHUTDOWN completes 3731 */ 3732 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3733 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3734 /* mark socket closed for read/write and wakeup! 
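Only the send side is actually marked closed here, via socantsendmore(); the receive side stays open so that a SHUTDOWN_EVENT queued further down can still be read.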
*/ 3735 socantsendmore(stcb->sctp_socket); 3736 } 3737 3738 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3739 /* event not enabled */ 3740 return; 3741 } 3742 3743 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3744 if (m_notify == NULL) 3745 /* no space left */ 3746 return; 3747 sse = mtod(m_notify, struct sctp_shutdown_event *); 3748 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3749 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3750 sse->sse_flags = 0; 3751 sse->sse_length = sizeof(struct sctp_shutdown_event); 3752 sse->sse_assoc_id = sctp_get_associd(stcb); 3753 3754 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3755 SCTP_BUF_NEXT(m_notify) = NULL; 3756 3757 /* append to socket */ 3758 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3759 0, 0, stcb->asoc.context, 0, 0, 0, 3760 m_notify); 3761 if (control == NULL) { 3762 /* no memory */ 3763 sctp_m_freem(m_notify); 3764 return; 3765 } 3766 control->length = SCTP_BUF_LEN(m_notify); 3767 control->spec_flags = M_NOTIFICATION; 3768 /* not that we need this */ 3769 control->tail_mbuf = m_notify; 3770 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3771 &stcb->sctp_socket->so_rcv, 1, 3772 SCTP_READ_LOCK_HELD, so_locked); 3773 } 3774 3775 static void 3776 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, int so_locked) 3777 { 3778 struct mbuf *m_notify; 3779 struct sctp_sender_dry_event *event; 3780 struct sctp_queued_to_read *control; 3781 3782 KASSERT(stcb != NULL, ("stcb == NULL")); 3783 SCTP_TCB_LOCK_ASSERT(stcb); 3784 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 3785 3786 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3787 /* event not enabled */ 3788 return; 3789 } 3790 3791 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3792 if (m_notify == NULL) { 3793 /* no space left */ 3794 return; 3795 } 3796 SCTP_BUF_LEN(m_notify) = 0; 3797 event = mtod(m_notify, struct sctp_sender_dry_event *); 3798 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3799 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3800 event->sender_dry_flags = 0; 3801 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3802 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3803 3804 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3805 SCTP_BUF_NEXT(m_notify) = NULL; 3806 3807 /* append to socket */ 3808 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3809 0, 0, stcb->asoc.context, 0, 0, 0, 3810 m_notify); 3811 if (control == NULL) { 3812 /* no memory */ 3813 sctp_m_freem(m_notify); 3814 return; 3815 } 3816 control->length = SCTP_BUF_LEN(m_notify); 3817 control->spec_flags = M_NOTIFICATION; 3818 /* not that we need this */ 3819 control->tail_mbuf = m_notify; 3820 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3821 &stcb->sctp_socket->so_rcv, 1, 3822 SCTP_READ_LOCK_HELD, so_locked); 3823 } 3824 3825 static void 3826 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int flag, int so_locked) 3827 { 3828 struct mbuf *m_notify; 3829 struct sctp_queued_to_read *control; 3830 struct sctp_stream_change_event *stradd; 3831 3832 KASSERT(stcb != NULL, ("stcb == NULL")); 3833 SCTP_TCB_LOCK_ASSERT(stcb); 3834 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 3835 3836 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT)) { 3837 /* event not enabled */ 3838 return; 3839 } 3840 3841 if 
((stcb->asoc.peer_req_out) && flag) { 3842 /* Peer made the request, don't tell the local user */ 3843 stcb->asoc.peer_req_out = 0; 3844 return; 3845 } 3846 stcb->asoc.peer_req_out = 0; 3847 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3848 if (m_notify == NULL) 3849 /* no space left */ 3850 return; 3851 SCTP_BUF_LEN(m_notify) = 0; 3852 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3853 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3854 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3855 stradd->strchange_flags = flag; 3856 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3857 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3858 stradd->strchange_instrms = stcb->asoc.streamincnt; 3859 stradd->strchange_outstrms = stcb->asoc.streamoutcnt; 3860 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3861 SCTP_BUF_NEXT(m_notify) = NULL; 3862 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3863 /* no space */ 3864 sctp_m_freem(m_notify); 3865 return; 3866 } 3867 /* append to socket */ 3868 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3869 0, 0, stcb->asoc.context, 0, 0, 0, 3870 m_notify); 3871 if (control == NULL) { 3872 /* no memory */ 3873 sctp_m_freem(m_notify); 3874 return; 3875 } 3876 control->length = SCTP_BUF_LEN(m_notify); 3877 control->spec_flags = M_NOTIFICATION; 3878 /* not that we need this */ 3879 control->tail_mbuf = m_notify; 3880 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3881 &stcb->sctp_socket->so_rcv, 1, 3882 SCTP_READ_LOCK_HELD, so_locked); 3883 } 3884 3885 static void 3886 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, int flag, int so_locked) 3887 { 3888 struct mbuf *m_notify; 3889 struct sctp_queued_to_read *control; 3890 struct sctp_assoc_reset_event *strasoc; 3891 3892 KASSERT(stcb != NULL, ("stcb == NULL")); 3893 SCTP_TCB_LOCK_ASSERT(stcb); 3894 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 3895 3896 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT)) { 3897 /* event not enabled */ 3898 return; 3899 } 3900 3901 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3902 if (m_notify == NULL) 3903 /* no space left */ 3904 return; 3905 SCTP_BUF_LEN(m_notify) = 0; 3906 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3907 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3908 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3909 strasoc->assocreset_flags = flag; 3910 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3911 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3912 strasoc->assocreset_local_tsn = stcb->asoc.sending_seq; 3913 strasoc->assocreset_remote_tsn = stcb->asoc.mapping_array_base_tsn + 1; 3914 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3915 SCTP_BUF_NEXT(m_notify) = NULL; 3916 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3917 /* no space */ 3918 sctp_m_freem(m_notify); 3919 return; 3920 } 3921 /* append to socket */ 3922 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3923 0, 0, stcb->asoc.context, 0, 0, 0, 3924 m_notify); 3925 if (control == NULL) { 3926 /* no memory */ 3927 sctp_m_freem(m_notify); 3928 return; 3929 } 3930 control->length = SCTP_BUF_LEN(m_notify); 3931 control->spec_flags = M_NOTIFICATION; 3932 /* not that we need this */ 3933 control->tail_mbuf = 
m_notify; 3934 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3935 &stcb->sctp_socket->so_rcv, 1, 3936 SCTP_READ_LOCK_HELD, so_locked); 3937 } 3938 3939 static void 3940 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3941 int number_entries, uint16_t *list, int flag, int so_locked) 3942 { 3943 struct mbuf *m_notify; 3944 struct sctp_queued_to_read *control; 3945 struct sctp_stream_reset_event *strreset; 3946 int len; 3947 3948 KASSERT(stcb != NULL, ("stcb == NULL")); 3949 SCTP_TCB_LOCK_ASSERT(stcb); 3950 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 3951 3952 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) { 3953 /* event not enabled */ 3954 return; 3955 } 3956 3957 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3958 if (m_notify == NULL) 3959 /* no space left */ 3960 return; 3961 SCTP_BUF_LEN(m_notify) = 0; 3962 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3963 if (len > M_TRAILINGSPACE(m_notify)) { 3964 /* never enough room */ 3965 sctp_m_freem(m_notify); 3966 return; 3967 } 3968 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3969 memset(strreset, 0, len); 3970 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3971 strreset->strreset_flags = flag; 3972 strreset->strreset_length = len; 3973 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3974 if (number_entries) { 3975 int i; 3976 3977 for (i = 0; i < number_entries; i++) { 3978 strreset->strreset_stream_list[i] = ntohs(list[i]); 3979 } 3980 } 3981 SCTP_BUF_LEN(m_notify) = len; 3982 SCTP_BUF_NEXT(m_notify) = NULL; 3983 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3984 /* no space */ 3985 sctp_m_freem(m_notify); 3986 return; 3987 } 3988 /* append to socket */ 3989 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3990 0, 0, stcb->asoc.context, 0, 0, 0, 3991 m_notify); 3992 if (control == NULL) { 3993 /* no memory */ 3994 sctp_m_freem(m_notify); 3995 return; 3996 } 3997 control->length = SCTP_BUF_LEN(m_notify); 3998 control->spec_flags = M_NOTIFICATION; 3999 /* not that we need this */ 4000 control->tail_mbuf = m_notify; 4001 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 4002 &stcb->sctp_socket->so_rcv, 1, 4003 SCTP_READ_LOCK_HELD, so_locked); 4004 } 4005 4006 static void 4007 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, 4008 struct sctp_error_chunk *chunk, int so_locked) 4009 { 4010 struct mbuf *m_notify; 4011 struct sctp_remote_error *sre; 4012 struct sctp_queued_to_read *control; 4013 unsigned int notif_len; 4014 uint16_t chunk_len; 4015 4016 KASSERT(stcb != NULL, ("stcb == NULL")); 4017 SCTP_TCB_LOCK_ASSERT(stcb); 4018 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 4019 4020 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 4021 return; 4022 } 4023 4024 if (chunk != NULL) { 4025 chunk_len = ntohs(chunk->ch.chunk_length); 4026 /* 4027 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 4028 * contiguous. 4029 */ 4030 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 4031 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4032 } 4033 } else { 4034 chunk_len = 0; 4035 } 4036 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4037 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4038 if (m_notify == NULL) { 4039 /* Retry with smaller value. 
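If even the smaller allocation fails we give up; on success the notification is delivered without the received ERROR chunk appended.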
*/ 4040 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4041 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4042 if (m_notify == NULL) { 4043 return; 4044 } 4045 } 4046 SCTP_BUF_NEXT(m_notify) = NULL; 4047 sre = mtod(m_notify, struct sctp_remote_error *); 4048 memset(sre, 0, notif_len); 4049 sre->sre_type = SCTP_REMOTE_ERROR; 4050 sre->sre_flags = 0; 4051 sre->sre_length = sizeof(struct sctp_remote_error); 4052 sre->sre_error = error; 4053 sre->sre_assoc_id = sctp_get_associd(stcb); 4054 if (notif_len > sizeof(struct sctp_remote_error)) { 4055 memcpy(sre->sre_data, chunk, chunk_len); 4056 sre->sre_length += chunk_len; 4057 } 4058 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4059 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4060 0, 0, stcb->asoc.context, 0, 0, 0, 4061 m_notify); 4062 if (control != NULL) { 4063 control->length = SCTP_BUF_LEN(m_notify); 4064 control->spec_flags = M_NOTIFICATION; 4065 /* not that we need this */ 4066 control->tail_mbuf = m_notify; 4067 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 4068 &stcb->sctp_socket->so_rcv, 1, 4069 SCTP_READ_LOCK_HELD, so_locked); 4070 } else { 4071 sctp_m_freem(m_notify); 4072 } 4073 } 4074 4075 void 4076 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4077 uint32_t error, void *data, int so_locked) 4078 { 4079 struct sctp_inpcb *inp; 4080 struct sctp_nets *net; 4081 4082 KASSERT(stcb != NULL, ("stcb == NULL")); 4083 SCTP_TCB_LOCK_ASSERT(stcb); 4084 4085 inp = stcb->sctp_ep; 4086 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4087 return; 4088 } 4089 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4090 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4091 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4092 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4093 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4094 /* Don't report these in front states */ 4095 return; 4096 } 4097 } 4098 if (notification != SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION) { 4099 SCTP_INP_READ_LOCK(inp); 4100 } 4101 SCTP_INP_READ_LOCK_ASSERT(inp); 4102 4103 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4104 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4105 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) { 4106 SCTP_INP_READ_UNLOCK(inp); 4107 return; 4108 } 4109 4110 switch (notification) { 4111 case SCTP_NOTIFY_ASSOC_UP: 4112 if (stcb->asoc.assoc_up_sent == 0) { 4113 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, false, false, so_locked); 4114 stcb->asoc.assoc_up_sent = 1; 4115 } 4116 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4117 sctp_notify_adaptation_layer(stcb, so_locked); 4118 } 4119 if (stcb->asoc.auth_supported == 0) { 4120 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, 0, so_locked); 4121 } 4122 break; 4123 case SCTP_NOTIFY_ASSOC_DOWN: 4124 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, false, false, so_locked); 4125 break; 4126 case SCTP_NOTIFY_INTERFACE_DOWN: 4127 net = (struct sctp_nets *)data; 4128 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4129 &net->ro._l_addr.sa, error, so_locked); 4130 break; 4131 case SCTP_NOTIFY_INTERFACE_UP: 4132 net = (struct sctp_nets *)data; 4133 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4134 &net->ro._l_addr.sa, error, so_locked); 4135 break; 4136 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4137 net = (struct sctp_nets *)data; 4138 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4139 &net->ro._l_addr.sa, error, 
so_locked); 4140 break; 4141 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4142 sctp_notify_send_failed2(stcb, error, 4143 (struct sctp_stream_queue_pending *)data, so_locked); 4144 break; 4145 case SCTP_NOTIFY_SENT_DG_FAIL: 4146 sctp_notify_send_failed(stcb, 1, error, 4147 (struct sctp_tmit_chunk *)data, so_locked); 4148 break; 4149 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4150 sctp_notify_send_failed(stcb, 0, error, 4151 (struct sctp_tmit_chunk *)data, so_locked); 4152 break; 4153 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4154 sctp_notify_partial_delivery_indication(stcb, error, 4155 (struct sctp_queued_to_read *)data, 4156 so_locked); 4157 break; 4158 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4159 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4160 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4161 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, false, so_locked); 4162 } else { 4163 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, false, so_locked); 4164 } 4165 break; 4166 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4167 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4168 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4169 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, true, false, so_locked); 4170 } else { 4171 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, true, false, so_locked); 4172 } 4173 break; 4174 case SCTP_NOTIFY_ASSOC_TIMEDOUT: 4175 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4176 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4177 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, true, so_locked); 4178 } else { 4179 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, true, so_locked); 4180 } 4181 break; 4182 case SCTP_NOTIFY_ASSOC_RESTART: 4183 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, false, false, so_locked); 4184 if (stcb->asoc.auth_supported == 0) { 4185 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, 0, so_locked); 4186 } 4187 break; 4188 case SCTP_NOTIFY_STR_RESET_SEND: 4189 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN, so_locked); 4190 break; 4191 case SCTP_NOTIFY_STR_RESET_RECV: 4192 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING, so_locked); 4193 break; 4194 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4195 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4196 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED), so_locked); 4197 break; 4198 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4199 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4200 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED), so_locked); 4201 break; 4202 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4203 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4204 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED), so_locked); 4205 break; 4206 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4207 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4208 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED), so_locked); 4209 break; 4210 case SCTP_NOTIFY_STR_RESET_ADD: 4211 sctp_notify_stream_reset_add(stcb, error, so_locked); 4212 break; 4213 case SCTP_NOTIFY_STR_RESET_TSN: 4214 sctp_notify_stream_reset_tsn(stcb, error, so_locked); 4215 break; 4216 case SCTP_NOTIFY_ASCONF_ADD_IP: 4217 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4218 error, so_locked); 4219 break; 4220 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4221 
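/* An address was removed from the association via ASCONF; pass it up to the ULP as an SCTP_ADDR_REMOVED peer-address-change event. */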
sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4222 error, so_locked); 4223 break; 4224 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4225 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4226 error, so_locked); 4227 break; 4228 case SCTP_NOTIFY_PEER_SHUTDOWN: 4229 sctp_notify_shutdown_event(stcb, so_locked); 4230 break; 4231 case SCTP_NOTIFY_AUTH_NEW_KEY: 4232 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, 4233 *(uint16_t *)data, so_locked); 4234 break; 4235 case SCTP_NOTIFY_AUTH_FREE_KEY: 4236 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, 4237 *(uint16_t *)data, so_locked); 4238 break; 4239 case SCTP_NOTIFY_NO_PEER_AUTH: 4240 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, 4241 0, so_locked); 4242 break; 4243 case SCTP_NOTIFY_SENDER_DRY: 4244 sctp_notify_sender_dry_event(stcb, so_locked); 4245 break; 4246 case SCTP_NOTIFY_REMOTE_ERROR: 4247 sctp_notify_remote_error(stcb, error, data, so_locked); 4248 break; 4249 default: 4250 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4251 __func__, notification, notification); 4252 break; 4253 } 4254 if (notification != SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION) { 4255 SCTP_INP_READ_UNLOCK(inp); 4256 } 4257 } 4258 4259 void 4260 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4261 { 4262 struct sctp_association *asoc; 4263 struct sctp_stream_out *outs; 4264 struct sctp_tmit_chunk *chk, *nchk; 4265 struct sctp_stream_queue_pending *sp, *nsp; 4266 int i; 4267 4268 if (stcb == NULL) { 4269 return; 4270 } 4271 asoc = &stcb->asoc; 4272 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4273 /* already being freed */ 4274 return; 4275 } 4276 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4277 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4278 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4279 return; 4280 } 4281 /* now through all the gunk freeing chunks */ 4282 /* sent queue SHOULD be empty */ 4283 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4284 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4285 asoc->sent_queue_cnt--; 4286 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4287 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4288 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4289 #ifdef INVARIANTS 4290 } else { 4291 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4292 #endif 4293 } 4294 } 4295 if (chk->data != NULL) { 4296 sctp_free_bufspace(stcb, asoc, chk, 1); 4297 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4298 error, chk, so_locked); 4299 if (chk->data) { 4300 sctp_m_freem(chk->data); 4301 chk->data = NULL; 4302 } 4303 } 4304 sctp_free_a_chunk(stcb, chk, so_locked); 4305 /* sa_ignore FREED_MEMORY */ 4306 } 4307 /* pending send queue SHOULD be empty */ 4308 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4309 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4310 asoc->send_queue_cnt--; 4311 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4312 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4313 #ifdef INVARIANTS 4314 } else { 4315 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4316 #endif 4317 } 4318 if (chk->data != NULL) { 4319 sctp_free_bufspace(stcb, asoc, chk, 1); 4320 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4321 error, chk, so_locked); 4322 if (chk->data) { 4323 sctp_m_freem(chk->data); 4324 chk->data = NULL; 4325 } 4326 } 4327 sctp_free_a_chunk(stcb, chk, so_locked); 4328 /* sa_ignore FREED_MEMORY */ 4329 } 4330 for (i = 
0; i < asoc->streamoutcnt; i++) { 4331 /* For each stream */ 4332 outs = &asoc->strmout[i]; 4333 /* clean up any sends there */ 4334 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4335 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4336 TAILQ_REMOVE(&outs->outqueue, sp, next); 4337 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp); 4338 sctp_free_spbufspace(stcb, asoc, sp); 4339 if (sp->data) { 4340 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4341 error, (void *)sp, so_locked); 4342 if (sp->data) { 4343 sctp_m_freem(sp->data); 4344 sp->data = NULL; 4345 sp->tail_mbuf = NULL; 4346 sp->length = 0; 4347 } 4348 } 4349 if (sp->net) { 4350 sctp_free_remote_addr(sp->net); 4351 sp->net = NULL; 4352 } 4353 /* Free the chunk */ 4354 sctp_free_a_strmoq(stcb, sp, so_locked); 4355 /* sa_ignore FREED_MEMORY */ 4356 } 4357 } 4358 } 4359 4360 void 4361 sctp_abort_notification(struct sctp_tcb *stcb, bool from_peer, bool timeout, 4362 uint16_t error, struct sctp_abort_chunk *abort, 4363 int so_locked) 4364 { 4365 if (stcb == NULL) { 4366 return; 4367 } 4368 SCTP_TCB_LOCK_ASSERT(stcb); 4369 4370 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4371 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4372 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4373 sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_WAS_ABORTED); 4374 } 4375 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4376 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4377 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4378 return; 4379 } 4380 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4381 /* Tell them we lost the asoc */ 4382 sctp_report_all_outbound(stcb, error, so_locked); 4383 if (from_peer) { 4384 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4385 } else { 4386 if (timeout) { 4387 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_TIMEDOUT, stcb, error, abort, so_locked); 4388 } else { 4389 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4390 } 4391 } 4392 } 4393 4394 void 4395 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4396 struct mbuf *m, int iphlen, 4397 struct sockaddr *src, struct sockaddr *dst, 4398 struct sctphdr *sh, struct mbuf *op_err, 4399 uint8_t mflowtype, uint32_t mflowid, 4400 uint32_t vrf_id, uint16_t port) 4401 { 4402 struct sctp_gen_error_cause *cause; 4403 uint32_t vtag; 4404 uint16_t cause_code; 4405 4406 if (stcb != NULL) { 4407 vtag = stcb->asoc.peer_vtag; 4408 vrf_id = stcb->asoc.vrf_id; 4409 if (op_err != NULL) { 4410 /* Read the cause code from the error cause. 
*/ 4411 cause = mtod(op_err, struct sctp_gen_error_cause *); 4412 cause_code = ntohs(cause->code); 4413 } else { 4414 cause_code = 0; 4415 } 4416 } else { 4417 vtag = 0; 4418 } 4419 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4420 mflowtype, mflowid, inp->fibnum, 4421 vrf_id, port); 4422 if (stcb != NULL) { 4423 /* We have a TCB to abort, send notification too */ 4424 sctp_abort_notification(stcb, false, false, cause_code, NULL, SCTP_SO_NOT_LOCKED); 4425 /* Ok, now lets free it */ 4426 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4427 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4428 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4429 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4430 } 4431 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4432 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4433 } 4434 } 4435 #ifdef SCTP_ASOCLOG_OF_TSNS 4436 void 4437 sctp_print_out_track_log(struct sctp_tcb *stcb) 4438 { 4439 #ifdef NOSIY_PRINTS 4440 int i; 4441 4442 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4443 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4444 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4445 SCTP_PRINTF("None rcvd\n"); 4446 goto none_in; 4447 } 4448 if (stcb->asoc.tsn_in_wrapped) { 4449 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4450 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4451 stcb->asoc.in_tsnlog[i].tsn, 4452 stcb->asoc.in_tsnlog[i].strm, 4453 stcb->asoc.in_tsnlog[i].seq, 4454 stcb->asoc.in_tsnlog[i].flgs, 4455 stcb->asoc.in_tsnlog[i].sz); 4456 } 4457 } 4458 if (stcb->asoc.tsn_in_at) { 4459 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4460 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4461 stcb->asoc.in_tsnlog[i].tsn, 4462 stcb->asoc.in_tsnlog[i].strm, 4463 stcb->asoc.in_tsnlog[i].seq, 4464 stcb->asoc.in_tsnlog[i].flgs, 4465 stcb->asoc.in_tsnlog[i].sz); 4466 } 4467 } 4468 none_in: 4469 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4470 if ((stcb->asoc.tsn_out_at == 0) && 4471 (stcb->asoc.tsn_out_wrapped == 0)) { 4472 SCTP_PRINTF("None sent\n"); 4473 } 4474 if (stcb->asoc.tsn_out_wrapped) { 4475 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4476 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4477 stcb->asoc.out_tsnlog[i].tsn, 4478 stcb->asoc.out_tsnlog[i].strm, 4479 stcb->asoc.out_tsnlog[i].seq, 4480 stcb->asoc.out_tsnlog[i].flgs, 4481 stcb->asoc.out_tsnlog[i].sz); 4482 } 4483 } 4484 if (stcb->asoc.tsn_out_at) { 4485 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4486 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4487 stcb->asoc.out_tsnlog[i].tsn, 4488 stcb->asoc.out_tsnlog[i].strm, 4489 stcb->asoc.out_tsnlog[i].seq, 4490 stcb->asoc.out_tsnlog[i].flgs, 4491 stcb->asoc.out_tsnlog[i].sz); 4492 } 4493 } 4494 #endif 4495 } 4496 #endif 4497 4498 void 4499 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4500 struct mbuf *op_err, bool timedout, int so_locked) 4501 { 4502 struct sctp_gen_error_cause *cause; 4503 uint16_t cause_code; 4504 4505 if (stcb == NULL) { 4506 /* Got to have a TCB */ 4507 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4508 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4509 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4510 SCTP_CALLED_DIRECTLY_NOCMPSET); 4511 } 4512 } 4513 return; 4514 } 4515 if (op_err != NULL) { 4516 /* Read the cause code from the error cause. 
*/ 4517 cause = mtod(op_err, struct sctp_gen_error_cause *); 4518 cause_code = ntohs(cause->code); 4519 } else { 4520 cause_code = 0; 4521 } 4522 /* notify the peer */ 4523 sctp_send_abort_tcb(stcb, op_err, so_locked); 4524 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4525 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4526 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4527 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4528 } 4529 /* notify the ulp */ 4530 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4531 sctp_abort_notification(stcb, false, timedout, cause_code, NULL, so_locked); 4532 } 4533 /* now free the asoc */ 4534 #ifdef SCTP_ASOCLOG_OF_TSNS 4535 sctp_print_out_track_log(stcb); 4536 #endif 4537 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4538 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4539 } 4540 4541 void 4542 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4543 struct sockaddr *src, struct sockaddr *dst, 4544 struct sctphdr *sh, struct sctp_inpcb *inp, 4545 struct mbuf *cause, 4546 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4547 uint32_t vrf_id, uint16_t port) 4548 { 4549 struct sctp_chunkhdr *ch, chunk_buf; 4550 unsigned int chk_length; 4551 int contains_init_chunk; 4552 4553 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4554 /* Generate a TO address for future reference */ 4555 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4556 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4557 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4558 SCTP_CALLED_DIRECTLY_NOCMPSET); 4559 } 4560 } 4561 contains_init_chunk = 0; 4562 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4563 sizeof(*ch), (uint8_t *)&chunk_buf); 4564 while (ch != NULL) { 4565 chk_length = ntohs(ch->chunk_length); 4566 if (chk_length < sizeof(*ch)) { 4567 /* break to abort land */ 4568 break; 4569 } 4570 switch (ch->chunk_type) { 4571 case SCTP_INIT: 4572 contains_init_chunk = 1; 4573 break; 4574 case SCTP_PACKET_DROPPED: 4575 /* we don't respond to pkt-dropped */ 4576 return; 4577 case SCTP_ABORT_ASSOCIATION: 4578 /* we don't respond with an ABORT to an ABORT */ 4579 return; 4580 case SCTP_SHUTDOWN_COMPLETE: 4581 /* 4582 * we ignore it since we are not waiting for it and 4583 * peer is gone 4584 */ 4585 return; 4586 case SCTP_SHUTDOWN_ACK: 4587 sctp_send_shutdown_complete2(src, dst, sh, 4588 mflowtype, mflowid, fibnum, 4589 vrf_id, port); 4590 return; 4591 default: 4592 break; 4593 } 4594 offset += SCTP_SIZE32(chk_length); 4595 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4596 sizeof(*ch), (uint8_t *)&chunk_buf); 4597 } 4598 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4599 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4600 (contains_init_chunk == 0))) { 4601 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4602 mflowtype, mflowid, fibnum, 4603 vrf_id, port); 4604 } 4605 } 4606 4607 /* 4608 * check the inbound datagram to make sure there is not an abort inside it, 4609 * if there is return 1, else return 0. 
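* As a side effect, *vtag is refreshed from any INIT or INIT-ACK chunk seen while walking the packet, giving the caller the peer's initiate tag to use in a reply.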
4610 */ 4611 int 4612 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtag) 4613 { 4614 struct sctp_chunkhdr *ch; 4615 struct sctp_init_chunk *init_chk, chunk_buf; 4616 int offset; 4617 unsigned int chk_length; 4618 4619 offset = iphlen + sizeof(struct sctphdr); 4620 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4621 (uint8_t *)&chunk_buf); 4622 while (ch != NULL) { 4623 chk_length = ntohs(ch->chunk_length); 4624 if (chk_length < sizeof(*ch)) { 4625 /* packet is probably corrupt */ 4626 break; 4627 } 4628 /* we seem to be ok, is it an abort? */ 4629 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4630 /* yep, tell them */ 4631 return (1); 4632 } 4633 if ((ch->chunk_type == SCTP_INITIATION) || 4634 (ch->chunk_type == SCTP_INITIATION_ACK)) { 4635 /* need to update the Vtag */ 4636 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4637 offset, sizeof(struct sctp_init_chunk), (uint8_t *)&chunk_buf); 4638 if (init_chk != NULL) { 4639 *vtag = ntohl(init_chk->init.initiate_tag); 4640 } 4641 } 4642 /* Nope, move to the next chunk */ 4643 offset += SCTP_SIZE32(chk_length); 4644 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4645 sizeof(*ch), (uint8_t *)&chunk_buf); 4646 } 4647 return (0); 4648 } 4649 4650 /* 4651 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4652 * set (i.e. it's 0) so, create this function to compare link local scopes 4653 */ 4654 #ifdef INET6 4655 uint32_t 4656 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4657 { 4658 struct sockaddr_in6 a, b; 4659 4660 /* save copies */ 4661 a = *addr1; 4662 b = *addr2; 4663 4664 if (a.sin6_scope_id == 0) 4665 if (sa6_recoverscope(&a)) { 4666 /* can't get scope, so can't match */ 4667 return (0); 4668 } 4669 if (b.sin6_scope_id == 0) 4670 if (sa6_recoverscope(&b)) { 4671 /* can't get scope, so can't match */ 4672 return (0); 4673 } 4674 if (a.sin6_scope_id != b.sin6_scope_id) 4675 return (0); 4676 4677 return (1); 4678 } 4679 4680 /* 4681 * returns a sockaddr_in6 with embedded scope recovered and removed 4682 */ 4683 struct sockaddr_in6 * 4684 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4685 { 4686 /* check and strip embedded scope junk */ 4687 if (addr->sin6_family == AF_INET6) { 4688 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4689 if (addr->sin6_scope_id == 0) { 4690 *store = *addr; 4691 if (!sa6_recoverscope(store)) { 4692 /* use the recovered scope */ 4693 addr = store; 4694 } 4695 } else { 4696 /* else, return the original "to" addr */ 4697 in6_clearscope(&addr->sin6_addr); 4698 } 4699 } 4700 } 4701 return (addr); 4702 } 4703 #endif 4704 4705 /* 4706 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4707 * if same, 0 if not 4708 */ 4709 int 4710 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4711 { 4712 4713 /* must be valid */ 4714 if (sa1 == NULL || sa2 == NULL) 4715 return (0); 4716 4717 /* must be the same family */ 4718 if (sa1->sa_family != sa2->sa_family) 4719 return (0); 4720 4721 switch (sa1->sa_family) { 4722 #ifdef INET6 4723 case AF_INET6: 4724 { 4725 /* IPv6 addresses */ 4726 struct sockaddr_in6 *sin6_1, *sin6_2; 4727 4728 sin6_1 = (struct sockaddr_in6 *)sa1; 4729 sin6_2 = (struct sockaddr_in6 *)sa2; 4730 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4731 sin6_2)); 4732 } 4733 #endif 4734 #ifdef INET 4735 case AF_INET: 4736 { 4737 /* IPv4 addresses */ 4738 struct sockaddr_in *sin_1, *sin_2; 4739 4740 sin_1 = (struct sockaddr_in *)sa1; 4741 sin_2 = (struct sockaddr_in *)sa2; 4742 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4743 } 4744 #endif 4745 default: 4746 /* we don't do these... */ 4747 return (0); 4748 } 4749 } 4750 4751 void 4752 sctp_print_address(struct sockaddr *sa) 4753 { 4754 #ifdef INET6 4755 char ip6buf[INET6_ADDRSTRLEN]; 4756 #endif 4757 4758 switch (sa->sa_family) { 4759 #ifdef INET6 4760 case AF_INET6: 4761 { 4762 struct sockaddr_in6 *sin6; 4763 4764 sin6 = (struct sockaddr_in6 *)sa; 4765 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4766 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4767 ntohs(sin6->sin6_port), 4768 sin6->sin6_scope_id); 4769 break; 4770 } 4771 #endif 4772 #ifdef INET 4773 case AF_INET: 4774 { 4775 struct sockaddr_in *sin; 4776 unsigned char *p; 4777 4778 sin = (struct sockaddr_in *)sa; 4779 p = (unsigned char *)&sin->sin_addr; 4780 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4781 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4782 break; 4783 } 4784 #endif 4785 default: 4786 SCTP_PRINTF("?\n"); 4787 break; 4788 } 4789 } 4790 4791 void 4792 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4793 struct sctp_inpcb *new_inp, 4794 struct sctp_tcb *stcb, 4795 int waitflags) 4796 { 4797 /* 4798 * go through our old INP and pull off any control structures that 4799 * belong to stcb and move then to the new inp. 4800 */ 4801 struct socket *old_so, *new_so; 4802 struct sctp_queued_to_read *control, *nctl; 4803 struct sctp_readhead tmp_queue; 4804 struct mbuf *m; 4805 int error = 0; 4806 4807 old_so = old_inp->sctp_socket; 4808 new_so = new_inp->sctp_socket; 4809 TAILQ_INIT(&tmp_queue); 4810 error = SOCK_IO_RECV_LOCK(old_so, waitflags); 4811 if (error) { 4812 /* 4813 * Gak, can't get I/O lock, we have a problem. data will be 4814 * left stranded.. and we don't dare look at it since the 4815 * other thread may be reading something. Oh well, its a 4816 * screwed up app that does a peeloff OR a accept while 4817 * reading from the main socket... actually its only the 4818 * peeloff() case, since I think read will fail on a 4819 * listening socket.. 4820 */ 4821 return; 4822 } 4823 /* lock the socket buffers */ 4824 SCTP_INP_READ_LOCK(old_inp); 4825 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4826 /* Pull off all for out target stcb */ 4827 if (control->stcb == stcb) { 4828 /* remove it we want it */ 4829 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4830 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4831 m = control->data; 4832 while (m) { 4833 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4834 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4835 } 4836 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4837 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4838 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4839 } 4840 m = SCTP_BUF_NEXT(m); 4841 } 4842 } 4843 } 4844 SCTP_INP_READ_UNLOCK(old_inp); 4845 /* Remove the recv-lock on the old socket */ 4846 SOCK_IO_RECV_UNLOCK(old_so); 4847 /* Now we move them over to the new socket buffer */ 4848 SCTP_INP_READ_LOCK(new_inp); 4849 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4850 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4851 m = control->data; 4852 while (m) { 4853 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4854 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4855 } 4856 sctp_sballoc(stcb, &new_so->so_rcv, m); 4857 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4858 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4859 } 4860 m = SCTP_BUF_NEXT(m); 4861 } 4862 } 4863 SCTP_INP_READ_UNLOCK(new_inp); 4864 } 4865 4866 void 4867 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4868 struct sctp_tcb *stcb, 4869 int so_locked 4870 SCTP_UNUSED 4871 ) 4872 { 4873 if ((inp != NULL) && 4874 (inp->sctp_socket != NULL) && 4875 (((inp->sctp_flags & (SCTP_PCB_FLAGS_TCPTYPE | SCTP_PCB_FLAGS_IN_TCPPOOL)) == 0) || 4876 !SCTP_IS_LISTENING(inp))) { 4877 sctp_sorwakeup(inp, inp->sctp_socket); 4878 } 4879 } 4880 4881 void 4882 sctp_add_to_readq(struct sctp_inpcb *inp, 4883 struct sctp_tcb *stcb, 4884 struct sctp_queued_to_read *control, 4885 struct sockbuf *sb, 4886 int end, 4887 int inp_read_lock_held, 4888 int so_locked) 4889 { 4890 /* 4891 * Here we must place the control on the end of the socket read 4892 * queue AND increment sb_cc so that select will work properly on 4893 * read. 4894 */ 4895 struct mbuf *m, *prev = NULL; 4896 4897 if (inp == NULL) { 4898 /* Gak, TSNH!! */ 4899 #ifdef INVARIANTS 4900 panic("Gak, inp NULL on add_to_readq"); 4901 #endif 4902 return; 4903 } 4904 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4905 SCTP_INP_READ_LOCK(inp); 4906 } 4907 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4908 if (!control->on_strm_q) { 4909 sctp_free_remote_addr(control->whoFrom); 4910 if (control->data) { 4911 sctp_m_freem(control->data); 4912 control->data = NULL; 4913 } 4914 sctp_free_a_readq(stcb, control); 4915 } 4916 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4917 SCTP_INP_READ_UNLOCK(inp); 4918 } 4919 return; 4920 } 4921 if ((control->spec_flags & M_NOTIFICATION) == 0) { 4922 atomic_add_int(&inp->total_recvs, 1); 4923 if (!control->do_not_ref_stcb) { 4924 atomic_add_int(&stcb->total_recvs, 1); 4925 } 4926 } 4927 m = control->data; 4928 control->held_length = 0; 4929 control->length = 0; 4930 while (m != NULL) { 4931 if (SCTP_BUF_LEN(m) == 0) { 4932 /* Skip mbufs with NO length */ 4933 if (prev == NULL) { 4934 /* First one */ 4935 control->data = sctp_m_free(m); 4936 m = control->data; 4937 } else { 4938 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4939 m = SCTP_BUF_NEXT(prev); 4940 } 4941 if (m == NULL) { 4942 control->tail_mbuf = prev; 4943 } 4944 continue; 4945 } 4946 prev = m; 4947 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4948 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4949 } 4950 sctp_sballoc(stcb, sb, m); 4951 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4952 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4953 } 4954 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4955 m = SCTP_BUF_NEXT(m); 4956 } 4957 if (prev != NULL) { 4958 control->tail_mbuf = prev; 4959 } else { 4960 /* Everything got collapsed out?? */ 4961 if (!control->on_strm_q) { 4962 sctp_free_remote_addr(control->whoFrom); 4963 sctp_free_a_readq(stcb, control); 4964 } 4965 if (inp_read_lock_held == 0) 4966 SCTP_INP_READ_UNLOCK(inp); 4967 return; 4968 } 4969 if (end) { 4970 control->end_added = 1; 4971 } 4972 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4973 control->on_read_q = 1; 4974 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4975 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4976 } 4977 if (inp_read_lock_held == SCTP_READ_LOCK_NOT_HELD) { 4978 SCTP_INP_READ_UNLOCK(inp); 4979 } 4980 } 4981 4982 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4983 *************ALTERNATE ROUTING CODE 4984 */ 4985 4986 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4987 *************ALTERNATE ROUTING CODE 4988 */ 4989 4990 struct mbuf * 4991 sctp_generate_cause(uint16_t code, char *info) 4992 { 4993 struct mbuf *m; 4994 struct sctp_gen_error_cause *cause; 4995 size_t info_len; 4996 uint16_t len; 4997 4998 if ((code == 0) || (info == NULL)) { 4999 return (NULL); 5000 } 5001 info_len = strlen(info); 5002 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 5003 return (NULL); 5004 } 5005 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 5006 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 5007 if (m != NULL) { 5008 SCTP_BUF_LEN(m) = len; 5009 cause = mtod(m, struct sctp_gen_error_cause *); 5010 cause->code = htons(code); 5011 cause->length = htons(len); 5012 memcpy(cause->info, info, info_len); 5013 } 5014 return (m); 5015 } 5016 5017 struct mbuf * 5018 sctp_generate_no_user_data_cause(uint32_t tsn) 5019 { 5020 struct mbuf *m; 5021 struct sctp_error_no_user_data *no_user_data_cause; 5022 uint16_t len; 5023 5024 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 5025 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 5026 if (m != NULL) { 5027 SCTP_BUF_LEN(m) = len; 5028 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 5029 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 5030 no_user_data_cause->cause.length = htons(len); 5031 no_user_data_cause->tsn = htonl(tsn); 5032 } 5033 return (m); 5034 } 5035 5036 void 5037 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 5038 struct sctp_tmit_chunk *tp1, int chk_cnt) 5039 { 5040 if (tp1->data == NULL) { 5041 return; 5042 } 5043 atomic_subtract_int(&asoc->chunks_on_out_queue, chk_cnt); 5044 #ifdef SCTP_MBCNT_LOGGING 5045 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 5046 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 5047 asoc->total_output_queue_size, 5048 tp1->book_size, 5049 0, 5050 tp1->mbcnt); 5051 } 5052 #endif 5053 if (asoc->total_output_queue_size >= tp1->book_size) { 5054 atomic_subtract_int(&asoc->total_output_queue_size, tp1->book_size); 5055 } else { 5056 asoc->total_output_queue_size = 0; 5057 } 5058 if ((stcb->sctp_socket != NULL) && 5059 (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 5060 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 5061 
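/* For 1-to-1 style (TCP model) sockets, give the booked bytes back to the socket send buffer accounting as well. */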
SCTP_SB_DECR(&stcb->sctp_socket->so_snd, tp1->book_size); 5062 } 5063 } 5064 5065 int 5066 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5067 uint8_t sent, int so_locked) 5068 { 5069 struct sctp_stream_out *strq; 5070 struct sctp_tmit_chunk *chk = NULL, *tp2; 5071 struct sctp_stream_queue_pending *sp; 5072 uint32_t mid; 5073 uint16_t sid; 5074 uint8_t foundeom = 0; 5075 int ret_sz = 0; 5076 int notdone; 5077 int do_wakeup_routine = 0; 5078 5079 SCTP_TCB_LOCK_ASSERT(stcb); 5080 5081 sid = tp1->rec.data.sid; 5082 mid = tp1->rec.data.mid; 5083 if (sent || ((tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0)) { 5084 stcb->asoc.abandoned_sent[0]++; 5085 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5086 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5087 #if defined(SCTP_DETAILED_STR_STATS) 5088 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5089 #endif 5090 } else { 5091 stcb->asoc.abandoned_unsent[0]++; 5092 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5093 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5094 #if defined(SCTP_DETAILED_STR_STATS) 5095 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5096 #endif 5097 } 5098 do { 5099 ret_sz += tp1->book_size; 5100 if (tp1->data != NULL) { 5101 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5102 sctp_flight_size_decrease(tp1); 5103 sctp_total_flight_decrease(stcb, tp1); 5104 } 5105 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5106 stcb->asoc.peers_rwnd += tp1->send_size; 5107 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5108 if (sent) { 5109 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5110 } else { 5111 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5112 } 5113 if (tp1->data) { 5114 sctp_m_freem(tp1->data); 5115 tp1->data = NULL; 5116 } 5117 do_wakeup_routine = 1; 5118 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5119 stcb->asoc.sent_queue_cnt_removeable--; 5120 } 5121 } 5122 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5123 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5124 SCTP_DATA_NOT_FRAG) { 5125 /* not frag'ed we ae done */ 5126 notdone = 0; 5127 foundeom = 1; 5128 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5129 /* end of frag, we are done */ 5130 notdone = 0; 5131 foundeom = 1; 5132 } else { 5133 /* 5134 * Its a begin or middle piece, we must mark all of 5135 * it 5136 */ 5137 notdone = 1; 5138 tp1 = TAILQ_NEXT(tp1, sctp_next); 5139 } 5140 } while (tp1 && notdone); 5141 if (foundeom == 0) { 5142 /* 5143 * The multi-part message was scattered across the send and 5144 * sent queue. 5145 */ 5146 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5147 if ((tp1->rec.data.sid != sid) || 5148 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5149 break; 5150 } 5151 /* 5152 * save to chk in case we have some on stream out 5153 * queue. If so and we have an un-transmitted one we 5154 * don't have to fudge the TSN. 
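* If no such chunk exists, a placeholder marked SCTP_FORWARD_TSN_SKIP is allocated further below so the abandoned message still consumes a TSN.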
5155 */ 5156 chk = tp1; 5157 ret_sz += tp1->book_size; 5158 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5159 if (sent) { 5160 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5161 } else { 5162 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5163 } 5164 if (tp1->data) { 5165 sctp_m_freem(tp1->data); 5166 tp1->data = NULL; 5167 } 5168 /* No flight involved here book the size to 0 */ 5169 tp1->book_size = 0; 5170 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5171 foundeom = 1; 5172 } 5173 do_wakeup_routine = 1; 5174 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5175 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5176 /* 5177 * on to the sent queue so we can wait for it to be 5178 * passed by. 5179 */ 5180 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5181 sctp_next); 5182 stcb->asoc.send_queue_cnt--; 5183 stcb->asoc.sent_queue_cnt++; 5184 } 5185 } 5186 if (foundeom == 0) { 5187 /* 5188 * Still no eom found. That means there is stuff left on the 5189 * stream out queue.. yuck. 5190 */ 5191 strq = &stcb->asoc.strmout[sid]; 5192 sp = TAILQ_FIRST(&strq->outqueue); 5193 if (sp != NULL) { 5194 sp->discard_rest = 1; 5195 /* 5196 * We may need to put a chunk on the queue that 5197 * holds the TSN that would have been sent with the 5198 * LAST bit. 5199 */ 5200 if (chk == NULL) { 5201 /* Yep, we have to */ 5202 sctp_alloc_a_chunk(stcb, chk); 5203 if (chk == NULL) { 5204 /* 5205 * we are hosed. All we can do is 5206 * nothing.. which will cause an 5207 * abort if the peer is paying 5208 * attention. 5209 */ 5210 goto oh_well; 5211 } 5212 memset(chk, 0, sizeof(*chk)); 5213 chk->rec.data.rcv_flags = 0; 5214 chk->sent = SCTP_FORWARD_TSN_SKIP; 5215 chk->asoc = &stcb->asoc; 5216 if (stcb->asoc.idata_supported == 0) { 5217 if (sp->sinfo_flags & SCTP_UNORDERED) { 5218 chk->rec.data.mid = 0; 5219 } else { 5220 chk->rec.data.mid = strq->next_mid_ordered; 5221 } 5222 } else { 5223 if (sp->sinfo_flags & SCTP_UNORDERED) { 5224 chk->rec.data.mid = strq->next_mid_unordered; 5225 } else { 5226 chk->rec.data.mid = strq->next_mid_ordered; 5227 } 5228 } 5229 chk->rec.data.sid = sp->sid; 5230 chk->rec.data.ppid = sp->ppid; 5231 chk->rec.data.context = sp->context; 5232 chk->flags = sp->act_flags; 5233 chk->whoTo = NULL; 5234 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5235 strq->chunks_on_queues++; 5236 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5237 stcb->asoc.sent_queue_cnt++; 5238 stcb->asoc.pr_sctp_cnt++; 5239 } 5240 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5241 if (sp->sinfo_flags & SCTP_UNORDERED) { 5242 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5243 } 5244 if (stcb->asoc.idata_supported == 0) { 5245 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5246 strq->next_mid_ordered++; 5247 } 5248 } else { 5249 if (sp->sinfo_flags & SCTP_UNORDERED) { 5250 strq->next_mid_unordered++; 5251 } else { 5252 strq->next_mid_ordered++; 5253 } 5254 } 5255 oh_well: 5256 if (sp->data) { 5257 /* 5258 * Pull any data to free up the SB and allow 5259 * sender to "add more" while we will throw 5260 * away :-) 5261 */ 5262 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5263 ret_sz += sp->length; 5264 do_wakeup_routine = 1; 5265 sp->some_taken = 1; 5266 sctp_m_freem(sp->data); 5267 sp->data = NULL; 5268 sp->tail_mbuf = NULL; 5269 sp->length = 0; 5270 } 5271 } 5272 } 5273 if (do_wakeup_routine) { 5274 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5275 } 5276 return (ret_sz); 5277 } 5278 5279 /* 5280 * checks to see if the given address, 
sa, is one that is currently known by 5281 * the kernel note: can't distinguish the same address on multiple interfaces 5282 * and doesn't handle multiple addresses with different zone/scope id's note: 5283 * ifa_ifwithaddr() compares the entire sockaddr struct 5284 */ 5285 struct sctp_ifa * 5286 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5287 int holds_lock) 5288 { 5289 struct sctp_laddr *laddr; 5290 5291 if (holds_lock == 0) { 5292 SCTP_INP_RLOCK(inp); 5293 } 5294 5295 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5296 if (laddr->ifa == NULL) 5297 continue; 5298 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5299 continue; 5300 #ifdef INET 5301 if (addr->sa_family == AF_INET) { 5302 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5303 laddr->ifa->address.sin.sin_addr.s_addr) { 5304 /* found him. */ 5305 break; 5306 } 5307 } 5308 #endif 5309 #ifdef INET6 5310 if (addr->sa_family == AF_INET6) { 5311 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5312 &laddr->ifa->address.sin6)) { 5313 /* found him. */ 5314 break; 5315 } 5316 } 5317 #endif 5318 } 5319 if (holds_lock == 0) { 5320 SCTP_INP_RUNLOCK(inp); 5321 } 5322 if (laddr != NULL) { 5323 return (laddr->ifa); 5324 } else { 5325 return (NULL); 5326 } 5327 } 5328 5329 uint32_t 5330 sctp_get_ifa_hash_val(struct sockaddr *addr) 5331 { 5332 switch (addr->sa_family) { 5333 #ifdef INET 5334 case AF_INET: 5335 { 5336 struct sockaddr_in *sin; 5337 5338 sin = (struct sockaddr_in *)addr; 5339 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5340 } 5341 #endif 5342 #ifdef INET6 5343 case AF_INET6: 5344 { 5345 struct sockaddr_in6 *sin6; 5346 uint32_t hash_of_addr; 5347 5348 sin6 = (struct sockaddr_in6 *)addr; 5349 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5350 sin6->sin6_addr.s6_addr32[1] + 5351 sin6->sin6_addr.s6_addr32[2] + 5352 sin6->sin6_addr.s6_addr32[3]); 5353 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5354 return (hash_of_addr); 5355 } 5356 #endif 5357 default: 5358 break; 5359 } 5360 return (0); 5361 } 5362 5363 struct sctp_ifa * 5364 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5365 { 5366 struct sctp_ifa *sctp_ifap; 5367 struct sctp_vrf *vrf; 5368 struct sctp_ifalist *hash_head; 5369 uint32_t hash_of_addr; 5370 5371 if (holds_lock == 0) { 5372 SCTP_IPI_ADDR_RLOCK(); 5373 } else { 5374 SCTP_IPI_ADDR_LOCK_ASSERT(); 5375 } 5376 5377 vrf = sctp_find_vrf(vrf_id); 5378 if (vrf == NULL) { 5379 if (holds_lock == 0) 5380 SCTP_IPI_ADDR_RUNLOCK(); 5381 return (NULL); 5382 } 5383 5384 hash_of_addr = sctp_get_ifa_hash_val(addr); 5385 5386 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5387 if (hash_head == NULL) { 5388 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5389 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5390 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5391 sctp_print_address(addr); 5392 SCTP_PRINTF("No such bucket for address\n"); 5393 if (holds_lock == 0) 5394 SCTP_IPI_ADDR_RUNLOCK(); 5395 5396 return (NULL); 5397 } 5398 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5399 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5400 continue; 5401 #ifdef INET 5402 if (addr->sa_family == AF_INET) { 5403 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5404 sctp_ifap->address.sin.sin_addr.s_addr) { 5405 /* found him. 
*/ 5406 break; 5407 } 5408 } 5409 #endif 5410 #ifdef INET6 5411 if (addr->sa_family == AF_INET6) { 5412 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5413 &sctp_ifap->address.sin6)) { 5414 /* found him. */ 5415 break; 5416 } 5417 } 5418 #endif 5419 } 5420 if (holds_lock == 0) 5421 SCTP_IPI_ADDR_RUNLOCK(); 5422 return (sctp_ifap); 5423 } 5424 5425 static void 5426 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5427 uint32_t rwnd_req) 5428 { 5429 /* User pulled some data, do we need a rwnd update? */ 5430 struct epoch_tracker et; 5431 int r_unlocked = 0; 5432 uint32_t dif, rwnd; 5433 struct socket *so = NULL; 5434 5435 if (stcb == NULL) 5436 return; 5437 5438 atomic_add_int(&stcb->asoc.refcnt, 1); 5439 5440 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5441 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5442 /* Pre-check If we are freeing no update */ 5443 goto no_lock; 5444 } 5445 SCTP_INP_INCR_REF(stcb->sctp_ep); 5446 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5447 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5448 goto out; 5449 } 5450 so = stcb->sctp_socket; 5451 if (so == NULL) { 5452 goto out; 5453 } 5454 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5455 /* Have you have freed enough to look */ 5456 *freed_so_far = 0; 5457 /* Yep, its worth a look and the lock overhead */ 5458 5459 /* Figure out what the rwnd would be */ 5460 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5461 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5462 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5463 } else { 5464 dif = 0; 5465 } 5466 if (dif >= rwnd_req) { 5467 if (hold_rlock) { 5468 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5469 r_unlocked = 1; 5470 } 5471 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5472 /* 5473 * One last check before we allow the guy possibly 5474 * to get in. There is a race, where the guy has not 5475 * reached the gate. In that case 5476 */ 5477 goto out; 5478 } 5479 SCTP_TCB_LOCK(stcb); 5480 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5481 /* No reports here */ 5482 SCTP_TCB_UNLOCK(stcb); 5483 goto out; 5484 } 5485 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5486 NET_EPOCH_ENTER(et); 5487 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5488 5489 sctp_chunk_output(stcb->sctp_ep, stcb, 5490 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5491 /* make sure no timer is running */ 5492 NET_EPOCH_EXIT(et); 5493 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5494 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5495 SCTP_TCB_UNLOCK(stcb); 5496 } else { 5497 /* Update how much we have pending */ 5498 stcb->freed_by_sorcv_sincelast = dif; 5499 } 5500 out: 5501 if (so && r_unlocked && hold_rlock) { 5502 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5503 } 5504 5505 SCTP_INP_DECR_REF(stcb->sctp_ep); 5506 no_lock: 5507 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5508 return; 5509 } 5510 5511 int 5512 sctp_sorecvmsg(struct socket *so, 5513 struct uio *uio, 5514 struct mbuf **mp, 5515 struct sockaddr *from, 5516 int fromlen, 5517 int *msg_flags, 5518 struct sctp_sndrcvinfo *sinfo, 5519 int filling_sinfo) 5520 { 5521 /* 5522 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5523 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5524 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
5525 * On the way out we may send out any combination of: 5526 * MSG_NOTIFICATION MSG_EOR 5527 * 5528 */ 5529 struct sctp_inpcb *inp = NULL; 5530 ssize_t my_len = 0; 5531 ssize_t cp_len = 0; 5532 int error = 0; 5533 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5534 struct mbuf *m = NULL; 5535 struct sctp_tcb *stcb = NULL; 5536 int wakeup_read_socket = 0; 5537 int freecnt_applied = 0; 5538 int out_flags = 0, in_flags = 0; 5539 int block_allowed = 1; 5540 uint32_t freed_so_far = 0; 5541 ssize_t copied_so_far = 0; 5542 int in_eeor_mode = 0; 5543 int no_rcv_needed = 0; 5544 uint32_t rwnd_req = 0; 5545 int hold_sblock = 0; 5546 int hold_rlock = 0; 5547 ssize_t slen = 0; 5548 uint32_t held_length = 0; 5549 int sockbuf_lock = 0; 5550 5551 if (uio == NULL) { 5552 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5553 return (EINVAL); 5554 } 5555 5556 if (msg_flags) { 5557 in_flags = *msg_flags; 5558 if (in_flags & MSG_PEEK) 5559 SCTP_STAT_INCR(sctps_read_peeks); 5560 } else { 5561 in_flags = 0; 5562 } 5563 slen = uio->uio_resid; 5564 5565 /* Pull in and set up our int flags */ 5566 if (in_flags & MSG_OOB) { 5567 /* Out of band's NOT supported */ 5568 return (EOPNOTSUPP); 5569 } 5570 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5571 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5572 return (EINVAL); 5573 } 5574 if ((in_flags & (MSG_DONTWAIT 5575 | MSG_NBIO 5576 )) || 5577 SCTP_SO_IS_NBIO(so)) { 5578 block_allowed = 0; 5579 } 5580 /* setup the endpoint */ 5581 inp = (struct sctp_inpcb *)so->so_pcb; 5582 if (inp == NULL) { 5583 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5584 return (EFAULT); 5585 } 5586 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5587 /* Must be at least a MTU's worth */ 5588 if (rwnd_req < SCTP_MIN_RWND) 5589 rwnd_req = SCTP_MIN_RWND; 5590 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5591 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5592 sctp_misc_ints(SCTP_SORECV_ENTER, 5593 rwnd_req, in_eeor_mode, SCTP_SBAVAIL(&so->so_rcv), (uint32_t)uio->uio_resid); 5594 } 5595 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5596 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5597 rwnd_req, block_allowed, SCTP_SBAVAIL(&so->so_rcv), (uint32_t)uio->uio_resid); 5598 } 5599 5600 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(in_flags)); 5601 if (error) { 5602 goto release_unlocked; 5603 } 5604 sockbuf_lock = 1; 5605 restart: 5606 5607 restart_nosblocks: 5608 if (hold_sblock == 0) { 5609 SOCKBUF_LOCK(&so->so_rcv); 5610 hold_sblock = 1; 5611 } 5612 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5613 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5614 goto out; 5615 } 5616 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && SCTP_SBAVAIL(&so->so_rcv) == 0) { 5617 if (so->so_error) { 5618 error = so->so_error; 5619 if ((in_flags & MSG_PEEK) == 0) 5620 so->so_error = 0; 5621 goto out; 5622 } else { 5623 if (SCTP_SBAVAIL(&so->so_rcv) == 0) { 5624 /* indicate EOF */ 5625 error = 0; 5626 goto out; 5627 } 5628 } 5629 } 5630 if (SCTP_SBAVAIL(&so->so_rcv) <= held_length) { 5631 if (so->so_error) { 5632 error = so->so_error; 5633 if ((in_flags & MSG_PEEK) == 0) { 5634 so->so_error = 0; 5635 } 5636 goto out; 5637 } 5638 if ((SCTP_SBAVAIL(&so->so_rcv) == 0) && 5639 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5640 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5641 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5642 /* 5643 * 
For the active open side, clear flags for 5644 * re-use; the passive open is blocked by 5645 * connect. 5646 */ 5647 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5648 /* 5649 * You were aborted, passive side 5650 * always hits here 5651 */ 5652 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5653 error = ECONNRESET; 5654 } 5655 so->so_state &= ~(SS_ISCONNECTING | 5656 SS_ISDISCONNECTING | 5657 SS_ISCONFIRMING | 5658 SS_ISCONNECTED); 5659 if (error == 0) { 5660 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5661 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5662 error = ENOTCONN; 5663 } 5664 } 5665 goto out; 5666 } 5667 } 5668 if (block_allowed) { 5669 error = sbwait(so, SO_RCV); 5670 if (error) { 5671 goto out; 5672 } 5673 held_length = 0; 5674 goto restart_nosblocks; 5675 } else { 5676 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5677 error = EWOULDBLOCK; 5678 goto out; 5679 } 5680 } 5681 if (hold_sblock == 1) { 5682 SOCKBUF_UNLOCK(&so->so_rcv); 5683 hold_sblock = 0; 5684 } 5685 /* we possibly have data we can read */ 5686 /* sa_ignore FREED_MEMORY */ 5687 control = TAILQ_FIRST(&inp->read_queue); 5688 if (control == NULL) { 5689 /* 5690 * This could be happening since the appender did the 5691 * increment but has not yet done the tailq insert onto the 5692 * read_queue 5693 */ 5694 if (hold_rlock == 0) { 5695 SCTP_INP_READ_LOCK(inp); 5696 } 5697 control = TAILQ_FIRST(&inp->read_queue); 5698 if ((control == NULL) && (SCTP_SBAVAIL(&so->so_rcv) > 0)) { 5699 #ifdef INVARIANTS 5700 panic("Huh, its non zero and nothing on control?"); 5701 #endif 5702 SCTP_SB_CLEAR(so->so_rcv); 5703 } 5704 SCTP_INP_READ_UNLOCK(inp); 5705 hold_rlock = 0; 5706 goto restart; 5707 } 5708 5709 if ((control->length == 0) && 5710 (control->do_not_ref_stcb)) { 5711 /* 5712 * Clean-up code for freeing an assoc that left behind a 5713 * pdapi.. maybe a peer in EEOR mode that just closed after 5714 * sending and never indicated an EOR. 5715 */ 5716 if (hold_rlock == 0) { 5717 hold_rlock = 1; 5718 SCTP_INP_READ_LOCK(inp); 5719 } 5720 control->held_length = 0; 5721 if (control->data) { 5722 /* Hmm, there is data here .. fix it up */ 5723 struct mbuf *m_tmp; 5724 int cnt = 0; 5725 5726 m_tmp = control->data; 5727 while (m_tmp) { 5728 cnt += SCTP_BUF_LEN(m_tmp); 5729 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5730 control->tail_mbuf = m_tmp; 5731 control->end_added = 1; 5732 } 5733 m_tmp = SCTP_BUF_NEXT(m_tmp); 5734 } 5735 control->length = cnt; 5736 } else { 5737 /* remove it */ 5738 TAILQ_REMOVE(&inp->read_queue, control, next); 5739 /* Add back any hidden data */ 5740 sctp_free_remote_addr(control->whoFrom); 5741 sctp_free_a_readq(stcb, control); 5742 } 5743 if (hold_rlock) { 5744 hold_rlock = 0; 5745 SCTP_INP_READ_UNLOCK(inp); 5746 } 5747 goto restart; 5748 } 5749 if ((control->length == 0) && 5750 (control->end_added == 1)) { 5751 /* 5752 * Do we also need to check for (control->pdapi_aborted == 5753 * 1)? 5754 */ 5755 if (hold_rlock == 0) { 5756 hold_rlock = 1; 5757 SCTP_INP_READ_LOCK(inp); 5758 } 5759 TAILQ_REMOVE(&inp->read_queue, control, next); 5760 if (control->data) { 5761 #ifdef INVARIANTS 5762 panic("control->data not null but control->length == 0"); 5763 #else 5764 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n"); 5765 sctp_m_freem(control->data); 5766 control->data = NULL; 5767 #endif 5768 } 5769 if (control->aux_data) { 5770 sctp_m_free(control->aux_data); 5771 control->aux_data = NULL; 5772 } 5773 #ifdef INVARIANTS 5774 if (control->on_strm_q) { 5775 panic("About to free ctl:%p so:%p and its in %d", 5776 control, so, control->on_strm_q); 5777 } 5778 #endif 5779 sctp_free_remote_addr(control->whoFrom); 5780 sctp_free_a_readq(stcb, control); 5781 if (hold_rlock) { 5782 hold_rlock = 0; 5783 SCTP_INP_READ_UNLOCK(inp); 5784 } 5785 goto restart; 5786 } 5787 if (control->length == 0) { 5788 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5789 (filling_sinfo)) { 5790 /* find a more suitable one than this */ 5791 ctl = TAILQ_NEXT(control, next); 5792 while (ctl) { 5793 if ((ctl->stcb != control->stcb) && (ctl->length) && 5794 (ctl->some_taken || 5795 (ctl->spec_flags & M_NOTIFICATION) || 5796 ((ctl->do_not_ref_stcb == 0) && 5797 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5798 ) { 5799 /*- 5800 * If we have a different TCB next, and there is data 5801 * present, and we have already taken some (pdapi), OR we can 5802 * ref the tcb and no delivery has started on this stream, we 5803 * take it. Note we allow a notification on a different 5804 * assoc to be delivered.. 5805 */ 5806 control = ctl; 5807 goto found_one; 5808 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5809 (ctl->length) && 5810 ((ctl->some_taken) || 5811 ((ctl->do_not_ref_stcb == 0) && 5812 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5813 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5814 /*- 5815 * If we have the same tcb, and there is data present, and we 5816 * have the strm interleave feature present, then if we have 5817 * taken some (pdapi) or we can refer to that tcb AND we have 5818 * not started a delivery for this stream, we can take it. 5819 * Note we do NOT allow a notification on the same assoc to 5820 * be delivered. 5821 */ 5822 control = ctl; 5823 goto found_one; 5824 } 5825 ctl = TAILQ_NEXT(ctl, next); 5826 } 5827 } 5828 /* 5829 * if we reach here, no suitable replacement is available 5830 * <or> fragment interleave is NOT on. So stuff the sb_cc 5831 * into our held count, and it's time to sleep again. 5832 */ 5833 held_length = SCTP_SBAVAIL(&so->so_rcv); 5834 control->held_length = SCTP_SBAVAIL(&so->so_rcv); 5835 goto restart; 5836 } 5837 /* Clear the held length since there is something to read */ 5838 control->held_length = 0; 5839 found_one: 5840 /* 5841 * If we reach here, control has some data for us to read off. 5842 * Note that stcb COULD be NULL. 5843 */ 5844 if (hold_rlock == 0) { 5845 hold_rlock = 1; 5846 SCTP_INP_READ_LOCK(inp); 5847 } 5848 control->some_taken++; 5849 stcb = control->stcb; 5850 if (stcb) { 5851 if ((control->do_not_ref_stcb == 0) && 5852 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5853 if (freecnt_applied == 0) 5854 stcb = NULL; 5855 } else if (control->do_not_ref_stcb == 0) { 5856 /* you can't free it on me please */ 5857 /* 5858 * The lock on the socket buffer protects us so the 5859 * free code will stop.
But since we used the 5860 * socketbuf lock and the sender uses the tcb_lock 5861 * to increment, we need to use the atomic add to 5862 * the refcnt 5863 */ 5864 if (freecnt_applied) { 5865 #ifdef INVARIANTS 5866 panic("refcnt already incremented"); 5867 #else 5868 SCTP_PRINTF("refcnt already incremented?\n"); 5869 #endif 5870 } else { 5871 atomic_add_int(&stcb->asoc.refcnt, 1); 5872 freecnt_applied = 1; 5873 } 5874 /* 5875 * Setup to remember how much we have not yet told 5876 * the peer our rwnd has opened up. Note we grab the 5877 * value from the tcb from last time. Note too that 5878 * sack sending clears this when a sack is sent, 5879 * which is fine. Once we hit the rwnd_req, we then 5880 * will go to the sctp_user_rcvd() that will not 5881 * lock until it KNOWs it MUST send a WUP-SACK. 5882 */ 5883 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5884 stcb->freed_by_sorcv_sincelast = 0; 5885 } 5886 } 5887 if (stcb && 5888 ((control->spec_flags & M_NOTIFICATION) == 0) && 5889 control->do_not_ref_stcb == 0) { 5890 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5891 } 5892 5893 /* First lets get off the sinfo and sockaddr info */ 5894 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5895 sinfo->sinfo_stream = control->sinfo_stream; 5896 sinfo->sinfo_ssn = (uint16_t)control->mid; 5897 sinfo->sinfo_flags = control->sinfo_flags; 5898 sinfo->sinfo_ppid = control->sinfo_ppid; 5899 sinfo->sinfo_context = control->sinfo_context; 5900 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5901 sinfo->sinfo_tsn = control->sinfo_tsn; 5902 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5903 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5904 nxt = TAILQ_NEXT(control, next); 5905 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5906 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5907 struct sctp_extrcvinfo *s_extra; 5908 5909 s_extra = (struct sctp_extrcvinfo *)sinfo; 5910 if ((nxt) && 5911 (nxt->length)) { 5912 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5913 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5914 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5915 } 5916 if (nxt->spec_flags & M_NOTIFICATION) { 5917 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5918 } 5919 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5920 s_extra->serinfo_next_length = nxt->length; 5921 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5922 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5923 if (nxt->tail_mbuf != NULL) { 5924 if (nxt->end_added) { 5925 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5926 } 5927 } 5928 } else { 5929 /* 5930 * we explicitly 0 this, since the memcpy 5931 * got some other things beyond the older 5932 * sinfo_ that is on the control's structure 5933 * :-D 5934 */ 5935 nxt = NULL; 5936 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5937 s_extra->serinfo_next_aid = 0; 5938 s_extra->serinfo_next_length = 0; 5939 s_extra->serinfo_next_ppid = 0; 5940 s_extra->serinfo_next_stream = 0; 5941 } 5942 } 5943 /* 5944 * update off the real current cum-ack, if we have an stcb. 5945 */ 5946 if ((control->do_not_ref_stcb == 0) && stcb) 5947 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5948 /* 5949 * mask off the high bits, we keep the actual chunk bits in 5950 * there. 
5951 */ 5952 sinfo->sinfo_flags &= 0x00ff; 5953 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5954 sinfo->sinfo_flags |= SCTP_UNORDERED; 5955 } 5956 } 5957 #ifdef SCTP_ASOCLOG_OF_TSNS 5958 { 5959 int index, newindex; 5960 struct sctp_pcbtsn_rlog *entry; 5961 5962 do { 5963 index = inp->readlog_index; 5964 newindex = index + 1; 5965 if (newindex >= SCTP_READ_LOG_SIZE) { 5966 newindex = 0; 5967 } 5968 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5969 entry = &inp->readlog[index]; 5970 entry->vtag = control->sinfo_assoc_id; 5971 entry->strm = control->sinfo_stream; 5972 entry->seq = (uint16_t)control->mid; 5973 entry->sz = control->length; 5974 entry->flgs = control->sinfo_flags; 5975 } 5976 #endif 5977 if ((fromlen > 0) && (from != NULL)) { 5978 union sctp_sockstore store; 5979 size_t len; 5980 5981 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5982 #ifdef INET6 5983 case AF_INET6: 5984 len = sizeof(struct sockaddr_in6); 5985 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5986 store.sin6.sin6_port = control->port_from; 5987 break; 5988 #endif 5989 #ifdef INET 5990 case AF_INET: 5991 #ifdef INET6 5992 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5993 len = sizeof(struct sockaddr_in6); 5994 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5995 &store.sin6); 5996 store.sin6.sin6_port = control->port_from; 5997 } else { 5998 len = sizeof(struct sockaddr_in); 5999 store.sin = control->whoFrom->ro._l_addr.sin; 6000 store.sin.sin_port = control->port_from; 6001 } 6002 #else 6003 len = sizeof(struct sockaddr_in); 6004 store.sin = control->whoFrom->ro._l_addr.sin; 6005 store.sin.sin_port = control->port_from; 6006 #endif 6007 break; 6008 #endif 6009 default: 6010 len = 0; 6011 break; 6012 } 6013 memcpy(from, &store, min((size_t)fromlen, len)); 6014 #ifdef INET6 6015 { 6016 struct sockaddr_in6 lsa6, *from6; 6017 6018 from6 = (struct sockaddr_in6 *)from; 6019 sctp_recover_scope_mac(from6, (&lsa6)); 6020 } 6021 #endif 6022 } 6023 if (hold_rlock) { 6024 SCTP_INP_READ_UNLOCK(inp); 6025 hold_rlock = 0; 6026 } 6027 if (hold_sblock) { 6028 SOCKBUF_UNLOCK(&so->so_rcv); 6029 hold_sblock = 0; 6030 } 6031 /* now copy out what data we can */ 6032 if (mp == NULL) { 6033 /* copy out each mbuf in the chain up to length */ 6034 get_more_data: 6035 m = control->data; 6036 while (m) { 6037 /* Move out all we can */ 6038 cp_len = uio->uio_resid; 6039 my_len = SCTP_BUF_LEN(m); 6040 if (cp_len > my_len) { 6041 /* not enough in this buf */ 6042 cp_len = my_len; 6043 } 6044 if (hold_rlock) { 6045 SCTP_INP_READ_UNLOCK(inp); 6046 hold_rlock = 0; 6047 } 6048 if (cp_len > 0) 6049 error = uiomove(mtod(m, char *), (int)cp_len, uio); 6050 /* re-read */ 6051 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 6052 goto release; 6053 } 6054 6055 if ((control->do_not_ref_stcb == 0) && stcb && 6056 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 6057 no_rcv_needed = 1; 6058 } 6059 if (error) { 6060 /* error we are out of here */ 6061 goto release; 6062 } 6063 SCTP_INP_READ_LOCK(inp); 6064 hold_rlock = 1; 6065 if (cp_len == SCTP_BUF_LEN(m)) { 6066 if ((SCTP_BUF_NEXT(m) == NULL) && 6067 (control->end_added)) { 6068 out_flags |= MSG_EOR; 6069 if ((control->do_not_ref_stcb == 0) && 6070 (control->stcb != NULL) && 6071 ((control->spec_flags & M_NOTIFICATION) == 0)) 6072 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6073 } 6074 if (control->spec_flags & M_NOTIFICATION) { 6075 out_flags |= MSG_NOTIFICATION; 6076 } 6077 /* we ate up the mbuf */ 
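/* The whole mbuf has been consumed: when peeking we simply step to the next mbuf, otherwise we unlink it from the receive socket buffer, credit the freed space towards a possible window update, and advance control->data to the next mbuf in the chain. */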
6078 if (in_flags & MSG_PEEK) { 6079 /* just looking */ 6080 m = SCTP_BUF_NEXT(m); 6081 copied_so_far += cp_len; 6082 } else { 6083 /* dispose of the mbuf */ 6084 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6085 sctp_sblog(&so->so_rcv, 6086 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6087 } 6088 sctp_sbfree(control, stcb, &so->so_rcv, m); 6089 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6090 sctp_sblog(&so->so_rcv, 6091 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6092 } 6093 copied_so_far += cp_len; 6094 freed_so_far += (uint32_t)cp_len; 6095 freed_so_far += MSIZE; 6096 atomic_subtract_int(&control->length, (int)cp_len); 6097 control->data = sctp_m_free(m); 6098 m = control->data; 6099 /* 6100 * been through it all, must hold sb 6101 * lock ok to null tail 6102 */ 6103 if (control->data == NULL) { 6104 #ifdef INVARIANTS 6105 if ((control->end_added == 0) || 6106 (TAILQ_NEXT(control, next) == NULL)) { 6107 /* 6108 * If the end is not 6109 * added, OR the 6110 * next is NOT null 6111 * we MUST have the 6112 * lock. 6113 */ 6114 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6115 panic("Hmm we don't own the lock?"); 6116 } 6117 } 6118 #endif 6119 control->tail_mbuf = NULL; 6120 #ifdef INVARIANTS 6121 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6122 panic("end_added, nothing left and no MSG_EOR"); 6123 } 6124 #endif 6125 } 6126 } 6127 } else { 6128 /* Do we need to trim the mbuf? */ 6129 if (control->spec_flags & M_NOTIFICATION) { 6130 out_flags |= MSG_NOTIFICATION; 6131 } 6132 if ((in_flags & MSG_PEEK) == 0) { 6133 SCTP_BUF_RESV_UF(m, cp_len); 6134 SCTP_BUF_LEN(m) -= (int)cp_len; 6135 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6136 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6137 } 6138 SCTP_SB_DECR(&so->so_rcv, cp_len); 6139 if ((control->do_not_ref_stcb == 0) && 6140 stcb) { 6141 atomic_subtract_int(&stcb->asoc.sb_cc, (int)cp_len); 6142 } 6143 copied_so_far += cp_len; 6144 freed_so_far += (uint32_t)cp_len; 6145 freed_so_far += MSIZE; 6146 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6147 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6148 SCTP_LOG_SBRESULT, 0); 6149 } 6150 atomic_subtract_int(&control->length, (int)cp_len); 6151 } else { 6152 copied_so_far += cp_len; 6153 } 6154 } 6155 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6156 break; 6157 } 6158 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6159 (control->do_not_ref_stcb == 0) && 6160 (freed_so_far >= rwnd_req)) { 6161 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6162 } 6163 } /* end while(m) */ 6164 /* 6165 * At this point we have looked at it all and we either have 6166 * a MSG_EOR/or read all the user wants... <OR> 6167 * control->length == 0. 6168 */ 6169 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6170 /* we are done with this control */ 6171 if (control->length == 0) { 6172 if (control->data) { 6173 #ifdef INVARIANTS 6174 panic("control->data not null at read eor?"); 6175 #else 6176 SCTP_PRINTF("Strange, data left in the control buffer .. 
invariants would panic?\n"); 6177 sctp_m_freem(control->data); 6178 control->data = NULL; 6179 #endif 6180 } 6181 done_with_control: 6182 if (hold_rlock == 0) { 6183 SCTP_INP_READ_LOCK(inp); 6184 hold_rlock = 1; 6185 } 6186 TAILQ_REMOVE(&inp->read_queue, control, next); 6187 /* Add back any hidden data */ 6188 if (control->held_length) { 6189 held_length = 0; 6190 control->held_length = 0; 6191 wakeup_read_socket = 1; 6192 } 6193 if (control->aux_data) { 6194 sctp_m_free(control->aux_data); 6195 control->aux_data = NULL; 6196 } 6197 no_rcv_needed = control->do_not_ref_stcb; 6198 sctp_free_remote_addr(control->whoFrom); 6199 control->data = NULL; 6200 #ifdef INVARIANTS 6201 if (control->on_strm_q) { 6202 panic("About to free ctl:%p so:%p and its in %d", 6203 control, so, control->on_strm_q); 6204 } 6205 #endif 6206 sctp_free_a_readq(stcb, control); 6207 control = NULL; 6208 if ((freed_so_far >= rwnd_req) && 6209 (no_rcv_needed == 0)) 6210 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6211 6212 } else { 6213 /* 6214 * The user did not read all of this 6215 * message, turn off the returned MSG_EOR 6216 * since we are leaving more behind on the 6217 * control to read. 6218 */ 6219 #ifdef INVARIANTS 6220 if (control->end_added && 6221 (control->data == NULL) && 6222 (control->tail_mbuf == NULL)) { 6223 panic("Gak, control->length is corrupt?"); 6224 } 6225 #endif 6226 no_rcv_needed = control->do_not_ref_stcb; 6227 out_flags &= ~MSG_EOR; 6228 } 6229 } 6230 if (out_flags & MSG_EOR) { 6231 goto release; 6232 } 6233 if ((uio->uio_resid == 0) || 6234 ((in_eeor_mode) && 6235 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6236 goto release; 6237 } 6238 /* 6239 * If I hit here the receiver wants more and this message is 6240 * NOT done (pd-api). So two questions. Can we block? if not 6241 * we are done. Did the user NOT set MSG_WAITALL? 6242 */ 6243 if (block_allowed == 0) { 6244 goto release; 6245 } 6246 /* 6247 * We need to wait for more data a few things: - We don't 6248 * release the I/O lock so we don't get someone else 6249 * reading. - We must be sure to account for the case where 6250 * what is added is NOT to our control when we wakeup. 6251 */ 6252 6253 /* 6254 * Do we need to tell the transport a rwnd update might be 6255 * needed before we go to sleep? 
6256 */ 6257 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6258 ((freed_so_far >= rwnd_req) && 6259 (control->do_not_ref_stcb == 0) && 6260 (no_rcv_needed == 0))) { 6261 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6262 } 6263 wait_some_more: 6264 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6265 goto release; 6266 } 6267 6268 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6269 goto release; 6270 6271 if (hold_rlock == 1) { 6272 SCTP_INP_READ_UNLOCK(inp); 6273 hold_rlock = 0; 6274 } 6275 if (hold_sblock == 0) { 6276 SOCKBUF_LOCK(&so->so_rcv); 6277 hold_sblock = 1; 6278 } 6279 if ((copied_so_far) && (control->length == 0) && 6280 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6281 goto release; 6282 } 6283 if (SCTP_SBAVAIL(&so->so_rcv) <= control->held_length) { 6284 error = sbwait(so, SO_RCV); 6285 if (error) { 6286 goto release; 6287 } 6288 control->held_length = 0; 6289 } 6290 if (hold_sblock) { 6291 SOCKBUF_UNLOCK(&so->so_rcv); 6292 hold_sblock = 0; 6293 } 6294 if (control->length == 0) { 6295 /* still nothing here */ 6296 if (control->end_added == 1) { 6297 /* he aborted, or is done i.e.did a shutdown */ 6298 out_flags |= MSG_EOR; 6299 if (control->pdapi_aborted) { 6300 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6301 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6302 6303 out_flags |= MSG_TRUNC; 6304 } else { 6305 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6306 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6307 } 6308 goto done_with_control; 6309 } 6310 if (SCTP_SBAVAIL(&so->so_rcv) > held_length) { 6311 control->held_length = SCTP_SBAVAIL(&so->so_rcv); 6312 held_length = 0; 6313 } 6314 goto wait_some_more; 6315 } else if (control->data == NULL) { 6316 /* 6317 * we must re-sync since data is probably being 6318 * added 6319 */ 6320 SCTP_INP_READ_LOCK(inp); 6321 if ((control->length > 0) && (control->data == NULL)) { 6322 /* 6323 * big trouble.. we have the lock and its 6324 * corrupt? 6325 */ 6326 #ifdef INVARIANTS 6327 panic("Impossible data==NULL length !=0"); 6328 #endif 6329 out_flags |= MSG_EOR; 6330 out_flags |= MSG_TRUNC; 6331 control->length = 0; 6332 SCTP_INP_READ_UNLOCK(inp); 6333 goto done_with_control; 6334 } 6335 SCTP_INP_READ_UNLOCK(inp); 6336 /* We will fall around to get more data */ 6337 } 6338 goto get_more_data; 6339 } else { 6340 /*- 6341 * Give caller back the mbuf chain, 6342 * store in uio_resid the length 6343 */ 6344 wakeup_read_socket = 0; 6345 if ((control->end_added == 0) || 6346 (TAILQ_NEXT(control, next) == NULL)) { 6347 /* Need to get rlock */ 6348 if (hold_rlock == 0) { 6349 SCTP_INP_READ_LOCK(inp); 6350 hold_rlock = 1; 6351 } 6352 } 6353 if (control->end_added) { 6354 out_flags |= MSG_EOR; 6355 if ((control->do_not_ref_stcb == 0) && 6356 (control->stcb != NULL) && 6357 ((control->spec_flags & M_NOTIFICATION) == 0)) 6358 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6359 } 6360 if (control->spec_flags & M_NOTIFICATION) { 6361 out_flags |= MSG_NOTIFICATION; 6362 } 6363 uio->uio_resid = control->length; 6364 *mp = control->data; 6365 m = control->data; 6366 while (m) { 6367 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6368 sctp_sblog(&so->so_rcv, 6369 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6370 } 6371 sctp_sbfree(control, stcb, &so->so_rcv, m); 6372 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6373 freed_so_far += MSIZE; 6374 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6375 sctp_sblog(&so->so_rcv, 6376 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6377 } 6378 m = SCTP_BUF_NEXT(m); 6379 } 6380 control->data = control->tail_mbuf = NULL; 6381 control->length = 0; 6382 if (out_flags & MSG_EOR) { 6383 /* Done with this control */ 6384 goto done_with_control; 6385 } 6386 } 6387 release: 6388 if (hold_rlock == 1) { 6389 SCTP_INP_READ_UNLOCK(inp); 6390 hold_rlock = 0; 6391 } 6392 if (hold_sblock == 1) { 6393 SOCKBUF_UNLOCK(&so->so_rcv); 6394 hold_sblock = 0; 6395 } 6396 6397 SOCK_IO_RECV_UNLOCK(so); 6398 sockbuf_lock = 0; 6399 6400 release_unlocked: 6401 if (hold_sblock) { 6402 SOCKBUF_UNLOCK(&so->so_rcv); 6403 hold_sblock = 0; 6404 } 6405 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6406 if ((freed_so_far >= rwnd_req) && 6407 (control && (control->do_not_ref_stcb == 0)) && 6408 (no_rcv_needed == 0)) 6409 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6410 } 6411 out: 6412 if (msg_flags) { 6413 *msg_flags = out_flags; 6414 } 6415 if (((out_flags & MSG_EOR) == 0) && 6416 ((in_flags & MSG_PEEK) == 0) && 6417 (sinfo) && 6418 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6419 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6420 struct sctp_extrcvinfo *s_extra; 6421 6422 s_extra = (struct sctp_extrcvinfo *)sinfo; 6423 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6424 } 6425 if (hold_rlock == 1) { 6426 SCTP_INP_READ_UNLOCK(inp); 6427 } 6428 if (hold_sblock) { 6429 SOCKBUF_UNLOCK(&so->so_rcv); 6430 } 6431 if (sockbuf_lock) { 6432 SOCK_IO_RECV_UNLOCK(so); 6433 } 6434 6435 if (freecnt_applied) { 6436 /* 6437 * The lock on the socket buffer protects us so the free 6438 * code will stop. But since we used the socketbuf lock and 6439 * the sender uses the tcb_lock to increment, we need to use 6440 * the atomic add to the refcnt. 6441 */ 6442 if (stcb == NULL) { 6443 #ifdef INVARIANTS 6444 panic("stcb for refcnt has gone NULL?"); 6445 goto stage_left; 6446 #else 6447 goto stage_left; 6448 #endif 6449 } 6450 /* Save the value back for next time */ 6451 stcb->freed_by_sorcv_sincelast = freed_so_far; 6452 atomic_subtract_int(&stcb->asoc.refcnt, 1); 6453 } 6454 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6455 if (stcb) { 6456 sctp_misc_ints(SCTP_SORECV_DONE, 6457 freed_so_far, 6458 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6459 stcb->asoc.my_rwnd, 6460 SCTP_SBAVAIL(&so->so_rcv)); 6461 } else { 6462 sctp_misc_ints(SCTP_SORECV_DONE, 6463 freed_so_far, 6464 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6465 0, 6466 SCTP_SBAVAIL(&so->so_rcv)); 6467 } 6468 } 6469 stage_left: 6470 if (wakeup_read_socket) { 6471 sctp_sorwakeup(inp, so); 6472 } 6473 return (error); 6474 } 6475 6476 #ifdef SCTP_MBUF_LOGGING 6477 struct mbuf * 6478 sctp_m_free(struct mbuf *m) 6479 { 6480 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6481 sctp_log_mb(m, SCTP_MBUF_IFREE); 6482 } 6483 return (m_free(m)); 6484 } 6485 6486 void 6487 sctp_m_freem(struct mbuf *mb) 6488 { 6489 while (mb != NULL) 6490 mb = sctp_m_free(mb); 6491 } 6492 6493 #endif 6494 6495 int 6496 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6497 { 6498 /* 6499 * Given a local address. 
For all associations that holds the 6500 * address, request a peer-set-primary. 6501 */ 6502 struct sctp_ifa *ifa; 6503 struct sctp_laddr *wi; 6504 6505 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6506 if (ifa == NULL) { 6507 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6508 return (EADDRNOTAVAIL); 6509 } 6510 /* 6511 * Now that we have the ifa we must awaken the iterator with this 6512 * message. 6513 */ 6514 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6515 if (wi == NULL) { 6516 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6517 return (ENOMEM); 6518 } 6519 /* Now incr the count and int wi structure */ 6520 SCTP_INCR_LADDR_COUNT(); 6521 memset(wi, 0, sizeof(*wi)); 6522 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6523 wi->ifa = ifa; 6524 wi->action = SCTP_SET_PRIM_ADDR; 6525 atomic_add_int(&ifa->refcount, 1); 6526 6527 /* Now add it to the work queue */ 6528 SCTP_WQ_ADDR_LOCK(); 6529 /* 6530 * Should this really be a tailq? As it is we will process the 6531 * newest first :-0 6532 */ 6533 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6534 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6535 (struct sctp_inpcb *)NULL, 6536 (struct sctp_tcb *)NULL, 6537 (struct sctp_nets *)NULL); 6538 SCTP_WQ_ADDR_UNLOCK(); 6539 return (0); 6540 } 6541 6542 int 6543 sctp_soreceive(struct socket *so, 6544 struct sockaddr **psa, 6545 struct uio *uio, 6546 struct mbuf **mp0, 6547 struct mbuf **controlp, 6548 int *flagsp) 6549 { 6550 int error, fromlen; 6551 uint8_t sockbuf[256]; 6552 struct sockaddr *from; 6553 struct sctp_extrcvinfo sinfo; 6554 int filling_sinfo = 1; 6555 int flags; 6556 struct sctp_inpcb *inp; 6557 6558 inp = (struct sctp_inpcb *)so->so_pcb; 6559 /* pickup the assoc we are reading from */ 6560 if (inp == NULL) { 6561 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6562 return (EINVAL); 6563 } 6564 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6565 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6566 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6567 (controlp == NULL)) { 6568 /* user does not want the sndrcv ctl */ 6569 filling_sinfo = 0; 6570 } 6571 if (psa) { 6572 from = (struct sockaddr *)sockbuf; 6573 fromlen = sizeof(sockbuf); 6574 from->sa_len = 0; 6575 } else { 6576 from = NULL; 6577 fromlen = 0; 6578 } 6579 6580 if (filling_sinfo) { 6581 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6582 } 6583 if (flagsp != NULL) { 6584 flags = *flagsp; 6585 } else { 6586 flags = 0; 6587 } 6588 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6589 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6590 if (flagsp != NULL) { 6591 *flagsp = flags; 6592 } 6593 if (controlp != NULL) { 6594 /* copy back the sinfo in a CMSG format */ 6595 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6596 *controlp = sctp_build_ctl_nchunk(inp, 6597 (struct sctp_sndrcvinfo *)&sinfo); 6598 } else { 6599 *controlp = NULL; 6600 } 6601 } 6602 if (psa) { 6603 /* copy back the address info */ 6604 if (from && from->sa_len) { 6605 *psa = sodupsockaddr(from, M_NOWAIT); 6606 } else { 6607 *psa = NULL; 6608 } 6609 } 6610 return (error); 6611 } 6612 6613 int 6614 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6615 int totaddr, int *error) 6616 { 6617 int added = 0; 6618 int i; 6619 struct sctp_inpcb *inp; 6620 struct sockaddr *sa; 6621 size_t incr = 0; 6622 #ifdef INET 6623 struct sockaddr_in *sin; 6624 #endif 6625 #ifdef 
INET6 6626 struct sockaddr_in6 *sin6; 6627 #endif 6628 6629 sa = addr; 6630 inp = stcb->sctp_ep; 6631 *error = 0; 6632 for (i = 0; i < totaddr; i++) { 6633 switch (sa->sa_family) { 6634 #ifdef INET 6635 case AF_INET: 6636 incr = sizeof(struct sockaddr_in); 6637 sin = (struct sockaddr_in *)sa; 6638 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6639 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6640 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6641 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6642 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6643 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6644 *error = EINVAL; 6645 goto out_now; 6646 } 6647 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6648 SCTP_DONOT_SETSCOPE, 6649 SCTP_ADDR_IS_CONFIRMED)) { 6650 /* assoc gone no un-lock */ 6651 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6652 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6653 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6654 *error = ENOBUFS; 6655 goto out_now; 6656 } 6657 added++; 6658 break; 6659 #endif 6660 #ifdef INET6 6661 case AF_INET6: 6662 incr = sizeof(struct sockaddr_in6); 6663 sin6 = (struct sockaddr_in6 *)sa; 6664 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6665 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6666 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6667 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6668 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6669 *error = EINVAL; 6670 goto out_now; 6671 } 6672 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6673 SCTP_DONOT_SETSCOPE, 6674 SCTP_ADDR_IS_CONFIRMED)) { 6675 /* assoc gone no un-lock */ 6676 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6677 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6678 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6679 *error = ENOBUFS; 6680 goto out_now; 6681 } 6682 added++; 6683 break; 6684 #endif 6685 default: 6686 break; 6687 } 6688 sa = (struct sockaddr *)((caddr_t)sa + incr); 6689 } 6690 out_now: 6691 return (added); 6692 } 6693 6694 int 6695 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6696 unsigned int totaddr, 6697 unsigned int *num_v4, unsigned int *num_v6, 6698 unsigned int limit) 6699 { 6700 struct sockaddr *sa; 6701 struct sctp_tcb *stcb; 6702 unsigned int incr, at, i; 6703 6704 at = 0; 6705 sa = addr; 6706 *num_v6 = *num_v4 = 0; 6707 /* account and validate addresses */ 6708 if (totaddr == 0) { 6709 return (EINVAL); 6710 } 6711 for (i = 0; i < totaddr; i++) { 6712 if (at + sizeof(struct sockaddr) > limit) { 6713 return (EINVAL); 6714 } 6715 switch (sa->sa_family) { 6716 #ifdef INET 6717 case AF_INET: 6718 incr = (unsigned int)sizeof(struct sockaddr_in); 6719 if (sa->sa_len != incr) { 6720 return (EINVAL); 6721 } 6722 (*num_v4) += 1; 6723 break; 6724 #endif 6725 #ifdef INET6 6726 case AF_INET6: 6727 { 6728 struct sockaddr_in6 *sin6; 6729 6730 incr = (unsigned int)sizeof(struct sockaddr_in6); 6731 if (sa->sa_len != incr) { 6732 return (EINVAL); 6733 } 6734 sin6 = (struct sockaddr_in6 *)sa; 6735 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6736 /* Must be non-mapped for connectx */ 6737 return (EINVAL); 6738 } 6739 (*num_v6) += 1; 6740 break; 6741 } 6742 #endif 6743 default: 6744 return (EINVAL); 6745 } 6746 if ((at + incr) > limit) { 6747 return (EINVAL); 6748 } 6749 SCTP_INP_INCR_REF(inp); 6750 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6751 if (stcb != NULL) { 6752 SCTP_TCB_UNLOCK(stcb); 6753 return (EALREADY); 6754 } else { 6755 
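/* No existing association was found for this address: drop the endpoint reference taken for the lookup and continue with the next address. */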
SCTP_INP_DECR_REF(inp); 6756 } 6757 at += incr; 6758 sa = (struct sockaddr *)((caddr_t)sa + incr); 6759 } 6760 return (0); 6761 } 6762 6763 /* 6764 * sctp_bindx(ADD) for one address. 6765 * assumes all arguments are valid/checked by caller. 6766 */ 6767 void 6768 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6769 struct sockaddr *sa, uint32_t vrf_id, int *error, 6770 void *p) 6771 { 6772 #if defined(INET) && defined(INET6) 6773 struct sockaddr_in sin; 6774 #endif 6775 #ifdef INET6 6776 struct sockaddr_in6 *sin6; 6777 #endif 6778 #ifdef INET 6779 struct sockaddr_in *sinp; 6780 #endif 6781 struct sockaddr *addr_to_use; 6782 struct sctp_inpcb *lep; 6783 uint16_t port; 6784 6785 /* see if we're bound all already! */ 6786 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6787 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6788 *error = EINVAL; 6789 return; 6790 } 6791 switch (sa->sa_family) { 6792 #ifdef INET6 6793 case AF_INET6: 6794 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6795 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6796 *error = EINVAL; 6797 return; 6798 } 6799 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6800 /* can only bind v6 on PF_INET6 sockets */ 6801 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6802 *error = EINVAL; 6803 return; 6804 } 6805 sin6 = (struct sockaddr_in6 *)sa; 6806 port = sin6->sin6_port; 6807 #ifdef INET 6808 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6809 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6810 SCTP_IPV6_V6ONLY(inp)) { 6811 /* can't bind v4-mapped on PF_INET sockets */ 6812 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6813 *error = EINVAL; 6814 return; 6815 } 6816 in6_sin6_2_sin(&sin, sin6); 6817 addr_to_use = (struct sockaddr *)&sin; 6818 } else { 6819 addr_to_use = sa; 6820 } 6821 #else 6822 addr_to_use = sa; 6823 #endif 6824 break; 6825 #endif 6826 #ifdef INET 6827 case AF_INET: 6828 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6829 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6830 *error = EINVAL; 6831 return; 6832 } 6833 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6834 SCTP_IPV6_V6ONLY(inp)) { 6835 /* can't bind v4 on PF_INET sockets */ 6836 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6837 *error = EINVAL; 6838 return; 6839 } 6840 sinp = (struct sockaddr_in *)sa; 6841 port = sinp->sin_port; 6842 addr_to_use = sa; 6843 break; 6844 #endif 6845 default: 6846 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6847 *error = EINVAL; 6848 return; 6849 } 6850 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6851 if (p == NULL) { 6852 /* Can't get proc for Net/Open BSD */ 6853 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6854 *error = EINVAL; 6855 return; 6856 } 6857 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6858 return; 6859 } 6860 /* Validate the incoming port. */ 6861 if ((port != 0) && (port != inp->sctp_lport)) { 6862 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6863 *error = EINVAL; 6864 return; 6865 } 6866 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6867 if (lep == NULL) { 6868 /* add the address */ 6869 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6870 SCTP_ADD_IP_ADDRESS, vrf_id); 6871 } else { 6872 if (lep != inp) { 6873 *error = EADDRINUSE; 6874 } 6875 SCTP_INP_DECR_REF(lep); 6876 } 6877 } 6878 6879 /* 6880 * sctp_bindx(DELETE) for one address. 
6881 * assumes all arguments are valid/checked by caller. 6882 */ 6883 void 6884 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6885 struct sockaddr *sa, uint32_t vrf_id, int *error) 6886 { 6887 struct sockaddr *addr_to_use; 6888 #if defined(INET) && defined(INET6) 6889 struct sockaddr_in6 *sin6; 6890 struct sockaddr_in sin; 6891 #endif 6892 6893 /* see if we're bound all already! */ 6894 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6895 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6896 *error = EINVAL; 6897 return; 6898 } 6899 switch (sa->sa_family) { 6900 #ifdef INET6 6901 case AF_INET6: 6902 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6903 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6904 *error = EINVAL; 6905 return; 6906 } 6907 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6908 /* can only bind v6 on PF_INET6 sockets */ 6909 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6910 *error = EINVAL; 6911 return; 6912 } 6913 #ifdef INET 6914 sin6 = (struct sockaddr_in6 *)sa; 6915 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6916 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6917 SCTP_IPV6_V6ONLY(inp)) { 6918 /* can't bind mapped-v4 on PF_INET sockets */ 6919 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6920 *error = EINVAL; 6921 return; 6922 } 6923 in6_sin6_2_sin(&sin, sin6); 6924 addr_to_use = (struct sockaddr *)&sin; 6925 } else { 6926 addr_to_use = sa; 6927 } 6928 #else 6929 addr_to_use = sa; 6930 #endif 6931 break; 6932 #endif 6933 #ifdef INET 6934 case AF_INET: 6935 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6936 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6937 *error = EINVAL; 6938 return; 6939 } 6940 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6941 SCTP_IPV6_V6ONLY(inp)) { 6942 /* can't bind v4 on PF_INET sockets */ 6943 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6944 *error = EINVAL; 6945 return; 6946 } 6947 addr_to_use = sa; 6948 break; 6949 #endif 6950 default: 6951 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6952 *error = EINVAL; 6953 return; 6954 } 6955 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6956 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6957 vrf_id); 6958 } 6959 6960 /* 6961 * returns the valid local address count for an assoc, taking into account 6962 * all scoping rules 6963 */ 6964 int 6965 sctp_local_addr_count(struct sctp_tcb *stcb) 6966 { 6967 int loopback_scope; 6968 #if defined(INET) 6969 int ipv4_local_scope, ipv4_addr_legal; 6970 #endif 6971 #if defined(INET6) 6972 int local_scope, site_scope, ipv6_addr_legal; 6973 #endif 6974 struct sctp_vrf *vrf; 6975 struct sctp_ifn *sctp_ifn; 6976 struct sctp_ifa *sctp_ifa; 6977 int count = 0; 6978 6979 /* Turn on all the appropriate scopes */ 6980 loopback_scope = stcb->asoc.scope.loopback_scope; 6981 #if defined(INET) 6982 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6983 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6984 #endif 6985 #if defined(INET6) 6986 local_scope = stcb->asoc.scope.local_scope; 6987 site_scope = stcb->asoc.scope.site_scope; 6988 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6989 #endif 6990 SCTP_IPI_ADDR_RLOCK(); 6991 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6992 if (vrf == NULL) { 6993 /* no vrf, no addresses */ 6994 SCTP_IPI_ADDR_RUNLOCK(); 6995 return (0); 6996 } 6997 6998 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6999 /* 7000 * bound all case: go through all ifns on the vrf 7001 */ 7002 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 7003 if ((loopback_scope == 0) && 7004 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 7005 continue; 7006 } 7007 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 7008 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 7009 continue; 7010 switch (sctp_ifa->address.sa.sa_family) { 7011 #ifdef INET 7012 case AF_INET: 7013 if (ipv4_addr_legal) { 7014 struct sockaddr_in *sin; 7015 7016 sin = &sctp_ifa->address.sin; 7017 if (sin->sin_addr.s_addr == 0) { 7018 /* 7019 * skip unspecified 7020 * addrs 7021 */ 7022 continue; 7023 } 7024 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 7025 &sin->sin_addr) != 0) { 7026 continue; 7027 } 7028 if ((ipv4_local_scope == 0) && 7029 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 7030 continue; 7031 } 7032 /* count this one */ 7033 count++; 7034 } else { 7035 continue; 7036 } 7037 break; 7038 #endif 7039 #ifdef INET6 7040 case AF_INET6: 7041 if (ipv6_addr_legal) { 7042 struct sockaddr_in6 *sin6; 7043 7044 sin6 = &sctp_ifa->address.sin6; 7045 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 7046 continue; 7047 } 7048 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 7049 &sin6->sin6_addr) != 0) { 7050 continue; 7051 } 7052 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 7053 if (local_scope == 0) 7054 continue; 7055 if (sin6->sin6_scope_id == 0) { 7056 if (sa6_recoverscope(sin6) != 0) 7057 /* 7058 * 7059 * bad 7060 * link 7061 * 7062 * local 7063 * 7064 * address 7065 */ 7066 continue; 7067 } 7068 } 7069 if ((site_scope == 0) && 7070 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7071 continue; 7072 } 7073 /* count this one */ 7074 count++; 7075 } 7076 break; 7077 #endif 7078 default: 7079 /* TSNH */ 7080 break; 7081 } 7082 } 7083 } 7084 } else { 7085 /* 7086 * subset bound case 7087 */ 7088 struct sctp_laddr *laddr; 7089 7090 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7091 sctp_nxt_addr) { 7092 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7093 continue; 7094 } 7095 /* count this one */ 7096 count++; 7097 } 7098 } 7099 SCTP_IPI_ADDR_RUNLOCK(); 7100 return (count); 7101 } 7102 7103 #if defined(SCTP_LOCAL_TRACE_BUF) 7104 7105 void 7106 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7107 { 7108 uint32_t saveindex, newindex; 7109 7110 do { 7111 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7112 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7113 newindex = 1; 7114 } else { 7115 newindex = saveindex + 1; 7116 } 7117 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7118 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7119 saveindex = 0; 7120 } 7121 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7122 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7123 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7124 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7125 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7126 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7127 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7128 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7129 } 7130 7131 #endif 7132 static bool 7133 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7134 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7135 { 7136 struct ip *iph; 7137 #ifdef INET6 7138 struct ip6_hdr *ip6; 7139 #endif 7140 struct mbuf *sp, *last; 7141 struct udphdr *uhdr; 7142 uint16_t port; 7143 7144 if ((m->m_flags & M_PKTHDR) == 0) { 7145 /* Can't handle one that is not a pkt hdr */ 7146 goto out; 7147 } 7148 /* Pull the src port */ 7149 iph = mtod(m, struct ip *); 7150 uhdr = (struct udphdr *)((caddr_t)iph + off); 7151 port = uhdr->uh_sport; 7152 /* 7153 * Split out the mbuf chain. Leave the IP header in m, place the 7154 * rest in the sp. 7155 */ 7156 sp = m_split(m, off, M_NOWAIT); 7157 if (sp == NULL) { 7158 /* Gak, drop packet, we can't do a split */ 7159 goto out; 7160 } 7161 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7162 /* Gak, packet can't have an SCTP header in it - too small */ 7163 m_freem(sp); 7164 goto out; 7165 } 7166 /* Now pull up the UDP header and SCTP header together */ 7167 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7168 if (sp == NULL) { 7169 /* Gak pullup failed */ 7170 goto out; 7171 } 7172 /* Trim out the UDP header */ 7173 m_adj(sp, sizeof(struct udphdr)); 7174 7175 /* Now reconstruct the mbuf chain */ 7176 for (last = m; last->m_next; last = last->m_next); 7177 last->m_next = sp; 7178 m->m_pkthdr.len += sp->m_pkthdr.len; 7179 /* 7180 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7181 * checksum and it was valid. Since CSUM_DATA_VALID == 7182 * CSUM_SCTP_VALID this would imply that the HW also verified the 7183 * SCTP checksum. Therefore, clear the bit. 
7184 */ 7185 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7186 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7187 m->m_pkthdr.len, 7188 if_name(m->m_pkthdr.rcvif), 7189 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7190 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7191 iph = mtod(m, struct ip *); 7192 switch (iph->ip_v) { 7193 #ifdef INET 7194 case IPVERSION: 7195 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7196 sctp_input_with_port(m, off, port); 7197 break; 7198 #endif 7199 #ifdef INET6 7200 case IPV6_VERSION >> 4: 7201 ip6 = mtod(m, struct ip6_hdr *); 7202 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7203 sctp6_input_with_port(&m, &off, port); 7204 break; 7205 #endif 7206 default: 7207 goto out; 7208 break; 7209 } 7210 return (true); 7211 out: 7212 m_freem(m); 7213 7214 return (true); 7215 } 7216 7217 #ifdef INET 7218 static void 7219 sctp_recv_icmp_tunneled_packet(udp_tun_icmp_param_t param) 7220 { 7221 struct icmp *icmp = param.icmp; 7222 struct ip *outer_ip, *inner_ip; 7223 struct sctphdr *sh; 7224 struct udphdr *udp; 7225 struct sctp_inpcb *inp; 7226 struct sctp_tcb *stcb; 7227 struct sctp_nets *net; 7228 struct sctp_init_chunk *ch; 7229 struct sockaddr_in src, dst; 7230 uint8_t type, code; 7231 7232 inner_ip = &icmp->icmp_ip; 7233 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7234 if (ntohs(outer_ip->ip_len) < 7235 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7236 return; 7237 } 7238 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7239 sh = (struct sctphdr *)(udp + 1); 7240 memset(&src, 0, sizeof(struct sockaddr_in)); 7241 src.sin_family = AF_INET; 7242 src.sin_len = sizeof(struct sockaddr_in); 7243 src.sin_port = sh->src_port; 7244 src.sin_addr = inner_ip->ip_src; 7245 memset(&dst, 0, sizeof(struct sockaddr_in)); 7246 dst.sin_family = AF_INET; 7247 dst.sin_len = sizeof(struct sockaddr_in); 7248 dst.sin_port = sh->dest_port; 7249 dst.sin_addr = inner_ip->ip_dst; 7250 /* 7251 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7252 * holds our local endpoint address. Thus we reverse the dst and the 7253 * src in the lookup. 7254 */ 7255 inp = NULL; 7256 net = NULL; 7257 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7258 (struct sockaddr *)&src, 7259 &inp, &net, 1, 7260 SCTP_DEFAULT_VRFID); 7261 if ((stcb != NULL) && 7262 (net != NULL) && 7263 (inp != NULL)) { 7264 /* Check the UDP port numbers */ 7265 if ((udp->uh_dport != net->port) || 7266 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7267 SCTP_TCB_UNLOCK(stcb); 7268 return; 7269 } 7270 /* Check the verification tag */ 7271 if (ntohl(sh->v_tag) != 0) { 7272 /* 7273 * This must be the verification tag used for 7274 * sending out packets. We don't consider packets 7275 * reflecting the verification tag. 7276 */ 7277 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7278 SCTP_TCB_UNLOCK(stcb); 7279 return; 7280 } 7281 } else { 7282 if (ntohs(outer_ip->ip_len) >= 7283 sizeof(struct ip) + 7284 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7285 /* 7286 * In this case we can check if we got an 7287 * INIT chunk and if the initiate tag 7288 * matches. 
7289 */ 7290 ch = (struct sctp_init_chunk *)(sh + 1); 7291 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7292 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7293 SCTP_TCB_UNLOCK(stcb); 7294 return; 7295 } 7296 } else { 7297 SCTP_TCB_UNLOCK(stcb); 7298 return; 7299 } 7300 } 7301 type = icmp->icmp_type; 7302 code = icmp->icmp_code; 7303 if ((type == ICMP_UNREACH) && 7304 (code == ICMP_UNREACH_PORT)) { 7305 code = ICMP_UNREACH_PROTOCOL; 7306 } 7307 sctp_notify(inp, stcb, net, type, code, 7308 ntohs(inner_ip->ip_len), 7309 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7310 } else { 7311 if ((stcb == NULL) && (inp != NULL)) { 7312 /* reduce ref-count */ 7313 SCTP_INP_WLOCK(inp); 7314 SCTP_INP_DECR_REF(inp); 7315 SCTP_INP_WUNLOCK(inp); 7316 } 7317 if (stcb) { 7318 SCTP_TCB_UNLOCK(stcb); 7319 } 7320 } 7321 return; 7322 } 7323 #endif 7324 7325 #ifdef INET6 7326 static void 7327 sctp_recv_icmp6_tunneled_packet(udp_tun_icmp_param_t param) 7328 { 7329 struct ip6ctlparam *ip6cp = param.ip6cp; 7330 struct sctp_inpcb *inp; 7331 struct sctp_tcb *stcb; 7332 struct sctp_nets *net; 7333 struct sctphdr sh; 7334 struct udphdr udp; 7335 struct sockaddr_in6 src, dst; 7336 uint8_t type, code; 7337 7338 /* 7339 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7340 */ 7341 if (ip6cp->ip6c_m == NULL) { 7342 return; 7343 } 7344 /* 7345 * Check if we can safely examine the ports and the verification tag 7346 * of the SCTP common header. 7347 */ 7348 if (ip6cp->ip6c_m->m_pkthdr.len < 7349 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7350 return; 7351 } 7352 /* Copy out the UDP header. */ 7353 memset(&udp, 0, sizeof(struct udphdr)); 7354 m_copydata(ip6cp->ip6c_m, 7355 ip6cp->ip6c_off, 7356 sizeof(struct udphdr), 7357 (caddr_t)&udp); 7358 /* Copy out the port numbers and the verification tag. */ 7359 memset(&sh, 0, sizeof(struct sctphdr)); 7360 m_copydata(ip6cp->ip6c_m, 7361 ip6cp->ip6c_off + sizeof(struct udphdr), 7362 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7363 (caddr_t)&sh); 7364 memset(&src, 0, sizeof(struct sockaddr_in6)); 7365 src.sin6_family = AF_INET6; 7366 src.sin6_len = sizeof(struct sockaddr_in6); 7367 src.sin6_port = sh.src_port; 7368 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7369 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7370 return; 7371 } 7372 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7373 dst.sin6_family = AF_INET6; 7374 dst.sin6_len = sizeof(struct sockaddr_in6); 7375 dst.sin6_port = sh.dest_port; 7376 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7377 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7378 return; 7379 } 7380 inp = NULL; 7381 net = NULL; 7382 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7383 (struct sockaddr *)&src, 7384 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7385 if ((stcb != NULL) && 7386 (net != NULL) && 7387 (inp != NULL)) { 7388 /* Check the UDP port numbers */ 7389 if ((udp.uh_dport != net->port) || 7390 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7391 SCTP_TCB_UNLOCK(stcb); 7392 return; 7393 } 7394 /* Check the verification tag */ 7395 if (ntohl(sh.v_tag) != 0) { 7396 /* 7397 * This must be the verification tag used for 7398 * sending out packets. We don't consider packets 7399 * reflecting the verification tag. 
7400 */ 7401 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7402 SCTP_TCB_UNLOCK(stcb); 7403 return; 7404 } 7405 } else { 7406 if (ip6cp->ip6c_m->m_pkthdr.len >= 7407 ip6cp->ip6c_off + sizeof(struct udphdr) + 7408 sizeof(struct sctphdr) + 7409 sizeof(struct sctp_chunkhdr) + 7410 offsetof(struct sctp_init, a_rwnd)) { 7411 /* 7412 * In this case we can check if we got an 7413 * INIT chunk and if the initiate tag 7414 * matches. 7415 */ 7416 uint32_t initiate_tag; 7417 uint8_t chunk_type; 7418 7419 m_copydata(ip6cp->ip6c_m, 7420 ip6cp->ip6c_off + 7421 sizeof(struct udphdr) + 7422 sizeof(struct sctphdr), 7423 sizeof(uint8_t), 7424 (caddr_t)&chunk_type); 7425 m_copydata(ip6cp->ip6c_m, 7426 ip6cp->ip6c_off + 7427 sizeof(struct udphdr) + 7428 sizeof(struct sctphdr) + 7429 sizeof(struct sctp_chunkhdr), 7430 sizeof(uint32_t), 7431 (caddr_t)&initiate_tag); 7432 if ((chunk_type != SCTP_INITIATION) || 7433 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7434 SCTP_TCB_UNLOCK(stcb); 7435 return; 7436 } 7437 } else { 7438 SCTP_TCB_UNLOCK(stcb); 7439 return; 7440 } 7441 } 7442 type = ip6cp->ip6c_icmp6->icmp6_type; 7443 code = ip6cp->ip6c_icmp6->icmp6_code; 7444 if ((type == ICMP6_DST_UNREACH) && 7445 (code == ICMP6_DST_UNREACH_NOPORT)) { 7446 type = ICMP6_PARAM_PROB; 7447 code = ICMP6_PARAMPROB_NEXTHEADER; 7448 } 7449 sctp6_notify(inp, stcb, net, type, code, 7450 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7451 } else { 7452 if ((stcb == NULL) && (inp != NULL)) { 7453 /* reduce inp's ref-count */ 7454 SCTP_INP_WLOCK(inp); 7455 SCTP_INP_DECR_REF(inp); 7456 SCTP_INP_WUNLOCK(inp); 7457 } 7458 if (stcb) { 7459 SCTP_TCB_UNLOCK(stcb); 7460 } 7461 } 7462 } 7463 #endif 7464 7465 void 7466 sctp_over_udp_stop(void) 7467 { 7468 /* 7469 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7470 * for writing! 7471 */ 7472 #ifdef INET 7473 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7474 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7475 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7476 } 7477 #endif 7478 #ifdef INET6 7479 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7480 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7481 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7482 } 7483 #endif 7484 } 7485 7486 int 7487 sctp_over_udp_start(void) 7488 { 7489 uint16_t port; 7490 int ret; 7491 #ifdef INET 7492 struct sockaddr_in sin; 7493 #endif 7494 #ifdef INET6 7495 struct sockaddr_in6 sin6; 7496 #endif 7497 /* 7498 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7499 * for writing! 7500 */ 7501 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7502 if (ntohs(port) == 0) { 7503 /* Must have a port set */ 7504 return (EINVAL); 7505 } 7506 #ifdef INET 7507 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7508 /* Already running -- must stop first */ 7509 return (EALREADY); 7510 } 7511 #endif 7512 #ifdef INET6 7513 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7514 /* Already running -- must stop first */ 7515 return (EALREADY); 7516 } 7517 #endif 7518 #ifdef INET 7519 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7520 SOCK_DGRAM, IPPROTO_UDP, 7521 curthread->td_ucred, curthread))) { 7522 sctp_over_udp_stop(); 7523 return (ret); 7524 } 7525 /* Call the special UDP hook. */ 7526 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7527 sctp_recv_udp_tunneled_packet, 7528 sctp_recv_icmp_tunneled_packet, 7529 NULL))) { 7530 sctp_over_udp_stop(); 7531 return (ret); 7532 } 7533 /* Ok, we have a socket, bind it to the port. 

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}

/* Store the path MTU for the given peer address in the TCP host cache. */
void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

/* Look up a cached path MTU for the given peer address; 0 means no entry. */
uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}
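
/*
 * Editor's note, not part of the original file: a few worked values for
 * sctp_min_mtu(), matching its comment -- zero arguments are ignored and
 * only when all three are zero is zero returned.  MPASS() is used purely as
 * a stand-in assertion; the block is compiled out.
 */
#if 0	/* illustrative only */
static void
sctp_min_mtu_examples(void)
{
	MPASS(sctp_min_mtu(1500, 1400, 9000) == 1400);
	MPASS(sctp_min_mtu(0, 1400, 1500) == 1400);	/* zero args are skipped */
	MPASS(sctp_min_mtu(1280, 0, 0) == 1280);
	MPASS(sctp_min_mtu(0, 0, 0) == 0);		/* all zero -> zero */
}
#endif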

void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	      (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	     ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	     ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
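
/*
 * Editor's sketch, not part of the original file: sctp_set_state() replaces
 * the base state (and, when entering one of the shutdown states, clears the
 * SCTP_STATE_SHUTDOWN_PENDING substate), while sctp_add_substate() only ORs
 * in substate flags.  Typical usage from a shutdown path, assuming stcb is a
 * locked association, might look like the compiled-out block below.
 */
#if 0	/* illustrative only */
	/* Remember that a graceful shutdown is wanted once data drains. */
	sctp_add_substate(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	/* ... later, when the SHUTDOWN chunk is actually sent ... */
	sctp_set_state(stcb, SCTP_STATE_SHUTDOWN_SENT);
	/* The pending flag has now been cleared by sctp_set_state(). */
#endif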