/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.sb.stcb = stcb;
    sctp_clog.x.sb.so_sbcc = sb->sb_cc;
    if (stcb)
        sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
    else
        sctp_clog.x.sb.stcb_sbcc = 0;
    sctp_clog.x.sb.incr = incr;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_SB, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.close.inp = (void *)inp;
    sctp_clog.x.close.sctp_flags = inp->sctp_flags;
    if (stcb) {
        sctp_clog.x.close.stcb = (void *)stcb;
        sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
    } else {
        sctp_clog.x.close.stcb = 0;
        sctp_clog.x.close.state = 0;
    }
    sctp_clog.x.close.loc = loc;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_CLOSE, 0,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.rto.net = (void *)net;
    sctp_clog.x.rto.rtt = net->rtt / 1000;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RTT, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.strlog.stcb = stcb;
    sctp_clog.x.strlog.n_tsn = tsn;
    sctp_clog.x.strlog.n_sseq = sseq;
    sctp_clog.x.strlog.e_tsn = 0;
    sctp_clog.x.strlog.e_sseq = 0;
    sctp_clog.x.strlog.strm = stream;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_STRM, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}
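/*
 * The sctp_log_*() and sctp_sblog() tracing helpers in this file all follow
 * the same pattern: fill in the event-specific member of the sctp_cwnd_log
 * union and hand the four generic 32-bit words (x.misc.log1..log4) to
 * SCTP_CTR6() for the KTR trace buffer.  The trace records are only emitted
 * when the kernel is built with SCTP_LOCAL_TRACE_BUF.
 */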
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.nagle.stcb = (void *)stcb;
    sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
    sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
    sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
    sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_NAGLE, action,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.sack.cumack = cumack;
    sctp_clog.x.sack.oldcumack = old_cumack;
    sctp_clog.x.sack.tsn = tsn;
    sctp_clog.x.sack.numGaps = gaps;
    sctp_clog.x.sack.numDups = dups;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_SACK, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.map.base = map;
    sctp_clog.x.map.cum = cum;
    sctp_clog.x.map.high = high;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MAP, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.fr.largest_tsn = biggest_tsn;
    sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
    sctp_clog.x.fr.tsn = tsn;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_FR, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.mb.mp = m;
    sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
    sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
    sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
    if (SCTP_BUF_IS_EXTENDED(m)) {
        sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
        sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
    } else {
        sctp_clog.x.mb.ext = 0;
        sctp_clog.x.mb.refcnt = 0;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MBUF, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
    struct mbuf *mat;

    for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
        sctp_log_mb(mat, from);
    }
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    if (control == NULL) {
        SCTP_PRINTF("Gak log of NULL?\n");
        return;
    }
    sctp_clog.x.strlog.stcb = control->stcb;
    sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
    sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
    sctp_clog.x.strlog.strm = control->sinfo_stream;
    if (poschk != NULL) {
        sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
        sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
    } else {
        sctp_clog.x.strlog.e_tsn = 0;
        sctp_clog.x.strlog.e_sseq = 0;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_STRM, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.cwnd.net = net;
    if (stcb->asoc.send_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_send = 255;
    else
        sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
    if (stcb->asoc.stream_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_str = 255;
    else
        sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

    if (net) {
        sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
        sctp_clog.x.cwnd.inflight = net->flight_size;
        sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
        sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
        sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
    }
    if (SCTP_CWNDLOG_PRESEND == from) {
        sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
    }
    sctp_clog.x.cwnd.cwnd_augment = augment;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_CWND, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    if (inp) {
        sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
    } else {
        sctp_clog.x.lock.sock = (void *)NULL;
    }
    sctp_clog.x.lock.inp = (void *)inp;
    if (stcb) {
        sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
    } else {
        sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
    }
    if (inp) {
        sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
        sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
    } else {
        sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
    }
    sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
    if (inp && (inp->sctp_socket)) {
        sctp_clog.x.lock.sock_lock = mtx_owned(SOCK_MTX(inp->sctp_socket));
        sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_rcv));
        sctp_clog.x.lock.socksndbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_snd));
    } else {
        sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_LOCK_EVENT, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.cwnd.net = net;
    sctp_clog.x.cwnd.cwnd_new_value = error;
    sctp_clog.x.cwnd.inflight = net->flight_size;
    sctp_clog.x.cwnd.cwnd_augment = burst;
    if (stcb->asoc.send_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_send = 255;
    else
        sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
    if (stcb->asoc.stream_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_str = 255;
    else
        sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MAXBURST, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.rwnd.rwnd = peers_rwnd;
    sctp_clog.x.rwnd.send_size = snd_size;
    sctp_clog.x.rwnd.overhead = overhead;
    sctp_clog.x.rwnd.new_rwnd = 0;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RWND, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.rwnd.rwnd = peers_rwnd;
    sctp_clog.x.rwnd.send_size = flight_size;
    sctp_clog.x.rwnd.overhead = overhead;
    sctp_clog.x.rwnd.new_rwnd = a_rwndval;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RWND, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.mbcnt.total_queue_size = total_oq;
    sctp_clog.x.mbcnt.size_change = book;
    sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
    sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MBCNT, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_MISC_EVENT, from,
        a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.wake.stcb = (void *)stcb;
    sctp_clog.x.wake.wake_cnt = wake_cnt;
    sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
    sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
    sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

    if (stcb->asoc.stream_queue_cnt < 0xff)
        sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
    else
        sctp_clog.x.wake.stream_qcnt = 0xff;

    if (stcb->asoc.chunks_on_out_queue < 0xff)
        sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
    else
        sctp_clog.x.wake.chunks_on_oque = 0xff;

    sctp_clog.x.wake.sctpflags = 0;
    /* set in the deferred mode stuff */
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
        sctp_clog.x.wake.sctpflags |= 1;
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
        sctp_clog.x.wake.sctpflags |= 2;
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
        sctp_clog.x.wake.sctpflags |= 4;
    /* what about the sb */
    if (stcb->sctp_socket) {
        struct socket *so = stcb->sctp_socket;

        sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
    } else {
        sctp_clog.x.wake.sbflags = 0xff;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_WAKE, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
    sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
    sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
    sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
    sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
    sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
    sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_BLOCK, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
    /* May need to fix this if ktrdump does not work */
    return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static void
sctp_print_audit_report(void)
{
    int i;
    int cnt;

    cnt = 0;
    for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
        if ((sctp_audit_data[i][0] == 0xe0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if (sctp_audit_data[i][0] == 0xf0) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if ((sctp_audit_data[i][0] == 0xc0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            SCTP_PRINTF("\n");
            cnt = 0;
        }
        SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
            (uint32_t)sctp_audit_data[i][1]);
        cnt++;
        if ((cnt % 14) == 0)
            SCTP_PRINTF("\n");
    }
    for (i = 0; i < sctp_audit_indx; i++) {
        if ((sctp_audit_data[i][0] == 0xe0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if (sctp_audit_data[i][0] == 0xf0) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if ((sctp_audit_data[i][0] == 0xc0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            SCTP_PRINTF("\n");
            cnt = 0;
        }
        SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
            (uint32_t)sctp_audit_data[i][1]);
        cnt++;
        if ((cnt % 14) == 0)
            SCTP_PRINTF("\n");
    }
    SCTP_PRINTF("\n");
}
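/*
 * sctp_auditing() below records two-byte entries into the circular
 * sctp_audit_data[] buffer (wrapping at SCTP_AUDIT_SIZE) and cross-checks
 * the association's cached counters -- sent_queue_retran_cnt, total_flight,
 * total_flight_count and the per-net flight sizes -- against the sent queue,
 * correcting them and dumping the trail via sctp_print_audit_report() when
 * a mismatch is found.
 */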
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    int resend_cnt, tot_out, rep, tot_book_cnt;
    struct sctp_nets *lnet;
    struct sctp_tmit_chunk *chk;

    sctp_audit_data[sctp_audit_indx][0] = 0xAA;
    sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
    if (inp == NULL) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0x01;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        return;
    }
    if (stcb == NULL) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0x02;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        return;
    }
    sctp_audit_data[sctp_audit_indx][0] = 0xA1;
    sctp_audit_data[sctp_audit_indx][1] =
        (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
    rep = 0;
    tot_book_cnt = 0;
    resend_cnt = tot_out = 0;
    TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
        if (chk->sent == SCTP_DATAGRAM_RESEND) {
            resend_cnt++;
        } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
            tot_out += chk->book_size;
            tot_book_cnt++;
        }
    }
    if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA1;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
            resend_cnt, stcb->asoc.sent_queue_retran_cnt);
        rep = 1;
        stcb->asoc.sent_queue_retran_cnt = resend_cnt;
        sctp_audit_data[sctp_audit_indx][0] = 0xA2;
        sctp_audit_data[sctp_audit_indx][1] =
            (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
    }
    if (tot_out != stcb->asoc.total_flight) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA2;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
            (int)stcb->asoc.total_flight);
        stcb->asoc.total_flight = tot_out;
    }
    if (tot_book_cnt != stcb->asoc.total_flight_count) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA5;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

        stcb->asoc.total_flight_count = tot_book_cnt;
    }
    tot_out = 0;
    TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
        tot_out += lnet->flight_size;
    }
    if (tot_out != stcb->asoc.total_flight) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA3;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("real flight:%d net total was %d\n",
            stcb->asoc.total_flight, tot_out);
        /* now corrective action */
        TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
            tot_out = 0;
            TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
                if ((chk->whoTo == lnet) &&
                    (chk->sent < SCTP_DATAGRAM_RESEND)) {
                    tot_out += chk->book_size;
                }
            }
            if (lnet->flight_size != tot_out) {
                SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
                    (void *)lnet,
                    lnet->flight_size, tot_out);
                lnet->flight_size = tot_out;
            }
        }
    }
    if (rep) {
        sctp_print_audit_report();
    }
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{
    sctp_audit_data[sctp_audit_indx][0] = ev;
    sctp_audit_data[sctp_audit_indx][1] = fd;
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code the time to be positive and
 * know that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
    uint64_t temp;
    uint32_t ticks;

    if (hz == 1000) {
        ticks = msecs;
    } else {
        temp = (((uint64_t)msecs * hz) + 999) / 1000;
        if (temp > UINT32_MAX) {
            ticks = UINT32_MAX;
        } else {
            ticks = (uint32_t)temp;
        }
    }
    return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
    uint64_t temp;
    uint32_t msecs;

    if (hz == 1000) {
        msecs = ticks;
    } else {
        temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
        if (temp > UINT32_MAX) {
            msecs = UINT32_MAX;
        } else {
            msecs = (uint32_t)temp;
        }
    }
    return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
    uint64_t temp;
    uint32_t ticks;

    temp = (uint64_t)secs * hz;
    if (temp > UINT32_MAX) {
        ticks = UINT32_MAX;
    } else {
        ticks = (uint32_t)temp;
    }
    return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
    uint64_t temp;
    uint32_t secs;

    temp = ((uint64_t)ticks + (hz - 1)) / hz;
    if (temp > UINT32_MAX) {
        secs = UINT32_MAX;
    } else {
        secs = (uint32_t)temp;
    }
    return (secs);
}
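/*
 * Worked example of the rounding above: if hz were 250 (used here purely
 * for illustration), sctp_msecs_to_ticks(10) yields (10 * 250 + 999) / 1000
 * = 3 ticks, and sctp_ticks_to_msecs(3) yields (3 * 1000 + 249) / 250 = 12
 * msecs, so a positive duration never collapses to zero in either direction.
 */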
/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
    struct sctp_inpcb *inp;
    struct sctp_nets *net;

    inp = stcb->sctp_ep;

    sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
    sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
    sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
    sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
    TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
        sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
        sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
    }
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
    struct sctp_inpcb *inp;
    struct sctp_nets *net;

    inp = stcb->sctp_ep;
    sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
    sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
    if (stop_assoc_kill_timer) {
        sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
    }
    sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
    sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
    sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
    /* Mobility adaptation */
    sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
    TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
        sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
        sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
        sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
        sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
        sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
        sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
    }
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
    68,
    296,
    508,
    512,
    544,
    576,
    1004,
    1492,
    1500,
    1536,
    2000,
    2048,
    4352,
    4464,
    8168,
    17912,
    32000,
    65532
};
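/*
 * Example lookups against the table above: for val = 1500 the next smaller
 * entry is 1492 and the next larger one is 1536; for val = 1000 they are
 * 576 and 1004 respectively (see sctp_get_prev_mtu() and sctp_get_next_mtu()
 * below).
 */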
/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
    uint32_t i;

    val &= 0xfffffffc;
    if (val <= sctp_mtu_sizes[0]) {
        return (val);
    }
    for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
        if (val <= sctp_mtu_sizes[i]) {
            break;
        }
    }
    KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
        ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
    return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4
 * smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
    /* select another MTU that is just bigger than this one */
    uint32_t i;

    val &= 0xfffffffc;
    for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
        if (val < sctp_mtu_sizes[i]) {
            KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
                ("sctp_mtu_sizes[%u] not a multiple of 4", i));
            return (sctp_mtu_sizes[i]);
        }
    }
    return (val);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
    /*
     * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
     * our counter. The result becomes our good random numbers and we
     * then setup to give these out. Note that we do no locking to
     * protect this. This is ok, since if competing folks call this we
     * will get more gobbledygook in the random store, which is what we
     * want. There is a danger that two callers will use the same random
     * numbers, but that's ok too since that is random as well :->
     */
    m->store_at = 0;
    (void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
        sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
        sizeof(m->random_counter), (uint8_t *)m->random_store);
    m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
    /*
     * A true implementation should use a random selection process to get
     * the initial stream sequence number, using RFC1750 as a good
     * guideline.
     */
    uint32_t x, *xp;
    uint8_t *p;
    int store_at, new_store;

    if (inp->initial_sequence_debug != 0) {
        uint32_t ret;

        ret = inp->initial_sequence_debug;
        inp->initial_sequence_debug++;
        return (ret);
    }
retry:
    store_at = inp->store_at;
    new_store = store_at + sizeof(uint32_t);
    if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
        new_store = 0;
    }
    if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
        goto retry;
    }
    if (new_store == 0) {
        /* Refill the random store */
        sctp_fill_random_store(inp);
    }
    p = &inp->random_store[store_at];
    xp = (uint32_t *)p;
    x = *xp;
    return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
    uint32_t x;
    struct timeval now;

    if (check) {
        (void)SCTP_GETTIME_TIMEVAL(&now);
    }
    for (;;) {
        x = sctp_select_initial_TSN(&inp->sctp_ep);
        if (x == 0) {
            /* we never use 0 */
            continue;
        }
        if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
            break;
        }
    }
    return (x);
}

int32_t
sctp_map_assoc_state(int kernel_state)
{
    int32_t user_state;

    if (kernel_state & SCTP_STATE_WAS_ABORTED) {
        user_state = SCTP_CLOSED;
    } else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
        user_state = SCTP_SHUTDOWN_PENDING;
    } else {
        switch (kernel_state & SCTP_STATE_MASK) {
        case SCTP_STATE_EMPTY:
            user_state = SCTP_CLOSED;
            break;
        case SCTP_STATE_INUSE:
            user_state = SCTP_CLOSED;
            break;
        case SCTP_STATE_COOKIE_WAIT:
            user_state = SCTP_COOKIE_WAIT;
            break;
        case SCTP_STATE_COOKIE_ECHOED:
            user_state = SCTP_COOKIE_ECHOED;
            break;
        case SCTP_STATE_OPEN:
            user_state = SCTP_ESTABLISHED;
            break;
        case SCTP_STATE_SHUTDOWN_SENT:
            user_state = SCTP_SHUTDOWN_SENT;
            break;
        case SCTP_STATE_SHUTDOWN_RECEIVED:
            user_state = SCTP_SHUTDOWN_RECEIVED;
            break;
        case SCTP_STATE_SHUTDOWN_ACK_SENT:
            user_state = SCTP_SHUTDOWN_ACK_SENT;
            break;
        default:
            user_state = SCTP_CLOSED;
            break;
        }
    }
    return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
{
    struct sctp_association *asoc;

    /*
     * Anything set to zero is taken care of by the allocation routine's
     * bzero
     */

    /*
     * Up front select what scoping to apply on addresses I tell my peer
     * Not sure what to do with these right now, we will need to come up
     * with a way to set them. We may need to pass them through from the
     * caller in the sctp_aloc_assoc() function.
     */
    int i;
#if defined(SCTP_DETAILED_STR_STATS)
    int j;
#endif

    asoc = &stcb->asoc;
    /* init all variables to a known value. */
    SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
    asoc->max_burst = inp->sctp_ep.max_burst;
    asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
    asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
    asoc->cookie_life = inp->sctp_ep.def_cookie_life;
    asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
    asoc->ecn_supported = inp->ecn_supported;
    asoc->prsctp_supported = inp->prsctp_supported;
    asoc->auth_supported = inp->auth_supported;
    asoc->asconf_supported = inp->asconf_supported;
    asoc->reconfig_supported = inp->reconfig_supported;
    asoc->nrsack_supported = inp->nrsack_supported;
    asoc->pktdrop_supported = inp->pktdrop_supported;
    asoc->idata_supported = inp->idata_supported;
    asoc->sctp_cmt_pf = (uint8_t)0;
    asoc->sctp_frag_point = inp->sctp_frag_point;
    asoc->sctp_features = inp->sctp_features;
    asoc->default_dscp = inp->sctp_ep.default_dscp;
    asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
    if (inp->sctp_ep.default_flowlabel) {
        asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
    } else {
        if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
            asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
            asoc->default_flowlabel &= 0x000fffff;
            asoc->default_flowlabel |= 0x80000000;
        } else {
            asoc->default_flowlabel = 0;
        }
    }
#endif
    asoc->sb_send_resv = 0;
    if (override_tag) {
        asoc->my_vtag = override_tag;
    } else {
        asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
    }
    /* Get the nonce tags */
    asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
    asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
    asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
    asoc->tsn_in_at = 0;
    asoc->tsn_out_at = 0;
    asoc->tsn_in_wrapped = 0;
    asoc->tsn_out_wrapped = 0;
    asoc->cumack_log_at = 0;
    asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
    asoc->fs_index = 0;
#endif
    asoc->refcnt = 0;
    asoc->assoc_up_sent = 0;
    asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
        sctp_select_initial_TSN(&inp->sctp_ep);
    asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
    /* we are optimistic here */
    asoc->peer_supports_nat = 0;
    asoc->sent_queue_retran_cnt = 0;

    /* for CMT */
    asoc->last_net_cmt_send_started = NULL;

    /* This will need to be adjusted */
    asoc->last_acked_seq = asoc->init_seq_number - 1;
    asoc->advanced_peer_ack_point = asoc->last_acked_seq;
    asoc->asconf_seq_in = asoc->last_acked_seq;

    /* here we are different, we hold the next one we expect */
    asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

    asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
    asoc->initial_rto = inp->sctp_ep.initial_rto;

    asoc->default_mtu = inp->sctp_ep.default_mtu;
    asoc->max_init_times = inp->sctp_ep.max_init_times;
    asoc->max_send_times = inp->sctp_ep.max_send_times;
    asoc->def_net_failure = inp->sctp_ep.def_net_failure;
    asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
    asoc->free_chunk_cnt = 0;

    asoc->iam_blocking = 0;
    asoc->context = inp->sctp_context;
    asoc->local_strreset_support = inp->local_strreset_support;
    asoc->def_send = inp->def_send;
    asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
    asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
    asoc->pr_sctp_cnt = 0;
    asoc->total_output_queue_size = 0;

    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
        asoc->scope.ipv6_addr_legal = 1;
        if (SCTP_IPV6_V6ONLY(inp) == 0) {
            asoc->scope.ipv4_addr_legal = 1;
        } else {
            asoc->scope.ipv4_addr_legal = 0;
        }
    } else {
        asoc->scope.ipv6_addr_legal = 0;
        asoc->scope.ipv4_addr_legal = 1;
    }

    asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
    asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

    asoc->smallest_mtu = inp->sctp_frag_point;
    asoc->minrto = inp->sctp_ep.sctp_minrto;
    asoc->maxrto = inp->sctp_ep.sctp_maxrto;

    asoc->stream_locked_on = 0;
    asoc->ecn_echo_cnt_onq = 0;
    asoc->stream_locked = 0;

    asoc->send_sack = 1;

    LIST_INIT(&asoc->sctp_restricted_addrs);

    TAILQ_INIT(&asoc->nets);
    TAILQ_INIT(&asoc->pending_reply_queue);
    TAILQ_INIT(&asoc->asconf_ack_sent);
    /* Setup to fill the hb random cache at first HB */
    asoc->hb_random_idx = 4;

    asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

    stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
    stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

    stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
    stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

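    /*
     * Both module choices above are indices into the statically defined
     * sctp_cc_functions[] and sctp_ss_functions[] tables declared at the
     * top of this file, so the association gets its own copy of the
     * selected congestion control and stream scheduling function vectors.
     */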
    /*
     * Now the stream parameters, here we allocate space for all streams
     * that we request by default.
     */
    asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
        o_strms;
    SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
        asoc->streamoutcnt * sizeof(struct sctp_stream_out),
        SCTP_M_STRMO);
    if (asoc->strmout == NULL) {
        /* big trouble no memory */
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    for (i = 0; i < asoc->streamoutcnt; i++) {
        /*
         * inbound side must be set to 0xffff, also NOTE when we get
         * the INIT-ACK back (for INIT sender) we MUST reduce the
         * count (streamoutcnt) but first check if we sent to any of
         * the upper streams that were dropped (if some were). Those
         * that were dropped must be notified to the upper layer as
         * failed to send.
         */
        TAILQ_INIT(&asoc->strmout[i].outqueue);
        asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
        asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
        for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
            asoc->strmout[i].abandoned_sent[j] = 0;
            asoc->strmout[i].abandoned_unsent[j] = 0;
        }
#else
        asoc->strmout[i].abandoned_sent[0] = 0;
        asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
        asoc->strmout[i].next_mid_ordered = 0;
        asoc->strmout[i].next_mid_unordered = 0;
        asoc->strmout[i].sid = i;
        asoc->strmout[i].last_msg_incomplete = 0;
        asoc->strmout[i].state = SCTP_STREAM_OPENING;
    }
    asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

    /* Now the mapping array */
    asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
    SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
        SCTP_M_MAP);
    if (asoc->mapping_array == NULL) {
        SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    memset(asoc->mapping_array, 0, asoc->mapping_array_size);
    SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
        SCTP_M_MAP);
    if (asoc->nr_mapping_array == NULL) {
        SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
        SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

    /* Now the init of the other outqueues */
    TAILQ_INIT(&asoc->free_chunks);
    TAILQ_INIT(&asoc->control_send_queue);
    TAILQ_INIT(&asoc->asconf_send_queue);
    TAILQ_INIT(&asoc->send_queue);
    TAILQ_INIT(&asoc->sent_queue);
    TAILQ_INIT(&asoc->resetHead);
    asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
    TAILQ_INIT(&asoc->asconf_queue);
    /* authentication fields */
    asoc->authinfo.random = NULL;
    asoc->authinfo.active_keyid = 0;
    asoc->authinfo.assoc_key = NULL;
    asoc->authinfo.assoc_keyid = 0;
    asoc->authinfo.recv_key = NULL;
    asoc->authinfo.recv_keyid = 0;
    LIST_INIT(&asoc->shared_keys);
    asoc->marked_retrans = 0;
    asoc->port = inp->sctp_ep.port;
    asoc->timoinit = 0;
    asoc->timodata = 0;
    asoc->timosack = 0;
    asoc->timoshutdown = 0;
    asoc->timoheartbeat = 0;
    asoc->timocookie = 0;
    asoc->timoshutdownack = 0;
    (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
    asoc->discontinuity_time = asoc->start_time;
    for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
        asoc->abandoned_unsent[i] = 0;
        asoc->abandoned_sent[i] = 0;
    }
    /*
     * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
     * freed later when the association is freed.
     */
    return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
    unsigned int i, limit;

    SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
        asoc->mapping_array_size,
        asoc->mapping_array_base_tsn,
        asoc->cumulative_tsn,
        asoc->highest_tsn_inside_map,
        asoc->highest_tsn_inside_nr_map);
    for (limit = asoc->mapping_array_size; limit > 1; limit--) {
        if (asoc->mapping_array[limit - 1] != 0) {
            break;
        }
    }
    SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
    for (i = 0; i < limit; i++) {
        SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
    }
    if (limit % 16)
        SCTP_PRINTF("\n");
    for (limit = asoc->mapping_array_size; limit > 1; limit--) {
        if (asoc->nr_mapping_array[limit - 1]) {
            break;
        }
    }
    SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
    for (i = 0; i < limit; i++) {
        SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
    }
    if (limit % 16)
        SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
    /* mapping array needs to grow */
    uint8_t *new_array1, *new_array2;
    uint32_t new_size;

    new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
    SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
    SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
    if ((new_array1 == NULL) || (new_array2 == NULL)) {
        /* can't get more, forget it */
        SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
        if (new_array1) {
            SCTP_FREE(new_array1, SCTP_M_MAP);
        }
        if (new_array2) {
            SCTP_FREE(new_array2, SCTP_M_MAP);
        }
        return (-1);
    }
    memset(new_array1, 0, new_size);
    memset(new_array2, 0, new_size);
    memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
    memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
    SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
    SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
    asoc->mapping_array = new_array1;
    asoc->nr_mapping_array = new_array2;
    asoc->mapping_array_size = new_size;
    return (0);
}

static void
sctp_iterator_work(struct sctp_iterator *it)
{
    struct epoch_tracker et;
    struct sctp_inpcb *tinp;
    int iteration_count = 0;
    int inp_skip = 0;
    int first_in = 1;

    NET_EPOCH_ENTER(et);
    SCTP_INP_INFO_RLOCK();
    SCTP_ITERATOR_LOCK();
    sctp_it_ctl.cur_it = it;
    if (it->inp) {
        SCTP_INP_RLOCK(it->inp);
        SCTP_INP_DECR_REF(it->inp);
    }
    if (it->inp == NULL) {
        /* iterator is complete */
done_with_iterator:
        sctp_it_ctl.cur_it = NULL;
        SCTP_ITERATOR_UNLOCK();
        SCTP_INP_INFO_RUNLOCK();
        if (it->function_atend != NULL) {
            (*it->function_atend) (it->pointer, it->val);
        }
        SCTP_FREE(it, SCTP_M_ITER);
        NET_EPOCH_EXIT(et);
        return;
    }
select_a_new_ep:
    if (first_in) {
        first_in = 0;
    } else {
        SCTP_INP_RLOCK(it->inp);
    }
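    /*
     * First walk past endpoints whose PCB flags or features do not match
     * what the caller of sctp_initiate_iterator() asked for; only matching
     * endpoints get the per-endpoint and per-association callbacks below.
     */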
    while (((it->pcb_flags) &&
        ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
        ((it->pcb_features) &&
        ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
        /* endpoint flags or features don't match, so keep looking */
        if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
            SCTP_INP_RUNLOCK(it->inp);
            goto done_with_iterator;
        }
        tinp = it->inp;
        it->inp = LIST_NEXT(it->inp, sctp_list);
        it->stcb = NULL;
        SCTP_INP_RUNLOCK(tinp);
        if (it->inp == NULL) {
            goto done_with_iterator;
        }
        SCTP_INP_RLOCK(it->inp);
    }
    /* now go through each assoc which is in the desired state */
    if (it->done_current_ep == 0) {
        if (it->function_inp != NULL)
            inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
        it->done_current_ep = 1;
    }
    if (it->stcb == NULL) {
        /* run the per instance function */
        it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
    }
    if ((inp_skip) || it->stcb == NULL) {
        if (it->function_inp_end != NULL) {
            inp_skip = (*it->function_inp_end) (it->inp,
                it->pointer,
                it->val);
        }
        SCTP_INP_RUNLOCK(it->inp);
        goto no_stcb;
    }
    while (it->stcb) {
        SCTP_TCB_LOCK(it->stcb);
        if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
            /* not in the right state... keep looking */
            SCTP_TCB_UNLOCK(it->stcb);
            goto next_assoc;
        }
        /* see if we have limited out the iterator loop */
        iteration_count++;
        if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
            /* Pause to let others grab the lock */
            atomic_add_int(&it->stcb->asoc.refcnt, 1);
            SCTP_TCB_UNLOCK(it->stcb);
            SCTP_INP_INCR_REF(it->inp);
            SCTP_INP_RUNLOCK(it->inp);
            SCTP_ITERATOR_UNLOCK();
            SCTP_INP_INFO_RUNLOCK();
            SCTP_INP_INFO_RLOCK();
            SCTP_ITERATOR_LOCK();
            if (sctp_it_ctl.iterator_flags) {
                /* We won't be staying here */
                SCTP_INP_DECR_REF(it->inp);
                atomic_add_int(&it->stcb->asoc.refcnt, -1);
                if (sctp_it_ctl.iterator_flags &
                    SCTP_ITERATOR_STOP_CUR_IT) {
                    sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
                    goto done_with_iterator;
                }
                if (sctp_it_ctl.iterator_flags &
                    SCTP_ITERATOR_STOP_CUR_INP) {
                    sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
                    goto no_stcb;
                }
                /* If we reach here, huh? */
                SCTP_PRINTF("Unknown it ctl flag %x\n",
                    sctp_it_ctl.iterator_flags);
                sctp_it_ctl.iterator_flags = 0;
            }
            SCTP_INP_RLOCK(it->inp);
            SCTP_INP_DECR_REF(it->inp);
            SCTP_TCB_LOCK(it->stcb);
            atomic_add_int(&it->stcb->asoc.refcnt, -1);
            iteration_count = 0;
        }
        KASSERT(it->inp == it->stcb->sctp_ep,
            ("%s: stcb %p does not belong to inp %p, but inp %p",
            __func__, it->stcb, it->inp, it->stcb->sctp_ep));

        /* run function on this one */
        (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

        /*
         * we lie here, it really needs to have its own type but
         * first I must verify that this won't affect things :-0
         */
        if (it->no_chunk_output == 0)
            sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

        SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
        it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
        if (it->stcb == NULL) {
            /* Run last function */
            if (it->function_inp_end != NULL) {
                inp_skip = (*it->function_inp_end) (it->inp,
                    it->pointer,
                    it->val);
            }
        }
    }
    SCTP_INP_RUNLOCK(it->inp);
no_stcb:
    /* done with all assocs on this endpoint, move on to next endpoint */
    it->done_current_ep = 0;
    if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
        it->inp = NULL;
    } else {
        it->inp = LIST_NEXT(it->inp, sctp_list);
    }
    it->stcb = NULL;
    if (it->inp == NULL) {
        goto done_with_iterator;
    }
    goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
    struct sctp_iterator *it;

    /* This function is called with the WQ lock in place */
    sctp_it_ctl.iterator_running = 1;
    while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
        /* now let's work on this one */
        TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
        SCTP_IPI_ITERATOR_WQ_UNLOCK();
        CURVNET_SET(it->vn);
        sctp_iterator_work(it);
        CURVNET_RESTORE();
        SCTP_IPI_ITERATOR_WQ_LOCK();
        /* sa_ignore FREED_MEMORY */
    }
    sctp_it_ctl.iterator_running = 0;
    return;
}

static void
sctp_handle_addr_wq(void)
{
    /* deal with the ADDR wq from the rtsock calls */
    struct sctp_laddr *wi, *nwi;
    struct sctp_asconf_iterator *asc;

    SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
        sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
    if (asc == NULL) {
        /* Try later, no memory */
        sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
            (struct sctp_inpcb *)NULL,
            (struct sctp_tcb *)NULL,
            (struct sctp_nets *)NULL);
        return;
    }
    LIST_INIT(&asc->list_of_work);
    asc->cnt = 0;

    LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
        LIST_REMOVE(wi, sctp_nxt_addr);
        LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
        asc->cnt++;
    }

    if (asc->cnt == 0) {
        SCTP_FREE(asc, SCTP_M_ASC_IT);
    } else {
        int ret;

        ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
            sctp_asconf_iterator_stcb,
            NULL, /* No ep end for boundall */
            SCTP_PCB_FLAGS_BOUNDALL,
            SCTP_PCB_ANY_FEATURES,
            SCTP_ASOC_ANY_STATE,
            (void *)asc, 0,
            sctp_asconf_iterator_end, NULL, 0);
        if (ret) {
            SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
            /*
             * Freeing if we are stopping or put back on the
             * addr_wq.
             */
            if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
                sctp_asconf_iterator_end(asc, 0);
            } else {
                LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
                    LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
                }
                SCTP_FREE(asc, SCTP_M_ASC_IT);
            }
        }
    }
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */

void
sctp_timeout_handler(void *t)
{
    struct epoch_tracker et;
    struct timeval tv;
    struct sctp_inpcb *inp;
    struct sctp_tcb *stcb;
    struct sctp_nets *net;
    struct sctp_timer *tmr;
    struct mbuf *op_err;
    int type;
    int i, secret;
    bool did_output, released_asoc_reference;

    /*
     * If inp, stcb or net are not NULL, then references to these were
     * added when the timer was started, and must be released before
     * this function returns.
     */
    tmr = (struct sctp_timer *)t;
    inp = (struct sctp_inpcb *)tmr->ep;
    stcb = (struct sctp_tcb *)tmr->tcb;
    net = (struct sctp_nets *)tmr->net;
    CURVNET_SET((struct vnet *)tmr->vnet);
    NET_EPOCH_ENTER(et);
    released_asoc_reference = false;

#ifdef SCTP_AUDITING_ENABLED
    sctp_audit_log(0xF0, (uint8_t)tmr->type);
    sctp_auditing(3, inp, stcb, net);
#endif

    /* sanity checks... */
    KASSERT(tmr->self == NULL || tmr->self == tmr,
        ("sctp_timeout_handler: tmr->self corrupted"));
    KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
        ("sctp_timeout_handler: invalid timer type %d", tmr->type));
    type = tmr->type;
    KASSERT(stcb == NULL || stcb->sctp_ep == inp,
        ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
        type, stcb, stcb->sctp_ep));
    tmr->stopped_from = 0xa001;
    if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
        SCTPDBG(SCTP_DEBUG_TIMER2,
            "Timer type %d handler exiting due to CLOSED association.\n",
            type);
        goto out_decr;
    }
    tmr->stopped_from = 0xa002;
    SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
    if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
        SCTPDBG(SCTP_DEBUG_TIMER2,
            "Timer type %d handler exiting due to not being active.\n",
            type);
        goto out_decr;
    }

    tmr->stopped_from = 0xa003;
    if (stcb) {
        SCTP_TCB_LOCK(stcb);
        /*
         * Release reference so that association can be freed if
         * necessary below. This is safe now that we have acquired
         * the lock.
         */
        atomic_add_int(&stcb->asoc.refcnt, -1);
        released_asoc_reference = true;
        if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
            ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
            (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
            SCTPDBG(SCTP_DEBUG_TIMER2,
                "Timer type %d handler exiting due to CLOSED association.\n",
                type);
            goto out;
        }
    } else if (inp != NULL) {
        SCTP_INP_WLOCK(inp);
    } else {
        SCTP_WQ_ADDR_LOCK();
    }

    /* Record in stopped_from which timeout occurred. */
    tmr->stopped_from = type;
    /* mark as being serviced now */
    if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
        /*
         * Callout has been rescheduled.
         */
        goto out;
    }
    if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
        /*
         * Not active, so no action.
         */
        goto out;
    }
    SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

    /* call the handler for the appropriate timer type */
    switch (type) {
    case SCTP_TIMER_TYPE_SEND:
        KASSERT(inp != NULL && stcb != NULL && net != NULL,
            ("timeout of type %d: inp = %p, stcb = %p, net = %p",
            type, inp, stcb, net));
        SCTP_STAT_INCR(sctps_timodata);
        stcb->asoc.timodata++;
        stcb->asoc.num_send_timers_up--;
        if (stcb->asoc.num_send_timers_up < 0) {
            stcb->asoc.num_send_timers_up = 0;
        }
        SCTP_TCB_LOCK_ASSERT(stcb);
        if (sctp_t3rxt_timer(inp, stcb, net)) {
            /* no need to unlock on tcb, it's gone */
            goto out_decr;
        }
        SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
        did_output = true;
        if ((stcb->asoc.num_send_timers_up == 0) &&
            (stcb->asoc.sent_queue_cnt > 0)) {
            struct sctp_tmit_chunk *chk;

            /*
             * Safeguard. If there is something on the sent queue
             * but no timers are running, something is wrong...
             * so we start a timer on the first chunk on the sent
             * queue, on whatever net it is sent to.
             */
1834 */ 1835 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1836 if (chk->whoTo != NULL) { 1837 break; 1838 } 1839 } 1840 if (chk != NULL) { 1841 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1842 } 1843 } 1844 break; 1845 case SCTP_TIMER_TYPE_INIT: 1846 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1847 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1848 type, inp, stcb, net)); 1849 SCTP_STAT_INCR(sctps_timoinit); 1850 stcb->asoc.timoinit++; 1851 if (sctp_t1init_timer(inp, stcb, net)) { 1852 /* no need to unlock on tcb its gone */ 1853 goto out_decr; 1854 } 1855 did_output = false; 1856 break; 1857 case SCTP_TIMER_TYPE_RECV: 1858 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1859 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1860 type, inp, stcb, net)); 1861 SCTP_STAT_INCR(sctps_timosack); 1862 stcb->asoc.timosack++; 1863 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1864 #ifdef SCTP_AUDITING_ENABLED 1865 sctp_auditing(4, inp, stcb, NULL); 1866 #endif 1867 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1868 did_output = true; 1869 break; 1870 case SCTP_TIMER_TYPE_SHUTDOWN: 1871 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1872 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1873 type, inp, stcb, net)); 1874 SCTP_STAT_INCR(sctps_timoshutdown); 1875 stcb->asoc.timoshutdown++; 1876 if (sctp_shutdown_timer(inp, stcb, net)) { 1877 /* no need to unlock on tcb its gone */ 1878 goto out_decr; 1879 } 1880 #ifdef SCTP_AUDITING_ENABLED 1881 sctp_auditing(4, inp, stcb, net); 1882 #endif 1883 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1884 did_output = true; 1885 break; 1886 case SCTP_TIMER_TYPE_HEARTBEAT: 1887 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1888 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1889 type, inp, stcb, net)); 1890 SCTP_STAT_INCR(sctps_timoheartbeat); 1891 stcb->asoc.timoheartbeat++; 1892 if (sctp_heartbeat_timer(inp, stcb, net)) { 1893 /* no need to unlock on tcb its gone */ 1894 goto out_decr; 1895 } 1896 #ifdef SCTP_AUDITING_ENABLED 1897 sctp_auditing(4, inp, stcb, net); 1898 #endif 1899 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1900 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1901 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1902 did_output = true; 1903 } else { 1904 did_output = false; 1905 } 1906 break; 1907 case SCTP_TIMER_TYPE_COOKIE: 1908 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1909 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1910 type, inp, stcb, net)); 1911 SCTP_STAT_INCR(sctps_timocookie); 1912 stcb->asoc.timocookie++; 1913 if (sctp_cookie_timer(inp, stcb, net)) { 1914 /* no need to unlock on tcb its gone */ 1915 goto out_decr; 1916 } 1917 #ifdef SCTP_AUDITING_ENABLED 1918 sctp_auditing(4, inp, stcb, net); 1919 #endif 1920 /* 1921 * We consider T3 and Cookie timer pretty much the same with 1922 * respect to where from in chunk_output. 
1923 */ 1924 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1925 did_output = true; 1926 break; 1927 case SCTP_TIMER_TYPE_NEWCOOKIE: 1928 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1929 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1930 type, inp, stcb, net)); 1931 SCTP_STAT_INCR(sctps_timosecret); 1932 (void)SCTP_GETTIME_TIMEVAL(&tv); 1933 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1934 inp->sctp_ep.last_secret_number = 1935 inp->sctp_ep.current_secret_number; 1936 inp->sctp_ep.current_secret_number++; 1937 if (inp->sctp_ep.current_secret_number >= 1938 SCTP_HOW_MANY_SECRETS) { 1939 inp->sctp_ep.current_secret_number = 0; 1940 } 1941 secret = (int)inp->sctp_ep.current_secret_number; 1942 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1943 inp->sctp_ep.secret_key[secret][i] = 1944 sctp_select_initial_TSN(&inp->sctp_ep); 1945 } 1946 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1947 did_output = false; 1948 break; 1949 case SCTP_TIMER_TYPE_PATHMTURAISE: 1950 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1951 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1952 type, inp, stcb, net)); 1953 SCTP_STAT_INCR(sctps_timopathmtu); 1954 sctp_pathmtu_timer(inp, stcb, net); 1955 did_output = false; 1956 break; 1957 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1958 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1959 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1960 type, inp, stcb, net)); 1961 if (sctp_shutdownack_timer(inp, stcb, net)) { 1962 /* no need to unlock on tcb its gone */ 1963 goto out_decr; 1964 } 1965 SCTP_STAT_INCR(sctps_timoshutdownack); 1966 stcb->asoc.timoshutdownack++; 1967 #ifdef SCTP_AUDITING_ENABLED 1968 sctp_auditing(4, inp, stcb, net); 1969 #endif 1970 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1971 did_output = true; 1972 break; 1973 case SCTP_TIMER_TYPE_ASCONF: 1974 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1975 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1976 type, inp, stcb, net)); 1977 SCTP_STAT_INCR(sctps_timoasconf); 1978 if (sctp_asconf_timer(inp, stcb, net)) { 1979 /* no need to unlock on tcb its gone */ 1980 goto out_decr; 1981 } 1982 #ifdef SCTP_AUDITING_ENABLED 1983 sctp_auditing(4, inp, stcb, net); 1984 #endif 1985 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 1986 did_output = true; 1987 break; 1988 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1989 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1990 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1991 type, inp, stcb, net)); 1992 SCTP_STAT_INCR(sctps_timoshutdownguard); 1993 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 1994 "Shutdown guard timer expired"); 1995 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 1996 /* no need to unlock on tcb its gone */ 1997 goto out_decr; 1998 case SCTP_TIMER_TYPE_AUTOCLOSE: 1999 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2000 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2001 type, inp, stcb, net)); 2002 SCTP_STAT_INCR(sctps_timoautoclose); 2003 sctp_autoclose_timer(inp, stcb); 2004 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2005 did_output = true; 2006 break; 2007 case SCTP_TIMER_TYPE_STRRESET: 2008 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2009 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2010 type, inp, stcb, net)); 2011 SCTP_STAT_INCR(sctps_timostrmrst); 2012 if (sctp_strreset_timer(inp, stcb)) { 
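/*
 * As with the other *_timer() helpers above, a non-zero return means the
 * association was aborted and freed inside the call (typically because a
 * retransmission limit was hit), so jump straight to out_decr and skip
 * the SCTP_TCB_UNLOCK() that the normal "out" path would perform.
 */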
2013 /* no need to unlock on tcb its gone */ 2014 goto out_decr; 2015 } 2016 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2017 did_output = true; 2018 break; 2019 case SCTP_TIMER_TYPE_INPKILL: 2020 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2021 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2022 type, inp, stcb, net)); 2023 SCTP_STAT_INCR(sctps_timoinpkill); 2024 /* 2025 * special case, take away our increment since WE are the 2026 * killer 2027 */ 2028 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2029 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2030 SCTP_INP_DECR_REF(inp); 2031 SCTP_INP_WUNLOCK(inp); 2032 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2033 SCTP_CALLED_FROM_INPKILL_TIMER); 2034 inp = NULL; 2035 goto out_decr; 2036 case SCTP_TIMER_TYPE_ASOCKILL: 2037 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2038 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2039 type, inp, stcb, net)); 2040 SCTP_STAT_INCR(sctps_timoassockill); 2041 /* Can we free it yet? */ 2042 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2043 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2044 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2045 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2046 /* 2047 * free asoc, always unlocks (or destroy's) so prevent 2048 * duplicate unlock or unlock of a free mtx :-0 2049 */ 2050 stcb = NULL; 2051 goto out_decr; 2052 case SCTP_TIMER_TYPE_ADDR_WQ: 2053 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2054 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2055 type, inp, stcb, net)); 2056 sctp_handle_addr_wq(); 2057 did_output = true; 2058 break; 2059 case SCTP_TIMER_TYPE_PRIM_DELETED: 2060 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2061 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2062 type, inp, stcb, net)); 2063 SCTP_STAT_INCR(sctps_timodelprim); 2064 sctp_delete_prim_timer(inp, stcb); 2065 did_output = false; 2066 break; 2067 default: 2068 #ifdef INVARIANTS 2069 panic("Unknown timer type %d", type); 2070 #else 2071 goto out; 2072 #endif 2073 } 2074 #ifdef SCTP_AUDITING_ENABLED 2075 sctp_audit_log(0xF1, (uint8_t)type); 2076 if (inp != NULL) 2077 sctp_auditing(5, inp, stcb, net); 2078 #endif 2079 if (did_output && (stcb != NULL)) { 2080 /* 2081 * Now we need to clean up the control chunk chain if an 2082 * ECNE is on it. It must be marked as UNSENT again so next 2083 * call will continue to send it until such time that we get 2084 * a CWR, to remove it. It is, however, less likely that we 2085 * will find a ecn echo on the chain though. 2086 */ 2087 sctp_fix_ecn_echo(&stcb->asoc); 2088 } 2089 out: 2090 if (stcb != NULL) { 2091 SCTP_TCB_UNLOCK(stcb); 2092 } else if (inp != NULL) { 2093 SCTP_INP_WUNLOCK(inp); 2094 } else { 2095 SCTP_WQ_ADDR_UNLOCK(); 2096 } 2097 2098 out_decr: 2099 /* These reference counts were incremented in sctp_timer_start(). */ 2100 if (inp != NULL) { 2101 SCTP_INP_DECR_REF(inp); 2102 } 2103 if ((stcb != NULL) && !released_asoc_reference) { 2104 atomic_add_int(&stcb->asoc.refcnt, -1); 2105 } 2106 if (net != NULL) { 2107 sctp_free_remote_addr(net); 2108 } 2109 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2110 CURVNET_RESTORE(); 2111 NET_EPOCH_EXIT(et); 2112 } 2113 2114 /*- 2115 * The following table shows which parameters must be provided 2116 * when calling sctp_timer_start(). For parameters not being 2117 * provided, NULL must be used. 
2118 * 2119 * |Name |inp |stcb|net | 2120 * |-----------------------------|----|----|----| 2121 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2122 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2123 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2124 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2125 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2126 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2127 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2128 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2129 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2130 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2131 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2132 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2133 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2134 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2135 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2136 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2137 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2138 * 2139 */ 2140 2141 void 2142 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2143 struct sctp_nets *net) 2144 { 2145 struct sctp_timer *tmr; 2146 uint32_t to_ticks; 2147 uint32_t rndval, jitter; 2148 2149 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2150 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2151 t_type, stcb, stcb->sctp_ep)); 2152 tmr = NULL; 2153 if (stcb != NULL) { 2154 SCTP_TCB_LOCK_ASSERT(stcb); 2155 } else if (inp != NULL) { 2156 SCTP_INP_WLOCK_ASSERT(inp); 2157 } else { 2158 SCTP_WQ_ADDR_LOCK_ASSERT(); 2159 } 2160 if (stcb != NULL) { 2161 /* 2162 * Don't restart timer on association that's about to be 2163 * killed. 2164 */ 2165 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2166 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2167 SCTPDBG(SCTP_DEBUG_TIMER2, 2168 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2169 t_type, inp, stcb, net); 2170 return; 2171 } 2172 /* Don't restart timer on net that's been removed. */ 2173 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2174 SCTPDBG(SCTP_DEBUG_TIMER2, 2175 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2176 t_type, inp, stcb, net); 2177 return; 2178 } 2179 } 2180 switch (t_type) { 2181 case SCTP_TIMER_TYPE_SEND: 2182 /* Here we use the RTO timer. */ 2183 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2184 #ifdef INVARIANTS 2185 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2186 t_type, inp, stcb, net); 2187 #else 2188 return; 2189 #endif 2190 } 2191 tmr = &net->rxt_timer; 2192 if (net->RTO == 0) { 2193 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2194 } else { 2195 to_ticks = sctp_msecs_to_ticks(net->RTO); 2196 } 2197 break; 2198 case SCTP_TIMER_TYPE_INIT: 2199 /* 2200 * Here we use the INIT timer default usually about 1 2201 * second. 2202 */ 2203 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2204 #ifdef INVARIANTS 2205 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2206 t_type, inp, stcb, net); 2207 #else 2208 return; 2209 #endif 2210 } 2211 tmr = &net->rxt_timer; 2212 if (net->RTO == 0) { 2213 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2214 } else { 2215 to_ticks = sctp_msecs_to_ticks(net->RTO); 2216 } 2217 break; 2218 case SCTP_TIMER_TYPE_RECV: 2219 /* 2220 * Here we use the Delayed-Ack timer value from the inp, 2221 * ususually about 200ms. 
2222 */ 2223 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2224 #ifdef INVARIANTS 2225 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2226 t_type, inp, stcb, net); 2227 #else 2228 return; 2229 #endif 2230 } 2231 tmr = &stcb->asoc.dack_timer; 2232 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2233 break; 2234 case SCTP_TIMER_TYPE_SHUTDOWN: 2235 /* Here we use the RTO of the destination. */ 2236 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2237 #ifdef INVARIANTS 2238 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2239 t_type, inp, stcb, net); 2240 #else 2241 return; 2242 #endif 2243 } 2244 tmr = &net->rxt_timer; 2245 if (net->RTO == 0) { 2246 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2247 } else { 2248 to_ticks = sctp_msecs_to_ticks(net->RTO); 2249 } 2250 break; 2251 case SCTP_TIMER_TYPE_HEARTBEAT: 2252 /* 2253 * The net is used here so that we can add in the RTO. Even 2254 * though we use a different timer. We also add the HB timer 2255 * PLUS a random jitter. 2256 */ 2257 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2258 #ifdef INVARIANTS 2259 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2260 t_type, inp, stcb, net); 2261 #else 2262 return; 2263 #endif 2264 } 2265 if ((net->dest_state & SCTP_ADDR_NOHB) && 2266 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 2267 SCTPDBG(SCTP_DEBUG_TIMER2, 2268 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2269 t_type, inp, stcb, net); 2270 return; 2271 } 2272 tmr = &net->hb_timer; 2273 if (net->RTO == 0) { 2274 to_ticks = stcb->asoc.initial_rto; 2275 } else { 2276 to_ticks = net->RTO; 2277 } 2278 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2279 jitter = rndval % to_ticks; 2280 if (to_ticks > 1) { 2281 to_ticks >>= 1; 2282 } 2283 if (jitter < (UINT32_MAX - to_ticks)) { 2284 to_ticks += jitter; 2285 } else { 2286 to_ticks = UINT32_MAX; 2287 } 2288 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2289 !(net->dest_state & SCTP_ADDR_PF)) { 2290 if (net->heart_beat_delay < (UINT32_MAX - to_ticks)) { 2291 to_ticks += net->heart_beat_delay; 2292 } else { 2293 to_ticks = UINT32_MAX; 2294 } 2295 } 2296 /* 2297 * Now we must convert the to_ticks that are now in ms to 2298 * ticks. 2299 */ 2300 to_ticks = sctp_msecs_to_ticks(to_ticks); 2301 break; 2302 case SCTP_TIMER_TYPE_COOKIE: 2303 /* 2304 * Here we can use the RTO timer from the network since one 2305 * RTT was complete. If a retransmission happened then we 2306 * will be using the RTO initial value. 2307 */ 2308 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2309 #ifdef INVARIANTS 2310 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2311 t_type, inp, stcb, net); 2312 #else 2313 return; 2314 #endif 2315 } 2316 tmr = &net->rxt_timer; 2317 if (net->RTO == 0) { 2318 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2319 } else { 2320 to_ticks = sctp_msecs_to_ticks(net->RTO); 2321 } 2322 break; 2323 case SCTP_TIMER_TYPE_NEWCOOKIE: 2324 /* 2325 * Nothing needed but the endpoint here ususually about 60 2326 * minutes. 
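 * This timer drives the state-cookie secret rotation done in the
 * SCTP_TIMER_TYPE_NEWCOOKIE case of sctp_timeout_handler() above:
 * current_secret_number is advanced and the new slot is refilled with
 * random words before the timer is restarted.  The interval comes from
 * inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE], assigned just
 * below.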
2327 */ 2328 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2329 #ifdef INVARIANTS 2330 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2331 t_type, inp, stcb, net); 2332 #else 2333 return; 2334 #endif 2335 } 2336 tmr = &inp->sctp_ep.signature_change; 2337 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2338 break; 2339 case SCTP_TIMER_TYPE_PATHMTURAISE: 2340 /* 2341 * Here we use the value found in the EP for PMTUD, 2342 * ususually about 10 minutes. 2343 */ 2344 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2345 #ifdef INVARIANTS 2346 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2347 t_type, inp, stcb, net); 2348 #else 2349 return; 2350 #endif 2351 } 2352 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2353 SCTPDBG(SCTP_DEBUG_TIMER2, 2354 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2355 t_type, inp, stcb, net); 2356 return; 2357 } 2358 tmr = &net->pmtu_timer; 2359 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2360 break; 2361 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2362 /* Here we use the RTO of the destination. */ 2363 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2364 #ifdef INVARIANTS 2365 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2366 t_type, inp, stcb, net); 2367 #else 2368 return; 2369 #endif 2370 } 2371 tmr = &net->rxt_timer; 2372 if (net->RTO == 0) { 2373 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2374 } else { 2375 to_ticks = sctp_msecs_to_ticks(net->RTO); 2376 } 2377 break; 2378 case SCTP_TIMER_TYPE_ASCONF: 2379 /* 2380 * Here the timer comes from the stcb but its value is from 2381 * the net's RTO. 2382 */ 2383 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2384 #ifdef INVARIANTS 2385 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2386 t_type, inp, stcb, net); 2387 #else 2388 return; 2389 #endif 2390 } 2391 tmr = &stcb->asoc.asconf_timer; 2392 if (net->RTO == 0) { 2393 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2394 } else { 2395 to_ticks = sctp_msecs_to_ticks(net->RTO); 2396 } 2397 break; 2398 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2399 /* 2400 * Here we use the endpoints shutdown guard timer usually 2401 * about 3 minutes. 2402 */ 2403 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2404 #ifdef INVARIANTS 2405 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2406 t_type, inp, stcb, net); 2407 #else 2408 return; 2409 #endif 2410 } 2411 tmr = &stcb->asoc.shut_guard_timer; 2412 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2413 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2414 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2415 } else { 2416 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2417 } 2418 } else { 2419 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2420 } 2421 break; 2422 case SCTP_TIMER_TYPE_AUTOCLOSE: 2423 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2424 #ifdef INVARIANTS 2425 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2426 t_type, inp, stcb, net); 2427 #else 2428 return; 2429 #endif 2430 } 2431 tmr = &stcb->asoc.autoclose_timer; 2432 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2433 break; 2434 case SCTP_TIMER_TYPE_STRRESET: 2435 /* 2436 * Here the timer comes from the stcb but its value is from 2437 * the net's RTO. 
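 * Note that stcb->asoc.strreset_timer is a shared slot: it is also used
 * by SCTP_TIMER_TYPE_ASOCKILL below, which is why sctp_timer_stop()
 * refuses to act when the type of the currently running timer does not
 * match the requested one.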
2438 */ 2439 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2440 #ifdef INVARIANTS 2441 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2442 t_type, inp, stcb, net); 2443 #else 2444 return; 2445 #endif 2446 } 2447 tmr = &stcb->asoc.strreset_timer; 2448 if (net->RTO == 0) { 2449 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2450 } else { 2451 to_ticks = sctp_msecs_to_ticks(net->RTO); 2452 } 2453 break; 2454 case SCTP_TIMER_TYPE_INPKILL: 2455 /* 2456 * The inp is setup to die. We re-use the signature_chage 2457 * timer since that has stopped and we are in the GONE 2458 * state. 2459 */ 2460 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2461 #ifdef INVARIANTS 2462 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2463 t_type, inp, stcb, net); 2464 #else 2465 return; 2466 #endif 2467 } 2468 tmr = &inp->sctp_ep.signature_change; 2469 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2470 break; 2471 case SCTP_TIMER_TYPE_ASOCKILL: 2472 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2473 #ifdef INVARIANTS 2474 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2475 t_type, inp, stcb, net); 2476 #else 2477 return; 2478 #endif 2479 } 2480 tmr = &stcb->asoc.strreset_timer; 2481 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2482 break; 2483 case SCTP_TIMER_TYPE_ADDR_WQ: 2484 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2485 #ifdef INVARIANTS 2486 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2487 t_type, inp, stcb, net); 2488 #else 2489 return; 2490 #endif 2491 } 2492 /* Only 1 tick away :-) */ 2493 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2494 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2495 break; 2496 case SCTP_TIMER_TYPE_PRIM_DELETED: 2497 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2498 #ifdef INVARIANTS 2499 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2500 t_type, inp, stcb, net); 2501 #else 2502 return; 2503 #endif 2504 } 2505 tmr = &stcb->asoc.delete_prim_timer; 2506 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2507 break; 2508 default: 2509 #ifdef INVARIANTS 2510 panic("Unknown timer type %d", t_type); 2511 #else 2512 return; 2513 #endif 2514 } 2515 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2516 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2517 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2518 /* 2519 * We do NOT allow you to have it already running. If it is, 2520 * we leave the current one up unchanged. 2521 */ 2522 SCTPDBG(SCTP_DEBUG_TIMER2, 2523 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2524 t_type, inp, stcb, net); 2525 return; 2526 } 2527 /* At this point we can proceed. */ 2528 if (t_type == SCTP_TIMER_TYPE_SEND) { 2529 stcb->asoc.num_send_timers_up++; 2530 } 2531 tmr->stopped_from = 0; 2532 tmr->type = t_type; 2533 tmr->ep = (void *)inp; 2534 tmr->tcb = (void *)stcb; 2535 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2536 tmr->net = NULL; 2537 } else { 2538 tmr->net = (void *)net; 2539 } 2540 tmr->self = (void *)tmr; 2541 tmr->vnet = (void *)curvnet; 2542 tmr->ticks = sctp_get_tick_count(); 2543 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2544 SCTPDBG(SCTP_DEBUG_TIMER2, 2545 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2546 t_type, to_ticks, inp, stcb, net); 2547 /* 2548 * If this is a newly scheduled callout, as opposed to a 2549 * rescheduled one, increment relevant reference counts. 
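 * These references pair with the releases done either in
 * sctp_timeout_handler() (the out_decr path) once the timer fires, or in
 * sctp_timer_stop() when a still-pending callout is cancelled.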
2550 */ 2551 if (tmr->ep != NULL) { 2552 SCTP_INP_INCR_REF(inp); 2553 } 2554 if (tmr->tcb != NULL) { 2555 atomic_add_int(&stcb->asoc.refcnt, 1); 2556 } 2557 if (tmr->net != NULL) { 2558 atomic_add_int(&net->ref_count, 1); 2559 } 2560 } else { 2561 /* 2562 * This should not happen, since we checked for pending 2563 * above. 2564 */ 2565 SCTPDBG(SCTP_DEBUG_TIMER2, 2566 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2567 t_type, to_ticks, inp, stcb, net); 2568 } 2569 return; 2570 } 2571 2572 /*- 2573 * The following table shows which parameters must be provided 2574 * when calling sctp_timer_stop(). For parameters not being 2575 * provided, NULL must be used. 2576 * 2577 * |Name |inp |stcb|net | 2578 * |-----------------------------|----|----|----| 2579 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2580 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2581 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2582 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2583 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2584 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2585 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2586 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2587 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2588 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2589 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2590 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2591 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2592 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2593 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2594 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2595 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2596 * 2597 */ 2598 2599 void 2600 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2601 struct sctp_nets *net, uint32_t from) 2602 { 2603 struct sctp_timer *tmr; 2604 2605 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2606 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2607 t_type, stcb, stcb->sctp_ep)); 2608 if (stcb != NULL) { 2609 SCTP_TCB_LOCK_ASSERT(stcb); 2610 } else if (inp != NULL) { 2611 SCTP_INP_WLOCK_ASSERT(inp); 2612 } else { 2613 SCTP_WQ_ADDR_LOCK_ASSERT(); 2614 } 2615 tmr = NULL; 2616 switch (t_type) { 2617 case SCTP_TIMER_TYPE_SEND: 2618 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2619 #ifdef INVARIANTS 2620 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2621 t_type, inp, stcb, net); 2622 #else 2623 return; 2624 #endif 2625 } 2626 tmr = &net->rxt_timer; 2627 break; 2628 case SCTP_TIMER_TYPE_INIT: 2629 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2630 #ifdef INVARIANTS 2631 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2632 t_type, inp, stcb, net); 2633 #else 2634 return; 2635 #endif 2636 } 2637 tmr = &net->rxt_timer; 2638 break; 2639 case SCTP_TIMER_TYPE_RECV: 2640 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2641 #ifdef INVARIANTS 2642 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2643 t_type, inp, stcb, net); 2644 #else 2645 return; 2646 #endif 2647 } 2648 tmr = &stcb->asoc.dack_timer; 2649 break; 2650 case SCTP_TIMER_TYPE_SHUTDOWN: 2651 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2652 #ifdef INVARIANTS 2653 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2654 t_type, inp, stcb, net); 2655 #else 2656 return; 2657 #endif 2658 } 2659 tmr = &net->rxt_timer; 2660 break; 2661 case SCTP_TIMER_TYPE_HEARTBEAT: 2662 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2663 #ifdef INVARIANTS 2664 panic("sctp_timer_stop of type %d: inp = 
%p, stcb = %p, net = %p", 2665 t_type, inp, stcb, net); 2666 #else 2667 return; 2668 #endif 2669 } 2670 tmr = &net->hb_timer; 2671 break; 2672 case SCTP_TIMER_TYPE_COOKIE: 2673 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2674 #ifdef INVARIANTS 2675 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2676 t_type, inp, stcb, net); 2677 #else 2678 return; 2679 #endif 2680 } 2681 tmr = &net->rxt_timer; 2682 break; 2683 case SCTP_TIMER_TYPE_NEWCOOKIE: 2684 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2685 #ifdef INVARIANTS 2686 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2687 t_type, inp, stcb, net); 2688 #else 2689 return; 2690 #endif 2691 } 2692 tmr = &inp->sctp_ep.signature_change; 2693 break; 2694 case SCTP_TIMER_TYPE_PATHMTURAISE: 2695 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2696 #ifdef INVARIANTS 2697 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2698 t_type, inp, stcb, net); 2699 #else 2700 return; 2701 #endif 2702 } 2703 tmr = &net->pmtu_timer; 2704 break; 2705 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2706 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2707 #ifdef INVARIANTS 2708 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2709 t_type, inp, stcb, net); 2710 #else 2711 return; 2712 #endif 2713 } 2714 tmr = &net->rxt_timer; 2715 break; 2716 case SCTP_TIMER_TYPE_ASCONF: 2717 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2718 #ifdef INVARIANTS 2719 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2720 t_type, inp, stcb, net); 2721 #else 2722 return; 2723 #endif 2724 } 2725 tmr = &stcb->asoc.asconf_timer; 2726 break; 2727 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2728 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2729 #ifdef INVARIANTS 2730 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2731 t_type, inp, stcb, net); 2732 #else 2733 return; 2734 #endif 2735 } 2736 tmr = &stcb->asoc.shut_guard_timer; 2737 break; 2738 case SCTP_TIMER_TYPE_AUTOCLOSE: 2739 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2740 #ifdef INVARIANTS 2741 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2742 t_type, inp, stcb, net); 2743 #else 2744 return; 2745 #endif 2746 } 2747 tmr = &stcb->asoc.autoclose_timer; 2748 break; 2749 case SCTP_TIMER_TYPE_STRRESET: 2750 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2751 #ifdef INVARIANTS 2752 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2753 t_type, inp, stcb, net); 2754 #else 2755 return; 2756 #endif 2757 } 2758 tmr = &stcb->asoc.strreset_timer; 2759 break; 2760 case SCTP_TIMER_TYPE_INPKILL: 2761 /* 2762 * The inp is setup to die. We re-use the signature_chage 2763 * timer since that has stopped and we are in the GONE 2764 * state. 
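 * (This is the same slot used by SCTP_TIMER_TYPE_NEWCOOKIE above; the
 * shared-timer check after the switch keeps one type from stopping the
 * other.)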
2765 */ 2766 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2767 #ifdef INVARIANTS 2768 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2769 t_type, inp, stcb, net); 2770 #else 2771 return; 2772 #endif 2773 } 2774 tmr = &inp->sctp_ep.signature_change; 2775 break; 2776 case SCTP_TIMER_TYPE_ASOCKILL: 2777 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2778 #ifdef INVARIANTS 2779 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2780 t_type, inp, stcb, net); 2781 #else 2782 return; 2783 #endif 2784 } 2785 tmr = &stcb->asoc.strreset_timer; 2786 break; 2787 case SCTP_TIMER_TYPE_ADDR_WQ: 2788 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2789 #ifdef INVARIANTS 2790 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2791 t_type, inp, stcb, net); 2792 #else 2793 return; 2794 #endif 2795 } 2796 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2797 break; 2798 case SCTP_TIMER_TYPE_PRIM_DELETED: 2799 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2800 #ifdef INVARIANTS 2801 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2802 t_type, inp, stcb, net); 2803 #else 2804 return; 2805 #endif 2806 } 2807 tmr = &stcb->asoc.delete_prim_timer; 2808 break; 2809 default: 2810 #ifdef INVARIANTS 2811 panic("Unknown timer type %d", t_type); 2812 #else 2813 return; 2814 #endif 2815 } 2816 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2817 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2818 (tmr->type != t_type)) { 2819 /* 2820 * Ok we have a timer that is under joint use. Cookie timer 2821 * per chance with the SEND timer. We therefore are NOT 2822 * running the timer that the caller wants stopped. So just 2823 * return. 2824 */ 2825 SCTPDBG(SCTP_DEBUG_TIMER2, 2826 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2827 t_type, inp, stcb, net); 2828 return; 2829 } 2830 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2831 stcb->asoc.num_send_timers_up--; 2832 if (stcb->asoc.num_send_timers_up < 0) { 2833 stcb->asoc.num_send_timers_up = 0; 2834 } 2835 } 2836 tmr->self = NULL; 2837 tmr->stopped_from = from; 2838 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2839 KASSERT(tmr->ep == inp, 2840 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2841 t_type, inp, tmr->ep)); 2842 KASSERT(tmr->tcb == stcb, 2843 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2844 t_type, stcb, tmr->tcb)); 2845 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2846 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2847 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2848 t_type, net, tmr->net)); 2849 SCTPDBG(SCTP_DEBUG_TIMER2, 2850 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2851 t_type, inp, stcb, net); 2852 /* 2853 * If the timer was actually stopped, decrement reference 2854 * counts that were incremented in sctp_timer_start(). 2855 */ 2856 if (tmr->ep != NULL) { 2857 SCTP_INP_DECR_REF(inp); 2858 tmr->ep = NULL; 2859 } 2860 if (tmr->tcb != NULL) { 2861 atomic_add_int(&stcb->asoc.refcnt, -1); 2862 tmr->tcb = NULL; 2863 } 2864 if (tmr->net != NULL) { 2865 /* 2866 * Can't use net, since it doesn't work for 2867 * SCTP_TIMER_TYPE_ASCONF. 
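 * For SCTP_TIMER_TYPE_ASCONF the caller passes net == NULL (see the
 * table above), while the running timer recorded whichever destination
 * it was started on, so the stored tmr->net is the pointer whose
 * reference must be dropped here.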
2868 */ 2869 sctp_free_remote_addr((struct sctp_nets *)tmr->net); 2870 tmr->net = NULL; 2871 } 2872 } else { 2873 SCTPDBG(SCTP_DEBUG_TIMER2, 2874 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2875 t_type, inp, stcb, net); 2876 } 2877 return; 2878 } 2879 2880 uint32_t 2881 sctp_calculate_len(struct mbuf *m) 2882 { 2883 uint32_t tlen = 0; 2884 struct mbuf *at; 2885 2886 at = m; 2887 while (at) { 2888 tlen += SCTP_BUF_LEN(at); 2889 at = SCTP_BUF_NEXT(at); 2890 } 2891 return (tlen); 2892 } 2893 2894 void 2895 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2896 struct sctp_association *asoc, uint32_t mtu) 2897 { 2898 /* 2899 * Reset the P-MTU size on this association, this involves changing 2900 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2901 * allow the DF flag to be cleared. 2902 */ 2903 struct sctp_tmit_chunk *chk; 2904 unsigned int eff_mtu, ovh; 2905 2906 asoc->smallest_mtu = mtu; 2907 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2908 ovh = SCTP_MIN_OVERHEAD; 2909 } else { 2910 ovh = SCTP_MIN_V4_OVERHEAD; 2911 } 2912 eff_mtu = mtu - ovh; 2913 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2914 if (chk->send_size > eff_mtu) { 2915 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2916 } 2917 } 2918 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2919 if (chk->send_size > eff_mtu) { 2920 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2921 } 2922 } 2923 } 2924 2925 /* 2926 * Given an association and starting time of the current RTT period, update 2927 * RTO in number of msecs. net should point to the current network. 2928 * Return 1, if an RTO update was performed, return 0 if no update was 2929 * performed due to invalid starting point. 2930 */ 2931 2932 int 2933 sctp_calculate_rto(struct sctp_tcb *stcb, 2934 struct sctp_association *asoc, 2935 struct sctp_nets *net, 2936 struct timeval *old, 2937 int rtt_from_sack) 2938 { 2939 struct timeval now; 2940 uint64_t rtt_us; /* RTT in us */ 2941 int32_t rtt; /* RTT in ms */ 2942 uint32_t new_rto; 2943 int first_measure = 0; 2944 2945 /************************/ 2946 /* 1. calculate new RTT */ 2947 /************************/ 2948 /* get the current time */ 2949 if (stcb->asoc.use_precise_time) { 2950 (void)SCTP_GETPTIME_TIMEVAL(&now); 2951 } else { 2952 (void)SCTP_GETTIME_TIMEVAL(&now); 2953 } 2954 if ((old->tv_sec > now.tv_sec) || 2955 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2956 /* The starting point is in the future. */ 2957 return (0); 2958 } 2959 timevalsub(&now, old); 2960 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2961 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2962 /* The RTT is larger than a sane value. */ 2963 return (0); 2964 } 2965 /* store the current RTT in us */ 2966 net->rtt = rtt_us; 2967 /* compute rtt in ms */ 2968 rtt = (int32_t)(net->rtt / 1000); 2969 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2970 /* 2971 * Tell the CC module that a new update has just occurred 2972 * from a sack 2973 */ 2974 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2975 } 2976 /* 2977 * Do we need to determine the lan? We do this only on sacks i.e. 2978 * RTT being determined from data not non-data (HB/INIT->INITACK). 2979 */ 2980 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2981 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2982 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2983 net->lan_type = SCTP_LAN_INTERNET; 2984 } else { 2985 net->lan_type = SCTP_LAN_LOCAL; 2986 } 2987 } 2988 2989 /***************************/ 2990 /* 2. 
update RTTVAR & SRTT */ 2991 /***************************/ 2992 /*- 2993 * Compute the scaled average lastsa and the 2994 * scaled variance lastsv as described in van Jacobson's 2995 * paper "Congestion Avoidance and Control", Annex A. 2996 * 2997 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2998 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2999 */ 3000 if (net->RTO_measured) { 3001 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 3002 net->lastsa += rtt; 3003 if (rtt < 0) { 3004 rtt = -rtt; 3005 } 3006 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 3007 net->lastsv += rtt; 3008 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3009 rto_logging(net, SCTP_LOG_RTTVAR); 3010 } 3011 } else { 3012 /* First RTO measurement */ 3013 net->RTO_measured = 1; 3014 first_measure = 1; 3015 net->lastsa = rtt << SCTP_RTT_SHIFT; 3016 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3017 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3018 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3019 } 3020 } 3021 if (net->lastsv == 0) { 3022 net->lastsv = SCTP_CLOCK_GRANULARITY; 3023 } 3024 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3025 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3026 (stcb->asoc.sat_network_lockout == 0)) { 3027 stcb->asoc.sat_network = 1; 3028 } else if ((!first_measure) && stcb->asoc.sat_network) { 3029 stcb->asoc.sat_network = 0; 3030 stcb->asoc.sat_network_lockout = 1; 3031 } 3032 /* bound it, per C6/C7 in Section 5.3.1 */ 3033 if (new_rto < stcb->asoc.minrto) { 3034 new_rto = stcb->asoc.minrto; 3035 } 3036 if (new_rto > stcb->asoc.maxrto) { 3037 new_rto = stcb->asoc.maxrto; 3038 } 3039 net->RTO = new_rto; 3040 return (1); 3041 } 3042 3043 /* 3044 * Return a pointer to a contiguous piece of data from the given mbuf chain 3045 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3046 * one mbuf, a copy is made at 'ptr'. The caller must ensure that the buffer size 3047 * is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain. 3048 */ 3049 caddr_t 3050 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3051 { 3052 uint32_t count; 3053 uint8_t *ptr; 3054 3055 ptr = in_ptr; 3056 if ((off < 0) || (len <= 0)) 3057 return (NULL); 3058 3059 /* find the desired start location */ 3060 while ((m != NULL) && (off > 0)) { 3061 if (off < SCTP_BUF_LEN(m)) 3062 break; 3063 off -= SCTP_BUF_LEN(m); 3064 m = SCTP_BUF_NEXT(m); 3065 } 3066 if (m == NULL) 3067 return (NULL); 3068 3069 /* is the current mbuf large enough (e.g., contiguous)? */ 3070 if ((SCTP_BUF_LEN(m) - off) >= len) { 3071 return (mtod(m, caddr_t)+off); 3072 } else { 3073 /* else, it spans more than one mbuf, so save a temp copy...
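 * A typical caller keeps a scratch buffer of at least 'len' bytes and
 * simply uses whichever pointer comes back; a sketch ('chunk_buf' and
 * 'offset' are illustrative locals):
 *
 *   uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
 *   struct sctp_chunkhdr *ch;
 *
 *   ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
 *       sizeof(struct sctp_chunkhdr), chunk_buf);
 *   if (ch == NULL)
 *       return;   (not enough data left in the chain)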
*/ 3074 while ((m != NULL) && (len > 0)) { 3075 count = min(SCTP_BUF_LEN(m) - off, len); 3076 memcpy(ptr, mtod(m, caddr_t)+off, count); 3077 len -= count; 3078 ptr += count; 3079 off = 0; 3080 m = SCTP_BUF_NEXT(m); 3081 } 3082 if ((m == NULL) && (len > 0)) 3083 return (NULL); 3084 else 3085 return ((caddr_t)in_ptr); 3086 } 3087 } 3088 3089 struct sctp_paramhdr * 3090 sctp_get_next_param(struct mbuf *m, 3091 int offset, 3092 struct sctp_paramhdr *pull, 3093 int pull_limit) 3094 { 3095 /* This just provides a typed signature to Peter's Pull routine */ 3096 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3097 (uint8_t *)pull)); 3098 } 3099 3100 struct mbuf * 3101 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3102 { 3103 struct mbuf *m_last; 3104 caddr_t dp; 3105 3106 if (padlen > 3) { 3107 return (NULL); 3108 } 3109 if (padlen <= M_TRAILINGSPACE(m)) { 3110 /* 3111 * The easy way. We hope the majority of the time we hit 3112 * here :) 3113 */ 3114 m_last = m; 3115 } else { 3116 /* Hard way we must grow the mbuf chain */ 3117 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3118 if (m_last == NULL) { 3119 return (NULL); 3120 } 3121 SCTP_BUF_LEN(m_last) = 0; 3122 SCTP_BUF_NEXT(m_last) = NULL; 3123 SCTP_BUF_NEXT(m) = m_last; 3124 } 3125 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3126 SCTP_BUF_LEN(m_last) += padlen; 3127 memset(dp, 0, padlen); 3128 return (m_last); 3129 } 3130 3131 struct mbuf * 3132 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3133 { 3134 /* find the last mbuf in chain and pad it */ 3135 struct mbuf *m_at; 3136 3137 if (last_mbuf != NULL) { 3138 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3139 } else { 3140 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3141 if (SCTP_BUF_NEXT(m_at) == NULL) { 3142 return (sctp_add_pad_tombuf(m_at, padval)); 3143 } 3144 } 3145 } 3146 return (NULL); 3147 } 3148 3149 static void 3150 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3151 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked) 3152 { 3153 struct mbuf *m_notify; 3154 struct sctp_assoc_change *sac; 3155 struct sctp_queued_to_read *control; 3156 unsigned int notif_len; 3157 uint16_t abort_len; 3158 unsigned int i; 3159 3160 if (stcb == NULL) { 3161 return; 3162 } 3163 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3164 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3165 if (abort != NULL) { 3166 abort_len = ntohs(abort->ch.chunk_length); 3167 /* 3168 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3169 * contiguous. 3170 */ 3171 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3172 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3173 } 3174 } else { 3175 abort_len = 0; 3176 } 3177 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3178 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3179 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3180 notif_len += abort_len; 3181 } 3182 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3183 if (m_notify == NULL) { 3184 /* Retry with smaller value. 
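 * On this retry only the fixed-size struct sctp_assoc_change fits, so
 * the optional sac_info bytes (the supported-features list or the ABORT
 * cause) are dropped; the notif_len check further down then skips
 * filling them in.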
*/ 3185 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3186 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3187 if (m_notify == NULL) { 3188 goto set_error; 3189 } 3190 } 3191 SCTP_BUF_NEXT(m_notify) = NULL; 3192 sac = mtod(m_notify, struct sctp_assoc_change *); 3193 memset(sac, 0, notif_len); 3194 sac->sac_type = SCTP_ASSOC_CHANGE; 3195 sac->sac_flags = 0; 3196 sac->sac_length = sizeof(struct sctp_assoc_change); 3197 sac->sac_state = state; 3198 sac->sac_error = error; 3199 /* XXX verify these stream counts */ 3200 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3201 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3202 sac->sac_assoc_id = sctp_get_associd(stcb); 3203 if (notif_len > sizeof(struct sctp_assoc_change)) { 3204 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3205 i = 0; 3206 if (stcb->asoc.prsctp_supported == 1) { 3207 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3208 } 3209 if (stcb->asoc.auth_supported == 1) { 3210 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3211 } 3212 if (stcb->asoc.asconf_supported == 1) { 3213 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3214 } 3215 if (stcb->asoc.idata_supported == 1) { 3216 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3217 } 3218 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3219 if (stcb->asoc.reconfig_supported == 1) { 3220 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3221 } 3222 sac->sac_length += i; 3223 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3224 memcpy(sac->sac_info, abort, abort_len); 3225 sac->sac_length += abort_len; 3226 } 3227 } 3228 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3229 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3230 0, 0, stcb->asoc.context, 0, 0, 0, 3231 m_notify); 3232 if (control != NULL) { 3233 control->length = SCTP_BUF_LEN(m_notify); 3234 control->spec_flags = M_NOTIFICATION; 3235 /* not that we need this */ 3236 control->tail_mbuf = m_notify; 3237 sctp_add_to_readq(stcb->sctp_ep, stcb, 3238 control, 3239 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3240 so_locked); 3241 } else { 3242 sctp_m_freem(m_notify); 3243 } 3244 } 3245 /* 3246 * For 1-to-1 style sockets, we send up an error when an ABORT 3247 * comes in.
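 * The mapping below: for a peer ABORT, ECONNREFUSED if we were still in
 * COOKIE_WAIT and ECONNRESET otherwise; for a locally generated failure,
 * ETIMEDOUT while still in COOKIE_WAIT/COOKIE_ECHOED and ECONNABORTED in
 * any later state.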
3248 */ 3249 set_error: 3250 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3251 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3252 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3253 SOCK_LOCK(stcb->sctp_socket); 3254 if (from_peer) { 3255 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3256 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3257 stcb->sctp_socket->so_error = ECONNREFUSED; 3258 } else { 3259 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3260 stcb->sctp_socket->so_error = ECONNRESET; 3261 } 3262 } else { 3263 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 3264 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 3265 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3266 stcb->sctp_socket->so_error = ETIMEDOUT; 3267 } else { 3268 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3269 stcb->sctp_socket->so_error = ECONNABORTED; 3270 } 3271 } 3272 SOCK_UNLOCK(stcb->sctp_socket); 3273 } 3274 /* Wake ANY sleepers */ 3275 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3276 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3277 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3278 socantrcvmore(stcb->sctp_socket); 3279 } 3280 sorwakeup(stcb->sctp_socket); 3281 sowwakeup(stcb->sctp_socket); 3282 } 3283 3284 static void 3285 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3286 struct sockaddr *sa, uint32_t error, int so_locked) 3287 { 3288 struct mbuf *m_notify; 3289 struct sctp_paddr_change *spc; 3290 struct sctp_queued_to_read *control; 3291 3292 if ((stcb == NULL) || 3293 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3294 /* event not enabled */ 3295 return; 3296 } 3297 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3298 if (m_notify == NULL) 3299 return; 3300 SCTP_BUF_LEN(m_notify) = 0; 3301 spc = mtod(m_notify, struct sctp_paddr_change *); 3302 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3303 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3304 spc->spc_flags = 0; 3305 spc->spc_length = sizeof(struct sctp_paddr_change); 3306 switch (sa->sa_family) { 3307 #ifdef INET 3308 case AF_INET: 3309 #ifdef INET6 3310 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3311 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3312 (struct sockaddr_in6 *)&spc->spc_aaddr); 3313 } else { 3314 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3315 } 3316 #else 3317 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3318 #endif 3319 break; 3320 #endif 3321 #ifdef INET6 3322 case AF_INET6: 3323 { 3324 struct sockaddr_in6 *sin6; 3325 3326 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3327 3328 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3329 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3330 if (sin6->sin6_scope_id == 0) { 3331 /* recover scope_id for user */ 3332 (void)sa6_recoverscope(sin6); 3333 } else { 3334 /* clear embedded scope_id for user */ 3335 in6_clearscope(&sin6->sin6_addr); 3336 } 3337 } 3338 break; 3339 } 3340 #endif 3341 default: 3342 /* TSNH */ 3343 break; 3344 } 3345 spc->spc_state = state; 3346 spc->spc_error = error; 3347 spc->spc_assoc_id = sctp_get_associd(stcb); 3348 3349 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3350 SCTP_BUF_NEXT(m_notify) = NULL; 3351 3352 /* append to socket */ 3353 control = sctp_build_readq_entry(stcb, 
stcb->asoc.primary_destination, 3354 0, 0, stcb->asoc.context, 0, 0, 0, 3355 m_notify); 3356 if (control == NULL) { 3357 /* no memory */ 3358 sctp_m_freem(m_notify); 3359 return; 3360 } 3361 control->length = SCTP_BUF_LEN(m_notify); 3362 control->spec_flags = M_NOTIFICATION; 3363 /* not that we need this */ 3364 control->tail_mbuf = m_notify; 3365 sctp_add_to_readq(stcb->sctp_ep, stcb, 3366 control, 3367 &stcb->sctp_socket->so_rcv, 1, 3368 SCTP_READ_LOCK_NOT_HELD, 3369 so_locked); 3370 } 3371 3372 static void 3373 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3374 struct sctp_tmit_chunk *chk, int so_locked) 3375 { 3376 struct mbuf *m_notify; 3377 struct sctp_send_failed *ssf; 3378 struct sctp_send_failed_event *ssfe; 3379 struct sctp_queued_to_read *control; 3380 struct sctp_chunkhdr *chkhdr; 3381 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3382 3383 if ((stcb == NULL) || 3384 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3385 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3386 /* event not enabled */ 3387 return; 3388 } 3389 3390 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3391 notifhdr_len = sizeof(struct sctp_send_failed_event); 3392 } else { 3393 notifhdr_len = sizeof(struct sctp_send_failed); 3394 } 3395 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3396 if (m_notify == NULL) 3397 /* no space left */ 3398 return; 3399 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3400 if (stcb->asoc.idata_supported) { 3401 chkhdr_len = sizeof(struct sctp_idata_chunk); 3402 } else { 3403 chkhdr_len = sizeof(struct sctp_data_chunk); 3404 } 3405 /* Use some defaults in case we can't access the chunk header */ 3406 if (chk->send_size >= chkhdr_len) { 3407 payload_len = chk->send_size - chkhdr_len; 3408 } else { 3409 payload_len = 0; 3410 } 3411 padding_len = 0; 3412 if (chk->data != NULL) { 3413 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3414 if (chkhdr != NULL) { 3415 chk_len = ntohs(chkhdr->chunk_length); 3416 if ((chk_len >= chkhdr_len) && 3417 (chk->send_size >= chk_len) && 3418 (chk->send_size - chk_len < 4)) { 3419 padding_len = chk->send_size - chk_len; 3420 payload_len = chk->send_size - chkhdr_len - padding_len; 3421 } 3422 } 3423 } 3424 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3425 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3426 memset(ssfe, 0, notifhdr_len); 3427 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3428 if (sent) { 3429 ssfe->ssfe_flags = SCTP_DATA_SENT; 3430 } else { 3431 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3432 } 3433 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3434 ssfe->ssfe_error = error; 3435 /* not exactly what the user sent in, but should be close :) */ 3436 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3437 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3438 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3439 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3440 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3441 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3442 } else { 3443 ssf = mtod(m_notify, struct sctp_send_failed *); 3444 memset(ssf, 0, notifhdr_len); 3445 ssf->ssf_type = SCTP_SEND_FAILED; 3446 if (sent) { 3447 ssf->ssf_flags = SCTP_DATA_SENT; 3448 } else { 3449 ssf->ssf_flags = SCTP_DATA_UNSENT; 3450 } 3451 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3452 ssf->ssf_error = 
error; 3453 /* not exactly what the user sent in, but should be close :) */ 3454 ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3455 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3456 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3457 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3458 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3459 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3460 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3461 } 3462 if (chk->data != NULL) { 3463 /* Trim off the sctp chunk header (it should be there) */ 3464 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3465 m_adj(chk->data, chkhdr_len); 3466 m_adj(chk->data, -padding_len); 3467 sctp_mbuf_crush(chk->data); 3468 chk->send_size -= (chkhdr_len + padding_len); 3469 } 3470 } 3471 SCTP_BUF_NEXT(m_notify) = chk->data; 3472 /* Steal off the mbuf */ 3473 chk->data = NULL; 3474 /* 3475 * For this case, we check the actual socket buffer, since the assoc 3476 * is going away we don't want to overfill the socket buffer for a 3477 * non-reader 3478 */ 3479 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3480 sctp_m_freem(m_notify); 3481 return; 3482 } 3483 /* append to socket */ 3484 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3485 0, 0, stcb->asoc.context, 0, 0, 0, 3486 m_notify); 3487 if (control == NULL) { 3488 /* no memory */ 3489 sctp_m_freem(m_notify); 3490 return; 3491 } 3492 control->length = SCTP_BUF_LEN(m_notify); 3493 control->spec_flags = M_NOTIFICATION; 3494 /* not that we need this */ 3495 control->tail_mbuf = m_notify; 3496 sctp_add_to_readq(stcb->sctp_ep, stcb, 3497 control, 3498 &stcb->sctp_socket->so_rcv, 1, 3499 SCTP_READ_LOCK_NOT_HELD, 3500 so_locked); 3501 } 3502 3503 static void 3504 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3505 struct sctp_stream_queue_pending *sp, int so_locked) 3506 { 3507 struct mbuf *m_notify; 3508 struct sctp_send_failed *ssf; 3509 struct sctp_send_failed_event *ssfe; 3510 struct sctp_queued_to_read *control; 3511 int notifhdr_len; 3512 3513 if ((stcb == NULL) || 3514 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3515 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3516 /* event not enabled */ 3517 return; 3518 } 3519 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3520 notifhdr_len = sizeof(struct sctp_send_failed_event); 3521 } else { 3522 notifhdr_len = sizeof(struct sctp_send_failed); 3523 } 3524 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3525 if (m_notify == NULL) { 3526 /* no space left */ 3527 return; 3528 } 3529 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3530 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3531 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3532 memset(ssfe, 0, notifhdr_len); 3533 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3534 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3535 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3536 ssfe->ssfe_error = error; 3537 /* not exactly what the user sent in, but should be close :) */ 3538 ssfe->ssfe_info.snd_sid = sp->sid; 3539 if (sp->some_taken) { 3540 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3541 } else { 3542 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3543 } 3544 ssfe->ssfe_info.snd_ppid = sp->ppid; 3545 ssfe->ssfe_info.snd_context = sp->context; 3546 ssfe->ssfe_info.snd_assoc_id = 
sctp_get_associd(stcb); 3547 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3548 } else { 3549 ssf = mtod(m_notify, struct sctp_send_failed *); 3550 memset(ssf, 0, notifhdr_len); 3551 ssf->ssf_type = SCTP_SEND_FAILED; 3552 ssf->ssf_flags = SCTP_DATA_UNSENT; 3553 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3554 ssf->ssf_error = error; 3555 /* not exactly what the user sent in, but should be close :) */ 3556 ssf->ssf_info.sinfo_stream = sp->sid; 3557 ssf->ssf_info.sinfo_ssn = 0; 3558 if (sp->some_taken) { 3559 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3560 } else { 3561 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3562 } 3563 ssf->ssf_info.sinfo_ppid = sp->ppid; 3564 ssf->ssf_info.sinfo_context = sp->context; 3565 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3566 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3567 } 3568 SCTP_BUF_NEXT(m_notify) = sp->data; 3569 3570 /* Steal off the mbuf */ 3571 sp->data = NULL; 3572 /* 3573 * For this case, we check the actual socket buffer, since the assoc 3574 * is going away we don't want to overfill the socket buffer for a 3575 * non-reader 3576 */ 3577 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3578 sctp_m_freem(m_notify); 3579 return; 3580 } 3581 /* append to socket */ 3582 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3583 0, 0, stcb->asoc.context, 0, 0, 0, 3584 m_notify); 3585 if (control == NULL) { 3586 /* no memory */ 3587 sctp_m_freem(m_notify); 3588 return; 3589 } 3590 control->length = SCTP_BUF_LEN(m_notify); 3591 control->spec_flags = M_NOTIFICATION; 3592 /* not that we need this */ 3593 control->tail_mbuf = m_notify; 3594 sctp_add_to_readq(stcb->sctp_ep, stcb, 3595 control, 3596 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3597 } 3598 3599 static void 3600 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3601 { 3602 struct mbuf *m_notify; 3603 struct sctp_adaptation_event *sai; 3604 struct sctp_queued_to_read *control; 3605 3606 if ((stcb == NULL) || 3607 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3608 /* event not enabled */ 3609 return; 3610 } 3611 3612 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3613 if (m_notify == NULL) 3614 /* no space left */ 3615 return; 3616 SCTP_BUF_LEN(m_notify) = 0; 3617 sai = mtod(m_notify, struct sctp_adaptation_event *); 3618 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3619 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3620 sai->sai_flags = 0; 3621 sai->sai_length = sizeof(struct sctp_adaptation_event); 3622 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3623 sai->sai_assoc_id = sctp_get_associd(stcb); 3624 3625 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3626 SCTP_BUF_NEXT(m_notify) = NULL; 3627 3628 /* append to socket */ 3629 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3630 0, 0, stcb->asoc.context, 0, 0, 0, 3631 m_notify); 3632 if (control == NULL) { 3633 /* no memory */ 3634 sctp_m_freem(m_notify); 3635 return; 3636 } 3637 control->length = SCTP_BUF_LEN(m_notify); 3638 control->spec_flags = M_NOTIFICATION; 3639 /* not that we need this */ 3640 control->tail_mbuf = m_notify; 3641 sctp_add_to_readq(stcb->sctp_ep, stcb, 3642 control, 3643 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3644 } 3645 3646 /* This always must be called with the read-queue LOCKED in the INP */ 3647 static void 3648 
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3649 uint32_t val, int so_locked) 3650 { 3651 struct mbuf *m_notify; 3652 struct sctp_pdapi_event *pdapi; 3653 struct sctp_queued_to_read *control; 3654 struct sockbuf *sb; 3655 3656 if ((stcb == NULL) || 3657 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3658 /* event not enabled */ 3659 return; 3660 } 3661 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3662 return; 3663 } 3664 3665 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3666 if (m_notify == NULL) 3667 /* no space left */ 3668 return; 3669 SCTP_BUF_LEN(m_notify) = 0; 3670 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3671 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3672 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3673 pdapi->pdapi_flags = 0; 3674 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3675 pdapi->pdapi_indication = error; 3676 pdapi->pdapi_stream = (val >> 16); 3677 pdapi->pdapi_seq = (val & 0x0000ffff); 3678 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3679 3680 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3681 SCTP_BUF_NEXT(m_notify) = NULL; 3682 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3683 0, 0, stcb->asoc.context, 0, 0, 0, 3684 m_notify); 3685 if (control == NULL) { 3686 /* no memory */ 3687 sctp_m_freem(m_notify); 3688 return; 3689 } 3690 control->length = SCTP_BUF_LEN(m_notify); 3691 control->spec_flags = M_NOTIFICATION; 3692 /* not that we need this */ 3693 control->tail_mbuf = m_notify; 3694 sb = &stcb->sctp_socket->so_rcv; 3695 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3696 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3697 } 3698 sctp_sballoc(stcb, sb, m_notify); 3699 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3700 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3701 } 3702 control->end_added = 1; 3703 if (stcb->asoc.control_pdapi) 3704 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3705 else { 3706 /* we really should not see this case */ 3707 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3708 } 3709 if (stcb->sctp_ep && stcb->sctp_socket) { 3710 /* This should always be the case */ 3711 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3712 } 3713 } 3714 3715 static void 3716 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3717 { 3718 struct mbuf *m_notify; 3719 struct sctp_shutdown_event *sse; 3720 struct sctp_queued_to_read *control; 3721 3722 /* 3723 * For TCP model AND UDP connected sockets we will send an error up 3724 * when an SHUTDOWN completes 3725 */ 3726 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3727 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3728 /* mark socket closed for read/write and wakeup! 
*/ 3729 socantsendmore(stcb->sctp_socket); 3730 } 3731 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3732 /* event not enabled */ 3733 return; 3734 } 3735 3736 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3737 if (m_notify == NULL) 3738 /* no space left */ 3739 return; 3740 sse = mtod(m_notify, struct sctp_shutdown_event *); 3741 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3742 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3743 sse->sse_flags = 0; 3744 sse->sse_length = sizeof(struct sctp_shutdown_event); 3745 sse->sse_assoc_id = sctp_get_associd(stcb); 3746 3747 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3748 SCTP_BUF_NEXT(m_notify) = NULL; 3749 3750 /* append to socket */ 3751 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3752 0, 0, stcb->asoc.context, 0, 0, 0, 3753 m_notify); 3754 if (control == NULL) { 3755 /* no memory */ 3756 sctp_m_freem(m_notify); 3757 return; 3758 } 3759 control->length = SCTP_BUF_LEN(m_notify); 3760 control->spec_flags = M_NOTIFICATION; 3761 /* not that we need this */ 3762 control->tail_mbuf = m_notify; 3763 sctp_add_to_readq(stcb->sctp_ep, stcb, 3764 control, 3765 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3766 } 3767 3768 static void 3769 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3770 int so_locked) 3771 { 3772 struct mbuf *m_notify; 3773 struct sctp_sender_dry_event *event; 3774 struct sctp_queued_to_read *control; 3775 3776 if ((stcb == NULL) || 3777 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3778 /* event not enabled */ 3779 return; 3780 } 3781 3782 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3783 if (m_notify == NULL) { 3784 /* no space left */ 3785 return; 3786 } 3787 SCTP_BUF_LEN(m_notify) = 0; 3788 event = mtod(m_notify, struct sctp_sender_dry_event *); 3789 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3790 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3791 event->sender_dry_flags = 0; 3792 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3793 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3794 3795 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3796 SCTP_BUF_NEXT(m_notify) = NULL; 3797 3798 /* append to socket */ 3799 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3800 0, 0, stcb->asoc.context, 0, 0, 0, 3801 m_notify); 3802 if (control == NULL) { 3803 /* no memory */ 3804 sctp_m_freem(m_notify); 3805 return; 3806 } 3807 control->length = SCTP_BUF_LEN(m_notify); 3808 control->spec_flags = M_NOTIFICATION; 3809 /* not that we need this */ 3810 control->tail_mbuf = m_notify; 3811 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3812 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3813 } 3814 3815 void 3816 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3817 { 3818 struct mbuf *m_notify; 3819 struct sctp_queued_to_read *control; 3820 struct sctp_stream_change_event *stradd; 3821 3822 if ((stcb == NULL) || 3823 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3824 /* event not enabled */ 3825 return; 3826 } 3827 if ((stcb->asoc.peer_req_out) && flag) { 3828 /* Peer made the request, don't tell the local user */ 3829 stcb->asoc.peer_req_out = 0; 3830 return; 3831 } 3832 stcb->asoc.peer_req_out = 0; 3833 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3834 if (m_notify == NULL) 3835 /* no space left */ 3836 return; 3837 SCTP_BUF_LEN(m_notify) = 0; 3838 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3839 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3840 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3841 stradd->strchange_flags = flag; 3842 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3843 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3844 stradd->strchange_instrms = numberin; 3845 stradd->strchange_outstrms = numberout; 3846 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3847 SCTP_BUF_NEXT(m_notify) = NULL; 3848 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3849 /* no space */ 3850 sctp_m_freem(m_notify); 3851 return; 3852 } 3853 /* append to socket */ 3854 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3855 0, 0, stcb->asoc.context, 0, 0, 0, 3856 m_notify); 3857 if (control == NULL) { 3858 /* no memory */ 3859 sctp_m_freem(m_notify); 3860 return; 3861 } 3862 control->length = SCTP_BUF_LEN(m_notify); 3863 control->spec_flags = M_NOTIFICATION; 3864 /* not that we need this */ 3865 control->tail_mbuf = m_notify; 3866 sctp_add_to_readq(stcb->sctp_ep, stcb, 3867 control, 3868 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3869 } 3870 3871 void 3872 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3873 { 3874 struct mbuf *m_notify; 3875 struct sctp_queued_to_read *control; 3876 struct sctp_assoc_reset_event *strasoc; 3877 3878 if ((stcb == NULL) || 3879 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3880 /* event not enabled */ 3881 return; 3882 } 3883 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3884 if (m_notify == NULL) 3885 /* no space left */ 3886 return; 3887 SCTP_BUF_LEN(m_notify) = 0; 3888 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3889 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3890 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3891 strasoc->assocreset_flags = flag; 3892 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3893 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3894 strasoc->assocreset_local_tsn = sending_tsn; 3895 strasoc->assocreset_remote_tsn = recv_tsn; 3896 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3897 SCTP_BUF_NEXT(m_notify) = NULL; 3898 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3899 /* no space */ 3900 sctp_m_freem(m_notify); 3901 return; 3902 } 3903 /* append to socket */ 3904 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3905 0, 0, stcb->asoc.context, 0, 0, 0, 3906 m_notify); 3907 if (control == NULL) { 3908 /* no memory */ 3909 sctp_m_freem(m_notify); 3910 return; 3911 } 3912 control->length = SCTP_BUF_LEN(m_notify); 3913 control->spec_flags = M_NOTIFICATION; 3914 /* not that we need this */ 3915 control->tail_mbuf = m_notify; 3916 sctp_add_to_readq(stcb->sctp_ep, stcb, 3917 control, 3918 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3919 } 3920 3921 static void 3922 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3923 int number_entries, uint16_t *list, int flag) 3924 { 3925 struct mbuf *m_notify; 3926 struct sctp_queued_to_read 
*control; 3927 struct sctp_stream_reset_event *strreset; 3928 int len; 3929 3930 if ((stcb == NULL) || 3931 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3932 /* event not enabled */ 3933 return; 3934 } 3935 3936 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3937 if (m_notify == NULL) 3938 /* no space left */ 3939 return; 3940 SCTP_BUF_LEN(m_notify) = 0; 3941 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3942 if (len > M_TRAILINGSPACE(m_notify)) { 3943 /* never enough room */ 3944 sctp_m_freem(m_notify); 3945 return; 3946 } 3947 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3948 memset(strreset, 0, len); 3949 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3950 strreset->strreset_flags = flag; 3951 strreset->strreset_length = len; 3952 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3953 if (number_entries) { 3954 int i; 3955 3956 for (i = 0; i < number_entries; i++) { 3957 strreset->strreset_stream_list[i] = ntohs(list[i]); 3958 } 3959 } 3960 SCTP_BUF_LEN(m_notify) = len; 3961 SCTP_BUF_NEXT(m_notify) = NULL; 3962 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3963 /* no space */ 3964 sctp_m_freem(m_notify); 3965 return; 3966 } 3967 /* append to socket */ 3968 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3969 0, 0, stcb->asoc.context, 0, 0, 0, 3970 m_notify); 3971 if (control == NULL) { 3972 /* no memory */ 3973 sctp_m_freem(m_notify); 3974 return; 3975 } 3976 control->length = SCTP_BUF_LEN(m_notify); 3977 control->spec_flags = M_NOTIFICATION; 3978 /* not that we need this */ 3979 control->tail_mbuf = m_notify; 3980 sctp_add_to_readq(stcb->sctp_ep, stcb, 3981 control, 3982 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3983 } 3984 3985 static void 3986 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3987 { 3988 struct mbuf *m_notify; 3989 struct sctp_remote_error *sre; 3990 struct sctp_queued_to_read *control; 3991 unsigned int notif_len; 3992 uint16_t chunk_len; 3993 3994 if ((stcb == NULL) || 3995 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3996 return; 3997 } 3998 if (chunk != NULL) { 3999 chunk_len = ntohs(chunk->ch.chunk_length); 4000 /* 4001 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 4002 * contiguous. 4003 */ 4004 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 4005 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4006 } 4007 } else { 4008 chunk_len = 0; 4009 } 4010 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4011 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4012 if (m_notify == NULL) { 4013 /* Retry with smaller value. 
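If that allocation also fails we give up; on this retry path only the notification header fits, so the error chunk payload is not copied in.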
*/ 4014 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4015 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4016 if (m_notify == NULL) { 4017 return; 4018 } 4019 } 4020 SCTP_BUF_NEXT(m_notify) = NULL; 4021 sre = mtod(m_notify, struct sctp_remote_error *); 4022 memset(sre, 0, notif_len); 4023 sre->sre_type = SCTP_REMOTE_ERROR; 4024 sre->sre_flags = 0; 4025 sre->sre_length = sizeof(struct sctp_remote_error); 4026 sre->sre_error = error; 4027 sre->sre_assoc_id = sctp_get_associd(stcb); 4028 if (notif_len > sizeof(struct sctp_remote_error)) { 4029 memcpy(sre->sre_data, chunk, chunk_len); 4030 sre->sre_length += chunk_len; 4031 } 4032 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4033 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4034 0, 0, stcb->asoc.context, 0, 0, 0, 4035 m_notify); 4036 if (control != NULL) { 4037 control->length = SCTP_BUF_LEN(m_notify); 4038 control->spec_flags = M_NOTIFICATION; 4039 /* not that we need this */ 4040 control->tail_mbuf = m_notify; 4041 sctp_add_to_readq(stcb->sctp_ep, stcb, 4042 control, 4043 &stcb->sctp_socket->so_rcv, 1, 4044 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4045 } else { 4046 sctp_m_freem(m_notify); 4047 } 4048 } 4049 4050 void 4051 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4052 uint32_t error, void *data, int so_locked) 4053 { 4054 if ((stcb == NULL) || 4055 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4056 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4057 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4058 /* If the socket is gone we are out of here */ 4059 return; 4060 } 4061 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4062 return; 4063 } 4064 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4065 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4066 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4067 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4068 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4069 /* Don't report these in front states */ 4070 return; 4071 } 4072 } 4073 switch (notification) { 4074 case SCTP_NOTIFY_ASSOC_UP: 4075 if (stcb->asoc.assoc_up_sent == 0) { 4076 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 4077 stcb->asoc.assoc_up_sent = 1; 4078 } 4079 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4080 sctp_notify_adaptation_layer(stcb); 4081 } 4082 if (stcb->asoc.auth_supported == 0) { 4083 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4084 NULL, so_locked); 4085 } 4086 break; 4087 case SCTP_NOTIFY_ASSOC_DOWN: 4088 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 4089 break; 4090 case SCTP_NOTIFY_INTERFACE_DOWN: 4091 { 4092 struct sctp_nets *net; 4093 4094 net = (struct sctp_nets *)data; 4095 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4096 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4097 break; 4098 } 4099 case SCTP_NOTIFY_INTERFACE_UP: 4100 { 4101 struct sctp_nets *net; 4102 4103 net = (struct sctp_nets *)data; 4104 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4105 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4106 break; 4107 } 4108 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4109 { 4110 struct sctp_nets *net; 4111 4112 net = (struct sctp_nets *)data; 4113 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4114 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4115 break; 4116 } 4117 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4118 
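/* A message still sitting un-sent on the stream output queue has been abandoned; report it to the user. */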
sctp_notify_send_failed2(stcb, error, 4119 (struct sctp_stream_queue_pending *)data, so_locked); 4120 break; 4121 case SCTP_NOTIFY_SENT_DG_FAIL: 4122 sctp_notify_send_failed(stcb, 1, error, 4123 (struct sctp_tmit_chunk *)data, so_locked); 4124 break; 4125 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4126 sctp_notify_send_failed(stcb, 0, error, 4127 (struct sctp_tmit_chunk *)data, so_locked); 4128 break; 4129 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4130 { 4131 uint32_t val; 4132 4133 val = *((uint32_t *)data); 4134 4135 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4136 break; 4137 } 4138 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4139 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4140 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4141 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked); 4142 } else { 4143 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked); 4144 } 4145 break; 4146 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4147 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4148 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4149 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked); 4150 } else { 4151 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked); 4152 } 4153 break; 4154 case SCTP_NOTIFY_ASSOC_RESTART: 4155 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked); 4156 if (stcb->asoc.auth_supported == 0) { 4157 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4158 NULL, so_locked); 4159 } 4160 break; 4161 case SCTP_NOTIFY_STR_RESET_SEND: 4162 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4163 break; 4164 case SCTP_NOTIFY_STR_RESET_RECV: 4165 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4166 break; 4167 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4168 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4169 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4170 break; 4171 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4172 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4173 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4174 break; 4175 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4176 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4177 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4178 break; 4179 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4180 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4181 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4182 break; 4183 case SCTP_NOTIFY_ASCONF_ADD_IP: 4184 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4185 error, so_locked); 4186 break; 4187 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4188 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4189 error, so_locked); 4190 break; 4191 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4192 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4193 error, so_locked); 4194 break; 4195 case SCTP_NOTIFY_PEER_SHUTDOWN: 4196 sctp_notify_shutdown_event(stcb); 4197 break; 4198 case SCTP_NOTIFY_AUTH_NEW_KEY: 4199 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4200 (uint16_t)(uintptr_t)data, 4201 so_locked); 4202 break; 4203 case SCTP_NOTIFY_AUTH_FREE_KEY: 4204 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4205 (uint16_t)(uintptr_t)data, 4206 so_locked); 4207 break; 4208 case SCTP_NOTIFY_NO_PEER_AUTH: 4209 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4210 
(uint16_t)(uintptr_t)data, 4211 so_locked); 4212 break; 4213 case SCTP_NOTIFY_SENDER_DRY: 4214 sctp_notify_sender_dry_event(stcb, so_locked); 4215 break; 4216 case SCTP_NOTIFY_REMOTE_ERROR: 4217 sctp_notify_remote_error(stcb, error, data); 4218 break; 4219 default: 4220 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4221 __func__, notification, notification); 4222 break; 4223 } /* end switch */ 4224 } 4225 4226 void 4227 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4228 { 4229 struct sctp_association *asoc; 4230 struct sctp_stream_out *outs; 4231 struct sctp_tmit_chunk *chk, *nchk; 4232 struct sctp_stream_queue_pending *sp, *nsp; 4233 int i; 4234 4235 if (stcb == NULL) { 4236 return; 4237 } 4238 asoc = &stcb->asoc; 4239 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4240 /* already being freed */ 4241 return; 4242 } 4243 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4244 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4245 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4246 return; 4247 } 4248 /* now through all the gunk freeing chunks */ 4249 /* sent queue SHOULD be empty */ 4250 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4251 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4252 asoc->sent_queue_cnt--; 4253 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4254 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4255 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4256 #ifdef INVARIANTS 4257 } else { 4258 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4259 #endif 4260 } 4261 } 4262 if (chk->data != NULL) { 4263 sctp_free_bufspace(stcb, asoc, chk, 1); 4264 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4265 error, chk, so_locked); 4266 if (chk->data) { 4267 sctp_m_freem(chk->data); 4268 chk->data = NULL; 4269 } 4270 } 4271 sctp_free_a_chunk(stcb, chk, so_locked); 4272 /* sa_ignore FREED_MEMORY */ 4273 } 4274 /* pending send queue SHOULD be empty */ 4275 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4276 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4277 asoc->send_queue_cnt--; 4278 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4279 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4280 #ifdef INVARIANTS 4281 } else { 4282 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4283 #endif 4284 } 4285 if (chk->data != NULL) { 4286 sctp_free_bufspace(stcb, asoc, chk, 1); 4287 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4288 error, chk, so_locked); 4289 if (chk->data) { 4290 sctp_m_freem(chk->data); 4291 chk->data = NULL; 4292 } 4293 } 4294 sctp_free_a_chunk(stcb, chk, so_locked); 4295 /* sa_ignore FREED_MEMORY */ 4296 } 4297 for (i = 0; i < asoc->streamoutcnt; i++) { 4298 /* For each stream */ 4299 outs = &asoc->strmout[i]; 4300 /* clean up any sends there */ 4301 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4302 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4303 TAILQ_REMOVE(&outs->outqueue, sp, next); 4304 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1); 4305 sctp_free_spbufspace(stcb, asoc, sp); 4306 if (sp->data) { 4307 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4308 error, (void *)sp, so_locked); 4309 if (sp->data) { 4310 sctp_m_freem(sp->data); 4311 sp->data = NULL; 4312 sp->tail_mbuf = NULL; 4313 sp->length = 0; 4314 } 4315 } 4316 if (sp->net) { 4317 sctp_free_remote_addr(sp->net); 4318 sp->net = NULL; 4319 } 4320 /* Free the chunk */ 4321 
sctp_free_a_strmoq(stcb, sp, so_locked); 4322 /* sa_ignore FREED_MEMORY */ 4323 } 4324 } 4325 } 4326 4327 void 4328 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error, 4329 struct sctp_abort_chunk *abort, int so_locked) 4330 { 4331 if (stcb == NULL) { 4332 return; 4333 } 4334 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4335 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4336 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4337 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 4338 } 4339 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4340 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4341 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4342 return; 4343 } 4344 SCTP_TCB_SEND_LOCK(stcb); 4345 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4346 /* Tell them we lost the asoc */ 4347 sctp_report_all_outbound(stcb, error, so_locked); 4348 SCTP_TCB_SEND_UNLOCK(stcb); 4349 if (from_peer) { 4350 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4351 } else { 4352 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4353 } 4354 } 4355 4356 void 4357 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4358 struct mbuf *m, int iphlen, 4359 struct sockaddr *src, struct sockaddr *dst, 4360 struct sctphdr *sh, struct mbuf *op_err, 4361 uint8_t mflowtype, uint32_t mflowid, 4362 uint32_t vrf_id, uint16_t port) 4363 { 4364 uint32_t vtag; 4365 4366 vtag = 0; 4367 if (stcb != NULL) { 4368 vtag = stcb->asoc.peer_vtag; 4369 vrf_id = stcb->asoc.vrf_id; 4370 } 4371 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4372 mflowtype, mflowid, inp->fibnum, 4373 vrf_id, port); 4374 if (stcb != NULL) { 4375 /* We have a TCB to abort, send notification too */ 4376 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 4377 /* Ok, now lets free it */ 4378 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4379 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4380 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4381 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4382 } 4383 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4384 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4385 } 4386 } 4387 #ifdef SCTP_ASOCLOG_OF_TSNS 4388 void 4389 sctp_print_out_track_log(struct sctp_tcb *stcb) 4390 { 4391 #ifdef NOSIY_PRINTS 4392 int i; 4393 4394 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4395 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4396 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4397 SCTP_PRINTF("None rcvd\n"); 4398 goto none_in; 4399 } 4400 if (stcb->asoc.tsn_in_wrapped) { 4401 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4402 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4403 stcb->asoc.in_tsnlog[i].tsn, 4404 stcb->asoc.in_tsnlog[i].strm, 4405 stcb->asoc.in_tsnlog[i].seq, 4406 stcb->asoc.in_tsnlog[i].flgs, 4407 stcb->asoc.in_tsnlog[i].sz); 4408 } 4409 } 4410 if (stcb->asoc.tsn_in_at) { 4411 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4412 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4413 stcb->asoc.in_tsnlog[i].tsn, 4414 stcb->asoc.in_tsnlog[i].strm, 4415 stcb->asoc.in_tsnlog[i].seq, 4416 stcb->asoc.in_tsnlog[i].flgs, 4417 stcb->asoc.in_tsnlog[i].sz); 4418 } 4419 } 4420 none_in: 4421 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4422 if ((stcb->asoc.tsn_out_at == 0) && 4423 (stcb->asoc.tsn_out_wrapped == 0)) { 4424 SCTP_PRINTF("None sent\n"); 4425 } 4426 if 
(stcb->asoc.tsn_out_wrapped) { 4427 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4428 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4429 stcb->asoc.out_tsnlog[i].tsn, 4430 stcb->asoc.out_tsnlog[i].strm, 4431 stcb->asoc.out_tsnlog[i].seq, 4432 stcb->asoc.out_tsnlog[i].flgs, 4433 stcb->asoc.out_tsnlog[i].sz); 4434 } 4435 } 4436 if (stcb->asoc.tsn_out_at) { 4437 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4438 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4439 stcb->asoc.out_tsnlog[i].tsn, 4440 stcb->asoc.out_tsnlog[i].strm, 4441 stcb->asoc.out_tsnlog[i].seq, 4442 stcb->asoc.out_tsnlog[i].flgs, 4443 stcb->asoc.out_tsnlog[i].sz); 4444 } 4445 } 4446 #endif 4447 } 4448 #endif 4449 4450 void 4451 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4452 struct mbuf *op_err, 4453 int so_locked) 4454 { 4455 4456 if (stcb == NULL) { 4457 /* Got to have a TCB */ 4458 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4459 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4460 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4461 SCTP_CALLED_DIRECTLY_NOCMPSET); 4462 } 4463 } 4464 return; 4465 } 4466 /* notify the peer */ 4467 sctp_send_abort_tcb(stcb, op_err, so_locked); 4468 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4469 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4470 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4471 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4472 } 4473 /* notify the ulp */ 4474 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4475 sctp_abort_notification(stcb, 0, 0, NULL, so_locked); 4476 } 4477 /* now free the asoc */ 4478 #ifdef SCTP_ASOCLOG_OF_TSNS 4479 sctp_print_out_track_log(stcb); 4480 #endif 4481 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4482 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4483 } 4484 4485 void 4486 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4487 struct sockaddr *src, struct sockaddr *dst, 4488 struct sctphdr *sh, struct sctp_inpcb *inp, 4489 struct mbuf *cause, 4490 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4491 uint32_t vrf_id, uint16_t port) 4492 { 4493 struct sctp_chunkhdr *ch, chunk_buf; 4494 unsigned int chk_length; 4495 int contains_init_chunk; 4496 4497 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4498 /* Generate a TO address for future reference */ 4499 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4500 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4501 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4502 SCTP_CALLED_DIRECTLY_NOCMPSET); 4503 } 4504 } 4505 contains_init_chunk = 0; 4506 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4507 sizeof(*ch), (uint8_t *)&chunk_buf); 4508 while (ch != NULL) { 4509 chk_length = ntohs(ch->chunk_length); 4510 if (chk_length < sizeof(*ch)) { 4511 /* break to abort land */ 4512 break; 4513 } 4514 switch (ch->chunk_type) { 4515 case SCTP_INIT: 4516 contains_init_chunk = 1; 4517 break; 4518 case SCTP_PACKET_DROPPED: 4519 /* we don't respond to pkt-dropped */ 4520 return; 4521 case SCTP_ABORT_ASSOCIATION: 4522 /* we don't respond with an ABORT to an ABORT */ 4523 return; 4524 case SCTP_SHUTDOWN_COMPLETE: 4525 /* 4526 * we ignore it since we are not waiting for it and 4527 * peer is gone 4528 */ 4529 return; 4530 case SCTP_SHUTDOWN_ACK: 4531 sctp_send_shutdown_complete2(src, dst, sh, 4532 mflowtype, mflowid, fibnum, 4533 vrf_id, port); 4534 return; 4535 default: 4536 break; 4537 } 4538 offset += SCTP_SIZE32(chk_length); 4539 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4540 sizeof(*ch), (uint8_t 
*)&chunk_buf); 4541 } 4542 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4543 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4544 (contains_init_chunk == 0))) { 4545 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4546 mflowtype, mflowid, fibnum, 4547 vrf_id, port); 4548 } 4549 } 4550 4551 /* 4552 * check the inbound datagram to make sure there is not an abort inside it, 4553 * if there is return 1, else return 0. 4554 */ 4555 int 4556 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtag) 4557 { 4558 struct sctp_chunkhdr *ch; 4559 struct sctp_init_chunk *init_chk, chunk_buf; 4560 int offset; 4561 unsigned int chk_length; 4562 4563 offset = iphlen + sizeof(struct sctphdr); 4564 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4565 (uint8_t *)&chunk_buf); 4566 while (ch != NULL) { 4567 chk_length = ntohs(ch->chunk_length); 4568 if (chk_length < sizeof(*ch)) { 4569 /* packet is probably corrupt */ 4570 break; 4571 } 4572 /* we seem to be ok, is it an abort? */ 4573 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4574 /* yep, tell them */ 4575 return (1); 4576 } 4577 if ((ch->chunk_type == SCTP_INITIATION) || 4578 (ch->chunk_type == SCTP_INITIATION_ACK)) { 4579 /* need to update the Vtag */ 4580 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4581 offset, sizeof(struct sctp_init_chunk), (uint8_t *)&chunk_buf); 4582 if (init_chk != NULL) { 4583 *vtag = ntohl(init_chk->init.initiate_tag); 4584 } 4585 } 4586 /* Nope, move to the next chunk */ 4587 offset += SCTP_SIZE32(chk_length); 4588 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4589 sizeof(*ch), (uint8_t *)&chunk_buf); 4590 } 4591 return (0); 4592 } 4593 4594 /* 4595 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4596 * set (i.e. it's 0) so, create this function to compare link local scopes 4597 */ 4598 #ifdef INET6 4599 uint32_t 4600 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4601 { 4602 struct sockaddr_in6 a, b; 4603 4604 /* save copies */ 4605 a = *addr1; 4606 b = *addr2; 4607 4608 if (a.sin6_scope_id == 0) 4609 if (sa6_recoverscope(&a)) { 4610 /* can't get scope, so can't match */ 4611 return (0); 4612 } 4613 if (b.sin6_scope_id == 0) 4614 if (sa6_recoverscope(&b)) { 4615 /* can't get scope, so can't match */ 4616 return (0); 4617 } 4618 if (a.sin6_scope_id != b.sin6_scope_id) 4619 return (0); 4620 4621 return (1); 4622 } 4623 4624 /* 4625 * returns a sockaddr_in6 with embedded scope recovered and removed 4626 */ 4627 struct sockaddr_in6 * 4628 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4629 { 4630 /* check and strip embedded scope junk */ 4631 if (addr->sin6_family == AF_INET6) { 4632 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4633 if (addr->sin6_scope_id == 0) { 4634 *store = *addr; 4635 if (!sa6_recoverscope(store)) { 4636 /* use the recovered scope */ 4637 addr = store; 4638 } 4639 } else { 4640 /* else, return the original "to" addr */ 4641 in6_clearscope(&addr->sin6_addr); 4642 } 4643 } 4644 } 4645 return (addr); 4646 } 4647 #endif 4648 4649 /* 4650 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4651 * if same, 0 if not 4652 */ 4653 int 4654 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4655 { 4656 4657 /* must be valid */ 4658 if (sa1 == NULL || sa2 == NULL) 4659 return (0); 4660 4661 /* must be the same family */ 4662 if (sa1->sa_family != sa2->sa_family) 4663 return (0); 4664 4665 switch (sa1->sa_family) { 4666 #ifdef INET6 4667 case AF_INET6: 4668 { 4669 /* IPv6 addresses */ 4670 struct sockaddr_in6 *sin6_1, *sin6_2; 4671 4672 sin6_1 = (struct sockaddr_in6 *)sa1; 4673 sin6_2 = (struct sockaddr_in6 *)sa2; 4674 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4675 sin6_2)); 4676 } 4677 #endif 4678 #ifdef INET 4679 case AF_INET: 4680 { 4681 /* IPv4 addresses */ 4682 struct sockaddr_in *sin_1, *sin_2; 4683 4684 sin_1 = (struct sockaddr_in *)sa1; 4685 sin_2 = (struct sockaddr_in *)sa2; 4686 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4687 } 4688 #endif 4689 default: 4690 /* we don't do these... */ 4691 return (0); 4692 } 4693 } 4694 4695 void 4696 sctp_print_address(struct sockaddr *sa) 4697 { 4698 #ifdef INET6 4699 char ip6buf[INET6_ADDRSTRLEN]; 4700 #endif 4701 4702 switch (sa->sa_family) { 4703 #ifdef INET6 4704 case AF_INET6: 4705 { 4706 struct sockaddr_in6 *sin6; 4707 4708 sin6 = (struct sockaddr_in6 *)sa; 4709 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4710 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4711 ntohs(sin6->sin6_port), 4712 sin6->sin6_scope_id); 4713 break; 4714 } 4715 #endif 4716 #ifdef INET 4717 case AF_INET: 4718 { 4719 struct sockaddr_in *sin; 4720 unsigned char *p; 4721 4722 sin = (struct sockaddr_in *)sa; 4723 p = (unsigned char *)&sin->sin_addr; 4724 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4725 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4726 break; 4727 } 4728 #endif 4729 default: 4730 SCTP_PRINTF("?\n"); 4731 break; 4732 } 4733 } 4734 4735 void 4736 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4737 struct sctp_inpcb *new_inp, 4738 struct sctp_tcb *stcb, 4739 int waitflags) 4740 { 4741 /* 4742 * go through our old INP and pull off any control structures that 4743 * belong to stcb and move them to the new inp. 4744 */ 4745 struct socket *old_so, *new_so; 4746 struct sctp_queued_to_read *control, *nctl; 4747 struct sctp_readhead tmp_queue; 4748 struct mbuf *m; 4749 int error = 0; 4750 4751 old_so = old_inp->sctp_socket; 4752 new_so = new_inp->sctp_socket; 4753 TAILQ_INIT(&tmp_queue); 4754 error = sblock(&old_so->so_rcv, waitflags); 4755 if (error) { 4756 /* 4757 * Gak, can't get sblock, we have a problem. Data will be 4758 * left stranded.. and we don't dare look at it since the 4759 * other thread may be reading something. Oh well, it's a 4760 * screwed up app that does a peeloff OR an accept while 4761 * reading from the main socket... actually it's only the 4762 * peeloff() case, since I think read will fail on a 4763 * listening socket.. 4764 */ 4765 return; 4766 } 4767 /* lock the socket buffers */ 4768 SCTP_INP_READ_LOCK(old_inp); 4769 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4770 /* Pull off all for our target stcb */ 4771 if (control->stcb == stcb) { 4772 /* remove it, we want it */ 4773 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4774 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4775 m = control->data; 4776 while (m) { 4777 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4778 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ?
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4779 } 4780 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4781 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4782 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4783 } 4784 m = SCTP_BUF_NEXT(m); 4785 } 4786 } 4787 } 4788 SCTP_INP_READ_UNLOCK(old_inp); 4789 /* Remove the sb-lock on the old socket */ 4790 4791 sbunlock(&old_so->so_rcv); 4792 /* Now we move them over to the new socket buffer */ 4793 SCTP_INP_READ_LOCK(new_inp); 4794 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4795 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4796 m = control->data; 4797 while (m) { 4798 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4799 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4800 } 4801 sctp_sballoc(stcb, &new_so->so_rcv, m); 4802 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4803 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4804 } 4805 m = SCTP_BUF_NEXT(m); 4806 } 4807 } 4808 SCTP_INP_READ_UNLOCK(new_inp); 4809 } 4810 4811 void 4812 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4813 struct sctp_tcb *stcb, 4814 int so_locked 4815 SCTP_UNUSED 4816 ) 4817 { 4818 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4819 sctp_sorwakeup(inp, inp->sctp_socket); 4820 } 4821 } 4822 4823 void 4824 sctp_add_to_readq(struct sctp_inpcb *inp, 4825 struct sctp_tcb *stcb, 4826 struct sctp_queued_to_read *control, 4827 struct sockbuf *sb, 4828 int end, 4829 int inp_read_lock_held, 4830 int so_locked) 4831 { 4832 /* 4833 * Here we must place the control on the end of the socket read 4834 * queue AND increment sb_cc so that select will work properly on 4835 * read. 4836 */ 4837 struct mbuf *m, *prev = NULL; 4838 4839 if (inp == NULL) { 4840 /* Gak, TSNH!! */ 4841 #ifdef INVARIANTS 4842 panic("Gak, inp NULL on add_to_readq"); 4843 #endif 4844 return; 4845 } 4846 if (inp_read_lock_held == 0) 4847 SCTP_INP_READ_LOCK(inp); 4848 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4849 if (!control->on_strm_q) { 4850 sctp_free_remote_addr(control->whoFrom); 4851 if (control->data) { 4852 sctp_m_freem(control->data); 4853 control->data = NULL; 4854 } 4855 sctp_free_a_readq(stcb, control); 4856 } 4857 if (inp_read_lock_held == 0) 4858 SCTP_INP_READ_UNLOCK(inp); 4859 return; 4860 } 4861 if (!(control->spec_flags & M_NOTIFICATION)) { 4862 atomic_add_int(&inp->total_recvs, 1); 4863 if (!control->do_not_ref_stcb) { 4864 atomic_add_int(&stcb->total_recvs, 1); 4865 } 4866 } 4867 m = control->data; 4868 control->held_length = 0; 4869 control->length = 0; 4870 while (m) { 4871 if (SCTP_BUF_LEN(m) == 0) { 4872 /* Skip mbufs with NO length */ 4873 if (prev == NULL) { 4874 /* First one */ 4875 control->data = sctp_m_free(m); 4876 m = control->data; 4877 } else { 4878 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4879 m = SCTP_BUF_NEXT(prev); 4880 } 4881 if (m == NULL) { 4882 control->tail_mbuf = prev; 4883 } 4884 continue; 4885 } 4886 prev = m; 4887 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4888 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4889 } 4890 sctp_sballoc(stcb, sb, m); 4891 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4892 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 4893 } 4894 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4895 m = SCTP_BUF_NEXT(m); 4896 } 4897 if (prev != NULL) { 4898 control->tail_mbuf = prev; 4899 } else { 4900 /* Everything got collapsed out?? */ 4901 if (!control->on_strm_q) { 4902 sctp_free_remote_addr(control->whoFrom); 4903 sctp_free_a_readq(stcb, control); 4904 } 4905 if (inp_read_lock_held == 0) 4906 SCTP_INP_READ_UNLOCK(inp); 4907 return; 4908 } 4909 if (end) { 4910 control->end_added = 1; 4911 } 4912 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4913 control->on_read_q = 1; 4914 if (inp_read_lock_held == 0) 4915 SCTP_INP_READ_UNLOCK(inp); 4916 if (inp && inp->sctp_socket) { 4917 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4918 } 4919 } 4920 4921 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4922 *************ALTERNATE ROUTING CODE 4923 */ 4924 4925 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4926 *************ALTERNATE ROUTING CODE 4927 */ 4928 4929 struct mbuf * 4930 sctp_generate_cause(uint16_t code, char *info) 4931 { 4932 struct mbuf *m; 4933 struct sctp_gen_error_cause *cause; 4934 size_t info_len; 4935 uint16_t len; 4936 4937 if ((code == 0) || (info == NULL)) { 4938 return (NULL); 4939 } 4940 info_len = strlen(info); 4941 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4942 return (NULL); 4943 } 4944 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4945 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4946 if (m != NULL) { 4947 SCTP_BUF_LEN(m) = len; 4948 cause = mtod(m, struct sctp_gen_error_cause *); 4949 cause->code = htons(code); 4950 cause->length = htons(len); 4951 memcpy(cause->info, info, info_len); 4952 } 4953 return (m); 4954 } 4955 4956 struct mbuf * 4957 sctp_generate_no_user_data_cause(uint32_t tsn) 4958 { 4959 struct mbuf *m; 4960 struct sctp_error_no_user_data *no_user_data_cause; 4961 uint16_t len; 4962 4963 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 4964 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4965 if (m != NULL) { 4966 SCTP_BUF_LEN(m) = len; 4967 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 4968 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 4969 no_user_data_cause->cause.length = htons(len); 4970 no_user_data_cause->tsn = htonl(tsn); 4971 } 4972 return (m); 4973 } 4974 4975 #ifdef SCTP_MBCNT_LOGGING 4976 void 4977 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4978 struct sctp_tmit_chunk *tp1, int chk_cnt) 4979 { 4980 if (tp1->data == NULL) { 4981 return; 4982 } 4983 asoc->chunks_on_out_queue -= chk_cnt; 4984 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 4985 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4986 asoc->total_output_queue_size, 4987 tp1->book_size, 4988 0, 4989 tp1->mbcnt); 4990 } 4991 if (asoc->total_output_queue_size >= tp1->book_size) { 4992 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4993 } else { 4994 asoc->total_output_queue_size = 0; 4995 } 4996 4997 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4998 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4999 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 5000 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 5001 } else { 5002 stcb->sctp_socket->so_snd.sb_cc = 0; 5003 } 5004 } 5005 } 5006 5007 #endif 5008 5009 int 5010 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5011 uint8_t sent, int so_locked) 
5012 { 5013 struct sctp_stream_out *strq; 5014 struct sctp_tmit_chunk *chk = NULL, *tp2; 5015 struct sctp_stream_queue_pending *sp; 5016 uint32_t mid; 5017 uint16_t sid; 5018 uint8_t foundeom = 0; 5019 int ret_sz = 0; 5020 int notdone; 5021 int do_wakeup_routine = 0; 5022 5023 sid = tp1->rec.data.sid; 5024 mid = tp1->rec.data.mid; 5025 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5026 stcb->asoc.abandoned_sent[0]++; 5027 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5028 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5029 #if defined(SCTP_DETAILED_STR_STATS) 5030 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5031 #endif 5032 } else { 5033 stcb->asoc.abandoned_unsent[0]++; 5034 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5035 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5036 #if defined(SCTP_DETAILED_STR_STATS) 5037 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5038 #endif 5039 } 5040 do { 5041 ret_sz += tp1->book_size; 5042 if (tp1->data != NULL) { 5043 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5044 sctp_flight_size_decrease(tp1); 5045 sctp_total_flight_decrease(stcb, tp1); 5046 } 5047 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5048 stcb->asoc.peers_rwnd += tp1->send_size; 5049 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5050 if (sent) { 5051 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5052 } else { 5053 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5054 } 5055 if (tp1->data) { 5056 sctp_m_freem(tp1->data); 5057 tp1->data = NULL; 5058 } 5059 do_wakeup_routine = 1; 5060 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5061 stcb->asoc.sent_queue_cnt_removeable--; 5062 } 5063 } 5064 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5065 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5066 SCTP_DATA_NOT_FRAG) { 5067 /* not frag'ed we ae done */ 5068 notdone = 0; 5069 foundeom = 1; 5070 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5071 /* end of frag, we are done */ 5072 notdone = 0; 5073 foundeom = 1; 5074 } else { 5075 /* 5076 * Its a begin or middle piece, we must mark all of 5077 * it 5078 */ 5079 notdone = 1; 5080 tp1 = TAILQ_NEXT(tp1, sctp_next); 5081 } 5082 } while (tp1 && notdone); 5083 if (foundeom == 0) { 5084 /* 5085 * The multi-part message was scattered across the send and 5086 * sent queue. 5087 */ 5088 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5089 if ((tp1->rec.data.sid != sid) || 5090 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5091 break; 5092 } 5093 /* 5094 * save to chk in case we have some on stream out 5095 * queue. If so and we have an un-transmitted one we 5096 * don't have to fudge the TSN. 5097 */ 5098 chk = tp1; 5099 ret_sz += tp1->book_size; 5100 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5101 if (sent) { 5102 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5103 } else { 5104 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5105 } 5106 if (tp1->data) { 5107 sctp_m_freem(tp1->data); 5108 tp1->data = NULL; 5109 } 5110 /* No flight involved here book the size to 0 */ 5111 tp1->book_size = 0; 5112 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5113 foundeom = 1; 5114 } 5115 do_wakeup_routine = 1; 5116 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5117 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5118 /* 5119 * on to the sent queue so we can wait for it to be 5120 * passed by. 
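* Being marked SCTP_FORWARD_TSN_SKIP, it will be covered by a FORWARD-TSN rather than retransmitted.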
5121 */ 5122 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5123 sctp_next); 5124 stcb->asoc.send_queue_cnt--; 5125 stcb->asoc.sent_queue_cnt++; 5126 } 5127 } 5128 if (foundeom == 0) { 5129 /* 5130 * Still no eom found. That means there is stuff left on the 5131 * stream out queue.. yuck. 5132 */ 5133 SCTP_TCB_SEND_LOCK(stcb); 5134 strq = &stcb->asoc.strmout[sid]; 5135 sp = TAILQ_FIRST(&strq->outqueue); 5136 if (sp != NULL) { 5137 sp->discard_rest = 1; 5138 /* 5139 * We may need to put a chunk on the queue that 5140 * holds the TSN that would have been sent with the 5141 * LAST bit. 5142 */ 5143 if (chk == NULL) { 5144 /* Yep, we have to */ 5145 sctp_alloc_a_chunk(stcb, chk); 5146 if (chk == NULL) { 5147 /* 5148 * we are hosed. All we can do is 5149 * nothing.. which will cause an 5150 * abort if the peer is paying 5151 * attention. 5152 */ 5153 goto oh_well; 5154 } 5155 memset(chk, 0, sizeof(*chk)); 5156 chk->rec.data.rcv_flags = 0; 5157 chk->sent = SCTP_FORWARD_TSN_SKIP; 5158 chk->asoc = &stcb->asoc; 5159 if (stcb->asoc.idata_supported == 0) { 5160 if (sp->sinfo_flags & SCTP_UNORDERED) { 5161 chk->rec.data.mid = 0; 5162 } else { 5163 chk->rec.data.mid = strq->next_mid_ordered; 5164 } 5165 } else { 5166 if (sp->sinfo_flags & SCTP_UNORDERED) { 5167 chk->rec.data.mid = strq->next_mid_unordered; 5168 } else { 5169 chk->rec.data.mid = strq->next_mid_ordered; 5170 } 5171 } 5172 chk->rec.data.sid = sp->sid; 5173 chk->rec.data.ppid = sp->ppid; 5174 chk->rec.data.context = sp->context; 5175 chk->flags = sp->act_flags; 5176 chk->whoTo = NULL; 5177 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5178 strq->chunks_on_queues++; 5179 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5180 stcb->asoc.sent_queue_cnt++; 5181 stcb->asoc.pr_sctp_cnt++; 5182 } 5183 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5184 if (sp->sinfo_flags & SCTP_UNORDERED) { 5185 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5186 } 5187 if (stcb->asoc.idata_supported == 0) { 5188 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5189 strq->next_mid_ordered++; 5190 } 5191 } else { 5192 if (sp->sinfo_flags & SCTP_UNORDERED) { 5193 strq->next_mid_unordered++; 5194 } else { 5195 strq->next_mid_ordered++; 5196 } 5197 } 5198 oh_well: 5199 if (sp->data) { 5200 /* 5201 * Pull any data to free up the SB and allow 5202 * sender to "add more" while we will throw 5203 * away :-) 5204 */ 5205 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5206 ret_sz += sp->length; 5207 do_wakeup_routine = 1; 5208 sp->some_taken = 1; 5209 sctp_m_freem(sp->data); 5210 sp->data = NULL; 5211 sp->tail_mbuf = NULL; 5212 sp->length = 0; 5213 } 5214 } 5215 SCTP_TCB_SEND_UNLOCK(stcb); 5216 } 5217 if (do_wakeup_routine) { 5218 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5219 } 5220 return (ret_sz); 5221 } 5222 5223 /* 5224 * checks to see if the given address, sa, is one that is currently known by 5225 * the kernel note: can't distinguish the same address on multiple interfaces 5226 * and doesn't handle multiple addresses with different zone/scope id's note: 5227 * ifa_ifwithaddr() compares the entire sockaddr struct 5228 */ 5229 struct sctp_ifa * 5230 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5231 int holds_lock) 5232 { 5233 struct sctp_laddr *laddr; 5234 5235 if (holds_lock == 0) { 5236 SCTP_INP_RLOCK(inp); 5237 } 5238 5239 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5240 if (laddr->ifa == NULL) 5241 continue; 5242 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5243 continue; 5244 
#ifdef INET 5245 if (addr->sa_family == AF_INET) { 5246 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5247 laddr->ifa->address.sin.sin_addr.s_addr) { 5248 /* found him. */ 5249 break; 5250 } 5251 } 5252 #endif 5253 #ifdef INET6 5254 if (addr->sa_family == AF_INET6) { 5255 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5256 &laddr->ifa->address.sin6)) { 5257 /* found him. */ 5258 break; 5259 } 5260 } 5261 #endif 5262 } 5263 if (holds_lock == 0) { 5264 SCTP_INP_RUNLOCK(inp); 5265 } 5266 if (laddr != NULL) { 5267 return (laddr->ifa); 5268 } else { 5269 return (NULL); 5270 } 5271 } 5272 5273 uint32_t 5274 sctp_get_ifa_hash_val(struct sockaddr *addr) 5275 { 5276 switch (addr->sa_family) { 5277 #ifdef INET 5278 case AF_INET: 5279 { 5280 struct sockaddr_in *sin; 5281 5282 sin = (struct sockaddr_in *)addr; 5283 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5284 } 5285 #endif 5286 #ifdef INET6 5287 case AF_INET6: 5288 { 5289 struct sockaddr_in6 *sin6; 5290 uint32_t hash_of_addr; 5291 5292 sin6 = (struct sockaddr_in6 *)addr; 5293 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5294 sin6->sin6_addr.s6_addr32[1] + 5295 sin6->sin6_addr.s6_addr32[2] + 5296 sin6->sin6_addr.s6_addr32[3]); 5297 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5298 return (hash_of_addr); 5299 } 5300 #endif 5301 default: 5302 break; 5303 } 5304 return (0); 5305 } 5306 5307 struct sctp_ifa * 5308 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5309 { 5310 struct sctp_ifa *sctp_ifap; 5311 struct sctp_vrf *vrf; 5312 struct sctp_ifalist *hash_head; 5313 uint32_t hash_of_addr; 5314 5315 if (holds_lock == 0) { 5316 SCTP_IPI_ADDR_RLOCK(); 5317 } else { 5318 SCTP_IPI_ADDR_LOCK_ASSERT(); 5319 } 5320 5321 vrf = sctp_find_vrf(vrf_id); 5322 if (vrf == NULL) { 5323 if (holds_lock == 0) 5324 SCTP_IPI_ADDR_RUNLOCK(); 5325 return (NULL); 5326 } 5327 5328 hash_of_addr = sctp_get_ifa_hash_val(addr); 5329 5330 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5331 if (hash_head == NULL) { 5332 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5333 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5334 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5335 sctp_print_address(addr); 5336 SCTP_PRINTF("No such bucket for address\n"); 5337 if (holds_lock == 0) 5338 SCTP_IPI_ADDR_RUNLOCK(); 5339 5340 return (NULL); 5341 } 5342 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5343 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5344 continue; 5345 #ifdef INET 5346 if (addr->sa_family == AF_INET) { 5347 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5348 sctp_ifap->address.sin.sin_addr.s_addr) { 5349 /* found him. */ 5350 break; 5351 } 5352 } 5353 #endif 5354 #ifdef INET6 5355 if (addr->sa_family == AF_INET6) { 5356 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5357 &sctp_ifap->address.sin6)) { 5358 /* found him. */ 5359 break; 5360 } 5361 } 5362 #endif 5363 } 5364 if (holds_lock == 0) 5365 SCTP_IPI_ADDR_RUNLOCK(); 5366 return (sctp_ifap); 5367 } 5368 5369 static void 5370 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5371 uint32_t rwnd_req) 5372 { 5373 /* User pulled some data, do we need a rwnd update? 
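Only send a window update (SACK) once the receive window has grown by at least rwnd_req since it was last reported.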
*/ 5374 struct epoch_tracker et; 5375 int r_unlocked = 0; 5376 uint32_t dif, rwnd; 5377 struct socket *so = NULL; 5378 5379 if (stcb == NULL) 5380 return; 5381 5382 atomic_add_int(&stcb->asoc.refcnt, 1); 5383 5384 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5385 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5386 /* Pre-check If we are freeing no update */ 5387 goto no_lock; 5388 } 5389 SCTP_INP_INCR_REF(stcb->sctp_ep); 5390 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5391 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5392 goto out; 5393 } 5394 so = stcb->sctp_socket; 5395 if (so == NULL) { 5396 goto out; 5397 } 5398 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5399 /* Have you have freed enough to look */ 5400 *freed_so_far = 0; 5401 /* Yep, its worth a look and the lock overhead */ 5402 5403 /* Figure out what the rwnd would be */ 5404 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5405 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5406 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5407 } else { 5408 dif = 0; 5409 } 5410 if (dif >= rwnd_req) { 5411 if (hold_rlock) { 5412 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5413 r_unlocked = 1; 5414 } 5415 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5416 /* 5417 * One last check before we allow the guy possibly 5418 * to get in. There is a race, where the guy has not 5419 * reached the gate. In that case 5420 */ 5421 goto out; 5422 } 5423 SCTP_TCB_LOCK(stcb); 5424 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5425 /* No reports here */ 5426 SCTP_TCB_UNLOCK(stcb); 5427 goto out; 5428 } 5429 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5430 NET_EPOCH_ENTER(et); 5431 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5432 5433 sctp_chunk_output(stcb->sctp_ep, stcb, 5434 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5435 /* make sure no timer is running */ 5436 NET_EPOCH_EXIT(et); 5437 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5438 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5439 SCTP_TCB_UNLOCK(stcb); 5440 } else { 5441 /* Update how much we have pending */ 5442 stcb->freed_by_sorcv_sincelast = dif; 5443 } 5444 out: 5445 if (so && r_unlocked && hold_rlock) { 5446 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5447 } 5448 5449 SCTP_INP_DECR_REF(stcb->sctp_ep); 5450 no_lock: 5451 atomic_add_int(&stcb->asoc.refcnt, -1); 5452 return; 5453 } 5454 5455 int 5456 sctp_sorecvmsg(struct socket *so, 5457 struct uio *uio, 5458 struct mbuf **mp, 5459 struct sockaddr *from, 5460 int fromlen, 5461 int *msg_flags, 5462 struct sctp_sndrcvinfo *sinfo, 5463 int filling_sinfo) 5464 { 5465 /* 5466 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5467 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5468 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
5469 * On the way out we may send out any combination of: 5470 * MSG_NOTIFICATION MSG_EOR 5471 * 5472 */ 5473 struct sctp_inpcb *inp = NULL; 5474 ssize_t my_len = 0; 5475 ssize_t cp_len = 0; 5476 int error = 0; 5477 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5478 struct mbuf *m = NULL; 5479 struct sctp_tcb *stcb = NULL; 5480 int wakeup_read_socket = 0; 5481 int freecnt_applied = 0; 5482 int out_flags = 0, in_flags = 0; 5483 int block_allowed = 1; 5484 uint32_t freed_so_far = 0; 5485 ssize_t copied_so_far = 0; 5486 int in_eeor_mode = 0; 5487 int no_rcv_needed = 0; 5488 uint32_t rwnd_req = 0; 5489 int hold_sblock = 0; 5490 int hold_rlock = 0; 5491 ssize_t slen = 0; 5492 uint32_t held_length = 0; 5493 int sockbuf_lock = 0; 5494 5495 if (uio == NULL) { 5496 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5497 return (EINVAL); 5498 } 5499 5500 if (msg_flags) { 5501 in_flags = *msg_flags; 5502 if (in_flags & MSG_PEEK) 5503 SCTP_STAT_INCR(sctps_read_peeks); 5504 } else { 5505 in_flags = 0; 5506 } 5507 slen = uio->uio_resid; 5508 5509 /* Pull in and set up our int flags */ 5510 if (in_flags & MSG_OOB) { 5511 /* Out of band's NOT supported */ 5512 return (EOPNOTSUPP); 5513 } 5514 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5515 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5516 return (EINVAL); 5517 } 5518 if ((in_flags & (MSG_DONTWAIT 5519 | MSG_NBIO 5520 )) || 5521 SCTP_SO_IS_NBIO(so)) { 5522 block_allowed = 0; 5523 } 5524 /* setup the endpoint */ 5525 inp = (struct sctp_inpcb *)so->so_pcb; 5526 if (inp == NULL) { 5527 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5528 return (EFAULT); 5529 } 5530 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5531 /* Must be at least a MTU's worth */ 5532 if (rwnd_req < SCTP_MIN_RWND) 5533 rwnd_req = SCTP_MIN_RWND; 5534 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5535 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5536 sctp_misc_ints(SCTP_SORECV_ENTER, 5537 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5538 } 5539 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5540 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5541 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5542 } 5543 5544 error = sblock(&so->so_rcv, (block_allowed ? 
SBL_WAIT : 0)); 5545 if (error) { 5546 goto release_unlocked; 5547 } 5548 sockbuf_lock = 1; 5549 restart: 5550 5551 restart_nosblocks: 5552 if (hold_sblock == 0) { 5553 SOCKBUF_LOCK(&so->so_rcv); 5554 hold_sblock = 1; 5555 } 5556 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5557 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5558 goto out; 5559 } 5560 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5561 if (so->so_error) { 5562 error = so->so_error; 5563 if ((in_flags & MSG_PEEK) == 0) 5564 so->so_error = 0; 5565 goto out; 5566 } else { 5567 if (so->so_rcv.sb_cc == 0) { 5568 /* indicate EOF */ 5569 error = 0; 5570 goto out; 5571 } 5572 } 5573 } 5574 if (so->so_rcv.sb_cc <= held_length) { 5575 if (so->so_error) { 5576 error = so->so_error; 5577 if ((in_flags & MSG_PEEK) == 0) { 5578 so->so_error = 0; 5579 } 5580 goto out; 5581 } 5582 if ((so->so_rcv.sb_cc == 0) && 5583 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5584 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5585 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5586 /* 5587 * For the active open side, clear flags for 5588 * re-use; passive open is blocked by 5589 * connect. 5590 */ 5591 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5592 /* 5593 * You were aborted; the passive side 5594 * always hits here. 5595 */ 5596 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5597 error = ECONNRESET; 5598 } 5599 so->so_state &= ~(SS_ISCONNECTING | 5600 SS_ISDISCONNECTING | 5601 SS_ISCONFIRMING | 5602 SS_ISCONNECTED); 5603 if (error == 0) { 5604 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5605 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5606 error = ENOTCONN; 5607 } 5608 } 5609 goto out; 5610 } 5611 } 5612 if (block_allowed) { 5613 error = sbwait(&so->so_rcv); 5614 if (error) { 5615 goto out; 5616 } 5617 held_length = 0; 5618 goto restart_nosblocks; 5619 } else { 5620 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5621 error = EWOULDBLOCK; 5622 goto out; 5623 } 5624 } 5625 if (hold_sblock == 1) { 5626 SOCKBUF_UNLOCK(&so->so_rcv); 5627 hold_sblock = 0; 5628 } 5629 /* we possibly have data we can read */ 5630 /* sa_ignore FREED_MEMORY */ 5631 control = TAILQ_FIRST(&inp->read_queue); 5632 if (control == NULL) { 5633 /* 5634 * This could happen if the appender did the 5635 * increment but has not yet done the tailq insert onto the 5636 * read_queue. 5637 */ 5638 if (hold_rlock == 0) { 5639 SCTP_INP_READ_LOCK(inp); 5640 } 5641 control = TAILQ_FIRST(&inp->read_queue); 5642 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5643 #ifdef INVARIANTS 5644 panic("Huh, its non zero and nothing on control?"); 5645 #endif 5646 so->so_rcv.sb_cc = 0; 5647 } 5648 SCTP_INP_READ_UNLOCK(inp); 5649 hold_rlock = 0; 5650 goto restart; 5651 } 5652 5653 if ((control->length == 0) && 5654 (control->do_not_ref_stcb)) { 5655 /* 5656 * Clean-up code for freeing an assoc that left behind a 5657 * pdapi.. maybe a peer in EEOR mode that just closed after 5658 * sending and never indicated an EOR. 5659 */ 5660 if (hold_rlock == 0) { 5661 hold_rlock = 1; 5662 SCTP_INP_READ_LOCK(inp); 5663 } 5664 control->held_length = 0; 5665 if (control->data) { 5666 /* Hmm, there is data here .. 
fix up the length */ 5667 struct mbuf *m_tmp; 5668 int cnt = 0; 5669 5670 m_tmp = control->data; 5671 while (m_tmp) { 5672 cnt += SCTP_BUF_LEN(m_tmp); 5673 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5674 control->tail_mbuf = m_tmp; 5675 control->end_added = 1; 5676 } 5677 m_tmp = SCTP_BUF_NEXT(m_tmp); 5678 } 5679 control->length = cnt; 5680 } else { 5681 /* remove it */ 5682 TAILQ_REMOVE(&inp->read_queue, control, next); 5683 /* Add back any hidden data */ 5684 sctp_free_remote_addr(control->whoFrom); 5685 sctp_free_a_readq(stcb, control); 5686 } 5687 if (hold_rlock) { 5688 hold_rlock = 0; 5689 SCTP_INP_READ_UNLOCK(inp); 5690 } 5691 goto restart; 5692 } 5693 if ((control->length == 0) && 5694 (control->end_added == 1)) { 5695 /* 5696 * Do we also need to check for (control->pdapi_aborted == 5697 * 1)? 5698 */ 5699 if (hold_rlock == 0) { 5700 hold_rlock = 1; 5701 SCTP_INP_READ_LOCK(inp); 5702 } 5703 TAILQ_REMOVE(&inp->read_queue, control, next); 5704 if (control->data) { 5705 #ifdef INVARIANTS 5706 panic("control->data not null but control->length == 0"); 5707 #else 5708 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n"); 5709 sctp_m_freem(control->data); 5710 control->data = NULL; 5711 #endif 5712 } 5713 if (control->aux_data) { 5714 sctp_m_free(control->aux_data); 5715 control->aux_data = NULL; 5716 } 5717 #ifdef INVARIANTS 5718 if (control->on_strm_q) { 5719 panic("About to free ctl:%p so:%p and its in %d", 5720 control, so, control->on_strm_q); 5721 } 5722 #endif 5723 sctp_free_remote_addr(control->whoFrom); 5724 sctp_free_a_readq(stcb, control); 5725 if (hold_rlock) { 5726 hold_rlock = 0; 5727 SCTP_INP_READ_UNLOCK(inp); 5728 } 5729 goto restart; 5730 } 5731 if (control->length == 0) { 5732 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5733 (filling_sinfo)) { 5734 /* find a more suitable one than this */ 5735 ctl = TAILQ_NEXT(control, next); 5736 while (ctl) { 5737 if ((ctl->stcb != control->stcb) && (ctl->length) && 5738 (ctl->some_taken || 5739 (ctl->spec_flags & M_NOTIFICATION) || 5740 ((ctl->do_not_ref_stcb == 0) && 5741 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5742 ) { 5743 /*- 5744 * If the next entry has a different TCB and there is data 5745 * present, and we have already taken some (pdapi), OR we can 5746 * ref the tcb and no delivery has started on this stream, we 5747 * take it. Note we allow a notification on a different 5748 * assoc to be delivered.. 5749 */ 5750 control = ctl; 5751 goto found_one; 5752 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5753 (ctl->length) && 5754 ((ctl->some_taken) || 5755 ((ctl->do_not_ref_stcb == 0) && 5756 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5757 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5758 /*- 5759 * If we have the same tcb and there is data present, and we 5760 * have the strm interleave feature present, then if we have 5761 * taken some (pdapi) or we can refer to that tcb AND we have 5762 * not started a delivery for this stream, we can take it. 5763 * Note we do NOT allow a notification on the same assoc to 5764 * be delivered. 5765 */ 5766 control = ctl; 5767 goto found_one; 5768 } 5769 ctl = TAILQ_NEXT(ctl, next); 5770 } 5771 } 5772 /* 5773 * If we reach here, no suitable replacement is available 5774 * <or> fragment interleave is NOT on. So stuff the sb_cc 5775 * into our held count, and it's time to sleep again. 
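* held_length remembers the current sb_cc, so the sleep test above only wakes us once something beyond what is already queued arrives.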
5776 */ 5777 held_length = so->so_rcv.sb_cc; 5778 control->held_length = so->so_rcv.sb_cc; 5779 goto restart; 5780 } 5781 /* Clear the held length since there is something to read */ 5782 control->held_length = 0; 5783 found_one: 5784 /* 5785 * If we reach here, control has a some data for us to read off. 5786 * Note that stcb COULD be NULL. 5787 */ 5788 if (hold_rlock == 0) { 5789 hold_rlock = 1; 5790 SCTP_INP_READ_LOCK(inp); 5791 } 5792 control->some_taken++; 5793 stcb = control->stcb; 5794 if (stcb) { 5795 if ((control->do_not_ref_stcb == 0) && 5796 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5797 if (freecnt_applied == 0) 5798 stcb = NULL; 5799 } else if (control->do_not_ref_stcb == 0) { 5800 /* you can't free it on me please */ 5801 /* 5802 * The lock on the socket buffer protects us so the 5803 * free code will stop. But since we used the 5804 * socketbuf lock and the sender uses the tcb_lock 5805 * to increment, we need to use the atomic add to 5806 * the refcnt 5807 */ 5808 if (freecnt_applied) { 5809 #ifdef INVARIANTS 5810 panic("refcnt already incremented"); 5811 #else 5812 SCTP_PRINTF("refcnt already incremented?\n"); 5813 #endif 5814 } else { 5815 atomic_add_int(&stcb->asoc.refcnt, 1); 5816 freecnt_applied = 1; 5817 } 5818 /* 5819 * Setup to remember how much we have not yet told 5820 * the peer our rwnd has opened up. Note we grab the 5821 * value from the tcb from last time. Note too that 5822 * sack sending clears this when a sack is sent, 5823 * which is fine. Once we hit the rwnd_req, we then 5824 * will go to the sctp_user_rcvd() that will not 5825 * lock until it KNOWs it MUST send a WUP-SACK. 5826 */ 5827 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5828 stcb->freed_by_sorcv_sincelast = 0; 5829 } 5830 } 5831 if (stcb && 5832 ((control->spec_flags & M_NOTIFICATION) == 0) && 5833 control->do_not_ref_stcb == 0) { 5834 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5835 } 5836 5837 /* First lets get off the sinfo and sockaddr info */ 5838 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5839 sinfo->sinfo_stream = control->sinfo_stream; 5840 sinfo->sinfo_ssn = (uint16_t)control->mid; 5841 sinfo->sinfo_flags = control->sinfo_flags; 5842 sinfo->sinfo_ppid = control->sinfo_ppid; 5843 sinfo->sinfo_context = control->sinfo_context; 5844 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5845 sinfo->sinfo_tsn = control->sinfo_tsn; 5846 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5847 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5848 nxt = TAILQ_NEXT(control, next); 5849 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5850 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5851 struct sctp_extrcvinfo *s_extra; 5852 5853 s_extra = (struct sctp_extrcvinfo *)sinfo; 5854 if ((nxt) && 5855 (nxt->length)) { 5856 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5857 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5858 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5859 } 5860 if (nxt->spec_flags & M_NOTIFICATION) { 5861 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5862 } 5863 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5864 s_extra->serinfo_next_length = nxt->length; 5865 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5866 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5867 if (nxt->tail_mbuf != NULL) { 5868 if (nxt->end_added) { 5869 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5870 } 5871 } 5872 } else { 5873 /* 5874 * we explicitly 0 this, since the memcpy 5875 * got some other things 
beyond the older 5876 * sinfo_ that is on the control's structure 5877 * :-D 5878 */ 5879 nxt = NULL; 5880 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5881 s_extra->serinfo_next_aid = 0; 5882 s_extra->serinfo_next_length = 0; 5883 s_extra->serinfo_next_ppid = 0; 5884 s_extra->serinfo_next_stream = 0; 5885 } 5886 } 5887 /* 5888 * update off the real current cum-ack, if we have an stcb. 5889 */ 5890 if ((control->do_not_ref_stcb == 0) && stcb) 5891 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5892 /* 5893 * mask off the high bits, we keep the actual chunk bits in 5894 * there. 5895 */ 5896 sinfo->sinfo_flags &= 0x00ff; 5897 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5898 sinfo->sinfo_flags |= SCTP_UNORDERED; 5899 } 5900 } 5901 #ifdef SCTP_ASOCLOG_OF_TSNS 5902 { 5903 int index, newindex; 5904 struct sctp_pcbtsn_rlog *entry; 5905 5906 do { 5907 index = inp->readlog_index; 5908 newindex = index + 1; 5909 if (newindex >= SCTP_READ_LOG_SIZE) { 5910 newindex = 0; 5911 } 5912 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5913 entry = &inp->readlog[index]; 5914 entry->vtag = control->sinfo_assoc_id; 5915 entry->strm = control->sinfo_stream; 5916 entry->seq = (uint16_t)control->mid; 5917 entry->sz = control->length; 5918 entry->flgs = control->sinfo_flags; 5919 } 5920 #endif 5921 if ((fromlen > 0) && (from != NULL)) { 5922 union sctp_sockstore store; 5923 size_t len; 5924 5925 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5926 #ifdef INET6 5927 case AF_INET6: 5928 len = sizeof(struct sockaddr_in6); 5929 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5930 store.sin6.sin6_port = control->port_from; 5931 break; 5932 #endif 5933 #ifdef INET 5934 case AF_INET: 5935 #ifdef INET6 5936 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5937 len = sizeof(struct sockaddr_in6); 5938 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5939 &store.sin6); 5940 store.sin6.sin6_port = control->port_from; 5941 } else { 5942 len = sizeof(struct sockaddr_in); 5943 store.sin = control->whoFrom->ro._l_addr.sin; 5944 store.sin.sin_port = control->port_from; 5945 } 5946 #else 5947 len = sizeof(struct sockaddr_in); 5948 store.sin = control->whoFrom->ro._l_addr.sin; 5949 store.sin.sin_port = control->port_from; 5950 #endif 5951 break; 5952 #endif 5953 default: 5954 len = 0; 5955 break; 5956 } 5957 memcpy(from, &store, min((size_t)fromlen, len)); 5958 #ifdef INET6 5959 { 5960 struct sockaddr_in6 lsa6, *from6; 5961 5962 from6 = (struct sockaddr_in6 *)from; 5963 sctp_recover_scope_mac(from6, (&lsa6)); 5964 } 5965 #endif 5966 } 5967 if (hold_rlock) { 5968 SCTP_INP_READ_UNLOCK(inp); 5969 hold_rlock = 0; 5970 } 5971 if (hold_sblock) { 5972 SOCKBUF_UNLOCK(&so->so_rcv); 5973 hold_sblock = 0; 5974 } 5975 /* now copy out what data we can */ 5976 if (mp == NULL) { 5977 /* copy out each mbuf in the chain up to length */ 5978 get_more_data: 5979 m = control->data; 5980 while (m) { 5981 /* Move out all we can */ 5982 cp_len = uio->uio_resid; 5983 my_len = SCTP_BUF_LEN(m); 5984 if (cp_len > my_len) { 5985 /* not enough in this buf */ 5986 cp_len = my_len; 5987 } 5988 if (hold_rlock) { 5989 SCTP_INP_READ_UNLOCK(inp); 5990 hold_rlock = 0; 5991 } 5992 if (cp_len > 0) 5993 error = uiomove(mtod(m, char *), (int)cp_len, uio); 5994 /* re-read */ 5995 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5996 goto release; 5997 } 5998 5999 if ((control->do_not_ref_stcb == 0) && stcb && 6000 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 6001 no_rcv_needed = 1; 6002 } 6003 if 
(error) { 6004 /* error we are out of here */ 6005 goto release; 6006 } 6007 SCTP_INP_READ_LOCK(inp); 6008 hold_rlock = 1; 6009 if (cp_len == SCTP_BUF_LEN(m)) { 6010 if ((SCTP_BUF_NEXT(m) == NULL) && 6011 (control->end_added)) { 6012 out_flags |= MSG_EOR; 6013 if ((control->do_not_ref_stcb == 0) && 6014 (control->stcb != NULL) && 6015 ((control->spec_flags & M_NOTIFICATION) == 0)) 6016 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6017 } 6018 if (control->spec_flags & M_NOTIFICATION) { 6019 out_flags |= MSG_NOTIFICATION; 6020 } 6021 /* we ate up the mbuf */ 6022 if (in_flags & MSG_PEEK) { 6023 /* just looking */ 6024 m = SCTP_BUF_NEXT(m); 6025 copied_so_far += cp_len; 6026 } else { 6027 /* dispose of the mbuf */ 6028 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6029 sctp_sblog(&so->so_rcv, 6030 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6031 } 6032 sctp_sbfree(control, stcb, &so->so_rcv, m); 6033 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6034 sctp_sblog(&so->so_rcv, 6035 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6036 } 6037 copied_so_far += cp_len; 6038 freed_so_far += (uint32_t)cp_len; 6039 freed_so_far += MSIZE; 6040 atomic_subtract_int(&control->length, cp_len); 6041 control->data = sctp_m_free(m); 6042 m = control->data; 6043 /* 6044 * been through it all, must hold sb 6045 * lock ok to null tail 6046 */ 6047 if (control->data == NULL) { 6048 #ifdef INVARIANTS 6049 if ((control->end_added == 0) || 6050 (TAILQ_NEXT(control, next) == NULL)) { 6051 /* 6052 * If the end is not 6053 * added, OR the 6054 * next is NOT null 6055 * we MUST have the 6056 * lock. 6057 */ 6058 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6059 panic("Hmm we don't own the lock?"); 6060 } 6061 } 6062 #endif 6063 control->tail_mbuf = NULL; 6064 #ifdef INVARIANTS 6065 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6066 panic("end_added, nothing left and no MSG_EOR"); 6067 } 6068 #endif 6069 } 6070 } 6071 } else { 6072 /* Do we need to trim the mbuf? */ 6073 if (control->spec_flags & M_NOTIFICATION) { 6074 out_flags |= MSG_NOTIFICATION; 6075 } 6076 if ((in_flags & MSG_PEEK) == 0) { 6077 SCTP_BUF_RESV_UF(m, cp_len); 6078 SCTP_BUF_LEN(m) -= (int)cp_len; 6079 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6080 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6081 } 6082 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 6083 if ((control->do_not_ref_stcb == 0) && 6084 stcb) { 6085 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 6086 } 6087 copied_so_far += cp_len; 6088 freed_so_far += (uint32_t)cp_len; 6089 freed_so_far += MSIZE; 6090 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6091 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6092 SCTP_LOG_SBRESULT, 0); 6093 } 6094 atomic_subtract_int(&control->length, cp_len); 6095 } else { 6096 copied_so_far += cp_len; 6097 } 6098 } 6099 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6100 break; 6101 } 6102 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6103 (control->do_not_ref_stcb == 0) && 6104 (freed_so_far >= rwnd_req)) { 6105 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6106 } 6107 } /* end while(m) */ 6108 /* 6109 * At this point we have looked at it all and we either have 6110 * a MSG_EOR/or read all the user wants... <OR> 6111 * control->length == 0. 
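* If MSG_EOR is set and we are not peeking, the control is released below once its length reaches zero; otherwise MSG_EOR is cleared because part of the message is still queued.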
6112 */ 6113 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6114 /* we are done with this control */ 6115 if (control->length == 0) { 6116 if (control->data) { 6117 #ifdef INVARIANTS 6118 panic("control->data not null at read eor?"); 6119 #else 6120 SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n"); 6121 sctp_m_freem(control->data); 6122 control->data = NULL; 6123 #endif 6124 } 6125 done_with_control: 6126 if (hold_rlock == 0) { 6127 SCTP_INP_READ_LOCK(inp); 6128 hold_rlock = 1; 6129 } 6130 TAILQ_REMOVE(&inp->read_queue, control, next); 6131 /* Add back any hidden data */ 6132 if (control->held_length) { 6133 held_length = 0; 6134 control->held_length = 0; 6135 wakeup_read_socket = 1; 6136 } 6137 if (control->aux_data) { 6138 sctp_m_free(control->aux_data); 6139 control->aux_data = NULL; 6140 } 6141 no_rcv_needed = control->do_not_ref_stcb; 6142 sctp_free_remote_addr(control->whoFrom); 6143 control->data = NULL; 6144 #ifdef INVARIANTS 6145 if (control->on_strm_q) { 6146 panic("About to free ctl:%p so:%p and its in %d", 6147 control, so, control->on_strm_q); 6148 } 6149 #endif 6150 sctp_free_a_readq(stcb, control); 6151 control = NULL; 6152 if ((freed_so_far >= rwnd_req) && 6153 (no_rcv_needed == 0)) 6154 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6155 6156 } else { 6157 /* 6158 * The user did not read all of this 6159 * message, turn off the returned MSG_EOR 6160 * since we are leaving more behind on the 6161 * control to read. 6162 */ 6163 #ifdef INVARIANTS 6164 if (control->end_added && 6165 (control->data == NULL) && 6166 (control->tail_mbuf == NULL)) { 6167 panic("Gak, control->length is corrupt?"); 6168 } 6169 #endif 6170 no_rcv_needed = control->do_not_ref_stcb; 6171 out_flags &= ~MSG_EOR; 6172 } 6173 } 6174 if (out_flags & MSG_EOR) { 6175 goto release; 6176 } 6177 if ((uio->uio_resid == 0) || 6178 ((in_eeor_mode) && 6179 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6180 goto release; 6181 } 6182 /* 6183 * If I hit here the receiver wants more and this message is 6184 * NOT done (pd-api). So two questions: Can we block? If not, 6185 * we are done. Did the user NOT set MSG_WAITALL? 6186 */ 6187 if (block_allowed == 0) { 6188 goto release; 6189 } 6190 /* 6191 * We need to wait for more data. A few things: - We don't 6192 * sbunlock() so we don't get someone else reading. - We 6193 * must be sure to account for the case where what is added 6194 * is NOT to our control when we wake up. 6195 */ 6196 6197 /* 6198 * Do we need to tell the transport a rwnd update might be 6199 * needed before we go to sleep? 
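* Only when we are not peeking, the stcb can still be referenced, and at least rwnd_req bytes have been freed with an update still outstanding.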
6200 */ 6201 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6202 ((freed_so_far >= rwnd_req) && 6203 (control->do_not_ref_stcb == 0) && 6204 (no_rcv_needed == 0))) { 6205 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6206 } 6207 wait_some_more: 6208 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6209 goto release; 6210 } 6211 6212 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6213 goto release; 6214 6215 if (hold_rlock == 1) { 6216 SCTP_INP_READ_UNLOCK(inp); 6217 hold_rlock = 0; 6218 } 6219 if (hold_sblock == 0) { 6220 SOCKBUF_LOCK(&so->so_rcv); 6221 hold_sblock = 1; 6222 } 6223 if ((copied_so_far) && (control->length == 0) && 6224 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6225 goto release; 6226 } 6227 if (so->so_rcv.sb_cc <= control->held_length) { 6228 error = sbwait(&so->so_rcv); 6229 if (error) { 6230 goto release; 6231 } 6232 control->held_length = 0; 6233 } 6234 if (hold_sblock) { 6235 SOCKBUF_UNLOCK(&so->so_rcv); 6236 hold_sblock = 0; 6237 } 6238 if (control->length == 0) { 6239 /* still nothing here */ 6240 if (control->end_added == 1) { 6241 /* he aborted, or is done i.e.did a shutdown */ 6242 out_flags |= MSG_EOR; 6243 if (control->pdapi_aborted) { 6244 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6245 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6246 6247 out_flags |= MSG_TRUNC; 6248 } else { 6249 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6250 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6251 } 6252 goto done_with_control; 6253 } 6254 if (so->so_rcv.sb_cc > held_length) { 6255 control->held_length = so->so_rcv.sb_cc; 6256 held_length = 0; 6257 } 6258 goto wait_some_more; 6259 } else if (control->data == NULL) { 6260 /* 6261 * we must re-sync since data is probably being 6262 * added 6263 */ 6264 SCTP_INP_READ_LOCK(inp); 6265 if ((control->length > 0) && (control->data == NULL)) { 6266 /* 6267 * big trouble.. we have the lock and its 6268 * corrupt? 6269 */ 6270 #ifdef INVARIANTS 6271 panic("Impossible data==NULL length !=0"); 6272 #endif 6273 out_flags |= MSG_EOR; 6274 out_flags |= MSG_TRUNC; 6275 control->length = 0; 6276 SCTP_INP_READ_UNLOCK(inp); 6277 goto done_with_control; 6278 } 6279 SCTP_INP_READ_UNLOCK(inp); 6280 /* We will fall around to get more data */ 6281 } 6282 goto get_more_data; 6283 } else { 6284 /*- 6285 * Give caller back the mbuf chain, 6286 * store in uio_resid the length 6287 */ 6288 wakeup_read_socket = 0; 6289 if ((control->end_added == 0) || 6290 (TAILQ_NEXT(control, next) == NULL)) { 6291 /* Need to get rlock */ 6292 if (hold_rlock == 0) { 6293 SCTP_INP_READ_LOCK(inp); 6294 hold_rlock = 1; 6295 } 6296 } 6297 if (control->end_added) { 6298 out_flags |= MSG_EOR; 6299 if ((control->do_not_ref_stcb == 0) && 6300 (control->stcb != NULL) && 6301 ((control->spec_flags & M_NOTIFICATION) == 0)) 6302 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6303 } 6304 if (control->spec_flags & M_NOTIFICATION) { 6305 out_flags |= MSG_NOTIFICATION; 6306 } 6307 uio->uio_resid = control->length; 6308 *mp = control->data; 6309 m = control->data; 6310 while (m) { 6311 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6312 sctp_sblog(&so->so_rcv, 6313 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6314 } 6315 sctp_sbfree(control, stcb, &so->so_rcv, m); 6316 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6317 freed_so_far += MSIZE; 6318 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6319 sctp_sblog(&so->so_rcv, 6320 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6321 } 6322 m = SCTP_BUF_NEXT(m); 6323 } 6324 control->data = control->tail_mbuf = NULL; 6325 control->length = 0; 6326 if (out_flags & MSG_EOR) { 6327 /* Done with this control */ 6328 goto done_with_control; 6329 } 6330 } 6331 release: 6332 if (hold_rlock == 1) { 6333 SCTP_INP_READ_UNLOCK(inp); 6334 hold_rlock = 0; 6335 } 6336 if (hold_sblock == 1) { 6337 SOCKBUF_UNLOCK(&so->so_rcv); 6338 hold_sblock = 0; 6339 } 6340 6341 sbunlock(&so->so_rcv); 6342 sockbuf_lock = 0; 6343 6344 release_unlocked: 6345 if (hold_sblock) { 6346 SOCKBUF_UNLOCK(&so->so_rcv); 6347 hold_sblock = 0; 6348 } 6349 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6350 if ((freed_so_far >= rwnd_req) && 6351 (control && (control->do_not_ref_stcb == 0)) && 6352 (no_rcv_needed == 0)) 6353 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6354 } 6355 out: 6356 if (msg_flags) { 6357 *msg_flags = out_flags; 6358 } 6359 if (((out_flags & MSG_EOR) == 0) && 6360 ((in_flags & MSG_PEEK) == 0) && 6361 (sinfo) && 6362 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6363 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6364 struct sctp_extrcvinfo *s_extra; 6365 6366 s_extra = (struct sctp_extrcvinfo *)sinfo; 6367 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6368 } 6369 if (hold_rlock == 1) { 6370 SCTP_INP_READ_UNLOCK(inp); 6371 } 6372 if (hold_sblock) { 6373 SOCKBUF_UNLOCK(&so->so_rcv); 6374 } 6375 if (sockbuf_lock) { 6376 sbunlock(&so->so_rcv); 6377 } 6378 6379 if (freecnt_applied) { 6380 /* 6381 * The lock on the socket buffer protects us so the free 6382 * code will stop. But since we used the socketbuf lock and 6383 * the sender uses the tcb_lock to increment, we need to use 6384 * the atomic add to the refcnt. 6385 */ 6386 if (stcb == NULL) { 6387 #ifdef INVARIANTS 6388 panic("stcb for refcnt has gone NULL?"); 6389 goto stage_left; 6390 #else 6391 goto stage_left; 6392 #endif 6393 } 6394 /* Save the value back for next time */ 6395 stcb->freed_by_sorcv_sincelast = freed_so_far; 6396 atomic_add_int(&stcb->asoc.refcnt, -1); 6397 } 6398 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6399 if (stcb) { 6400 sctp_misc_ints(SCTP_SORECV_DONE, 6401 freed_so_far, 6402 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6403 stcb->asoc.my_rwnd, 6404 so->so_rcv.sb_cc); 6405 } else { 6406 sctp_misc_ints(SCTP_SORECV_DONE, 6407 freed_so_far, 6408 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6409 0, 6410 so->so_rcv.sb_cc); 6411 } 6412 } 6413 stage_left: 6414 if (wakeup_read_socket) { 6415 sctp_sorwakeup(inp, so); 6416 } 6417 return (error); 6418 } 6419 6420 #ifdef SCTP_MBUF_LOGGING 6421 struct mbuf * 6422 sctp_m_free(struct mbuf *m) 6423 { 6424 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6425 sctp_log_mb(m, SCTP_MBUF_IFREE); 6426 } 6427 return (m_free(m)); 6428 } 6429 6430 void 6431 sctp_m_freem(struct mbuf *mb) 6432 { 6433 while (mb != NULL) 6434 mb = sctp_m_free(mb); 6435 } 6436 6437 #endif 6438 6439 int 6440 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6441 { 6442 /* 6443 * Given a local address. For all associations that holds the 6444 * address, request a peer-set-primary. 
6445 */ 6446 struct sctp_ifa *ifa; 6447 struct sctp_laddr *wi; 6448 6449 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6450 if (ifa == NULL) { 6451 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6452 return (EADDRNOTAVAIL); 6453 } 6454 /* 6455 * Now that we have the ifa we must awaken the iterator with this 6456 * message. 6457 */ 6458 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6459 if (wi == NULL) { 6460 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6461 return (ENOMEM); 6462 } 6463 /* Now incr the count and int wi structure */ 6464 SCTP_INCR_LADDR_COUNT(); 6465 memset(wi, 0, sizeof(*wi)); 6466 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6467 wi->ifa = ifa; 6468 wi->action = SCTP_SET_PRIM_ADDR; 6469 atomic_add_int(&ifa->refcount, 1); 6470 6471 /* Now add it to the work queue */ 6472 SCTP_WQ_ADDR_LOCK(); 6473 /* 6474 * Should this really be a tailq? As it is we will process the 6475 * newest first :-0 6476 */ 6477 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6478 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6479 (struct sctp_inpcb *)NULL, 6480 (struct sctp_tcb *)NULL, 6481 (struct sctp_nets *)NULL); 6482 SCTP_WQ_ADDR_UNLOCK(); 6483 return (0); 6484 } 6485 6486 int 6487 sctp_soreceive(struct socket *so, 6488 struct sockaddr **psa, 6489 struct uio *uio, 6490 struct mbuf **mp0, 6491 struct mbuf **controlp, 6492 int *flagsp) 6493 { 6494 int error, fromlen; 6495 uint8_t sockbuf[256]; 6496 struct sockaddr *from; 6497 struct sctp_extrcvinfo sinfo; 6498 int filling_sinfo = 1; 6499 int flags; 6500 struct sctp_inpcb *inp; 6501 6502 inp = (struct sctp_inpcb *)so->so_pcb; 6503 /* pickup the assoc we are reading from */ 6504 if (inp == NULL) { 6505 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6506 return (EINVAL); 6507 } 6508 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6509 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6510 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6511 (controlp == NULL)) { 6512 /* user does not want the sndrcv ctl */ 6513 filling_sinfo = 0; 6514 } 6515 if (psa) { 6516 from = (struct sockaddr *)sockbuf; 6517 fromlen = sizeof(sockbuf); 6518 from->sa_len = 0; 6519 } else { 6520 from = NULL; 6521 fromlen = 0; 6522 } 6523 6524 if (filling_sinfo) { 6525 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6526 } 6527 if (flagsp != NULL) { 6528 flags = *flagsp; 6529 } else { 6530 flags = 0; 6531 } 6532 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6533 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6534 if (flagsp != NULL) { 6535 *flagsp = flags; 6536 } 6537 if (controlp != NULL) { 6538 /* copy back the sinfo in a CMSG format */ 6539 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6540 *controlp = sctp_build_ctl_nchunk(inp, 6541 (struct sctp_sndrcvinfo *)&sinfo); 6542 } else { 6543 *controlp = NULL; 6544 } 6545 } 6546 if (psa) { 6547 /* copy back the address info */ 6548 if (from && from->sa_len) { 6549 *psa = sodupsockaddr(from, M_NOWAIT); 6550 } else { 6551 *psa = NULL; 6552 } 6553 } 6554 return (error); 6555 } 6556 6557 int 6558 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6559 int totaddr, int *error) 6560 { 6561 int added = 0; 6562 int i; 6563 struct sctp_inpcb *inp; 6564 struct sockaddr *sa; 6565 size_t incr = 0; 6566 #ifdef INET 6567 struct sockaddr_in *sin; 6568 #endif 6569 #ifdef INET6 6570 struct sockaddr_in6 *sin6; 6571 #endif 6572 6573 sa = addr; 6574 inp = 
stcb->sctp_ep; 6575 *error = 0; 6576 for (i = 0; i < totaddr; i++) { 6577 switch (sa->sa_family) { 6578 #ifdef INET 6579 case AF_INET: 6580 incr = sizeof(struct sockaddr_in); 6581 sin = (struct sockaddr_in *)sa; 6582 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6583 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6584 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6585 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6586 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6587 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6588 *error = EINVAL; 6589 goto out_now; 6590 } 6591 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6592 SCTP_DONOT_SETSCOPE, 6593 SCTP_ADDR_IS_CONFIRMED)) { 6594 /* assoc gone no un-lock */ 6595 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6596 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6597 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6598 *error = ENOBUFS; 6599 goto out_now; 6600 } 6601 added++; 6602 break; 6603 #endif 6604 #ifdef INET6 6605 case AF_INET6: 6606 incr = sizeof(struct sockaddr_in6); 6607 sin6 = (struct sockaddr_in6 *)sa; 6608 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6609 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6610 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6611 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6612 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6613 *error = EINVAL; 6614 goto out_now; 6615 } 6616 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6617 SCTP_DONOT_SETSCOPE, 6618 SCTP_ADDR_IS_CONFIRMED)) { 6619 /* assoc gone no un-lock */ 6620 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6621 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6622 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6623 *error = ENOBUFS; 6624 goto out_now; 6625 } 6626 added++; 6627 break; 6628 #endif 6629 default: 6630 break; 6631 } 6632 sa = (struct sockaddr *)((caddr_t)sa + incr); 6633 } 6634 out_now: 6635 return (added); 6636 } 6637 6638 int 6639 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6640 unsigned int totaddr, 6641 unsigned int *num_v4, unsigned int *num_v6, 6642 unsigned int limit) 6643 { 6644 struct sockaddr *sa; 6645 struct sctp_tcb *stcb; 6646 unsigned int incr, at, i; 6647 6648 at = 0; 6649 sa = addr; 6650 *num_v6 = *num_v4 = 0; 6651 /* account and validate addresses */ 6652 if (totaddr == 0) { 6653 return (EINVAL); 6654 } 6655 for (i = 0; i < totaddr; i++) { 6656 if (at + sizeof(struct sockaddr) > limit) { 6657 return (EINVAL); 6658 } 6659 switch (sa->sa_family) { 6660 #ifdef INET 6661 case AF_INET: 6662 incr = (unsigned int)sizeof(struct sockaddr_in); 6663 if (sa->sa_len != incr) { 6664 return (EINVAL); 6665 } 6666 (*num_v4) += 1; 6667 break; 6668 #endif 6669 #ifdef INET6 6670 case AF_INET6: 6671 { 6672 struct sockaddr_in6 *sin6; 6673 6674 sin6 = (struct sockaddr_in6 *)sa; 6675 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6676 /* Must be non-mapped for connectx */ 6677 return (EINVAL); 6678 } 6679 incr = (unsigned int)sizeof(struct sockaddr_in6); 6680 if (sa->sa_len != incr) { 6681 return (EINVAL); 6682 } 6683 (*num_v6) += 1; 6684 break; 6685 } 6686 #endif 6687 default: 6688 return (EINVAL); 6689 } 6690 if ((at + incr) > limit) { 6691 return (EINVAL); 6692 } 6693 SCTP_INP_INCR_REF(inp); 6694 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6695 if (stcb != NULL) { 6696 SCTP_TCB_UNLOCK(stcb); 6697 return (EALREADY); 6698 } else { 6699 SCTP_INP_DECR_REF(inp); 6700 } 6701 at += incr; 6702 sa = (struct sockaddr *)((caddr_t)sa + 
incr); 6703 } 6704 return (0); 6705 } 6706 6707 /* 6708 * sctp_bindx(ADD) for one address. 6709 * assumes all arguments are valid/checked by caller. 6710 */ 6711 void 6712 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6713 struct sockaddr *sa, uint32_t vrf_id, int *error, 6714 void *p) 6715 { 6716 #if defined(INET) && defined(INET6) 6717 struct sockaddr_in sin; 6718 #endif 6719 #ifdef INET6 6720 struct sockaddr_in6 *sin6; 6721 #endif 6722 #ifdef INET 6723 struct sockaddr_in *sinp; 6724 #endif 6725 struct sockaddr *addr_to_use; 6726 struct sctp_inpcb *lep; 6727 uint16_t port; 6728 6729 /* see if we're bound all already! */ 6730 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6731 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6732 *error = EINVAL; 6733 return; 6734 } 6735 switch (sa->sa_family) { 6736 #ifdef INET6 6737 case AF_INET6: 6738 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6739 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6740 *error = EINVAL; 6741 return; 6742 } 6743 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6744 /* can only bind v6 on PF_INET6 sockets */ 6745 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6746 *error = EINVAL; 6747 return; 6748 } 6749 sin6 = (struct sockaddr_in6 *)sa; 6750 port = sin6->sin6_port; 6751 #ifdef INET 6752 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6753 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6754 SCTP_IPV6_V6ONLY(inp)) { 6755 /* can't bind v4-mapped on PF_INET sockets */ 6756 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6757 *error = EINVAL; 6758 return; 6759 } 6760 in6_sin6_2_sin(&sin, sin6); 6761 addr_to_use = (struct sockaddr *)&sin; 6762 } else { 6763 addr_to_use = sa; 6764 } 6765 #else 6766 addr_to_use = sa; 6767 #endif 6768 break; 6769 #endif 6770 #ifdef INET 6771 case AF_INET: 6772 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6773 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6774 *error = EINVAL; 6775 return; 6776 } 6777 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6778 SCTP_IPV6_V6ONLY(inp)) { 6779 /* can't bind v4 on PF_INET sockets */ 6780 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6781 *error = EINVAL; 6782 return; 6783 } 6784 sinp = (struct sockaddr_in *)sa; 6785 port = sinp->sin_port; 6786 addr_to_use = sa; 6787 break; 6788 #endif 6789 default: 6790 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6791 *error = EINVAL; 6792 return; 6793 } 6794 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6795 if (p == NULL) { 6796 /* Can't get proc for Net/Open BSD */ 6797 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6798 *error = EINVAL; 6799 return; 6800 } 6801 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6802 return; 6803 } 6804 /* Validate the incoming port. */ 6805 if ((port != 0) && (port != inp->sctp_lport)) { 6806 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6807 *error = EINVAL; 6808 return; 6809 } 6810 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6811 if (lep == NULL) { 6812 /* add the address */ 6813 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6814 SCTP_ADD_IP_ADDRESS, vrf_id); 6815 } else { 6816 if (lep != inp) { 6817 *error = EADDRINUSE; 6818 } 6819 SCTP_INP_DECR_REF(lep); 6820 } 6821 } 6822 6823 /* 6824 * sctp_bindx(DELETE) for one address. 6825 * assumes all arguments are valid/checked by caller. 
6826 */ 6827 void 6828 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6829 struct sockaddr *sa, uint32_t vrf_id, int *error) 6830 { 6831 struct sockaddr *addr_to_use; 6832 #if defined(INET) && defined(INET6) 6833 struct sockaddr_in6 *sin6; 6834 struct sockaddr_in sin; 6835 #endif 6836 6837 /* see if we're bound all already! */ 6838 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6839 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6840 *error = EINVAL; 6841 return; 6842 } 6843 switch (sa->sa_family) { 6844 #ifdef INET6 6845 case AF_INET6: 6846 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6847 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6848 *error = EINVAL; 6849 return; 6850 } 6851 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6852 /* can only bind v6 on PF_INET6 sockets */ 6853 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6854 *error = EINVAL; 6855 return; 6856 } 6857 #ifdef INET 6858 sin6 = (struct sockaddr_in6 *)sa; 6859 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6860 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6861 SCTP_IPV6_V6ONLY(inp)) { 6862 /* can't bind mapped-v4 on PF_INET sockets */ 6863 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6864 *error = EINVAL; 6865 return; 6866 } 6867 in6_sin6_2_sin(&sin, sin6); 6868 addr_to_use = (struct sockaddr *)&sin; 6869 } else { 6870 addr_to_use = sa; 6871 } 6872 #else 6873 addr_to_use = sa; 6874 #endif 6875 break; 6876 #endif 6877 #ifdef INET 6878 case AF_INET: 6879 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6880 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6881 *error = EINVAL; 6882 return; 6883 } 6884 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6885 SCTP_IPV6_V6ONLY(inp)) { 6886 /* can't bind v4 on PF_INET sockets */ 6887 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6888 *error = EINVAL; 6889 return; 6890 } 6891 addr_to_use = sa; 6892 break; 6893 #endif 6894 default: 6895 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6896 *error = EINVAL; 6897 return; 6898 } 6899 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6900 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6901 vrf_id); 6902 } 6903 6904 /* 6905 * returns the valid local address count for an assoc, taking into account 6906 * all scoping rules 6907 */ 6908 int 6909 sctp_local_addr_count(struct sctp_tcb *stcb) 6910 { 6911 int loopback_scope; 6912 #if defined(INET) 6913 int ipv4_local_scope, ipv4_addr_legal; 6914 #endif 6915 #if defined(INET6) 6916 int local_scope, site_scope, ipv6_addr_legal; 6917 #endif 6918 struct sctp_vrf *vrf; 6919 struct sctp_ifn *sctp_ifn; 6920 struct sctp_ifa *sctp_ifa; 6921 int count = 0; 6922 6923 /* Turn on all the appropriate scopes */ 6924 loopback_scope = stcb->asoc.scope.loopback_scope; 6925 #if defined(INET) 6926 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6927 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6928 #endif 6929 #if defined(INET6) 6930 local_scope = stcb->asoc.scope.local_scope; 6931 site_scope = stcb->asoc.scope.site_scope; 6932 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6933 #endif 6934 SCTP_IPI_ADDR_RLOCK(); 6935 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6936 if (vrf == NULL) { 6937 /* no vrf, no addresses */ 6938 SCTP_IPI_ADDR_RUNLOCK(); 6939 return (0); 6940 } 6941 6942 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6943 /* 6944 * bound all case: go through all ifns on the vrf 6945 */ 6946 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6947 if ((loopback_scope == 0) && 6948 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6949 continue; 6950 } 6951 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6952 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6953 continue; 6954 switch (sctp_ifa->address.sa.sa_family) { 6955 #ifdef INET 6956 case AF_INET: 6957 if (ipv4_addr_legal) { 6958 struct sockaddr_in *sin; 6959 6960 sin = &sctp_ifa->address.sin; 6961 if (sin->sin_addr.s_addr == 0) { 6962 /* 6963 * skip unspecified 6964 * addrs 6965 */ 6966 continue; 6967 } 6968 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 6969 &sin->sin_addr) != 0) { 6970 continue; 6971 } 6972 if ((ipv4_local_scope == 0) && 6973 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 6974 continue; 6975 } 6976 /* count this one */ 6977 count++; 6978 } else { 6979 continue; 6980 } 6981 break; 6982 #endif 6983 #ifdef INET6 6984 case AF_INET6: 6985 if (ipv6_addr_legal) { 6986 struct sockaddr_in6 *sin6; 6987 6988 sin6 = &sctp_ifa->address.sin6; 6989 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 6990 continue; 6991 } 6992 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 6993 &sin6->sin6_addr) != 0) { 6994 continue; 6995 } 6996 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 6997 if (local_scope == 0) 6998 continue; 6999 if (sin6->sin6_scope_id == 0) { 7000 if (sa6_recoverscope(sin6) != 0) 7001 /* 7002 * 7003 * bad 7004 * link 7005 * 7006 * local 7007 * 7008 * address 7009 */ 7010 continue; 7011 } 7012 } 7013 if ((site_scope == 0) && 7014 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7015 continue; 7016 } 7017 /* count this one */ 7018 count++; 7019 } 7020 break; 7021 #endif 7022 default: 7023 /* TSNH */ 7024 break; 7025 } 7026 } 7027 } 7028 } else { 7029 /* 7030 * subset bound case 7031 */ 7032 struct sctp_laddr *laddr; 7033 7034 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7035 sctp_nxt_addr) { 7036 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7037 continue; 7038 } 7039 /* count this one */ 7040 count++; 7041 } 7042 } 7043 SCTP_IPI_ADDR_RUNLOCK(); 7044 return (count); 7045 } 7046 7047 #if defined(SCTP_LOCAL_TRACE_BUF) 7048 7049 void 7050 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7051 { 7052 uint32_t saveindex, newindex; 7053 7054 do { 7055 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7056 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7057 newindex = 1; 7058 } else { 7059 newindex = saveindex + 1; 7060 } 7061 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7062 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7063 saveindex = 0; 7064 } 7065 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7066 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7067 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7068 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7069 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7070 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7071 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7072 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7073 } 7074 7075 #endif 7076 static void 7077 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7078 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7079 { 7080 struct ip *iph; 7081 #ifdef INET6 7082 struct ip6_hdr *ip6; 7083 #endif 7084 struct mbuf *sp, *last; 7085 struct udphdr *uhdr; 7086 uint16_t port; 7087 7088 if ((m->m_flags & M_PKTHDR) == 0) { 7089 /* Can't handle one that is not a pkt hdr */ 7090 goto out; 7091 } 7092 /* Pull the src port */ 7093 iph = mtod(m, struct ip *); 7094 uhdr = (struct udphdr *)((caddr_t)iph + off); 7095 port = uhdr->uh_sport; 7096 /* 7097 * Split out the mbuf chain. Leave the IP header in m, place the 7098 * rest in the sp. 7099 */ 7100 sp = m_split(m, off, M_NOWAIT); 7101 if (sp == NULL) { 7102 /* Gak, drop packet, we can't do a split */ 7103 goto out; 7104 } 7105 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7106 /* Gak, packet can't have an SCTP header in it - too small */ 7107 m_freem(sp); 7108 goto out; 7109 } 7110 /* Now pull up the UDP header and SCTP header together */ 7111 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7112 if (sp == NULL) { 7113 /* Gak pullup failed */ 7114 goto out; 7115 } 7116 /* Trim out the UDP header */ 7117 m_adj(sp, sizeof(struct udphdr)); 7118 7119 /* Now reconstruct the mbuf chain */ 7120 for (last = m; last->m_next; last = last->m_next); 7121 last->m_next = sp; 7122 m->m_pkthdr.len += sp->m_pkthdr.len; 7123 /* 7124 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7125 * checksum and it was valid. Since CSUM_DATA_VALID == 7126 * CSUM_SCTP_VALID this would imply that the HW also verified the 7127 * SCTP checksum. Therefore, clear the bit. 
7128 */ 7129 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7130 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7131 m->m_pkthdr.len, 7132 if_name(m->m_pkthdr.rcvif), 7133 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7134 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7135 iph = mtod(m, struct ip *); 7136 switch (iph->ip_v) { 7137 #ifdef INET 7138 case IPVERSION: 7139 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7140 sctp_input_with_port(m, off, port); 7141 break; 7142 #endif 7143 #ifdef INET6 7144 case IPV6_VERSION >> 4: 7145 ip6 = mtod(m, struct ip6_hdr *); 7146 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7147 sctp6_input_with_port(&m, &off, port); 7148 break; 7149 #endif 7150 default: 7151 goto out; 7152 break; 7153 } 7154 return; 7155 out: 7156 m_freem(m); 7157 } 7158 7159 #ifdef INET 7160 static void 7161 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7162 { 7163 struct ip *outer_ip, *inner_ip; 7164 struct sctphdr *sh; 7165 struct icmp *icmp; 7166 struct udphdr *udp; 7167 struct sctp_inpcb *inp; 7168 struct sctp_tcb *stcb; 7169 struct sctp_nets *net; 7170 struct sctp_init_chunk *ch; 7171 struct sockaddr_in src, dst; 7172 uint8_t type, code; 7173 7174 inner_ip = (struct ip *)vip; 7175 icmp = (struct icmp *)((caddr_t)inner_ip - 7176 (sizeof(struct icmp) - sizeof(struct ip))); 7177 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7178 if (ntohs(outer_ip->ip_len) < 7179 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7180 return; 7181 } 7182 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7183 sh = (struct sctphdr *)(udp + 1); 7184 memset(&src, 0, sizeof(struct sockaddr_in)); 7185 src.sin_family = AF_INET; 7186 src.sin_len = sizeof(struct sockaddr_in); 7187 src.sin_port = sh->src_port; 7188 src.sin_addr = inner_ip->ip_src; 7189 memset(&dst, 0, sizeof(struct sockaddr_in)); 7190 dst.sin_family = AF_INET; 7191 dst.sin_len = sizeof(struct sockaddr_in); 7192 dst.sin_port = sh->dest_port; 7193 dst.sin_addr = inner_ip->ip_dst; 7194 /* 7195 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7196 * holds our local endpoint address. Thus we reverse the dst and the 7197 * src in the lookup. 7198 */ 7199 inp = NULL; 7200 net = NULL; 7201 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7202 (struct sockaddr *)&src, 7203 &inp, &net, 1, 7204 SCTP_DEFAULT_VRFID); 7205 if ((stcb != NULL) && 7206 (net != NULL) && 7207 (inp != NULL)) { 7208 /* Check the UDP port numbers */ 7209 if ((udp->uh_dport != net->port) || 7210 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7211 SCTP_TCB_UNLOCK(stcb); 7212 return; 7213 } 7214 /* Check the verification tag */ 7215 if (ntohl(sh->v_tag) != 0) { 7216 /* 7217 * This must be the verification tag used for 7218 * sending out packets. We don't consider packets 7219 * reflecting the verification tag. 7220 */ 7221 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7222 SCTP_TCB_UNLOCK(stcb); 7223 return; 7224 } 7225 } else { 7226 if (ntohs(outer_ip->ip_len) >= 7227 sizeof(struct ip) + 7228 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7229 /* 7230 * In this case we can check if we got an 7231 * INIT chunk and if the initiate tag 7232 * matches. 
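* (An INIT chunk is sent with a zero verification tag, so the initiate tag carried inside the chunk is compared against our local vtag instead.)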
7233 */ 7234 ch = (struct sctp_init_chunk *)(sh + 1); 7235 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7236 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7237 SCTP_TCB_UNLOCK(stcb); 7238 return; 7239 } 7240 } else { 7241 SCTP_TCB_UNLOCK(stcb); 7242 return; 7243 } 7244 } 7245 type = icmp->icmp_type; 7246 code = icmp->icmp_code; 7247 if ((type == ICMP_UNREACH) && 7248 (code == ICMP_UNREACH_PORT)) { 7249 code = ICMP_UNREACH_PROTOCOL; 7250 } 7251 sctp_notify(inp, stcb, net, type, code, 7252 ntohs(inner_ip->ip_len), 7253 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7254 } else { 7255 if ((stcb == NULL) && (inp != NULL)) { 7256 /* reduce ref-count */ 7257 SCTP_INP_WLOCK(inp); 7258 SCTP_INP_DECR_REF(inp); 7259 SCTP_INP_WUNLOCK(inp); 7260 } 7261 if (stcb) { 7262 SCTP_TCB_UNLOCK(stcb); 7263 } 7264 } 7265 return; 7266 } 7267 #endif 7268 7269 #ifdef INET6 7270 static void 7271 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7272 { 7273 struct ip6ctlparam *ip6cp; 7274 struct sctp_inpcb *inp; 7275 struct sctp_tcb *stcb; 7276 struct sctp_nets *net; 7277 struct sctphdr sh; 7278 struct udphdr udp; 7279 struct sockaddr_in6 src, dst; 7280 uint8_t type, code; 7281 7282 ip6cp = (struct ip6ctlparam *)d; 7283 /* 7284 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7285 */ 7286 if (ip6cp->ip6c_m == NULL) { 7287 return; 7288 } 7289 /* 7290 * Check if we can safely examine the ports and the verification tag 7291 * of the SCTP common header. 7292 */ 7293 if (ip6cp->ip6c_m->m_pkthdr.len < 7294 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7295 return; 7296 } 7297 /* Copy out the UDP header. */ 7298 memset(&udp, 0, sizeof(struct udphdr)); 7299 m_copydata(ip6cp->ip6c_m, 7300 ip6cp->ip6c_off, 7301 sizeof(struct udphdr), 7302 (caddr_t)&udp); 7303 /* Copy out the port numbers and the verification tag. */ 7304 memset(&sh, 0, sizeof(struct sctphdr)); 7305 m_copydata(ip6cp->ip6c_m, 7306 ip6cp->ip6c_off + sizeof(struct udphdr), 7307 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7308 (caddr_t)&sh); 7309 memset(&src, 0, sizeof(struct sockaddr_in6)); 7310 src.sin6_family = AF_INET6; 7311 src.sin6_len = sizeof(struct sockaddr_in6); 7312 src.sin6_port = sh.src_port; 7313 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7314 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7315 return; 7316 } 7317 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7318 dst.sin6_family = AF_INET6; 7319 dst.sin6_len = sizeof(struct sockaddr_in6); 7320 dst.sin6_port = sh.dest_port; 7321 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7322 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7323 return; 7324 } 7325 inp = NULL; 7326 net = NULL; 7327 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7328 (struct sockaddr *)&src, 7329 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7330 if ((stcb != NULL) && 7331 (net != NULL) && 7332 (inp != NULL)) { 7333 /* Check the UDP port numbers */ 7334 if ((udp.uh_dport != net->port) || 7335 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7336 SCTP_TCB_UNLOCK(stcb); 7337 return; 7338 } 7339 /* Check the verification tag */ 7340 if (ntohl(sh.v_tag) != 0) { 7341 /* 7342 * This must be the verification tag used for 7343 * sending out packets. We don't consider packets 7344 * reflecting the verification tag. 
*/ 7345 7346 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7347 SCTP_TCB_UNLOCK(stcb); 7348 return; 7349 } 7350 } else { 7351 if (ip6cp->ip6c_m->m_pkthdr.len >= 7352 ip6cp->ip6c_off + sizeof(struct udphdr) + 7353 sizeof(struct sctphdr) + 7354 sizeof(struct sctp_chunkhdr) + 7355 offsetof(struct sctp_init, a_rwnd)) { 7356 /* 7357 * In this case we can check if we got an 7358 * INIT chunk and if the initiate tag 7359 * matches. 7360 */ 7361 uint32_t initiate_tag; 7362 uint8_t chunk_type; 7363 7364 m_copydata(ip6cp->ip6c_m, 7365 ip6cp->ip6c_off + 7366 sizeof(struct udphdr) + 7367 sizeof(struct sctphdr), 7368 sizeof(uint8_t), 7369 (caddr_t)&chunk_type); 7370 m_copydata(ip6cp->ip6c_m, 7371 ip6cp->ip6c_off + 7372 sizeof(struct udphdr) + 7373 sizeof(struct sctphdr) + 7374 sizeof(struct sctp_chunkhdr), 7375 sizeof(uint32_t), 7376 (caddr_t)&initiate_tag); 7377 if ((chunk_type != SCTP_INITIATION) || 7378 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7379 SCTP_TCB_UNLOCK(stcb); 7380 return; 7381 } 7382 } else { 7383 SCTP_TCB_UNLOCK(stcb); 7384 return; 7385 } 7386 } 7387 type = ip6cp->ip6c_icmp6->icmp6_type; 7388 code = ip6cp->ip6c_icmp6->icmp6_code; 7389 if ((type == ICMP6_DST_UNREACH) && 7390 (code == ICMP6_DST_UNREACH_NOPORT)) { 7391 type = ICMP6_PARAM_PROB; 7392 code = ICMP6_PARAMPROB_NEXTHEADER; 7393 } 7394 sctp6_notify(inp, stcb, net, type, code, 7395 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7396 } else { 7397 if ((stcb == NULL) && (inp != NULL)) { 7398 /* reduce inp's ref-count */ 7399 SCTP_INP_WLOCK(inp); 7400 SCTP_INP_DECR_REF(inp); 7401 SCTP_INP_WUNLOCK(inp); 7402 } 7403 if (stcb) { 7404 SCTP_TCB_UNLOCK(stcb); 7405 } 7406 } 7407 } 7408 #endif 7409 7410 void 7411 sctp_over_udp_stop(void) 7412 { 7413 /* 7414 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7415 * for writing! 7416 */ 7417 #ifdef INET 7418 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7419 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7420 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7421 } 7422 #endif 7423 #ifdef INET6 7424 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7425 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7426 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7427 } 7428 #endif 7429 } 7430 7431 int 7432 sctp_over_udp_start(void) 7433 { 7434 uint16_t port; 7435 int ret; 7436 #ifdef INET 7437 struct sockaddr_in sin; 7438 #endif 7439 #ifdef INET6 7440 struct sockaddr_in6 sin6; 7441 #endif 7442 /* 7443 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7444 * for writing! 7445 */ 7446 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7447 if (ntohs(port) == 0) { 7448 /* Must have a port set */ 7449 return (EINVAL); 7450 } 7451 #ifdef INET 7452 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7453 /* Already running -- must stop first */ 7454 return (EALREADY); 7455 } 7456 #endif 7457 #ifdef INET6 7458 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7459 /* Already running -- must stop first */ 7460 return (EALREADY); 7461 } 7462 #endif 7463 #ifdef INET 7464 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7465 SOCK_DGRAM, IPPROTO_UDP, 7466 curthread->td_ucred, curthread))) { 7467 sctp_over_udp_stop(); 7468 return (ret); 7469 } 7470 /* Call the special UDP hook. */ 7471 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7472 sctp_recv_udp_tunneled_packet, 7473 sctp_recv_icmp_tunneled_packet, 7474 NULL))) { 7475 sctp_over_udp_stop(); 7476 return (ret); 7477 } 7478 /* Ok, we have a socket, bind it to the port. 
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 * (A compiled-out usage sketch is appended at the end of this file.)
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}

void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}

void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	    (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
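
/*
 * Illustrative sketch, kept compiled out via #if 0: it is not part of the
 * original file and only demonstrates the documented behaviour of
 * sctp_min_mtu() above -- zero-valued arguments are treated as "unknown"
 * and ignored, and an all-zero input yields zero.  The function name
 * sctp_min_mtu_selftest() and the example MTU values are hypothetical.
 */
#if 0
static void
sctp_min_mtu_selftest(void)
{
	/* Only the non-zero candidates 1500 and 1280 are compared. */
	KASSERT(sctp_min_mtu(1500, 0, 1280) == 1280,
	    ("sctp_min_mtu(1500, 0, 1280) != 1280"));
	/* A single known candidate is returned unchanged. */
	KASSERT(sctp_min_mtu(0, 1452, 0) == 1452,
	    ("sctp_min_mtu(0, 1452, 0) != 1452"));
	/* All candidates unknown: the documented result is 0. */
	KASSERT(sctp_min_mtu(0, 0, 0) == 0,
	    ("sctp_min_mtu(0, 0, 0) != 0"));
}
#endif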