/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];
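
/*
 * Tracing helpers: when SCTP_LOCAL_TRACE_BUF is defined, each of the
 * sctp_log_*() routines below fills one variant of the sctp_cwnd_log
 * union and hands the four overlaid 32-bit words (x.misc.log1 through
 * x.misc.log4) to SCTP_CTR6(), which records them in the KTR trace
 * buffer together with the event type and the "from" location code.
 */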
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.sb.stcb = stcb;
    sctp_clog.x.sb.so_sbcc = sb->sb_cc;
    if (stcb)
        sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
    else
        sctp_clog.x.sb.stcb_sbcc = 0;
    sctp_clog.x.sb.incr = incr;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_SB, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.close.inp = (void *)inp;
    sctp_clog.x.close.sctp_flags = inp->sctp_flags;
    if (stcb) {
        sctp_clog.x.close.stcb = (void *)stcb;
        sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
    } else {
        sctp_clog.x.close.stcb = 0;
        sctp_clog.x.close.state = 0;
    }
    sctp_clog.x.close.loc = loc;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_CLOSE, 0,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.rto.net = (void *)net;
    sctp_clog.x.rto.rtt = net->rtt / 1000;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RTT, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.strlog.stcb = stcb;
    sctp_clog.x.strlog.n_tsn = tsn;
    sctp_clog.x.strlog.n_sseq = sseq;
    sctp_clog.x.strlog.e_tsn = 0;
    sctp_clog.x.strlog.e_sseq = 0;
    sctp_clog.x.strlog.strm = stream;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_STRM, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.nagle.stcb = (void *)stcb;
    sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
    sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
    sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
    sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_NAGLE, action,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.sack.cumack = cumack;
    sctp_clog.x.sack.oldcumack = old_cumack;
    sctp_clog.x.sack.tsn = tsn;
    sctp_clog.x.sack.numGaps = gaps;
    sctp_clog.x.sack.numDups = dups;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_SACK, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.map.base = map;
    sctp_clog.x.map.cum = cum;
    sctp_clog.x.map.high = high;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MAP, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.fr.largest_tsn = biggest_tsn;
    sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
    sctp_clog.x.fr.tsn = tsn;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_FR, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.mb.mp = m;
    sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
    sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
    sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
    if (SCTP_BUF_IS_EXTENDED(m)) {
        sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
        sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
    } else {
        sctp_clog.x.mb.ext = 0;
        sctp_clog.x.mb.refcnt = 0;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MBUF, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
    struct mbuf *mat;

    for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
        sctp_log_mb(mat, from);
    }
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    if (control == NULL) {
        SCTP_PRINTF("Gak log of NULL?\n");
        return;
    }
    sctp_clog.x.strlog.stcb = control->stcb;
    sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
    sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
    sctp_clog.x.strlog.strm = control->sinfo_stream;
    if (poschk != NULL) {
        sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
        sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
    } else {
        sctp_clog.x.strlog.e_tsn = 0;
        sctp_clog.x.strlog.e_sseq = 0;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_STRM, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.cwnd.net = net;
    if (stcb->asoc.send_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_send = 255;
    else
        sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
    if (stcb->asoc.stream_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_str = 255;
    else
        sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

    if (net) {
        sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
        sctp_clog.x.cwnd.inflight = net->flight_size;
        sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
        sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
        sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
    }
    if (SCTP_CWNDLOG_PRESEND == from) {
        sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
    }
    sctp_clog.x.cwnd.cwnd_augment = augment;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_CWND, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    if (inp) {
        sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
    } else {
        sctp_clog.x.lock.sock = (void *)NULL;
    }
    sctp_clog.x.lock.inp = (void *)inp;
    if (stcb) {
        sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
    } else {
        sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
    }
    if (inp) {
        sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
        sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
    } else {
        sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
    }
    sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
    if (inp && (inp->sctp_socket)) {
        sctp_clog.x.lock.sock_lock = mtx_owned(SOCK_MTX(inp->sctp_socket));
        sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_rcv));
        sctp_clog.x.lock.socksndbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_snd));
    } else {
        sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_LOCK_EVENT, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.cwnd.net = net;
    sctp_clog.x.cwnd.cwnd_new_value = error;
    sctp_clog.x.cwnd.inflight = net->flight_size;
    sctp_clog.x.cwnd.cwnd_augment = burst;
    if (stcb->asoc.send_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_send = 255;
    else
        sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
    if (stcb->asoc.stream_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_str = 255;
    else
        sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MAXBURST, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.rwnd.rwnd = peers_rwnd;
    sctp_clog.x.rwnd.send_size = snd_size;
    sctp_clog.x.rwnd.overhead = overhead;
    sctp_clog.x.rwnd.new_rwnd = 0;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RWND, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.rwnd.rwnd = peers_rwnd;
    sctp_clog.x.rwnd.send_size = flight_size;
    sctp_clog.x.rwnd.overhead = overhead;
    sctp_clog.x.rwnd.new_rwnd = a_rwndval;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RWND, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.mbcnt.total_queue_size = total_oq;
    sctp_clog.x.mbcnt.size_change = book;
    sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
    sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MBCNT, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_MISC_EVENT, from, a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.wake.stcb = (void *)stcb;
    sctp_clog.x.wake.wake_cnt = wake_cnt;
    sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
    sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
    sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

    if (stcb->asoc.stream_queue_cnt < 0xff)
        sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
    else
        sctp_clog.x.wake.stream_qcnt = 0xff;

    if (stcb->asoc.chunks_on_out_queue < 0xff)
        sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
    else
        sctp_clog.x.wake.chunks_on_oque = 0xff;

    sctp_clog.x.wake.sctpflags = 0;
    /* set in the deferred mode stuff */
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
        sctp_clog.x.wake.sctpflags |= 1;
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
        sctp_clog.x.wake.sctpflags |= 2;
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
        sctp_clog.x.wake.sctpflags |= 4;
    /* what about the sb */
    if (stcb->sctp_socket) {
        struct socket *so = stcb->sctp_socket;

        sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
    } else {
        sctp_clog.x.wake.sbflags = 0xff;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_WAKE, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
    sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
    sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
    sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
    sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
    sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
    sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_BLOCK, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
    /* May need to fix this if ktrdump does not work */
    return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static void
sctp_print_audit_report(void)
{
    int i;
    int cnt;

    cnt = 0;
    for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
        if ((sctp_audit_data[i][0] == 0xe0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if (sctp_audit_data[i][0] == 0xf0) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if ((sctp_audit_data[i][0] == 0xc0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            SCTP_PRINTF("\n");
            cnt = 0;
        }
        SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
            (uint32_t)sctp_audit_data[i][1]);
        cnt++;
        if ((cnt % 14) == 0)
            SCTP_PRINTF("\n");
    }
    for (i = 0; i < sctp_audit_indx; i++) {
        if ((sctp_audit_data[i][0] == 0xe0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if (sctp_audit_data[i][0] == 0xf0) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if ((sctp_audit_data[i][0] == 0xc0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            SCTP_PRINTF("\n");
            cnt = 0;
        }
        SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
            (uint32_t)sctp_audit_data[i][1]);
        cnt++;
        if ((cnt % 14) == 0)
            SCTP_PRINTF("\n");
    }
    SCTP_PRINTF("\n");
}
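
/*
 * sctp_auditing() cross-checks the association's cached counters
 * (sent_queue_retran_cnt, total_flight, total_flight_count and each
 * net's flight_size) against what is actually on the sent queue,
 * corrects any mismatch, and dumps the audit trail via
 * sctp_print_audit_report() if something had to be fixed.
 */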
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    int resend_cnt, tot_out, rep, tot_book_cnt;
    struct sctp_nets *lnet;
    struct sctp_tmit_chunk *chk;

    sctp_audit_data[sctp_audit_indx][0] = 0xAA;
    sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
    if (inp == NULL) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0x01;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        return;
    }
    if (stcb == NULL) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0x02;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        return;
    }
    sctp_audit_data[sctp_audit_indx][0] = 0xA1;
    sctp_audit_data[sctp_audit_indx][1] =
        (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
    rep = 0;
    tot_book_cnt = 0;
    resend_cnt = tot_out = 0;
    TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
        if (chk->sent == SCTP_DATAGRAM_RESEND) {
            resend_cnt++;
        } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
            tot_out += chk->book_size;
            tot_book_cnt++;
        }
    }
    if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA1;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
            resend_cnt, stcb->asoc.sent_queue_retran_cnt);
        rep = 1;
        stcb->asoc.sent_queue_retran_cnt = resend_cnt;
        sctp_audit_data[sctp_audit_indx][0] = 0xA2;
        sctp_audit_data[sctp_audit_indx][1] =
            (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
    }
    if (tot_out != stcb->asoc.total_flight) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA2;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
            (int)stcb->asoc.total_flight);
        stcb->asoc.total_flight = tot_out;
    }
    if (tot_book_cnt != stcb->asoc.total_flight_count) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA5;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

        stcb->asoc.total_flight_count = tot_book_cnt;
    }
    tot_out = 0;
    TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
        tot_out += lnet->flight_size;
    }
    if (tot_out != stcb->asoc.total_flight) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA3;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("real flight:%d net total was %d\n",
            stcb->asoc.total_flight, tot_out);
        /* now corrective action */
        TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
            tot_out = 0;
            TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
                if ((chk->whoTo == lnet) &&
                    (chk->sent < SCTP_DATAGRAM_RESEND)) {
                    tot_out += chk->book_size;
                }
            }
            if (lnet->flight_size != tot_out) {
                SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
                    (void *)lnet, lnet->flight_size, tot_out);
                lnet->flight_size = tot_out;
            }
        }
    }
    if (rep) {
        sctp_print_audit_report();
    }
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

    sctp_audit_data[sctp_audit_indx][0] = ev;
    sctp_audit_data[sctp_audit_indx][1] = fd;
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code that the time is positive and
 * know that it corresponds to a positive number of ticks.
 */
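
/*
 * For example, with hz = 100 a request of 1 msec becomes 1 tick
 * (10 msec) because of the rounding up, and sctp_ticks_to_msecs(1)
 * maps that tick back to 10 msec; a positive time therefore never
 * collapses to zero ticks.
 */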
uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
    uint64_t temp;
    uint32_t ticks;

    if (hz == 1000) {
        ticks = msecs;
    } else {
        temp = (((uint64_t)msecs * hz) + 999) / 1000;
        if (temp > UINT32_MAX) {
            ticks = UINT32_MAX;
        } else {
            ticks = (uint32_t)temp;
        }
    }
    return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
    uint64_t temp;
    uint32_t msecs;

    if (hz == 1000) {
        msecs = ticks;
    } else {
        temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
        if (temp > UINT32_MAX) {
            msecs = UINT32_MAX;
        } else {
            msecs = (uint32_t)temp;
        }
    }
    return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
    uint64_t temp;
    uint32_t ticks;

    temp = (uint64_t)secs * hz;
    if (temp > UINT32_MAX) {
        ticks = UINT32_MAX;
    } else {
        ticks = (uint32_t)temp;
    }
    return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
    uint64_t temp;
    uint32_t secs;

    temp = ((uint64_t)ticks + (hz - 1)) / hz;
    if (temp > UINT32_MAX) {
        secs = UINT32_MAX;
    } else {
        secs = (uint32_t)temp;
    }
    return (secs);
}

/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
    struct sctp_inpcb *inp;
    struct sctp_nets *net;

    inp = stcb->sctp_ep;

    sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
    sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
    sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
    sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
    TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
        sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
        sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
    }
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
    struct sctp_inpcb *inp;
    struct sctp_nets *net;

    inp = stcb->sctp_ep;
    sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
    sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
    if (stop_assoc_kill_timer) {
        sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
    }
    sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
    sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
    sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
    /* Mobility adaptation */
    sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
    TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
        sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
        sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
        sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
        sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
        sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
        sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
    }
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
    68,
    296,
    508,
    512,
    544,
    576,
    1004,
    1492,
    1500,
    1536,
    2000,
    2048,
    4352,
    4464,
    8168,
    17912,
    32000,
    65532
};

/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
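
/*
 * Worked example: with the table above, sctp_get_prev_mtu(1400)
 * returns 1004 and sctp_get_next_mtu(1400) returns 1492.
 */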
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
    uint32_t i;

    val &= 0xfffffffc;
    if (val <= sctp_mtu_sizes[0]) {
        return (val);
    }
    for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
        if (val <= sctp_mtu_sizes[i]) {
            break;
        }
    }
    KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
        ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
    return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4
 * smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
    /* select another MTU that is just bigger than this one */
    uint32_t i;

    val &= 0xfffffffc;
    for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
        if (val < sctp_mtu_sizes[i]) {
            KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
                ("sctp_mtu_sizes[%u] not a multiple of 4", i));
            return (sctp_mtu_sizes[i]);
        }
    }
    return (val);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
    /*
     * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
     * our counter. The result becomes our good random numbers and we
     * then setup to give these out. Note that we do no locking to
     * protect this. This is ok, since if competing folks call this we
     * will get more gobbledygook in the random store, which is what we
     * want. There is a danger that two guys will use the same random
     * numbers, but that's ok too since that is random as well :->
     */
    m->store_at = 0;
    (void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
        sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
        sizeof(m->random_counter), (uint8_t *)m->random_store);
    m->random_counter++;
}
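
/*
 * sctp_select_initial_TSN() below consumes the random store 32 bits at
 * a time; the atomic_cmpset_int() on store_at lets concurrent callers
 * claim slots without a lock, and whoever wraps the index back to zero
 * triggers the refill above.
 */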
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
    /*
     * A true implementation should use a random selection process to
     * get the initial stream sequence number, using RFC1750 as a good
     * guideline.
     */
    uint32_t x, *xp;
    uint8_t *p;
    int store_at, new_store;

    if (inp->initial_sequence_debug != 0) {
        uint32_t ret;

        ret = inp->initial_sequence_debug;
        inp->initial_sequence_debug++;
        return (ret);
    }
retry:
    store_at = inp->store_at;
    new_store = store_at + sizeof(uint32_t);
    if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
        new_store = 0;
    }
    if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
        goto retry;
    }
    if (new_store == 0) {
        /* Refill the random store */
        sctp_fill_random_store(inp);
    }
    p = &inp->random_store[store_at];
    xp = (uint32_t *)p;
    x = *xp;
    return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
    uint32_t x;
    struct timeval now;

    if (check) {
        (void)SCTP_GETTIME_TIMEVAL(&now);
    }
    for (;;) {
        x = sctp_select_initial_TSN(&inp->sctp_ep);
        if (x == 0) {
            /* we never use 0 */
            continue;
        }
        if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
            break;
        }
    }
    return (x);
}

int32_t
sctp_map_assoc_state(int kernel_state)
{
    int32_t user_state;

    if (kernel_state & SCTP_STATE_WAS_ABORTED) {
        user_state = SCTP_CLOSED;
    } else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
        user_state = SCTP_SHUTDOWN_PENDING;
    } else {
        switch (kernel_state & SCTP_STATE_MASK) {
        case SCTP_STATE_EMPTY:
            user_state = SCTP_CLOSED;
            break;
        case SCTP_STATE_INUSE:
            user_state = SCTP_CLOSED;
            break;
        case SCTP_STATE_COOKIE_WAIT:
            user_state = SCTP_COOKIE_WAIT;
            break;
        case SCTP_STATE_COOKIE_ECHOED:
            user_state = SCTP_COOKIE_ECHOED;
            break;
        case SCTP_STATE_OPEN:
            user_state = SCTP_ESTABLISHED;
            break;
        case SCTP_STATE_SHUTDOWN_SENT:
            user_state = SCTP_SHUTDOWN_SENT;
            break;
        case SCTP_STATE_SHUTDOWN_RECEIVED:
            user_state = SCTP_SHUTDOWN_RECEIVED;
            break;
        case SCTP_STATE_SHUTDOWN_ACK_SENT:
            user_state = SCTP_SHUTDOWN_ACK_SENT;
            break;
        default:
            user_state = SCTP_CLOSED;
            break;
        }
    }
    return (user_state);
}
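
/*
 * sctp_init_asoc() seeds a freshly allocated association with the
 * endpoint's defaults (the allocator has already zeroed the structure),
 * picks the verification tags and initial TSN, and allocates the
 * outgoing stream array and the two mapping arrays, returning ENOMEM
 * if any of those allocations fail.
 */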
int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t initial_tsn, uint32_t vrf_id,
    uint16_t o_strms)
{
    struct sctp_association *asoc;

    /*
     * Anything set to zero is taken care of by the allocation routine's
     * bzero.
     */

    /*
     * Up front select what scoping to apply on addresses I tell my peer.
     * Not sure what to do with these right now, we will need to come up
     * with a way to set them. We may need to pass them through from the
     * caller in the sctp_aloc_assoc() function.
     */
    int i;
#if defined(SCTP_DETAILED_STR_STATS)
    int j;
#endif

    asoc = &stcb->asoc;
    /* init all variables to a known value. */
    SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
    asoc->max_burst = inp->sctp_ep.max_burst;
    asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
    asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
    asoc->cookie_life = inp->sctp_ep.def_cookie_life;
    asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
    asoc->ecn_supported = inp->ecn_supported;
    asoc->prsctp_supported = inp->prsctp_supported;
    asoc->auth_supported = inp->auth_supported;
    asoc->asconf_supported = inp->asconf_supported;
    asoc->reconfig_supported = inp->reconfig_supported;
    asoc->nrsack_supported = inp->nrsack_supported;
    asoc->pktdrop_supported = inp->pktdrop_supported;
    asoc->idata_supported = inp->idata_supported;
    asoc->sctp_cmt_pf = (uint8_t)0;
    asoc->sctp_frag_point = inp->sctp_frag_point;
    asoc->sctp_features = inp->sctp_features;
    asoc->default_dscp = inp->sctp_ep.default_dscp;
    asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
    if (inp->sctp_ep.default_flowlabel) {
        asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
    } else {
        if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
            asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
            asoc->default_flowlabel &= 0x000fffff;
            asoc->default_flowlabel |= 0x80000000;
        } else {
            asoc->default_flowlabel = 0;
        }
    }
#endif
    asoc->sb_send_resv = 0;
    if (override_tag) {
        asoc->my_vtag = override_tag;
    } else {
        asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
    }
    /* Get the nonce tags */
    asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
    asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
    asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
    asoc->tsn_in_at = 0;
    asoc->tsn_out_at = 0;
    asoc->tsn_in_wrapped = 0;
    asoc->tsn_out_wrapped = 0;
    asoc->cumack_log_at = 0;
    asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
    asoc->fs_index = 0;
#endif
    asoc->refcnt = 0;
    asoc->assoc_up_sent = 0;
    if (override_tag) {
        asoc->init_seq_number = initial_tsn;
    } else {
        asoc->init_seq_number = sctp_select_initial_TSN(&inp->sctp_ep);
    }
    asoc->asconf_seq_out = asoc->init_seq_number;
    asoc->str_reset_seq_out = asoc->init_seq_number;
    asoc->sending_seq = asoc->init_seq_number;
    asoc->asconf_seq_out_acked = asoc->init_seq_number - 1;
    /* we are optimistic here */
    asoc->peer_supports_nat = 0;
    asoc->sent_queue_retran_cnt = 0;

    /* for CMT */
    asoc->last_net_cmt_send_started = NULL;

    asoc->last_acked_seq = asoc->init_seq_number - 1;
    asoc->advanced_peer_ack_point = asoc->init_seq_number - 1;
    asoc->asconf_seq_in = asoc->init_seq_number - 1;

    /* here we are different, we hold the next one we expect */
    asoc->str_reset_seq_in = asoc->init_seq_number;

    asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
    asoc->initial_rto = inp->sctp_ep.initial_rto;

    asoc->default_mtu = inp->sctp_ep.default_mtu;
    asoc->max_init_times = inp->sctp_ep.max_init_times;
    asoc->max_send_times = inp->sctp_ep.max_send_times;
    asoc->def_net_failure = inp->sctp_ep.def_net_failure;
    asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
    asoc->free_chunk_cnt = 0;

    asoc->iam_blocking = 0;
    asoc->context = inp->sctp_context;
    asoc->local_strreset_support = inp->local_strreset_support;
    asoc->def_send = inp->def_send;
    asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
    asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
    asoc->pr_sctp_cnt = 0;
    asoc->total_output_queue_size = 0;

    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
        asoc->scope.ipv6_addr_legal = 1;
        if (SCTP_IPV6_V6ONLY(inp) == 0) {
            asoc->scope.ipv4_addr_legal = 1;
        } else {
            asoc->scope.ipv4_addr_legal = 0;
        }
    } else {
        asoc->scope.ipv6_addr_legal = 0;
        asoc->scope.ipv4_addr_legal = 1;
    }

    asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
    asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

    asoc->smallest_mtu = inp->sctp_frag_point;
    asoc->minrto = inp->sctp_ep.sctp_minrto;
    asoc->maxrto = inp->sctp_ep.sctp_maxrto;

    asoc->stream_locked_on = 0;
    asoc->ecn_echo_cnt_onq = 0;
    asoc->stream_locked = 0;

    asoc->send_sack = 1;

    LIST_INIT(&asoc->sctp_restricted_addrs);

    TAILQ_INIT(&asoc->nets);
    TAILQ_INIT(&asoc->pending_reply_queue);
    TAILQ_INIT(&asoc->asconf_ack_sent);
    /* Setup to fill the hb random cache at first HB */
    asoc->hb_random_idx = 4;

    asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

    stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
    stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

    stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
    stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

    /*
     * Now the stream parameters, here we allocate space for all streams
     * that we request by default.
     */
    asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
        o_strms;
    SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
        asoc->streamoutcnt * sizeof(struct sctp_stream_out),
        SCTP_M_STRMO);
    if (asoc->strmout == NULL) {
        /* big trouble no memory */
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    for (i = 0; i < asoc->streamoutcnt; i++) {
        /*
         * inbound side must be set to 0xffff, also NOTE when we get
         * the INIT-ACK back (for INIT sender) we MUST reduce the
         * count (streamoutcnt) but first check if we sent to any of
         * the upper streams that were dropped (if some were). Those
         * that were dropped must be notified to the upper layer as
         * failed to send.
         */
        TAILQ_INIT(&asoc->strmout[i].outqueue);
        asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
        asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
        for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
            asoc->strmout[i].abandoned_sent[j] = 0;
            asoc->strmout[i].abandoned_unsent[j] = 0;
        }
#else
        asoc->strmout[i].abandoned_sent[0] = 0;
        asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
        asoc->strmout[i].next_mid_ordered = 0;
        asoc->strmout[i].next_mid_unordered = 0;
        asoc->strmout[i].sid = i;
        asoc->strmout[i].last_msg_incomplete = 0;
        asoc->strmout[i].state = SCTP_STREAM_OPENING;
    }
    asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

    /* Now the mapping array */
    asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
    SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
        SCTP_M_MAP);
    if (asoc->mapping_array == NULL) {
        SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    memset(asoc->mapping_array, 0, asoc->mapping_array_size);
    SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
        SCTP_M_MAP);
    if (asoc->nr_mapping_array == NULL) {
        SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
        SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

    /* Now the init of the other outqueues */
    TAILQ_INIT(&asoc->free_chunks);
    TAILQ_INIT(&asoc->control_send_queue);
    TAILQ_INIT(&asoc->asconf_send_queue);
    TAILQ_INIT(&asoc->send_queue);
    TAILQ_INIT(&asoc->sent_queue);
    TAILQ_INIT(&asoc->resetHead);
    asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
    TAILQ_INIT(&asoc->asconf_queue);
    /* authentication fields */
    asoc->authinfo.random = NULL;
    asoc->authinfo.active_keyid = 0;
    asoc->authinfo.assoc_key = NULL;
    asoc->authinfo.assoc_keyid = 0;
    asoc->authinfo.recv_key = NULL;
    asoc->authinfo.recv_keyid = 0;
    LIST_INIT(&asoc->shared_keys);
    asoc->marked_retrans = 0;
    asoc->port = inp->sctp_ep.port;
    asoc->timoinit = 0;
    asoc->timodata = 0;
    asoc->timosack = 0;
    asoc->timoshutdown = 0;
    asoc->timoheartbeat = 0;
    asoc->timocookie = 0;
    asoc->timoshutdownack = 0;
    (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
    asoc->discontinuity_time = asoc->start_time;
    for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
        asoc->abandoned_unsent[i] = 0;
        asoc->abandoned_sent[i] = 0;
    }
    /*
     * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
     * freed later when the association is freed.
     */
    return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
    unsigned int i, limit;

    SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
        asoc->mapping_array_size,
        asoc->mapping_array_base_tsn,
        asoc->cumulative_tsn,
        asoc->highest_tsn_inside_map,
        asoc->highest_tsn_inside_nr_map);
    for (limit = asoc->mapping_array_size; limit > 1; limit--) {
        if (asoc->mapping_array[limit - 1] != 0) {
            break;
        }
    }
    SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
    for (i = 0; i < limit; i++) {
        SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
    }
    if (limit % 16)
        SCTP_PRINTF("\n");
    for (limit = asoc->mapping_array_size; limit > 1; limit--) {
        if (asoc->nr_mapping_array[limit - 1]) {
            break;
        }
    }
    SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
    for (i = 0; i < limit; i++) {
        SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
    }
    if (limit % 16)
        SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
    /* mapping array needs to grow */
    uint8_t *new_array1, *new_array2;
    uint32_t new_size;

    new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
    SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
    SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
    if ((new_array1 == NULL) || (new_array2 == NULL)) {
        /* can't get more, forget it */
        SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
        if (new_array1) {
            SCTP_FREE(new_array1, SCTP_M_MAP);
        }
        if (new_array2) {
            SCTP_FREE(new_array2, SCTP_M_MAP);
        }
        return (-1);
    }
    memset(new_array1, 0, new_size);
    memset(new_array2, 0, new_size);
    memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
    memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
    SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
    SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
    asoc->mapping_array = new_array1;
    asoc->nr_mapping_array = new_array2;
    asoc->mapping_array_size = new_size;
    return (0);
}
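
/*
 * sctp_iterator_work() walks the matching endpoints (or a single one
 * when SCTP_ITERATOR_DO_SINGLE_INP is set), runs the per-endpoint and
 * per-association callbacks, and after SCTP_ITERATOR_MAX_AT_ONCE
 * associations briefly releases and re-acquires the global locks so
 * that other threads can make progress.
 */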
static void
sctp_iterator_work(struct sctp_iterator *it)
{
    struct epoch_tracker et;
    struct sctp_inpcb *tinp;
    int iteration_count = 0;
    int inp_skip = 0;
    int first_in = 1;

    NET_EPOCH_ENTER(et);
    SCTP_INP_INFO_RLOCK();
    SCTP_ITERATOR_LOCK();
    sctp_it_ctl.cur_it = it;
    if (it->inp) {
        SCTP_INP_RLOCK(it->inp);
        SCTP_INP_DECR_REF(it->inp);
    }
    if (it->inp == NULL) {
        /* iterator is complete */
done_with_iterator:
        sctp_it_ctl.cur_it = NULL;
        SCTP_ITERATOR_UNLOCK();
        SCTP_INP_INFO_RUNLOCK();
        if (it->function_atend != NULL) {
            (*it->function_atend) (it->pointer, it->val);
        }
        SCTP_FREE(it, SCTP_M_ITER);
        NET_EPOCH_EXIT(et);
        return;
    }
select_a_new_ep:
    if (first_in) {
        first_in = 0;
    } else {
        SCTP_INP_RLOCK(it->inp);
    }
    while (((it->pcb_flags) &&
        ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
        ((it->pcb_features) &&
        ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
        /* endpoint flags or features don't match, so keep looking */
        if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
            SCTP_INP_RUNLOCK(it->inp);
            goto done_with_iterator;
        }
        tinp = it->inp;
        it->inp = LIST_NEXT(it->inp, sctp_list);
        it->stcb = NULL;
        SCTP_INP_RUNLOCK(tinp);
        if (it->inp == NULL) {
            goto done_with_iterator;
        }
        SCTP_INP_RLOCK(it->inp);
    }
    /* now go through each assoc which is in the desired state */
    if (it->done_current_ep == 0) {
        if (it->function_inp != NULL)
            inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
        it->done_current_ep = 1;
    }
    if (it->stcb == NULL) {
        /* run the per instance function */
        it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
    }
    if ((inp_skip) || it->stcb == NULL) {
        if (it->function_inp_end != NULL) {
            inp_skip = (*it->function_inp_end) (it->inp,
                it->pointer,
                it->val);
        }
        SCTP_INP_RUNLOCK(it->inp);
        goto no_stcb;
    }
    while (it->stcb) {
        SCTP_TCB_LOCK(it->stcb);
        if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
            /* not in the right state... keep looking */
            SCTP_TCB_UNLOCK(it->stcb);
            goto next_assoc;
        }
        /* see if we have limited out the iterator loop */
        iteration_count++;
        if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
            /* Pause to let others grab the lock */
            atomic_add_int(&it->stcb->asoc.refcnt, 1);
            SCTP_TCB_UNLOCK(it->stcb);
            SCTP_INP_INCR_REF(it->inp);
            SCTP_INP_RUNLOCK(it->inp);
            SCTP_ITERATOR_UNLOCK();
            SCTP_INP_INFO_RUNLOCK();
            SCTP_INP_INFO_RLOCK();
            SCTP_ITERATOR_LOCK();
            if (sctp_it_ctl.iterator_flags) {
                /* We won't be staying here */
                SCTP_INP_DECR_REF(it->inp);
                atomic_add_int(&it->stcb->asoc.refcnt, -1);
                if (sctp_it_ctl.iterator_flags &
                    SCTP_ITERATOR_STOP_CUR_IT) {
                    sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
                    goto done_with_iterator;
                }
                if (sctp_it_ctl.iterator_flags &
                    SCTP_ITERATOR_STOP_CUR_INP) {
                    sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
                    goto no_stcb;
                }
                /* If we reach here huh? */
                SCTP_PRINTF("Unknown it ctl flag %x\n",
                    sctp_it_ctl.iterator_flags);
                sctp_it_ctl.iterator_flags = 0;
            }
            SCTP_INP_RLOCK(it->inp);
            SCTP_INP_DECR_REF(it->inp);
            SCTP_TCB_LOCK(it->stcb);
            atomic_add_int(&it->stcb->asoc.refcnt, -1);
            iteration_count = 0;
        }
        KASSERT(it->inp == it->stcb->sctp_ep,
            ("%s: stcb %p does not belong to inp %p, but inp %p",
            __func__, it->stcb, it->inp, it->stcb->sctp_ep));

        /* run function on this one */
        (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

        /*
         * we lie here, it really needs to have its own type but
         * first I must verify that this won't affect things :-0
         */
        if (it->no_chunk_output == 0)
            sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

        SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
        it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
        if (it->stcb == NULL) {
            /* Run last function */
            if (it->function_inp_end != NULL) {
                inp_skip = (*it->function_inp_end) (it->inp,
                    it->pointer,
                    it->val);
            }
        }
    }
    SCTP_INP_RUNLOCK(it->inp);
no_stcb:
    /* done with all assocs on this endpoint, move on to next endpoint */
    it->done_current_ep = 0;
    if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
        it->inp = NULL;
    } else {
        it->inp = LIST_NEXT(it->inp, sctp_list);
    }
    it->stcb = NULL;
    if (it->inp == NULL) {
        goto done_with_iterator;
    }
    goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
    struct sctp_iterator *it;

    /* This function is called with the WQ lock in place */
    sctp_it_ctl.iterator_running = 1;
    while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
        /* now lets work on this one */
        TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
        SCTP_IPI_ITERATOR_WQ_UNLOCK();
        CURVNET_SET(it->vn);
        sctp_iterator_work(it);
        CURVNET_RESTORE();
        SCTP_IPI_ITERATOR_WQ_LOCK();
        /* sa_ignore FREED_MEMORY */
    }
    sctp_it_ctl.iterator_running = 0;
    return;
}

static void
sctp_handle_addr_wq(void)
{
    /* deal with the ADDR wq from the rtsock calls */
    struct sctp_laddr *wi, *nwi;
    struct sctp_asconf_iterator *asc;

    SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
        sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
    if (asc == NULL) {
        /* Try later, no memory */
        sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
            (struct sctp_inpcb *)NULL,
            (struct sctp_tcb *)NULL,
            (struct sctp_nets *)NULL);
        return;
    }
    LIST_INIT(&asc->list_of_work);
    asc->cnt = 0;

    LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
        LIST_REMOVE(wi, sctp_nxt_addr);
        LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
        asc->cnt++;
    }

    if (asc->cnt == 0) {
        SCTP_FREE(asc, SCTP_M_ASC_IT);
    } else {
        int ret;

        ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
            sctp_asconf_iterator_stcb,
            NULL, /* No ep end for boundall */
            SCTP_PCB_FLAGS_BOUNDALL,
            SCTP_PCB_ANY_FEATURES,
            SCTP_ASOC_ANY_STATE,
            (void *)asc, 0,
            sctp_asconf_iterator_end, NULL, 0);
        if (ret) {
            SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
            /*
             * Freeing if we are stopping or put back on the
             * addr_wq.
             */
            if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
                sctp_asconf_iterator_end(asc, 0);
            } else {
                LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
                    LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
                }
                SCTP_FREE(asc, SCTP_M_ASC_IT);
            }
        }
    }
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */
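
/*
 * sctp_timeout_handler() is the common callout entry point for all of
 * the timer types listed above. It re-validates the timer (the callout
 * may have been rescheduled or stopped while the locks were being
 * acquired), dispatches on tmr->type, and on exit drops the inp/stcb/net
 * references that were taken when the timer was started.
 */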
void
sctp_timeout_handler(void *t)
{
    struct epoch_tracker et;
    struct timeval tv;
    struct sctp_inpcb *inp;
    struct sctp_tcb *stcb;
    struct sctp_nets *net;
    struct sctp_timer *tmr;
    struct mbuf *op_err;
    int type;
    int i, secret;
    bool did_output, released_asoc_reference;

    /*
     * If inp, stcb or net are not NULL, then references to these were
     * added when the timer was started, and must be released before
     * this function returns.
     */
    tmr = (struct sctp_timer *)t;
    inp = (struct sctp_inpcb *)tmr->ep;
    stcb = (struct sctp_tcb *)tmr->tcb;
    net = (struct sctp_nets *)tmr->net;
    CURVNET_SET((struct vnet *)tmr->vnet);
    NET_EPOCH_ENTER(et);
    released_asoc_reference = false;

#ifdef SCTP_AUDITING_ENABLED
    sctp_audit_log(0xF0, (uint8_t)tmr->type);
    sctp_auditing(3, inp, stcb, net);
#endif

    /* sanity checks... */
    KASSERT(tmr->self == NULL || tmr->self == tmr,
        ("sctp_timeout_handler: tmr->self corrupted"));
    KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
        ("sctp_timeout_handler: invalid timer type %d", tmr->type));
    type = tmr->type;
    KASSERT(stcb == NULL || stcb->sctp_ep == inp,
        ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
        type, stcb, stcb->sctp_ep));
    tmr->stopped_from = 0xa001;
    if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
        SCTPDBG(SCTP_DEBUG_TIMER2,
            "Timer type %d handler exiting due to CLOSED association.\n",
            type);
        goto out_decr;
    }
    tmr->stopped_from = 0xa002;
    SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
    if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
        SCTPDBG(SCTP_DEBUG_TIMER2,
            "Timer type %d handler exiting due to not being active.\n",
            type);
        goto out_decr;
    }

    tmr->stopped_from = 0xa003;
    if (stcb) {
        SCTP_TCB_LOCK(stcb);
        /*
         * Release reference so that association can be freed if
         * necessary below. This is safe now that we have acquired
         * the lock.
         */
        atomic_add_int(&stcb->asoc.refcnt, -1);
        released_asoc_reference = true;
        if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
            ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
            (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
            SCTPDBG(SCTP_DEBUG_TIMER2,
                "Timer type %d handler exiting due to CLOSED association.\n",
                type);
            goto out;
        }
    } else if (inp != NULL) {
        SCTP_INP_WLOCK(inp);
    } else {
        SCTP_WQ_ADDR_LOCK();
    }

    /* Record in stopped_from which timeout occurred. */
    tmr->stopped_from = type;
    /* mark as being serviced now */
    if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
        /*
         * Callout has been rescheduled.
         */
        goto out;
    }
    if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
        /*
         * Not active, so no action.
         */
        goto out;
    }
    SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

    /* call the handler for the appropriate timer type */
    switch (type) {
    case SCTP_TIMER_TYPE_SEND:
        KASSERT(inp != NULL && stcb != NULL && net != NULL,
            ("timeout of type %d: inp = %p, stcb = %p, net = %p",
            type, inp, stcb, net));
        SCTP_STAT_INCR(sctps_timodata);
        stcb->asoc.timodata++;
        stcb->asoc.num_send_timers_up--;
        if (stcb->asoc.num_send_timers_up < 0) {
            stcb->asoc.num_send_timers_up = 0;
        }
        SCTP_TCB_LOCK_ASSERT(stcb);
        if (sctp_t3rxt_timer(inp, stcb, net)) {
            /* no need to unlock on tcb its gone */

            goto out_decr;
        }
        SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
        did_output = true;
        if ((stcb->asoc.num_send_timers_up == 0) &&
            (stcb->asoc.sent_queue_cnt > 0)) {
            struct sctp_tmit_chunk *chk;

            /*
             * Safeguard. If there are chunks on the sent queue
             * but no timers running, something is wrong... so
             * we start a timer on the first chunk on the send
             * queue on whatever net it is sent to.
             */
1840 */ 1841 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1842 if (chk->whoTo != NULL) { 1843 break; 1844 } 1845 } 1846 if (chk != NULL) { 1847 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1848 } 1849 } 1850 break; 1851 case SCTP_TIMER_TYPE_INIT: 1852 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1853 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1854 type, inp, stcb, net)); 1855 SCTP_STAT_INCR(sctps_timoinit); 1856 stcb->asoc.timoinit++; 1857 if (sctp_t1init_timer(inp, stcb, net)) { 1858 /* no need to unlock on tcb its gone */ 1859 goto out_decr; 1860 } 1861 did_output = false; 1862 break; 1863 case SCTP_TIMER_TYPE_RECV: 1864 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1865 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1866 type, inp, stcb, net)); 1867 SCTP_STAT_INCR(sctps_timosack); 1868 stcb->asoc.timosack++; 1869 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1870 #ifdef SCTP_AUDITING_ENABLED 1871 sctp_auditing(4, inp, stcb, NULL); 1872 #endif 1873 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1874 did_output = true; 1875 break; 1876 case SCTP_TIMER_TYPE_SHUTDOWN: 1877 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1878 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1879 type, inp, stcb, net)); 1880 SCTP_STAT_INCR(sctps_timoshutdown); 1881 stcb->asoc.timoshutdown++; 1882 if (sctp_shutdown_timer(inp, stcb, net)) { 1883 /* no need to unlock on tcb its gone */ 1884 goto out_decr; 1885 } 1886 #ifdef SCTP_AUDITING_ENABLED 1887 sctp_auditing(4, inp, stcb, net); 1888 #endif 1889 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1890 did_output = true; 1891 break; 1892 case SCTP_TIMER_TYPE_HEARTBEAT: 1893 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1894 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1895 type, inp, stcb, net)); 1896 SCTP_STAT_INCR(sctps_timoheartbeat); 1897 stcb->asoc.timoheartbeat++; 1898 if (sctp_heartbeat_timer(inp, stcb, net)) { 1899 /* no need to unlock on tcb its gone */ 1900 goto out_decr; 1901 } 1902 #ifdef SCTP_AUDITING_ENABLED 1903 sctp_auditing(4, inp, stcb, net); 1904 #endif 1905 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1906 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1907 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1908 did_output = true; 1909 } else { 1910 did_output = false; 1911 } 1912 break; 1913 case SCTP_TIMER_TYPE_COOKIE: 1914 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1915 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1916 type, inp, stcb, net)); 1917 SCTP_STAT_INCR(sctps_timocookie); 1918 stcb->asoc.timocookie++; 1919 if (sctp_cookie_timer(inp, stcb, net)) { 1920 /* no need to unlock on tcb its gone */ 1921 goto out_decr; 1922 } 1923 #ifdef SCTP_AUDITING_ENABLED 1924 sctp_auditing(4, inp, stcb, net); 1925 #endif 1926 /* 1927 * We consider T3 and Cookie timer pretty much the same with 1928 * respect to where from in chunk_output. 
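 * (In other words, the cookie retransmission below reuses
 * SCTP_OUTPUT_FROM_T3 instead of defining a separate "from" code.)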
1929 */ 1930 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1931 did_output = true; 1932 break; 1933 case SCTP_TIMER_TYPE_NEWCOOKIE: 1934 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1935 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1936 type, inp, stcb, net)); 1937 SCTP_STAT_INCR(sctps_timosecret); 1938 (void)SCTP_GETTIME_TIMEVAL(&tv); 1939 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1940 inp->sctp_ep.last_secret_number = 1941 inp->sctp_ep.current_secret_number; 1942 inp->sctp_ep.current_secret_number++; 1943 if (inp->sctp_ep.current_secret_number >= 1944 SCTP_HOW_MANY_SECRETS) { 1945 inp->sctp_ep.current_secret_number = 0; 1946 } 1947 secret = (int)inp->sctp_ep.current_secret_number; 1948 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1949 inp->sctp_ep.secret_key[secret][i] = 1950 sctp_select_initial_TSN(&inp->sctp_ep); 1951 } 1952 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1953 did_output = false; 1954 break; 1955 case SCTP_TIMER_TYPE_PATHMTURAISE: 1956 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1957 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1958 type, inp, stcb, net)); 1959 SCTP_STAT_INCR(sctps_timopathmtu); 1960 sctp_pathmtu_timer(inp, stcb, net); 1961 did_output = false; 1962 break; 1963 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1964 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1965 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1966 type, inp, stcb, net)); 1967 if (sctp_shutdownack_timer(inp, stcb, net)) { 1968 /* no need to unlock on tcb its gone */ 1969 goto out_decr; 1970 } 1971 SCTP_STAT_INCR(sctps_timoshutdownack); 1972 stcb->asoc.timoshutdownack++; 1973 #ifdef SCTP_AUDITING_ENABLED 1974 sctp_auditing(4, inp, stcb, net); 1975 #endif 1976 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1977 did_output = true; 1978 break; 1979 case SCTP_TIMER_TYPE_ASCONF: 1980 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1981 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1982 type, inp, stcb, net)); 1983 SCTP_STAT_INCR(sctps_timoasconf); 1984 if (sctp_asconf_timer(inp, stcb, net)) { 1985 /* no need to unlock on tcb its gone */ 1986 goto out_decr; 1987 } 1988 #ifdef SCTP_AUDITING_ENABLED 1989 sctp_auditing(4, inp, stcb, net); 1990 #endif 1991 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 1992 did_output = true; 1993 break; 1994 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1995 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1996 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1997 type, inp, stcb, net)); 1998 SCTP_STAT_INCR(sctps_timoshutdownguard); 1999 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 2000 "Shutdown guard timer expired"); 2001 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2002 /* no need to unlock on tcb its gone */ 2003 goto out_decr; 2004 case SCTP_TIMER_TYPE_AUTOCLOSE: 2005 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2006 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2007 type, inp, stcb, net)); 2008 SCTP_STAT_INCR(sctps_timoautoclose); 2009 sctp_autoclose_timer(inp, stcb); 2010 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2011 did_output = true; 2012 break; 2013 case SCTP_TIMER_TYPE_STRRESET: 2014 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2015 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2016 type, inp, stcb, net)); 2017 SCTP_STAT_INCR(sctps_timostrmrst); 2018 if (sctp_strreset_timer(inp, stcb)) { 
2019 /* no need to unlock on tcb its gone */ 2020 goto out_decr; 2021 } 2022 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2023 did_output = true; 2024 break; 2025 case SCTP_TIMER_TYPE_INPKILL: 2026 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2027 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2028 type, inp, stcb, net)); 2029 SCTP_STAT_INCR(sctps_timoinpkill); 2030 /* 2031 * special case, take away our increment since WE are the 2032 * killer 2033 */ 2034 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2035 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2036 SCTP_INP_DECR_REF(inp); 2037 SCTP_INP_WUNLOCK(inp); 2038 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2039 SCTP_CALLED_FROM_INPKILL_TIMER); 2040 inp = NULL; 2041 goto out_decr; 2042 case SCTP_TIMER_TYPE_ASOCKILL: 2043 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2044 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2045 type, inp, stcb, net)); 2046 SCTP_STAT_INCR(sctps_timoassockill); 2047 /* Can we free it yet? */ 2048 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2049 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2050 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2051 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2052 /* 2053 * free asoc, always unlocks (or destroy's) so prevent 2054 * duplicate unlock or unlock of a free mtx :-0 2055 */ 2056 stcb = NULL; 2057 goto out_decr; 2058 case SCTP_TIMER_TYPE_ADDR_WQ: 2059 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2060 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2061 type, inp, stcb, net)); 2062 sctp_handle_addr_wq(); 2063 did_output = true; 2064 break; 2065 case SCTP_TIMER_TYPE_PRIM_DELETED: 2066 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2067 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2068 type, inp, stcb, net)); 2069 SCTP_STAT_INCR(sctps_timodelprim); 2070 sctp_delete_prim_timer(inp, stcb); 2071 did_output = false; 2072 break; 2073 default: 2074 #ifdef INVARIANTS 2075 panic("Unknown timer type %d", type); 2076 #else 2077 goto out; 2078 #endif 2079 } 2080 #ifdef SCTP_AUDITING_ENABLED 2081 sctp_audit_log(0xF1, (uint8_t)type); 2082 if (inp != NULL) 2083 sctp_auditing(5, inp, stcb, net); 2084 #endif 2085 if (did_output && (stcb != NULL)) { 2086 /* 2087 * Now we need to clean up the control chunk chain if an 2088 * ECNE is on it. It must be marked as UNSENT again so next 2089 * call will continue to send it until such time that we get 2090 * a CWR, to remove it. It is, however, less likely that we 2091 * will find a ecn echo on the chain though. 2092 */ 2093 sctp_fix_ecn_echo(&stcb->asoc); 2094 } 2095 out: 2096 if (stcb != NULL) { 2097 SCTP_TCB_UNLOCK(stcb); 2098 } else if (inp != NULL) { 2099 SCTP_INP_WUNLOCK(inp); 2100 } else { 2101 SCTP_WQ_ADDR_UNLOCK(); 2102 } 2103 2104 out_decr: 2105 /* These reference counts were incremented in sctp_timer_start(). */ 2106 if (inp != NULL) { 2107 SCTP_INP_DECR_REF(inp); 2108 } 2109 if ((stcb != NULL) && !released_asoc_reference) { 2110 atomic_add_int(&stcb->asoc.refcnt, -1); 2111 } 2112 if (net != NULL) { 2113 sctp_free_remote_addr(net); 2114 } 2115 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2116 CURVNET_RESTORE(); 2117 NET_EPOCH_EXIT(et); 2118 } 2119 2120 /*- 2121 * The following table shows which parameters must be provided 2122 * when calling sctp_timer_start(). For parameters not being 2123 * provided, NULL must be used. 
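 * For illustration only (calling patterns implied by the table below,
 * not a prescription): a retransmission timer for a destination is
 * started as
 *     sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
 * while the address work-queue timer takes no pointers at all:
 *     sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, NULL, NULL, NULL);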
2124 * 2125 * |Name |inp |stcb|net | 2126 * |-----------------------------|----|----|----| 2127 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2128 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2129 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2130 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2131 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2132 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2133 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2134 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2135 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2136 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2137 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2138 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2139 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2140 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2141 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2142 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2143 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2144 * 2145 */ 2146 2147 void 2148 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2149 struct sctp_nets *net) 2150 { 2151 struct sctp_timer *tmr; 2152 uint32_t to_ticks; 2153 uint32_t rndval, jitter; 2154 2155 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2156 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2157 t_type, stcb, stcb->sctp_ep)); 2158 tmr = NULL; 2159 if (stcb != NULL) { 2160 SCTP_TCB_LOCK_ASSERT(stcb); 2161 } else if (inp != NULL) { 2162 SCTP_INP_WLOCK_ASSERT(inp); 2163 } else { 2164 SCTP_WQ_ADDR_LOCK_ASSERT(); 2165 } 2166 if (stcb != NULL) { 2167 /* 2168 * Don't restart timer on association that's about to be 2169 * killed. 2170 */ 2171 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2172 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2173 SCTPDBG(SCTP_DEBUG_TIMER2, 2174 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2175 t_type, inp, stcb, net); 2176 return; 2177 } 2178 /* Don't restart timer on net that's been removed. */ 2179 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2180 SCTPDBG(SCTP_DEBUG_TIMER2, 2181 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2182 t_type, inp, stcb, net); 2183 return; 2184 } 2185 } 2186 switch (t_type) { 2187 case SCTP_TIMER_TYPE_SEND: 2188 /* Here we use the RTO timer. */ 2189 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2190 #ifdef INVARIANTS 2191 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2192 t_type, inp, stcb, net); 2193 #else 2194 return; 2195 #endif 2196 } 2197 tmr = &net->rxt_timer; 2198 if (net->RTO == 0) { 2199 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2200 } else { 2201 to_ticks = sctp_msecs_to_ticks(net->RTO); 2202 } 2203 break; 2204 case SCTP_TIMER_TYPE_INIT: 2205 /* 2206 * Here we use the INIT timer default usually about 1 2207 * second. 2208 */ 2209 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2210 #ifdef INVARIANTS 2211 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2212 t_type, inp, stcb, net); 2213 #else 2214 return; 2215 #endif 2216 } 2217 tmr = &net->rxt_timer; 2218 if (net->RTO == 0) { 2219 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2220 } else { 2221 to_ticks = sctp_msecs_to_ticks(net->RTO); 2222 } 2223 break; 2224 case SCTP_TIMER_TYPE_RECV: 2225 /* 2226 * Here we use the Delayed-Ack timer value from the inp, 2227 * ususually about 200ms. 
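 * (The value comes from stcb->asoc.delayed_ack, in milliseconds, and
 * is converted below with sctp_msecs_to_ticks(); assuming the usual
 * ms-to-ticks conversion, a 200 ms setting on an hz=1000 kernel comes
 * out to roughly 200 ticks.)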
2228 */ 2229 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2230 #ifdef INVARIANTS 2231 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2232 t_type, inp, stcb, net); 2233 #else 2234 return; 2235 #endif 2236 } 2237 tmr = &stcb->asoc.dack_timer; 2238 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2239 break; 2240 case SCTP_TIMER_TYPE_SHUTDOWN: 2241 /* Here we use the RTO of the destination. */ 2242 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2243 #ifdef INVARIANTS 2244 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2245 t_type, inp, stcb, net); 2246 #else 2247 return; 2248 #endif 2249 } 2250 tmr = &net->rxt_timer; 2251 if (net->RTO == 0) { 2252 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2253 } else { 2254 to_ticks = sctp_msecs_to_ticks(net->RTO); 2255 } 2256 break; 2257 case SCTP_TIMER_TYPE_HEARTBEAT: 2258 /* 2259 * The net is used here so that we can add in the RTO. Even 2260 * though we use a different timer. We also add the HB timer 2261 * PLUS a random jitter. 2262 */ 2263 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2264 #ifdef INVARIANTS 2265 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2266 t_type, inp, stcb, net); 2267 #else 2268 return; 2269 #endif 2270 } 2271 if ((net->dest_state & SCTP_ADDR_NOHB) && 2272 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 2273 SCTPDBG(SCTP_DEBUG_TIMER2, 2274 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2275 t_type, inp, stcb, net); 2276 return; 2277 } 2278 tmr = &net->hb_timer; 2279 if (net->RTO == 0) { 2280 to_ticks = stcb->asoc.initial_rto; 2281 } else { 2282 to_ticks = net->RTO; 2283 } 2284 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2285 jitter = rndval % to_ticks; 2286 if (to_ticks > 1) { 2287 to_ticks >>= 1; 2288 } 2289 if (jitter < (UINT32_MAX - to_ticks)) { 2290 to_ticks += jitter; 2291 } else { 2292 to_ticks = UINT32_MAX; 2293 } 2294 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2295 !(net->dest_state & SCTP_ADDR_PF)) { 2296 if (net->heart_beat_delay < (UINT32_MAX - to_ticks)) { 2297 to_ticks += net->heart_beat_delay; 2298 } else { 2299 to_ticks = UINT32_MAX; 2300 } 2301 } 2302 /* 2303 * Now we must convert the to_ticks that are now in ms to 2304 * ticks. 2305 */ 2306 to_ticks = sctp_msecs_to_ticks(to_ticks); 2307 break; 2308 case SCTP_TIMER_TYPE_COOKIE: 2309 /* 2310 * Here we can use the RTO timer from the network since one 2311 * RTT was complete. If a retransmission happened then we 2312 * will be using the RTO initial value. 2313 */ 2314 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2315 #ifdef INVARIANTS 2316 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2317 t_type, inp, stcb, net); 2318 #else 2319 return; 2320 #endif 2321 } 2322 tmr = &net->rxt_timer; 2323 if (net->RTO == 0) { 2324 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2325 } else { 2326 to_ticks = sctp_msecs_to_ticks(net->RTO); 2327 } 2328 break; 2329 case SCTP_TIMER_TYPE_NEWCOOKIE: 2330 /* 2331 * Nothing needed but the endpoint here ususually about 60 2332 * minutes. 
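 * (The duration is taken from
 * inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; when the timer
 * fires, the handler above rotates current_secret_number and generates
 * a fresh secret key before re-arming the timer.)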
2333 */ 2334 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2335 #ifdef INVARIANTS 2336 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2337 t_type, inp, stcb, net); 2338 #else 2339 return; 2340 #endif 2341 } 2342 tmr = &inp->sctp_ep.signature_change; 2343 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2344 break; 2345 case SCTP_TIMER_TYPE_PATHMTURAISE: 2346 /* 2347 * Here we use the value found in the EP for PMTUD, 2348 * ususually about 10 minutes. 2349 */ 2350 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2351 #ifdef INVARIANTS 2352 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2353 t_type, inp, stcb, net); 2354 #else 2355 return; 2356 #endif 2357 } 2358 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2359 SCTPDBG(SCTP_DEBUG_TIMER2, 2360 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2361 t_type, inp, stcb, net); 2362 return; 2363 } 2364 tmr = &net->pmtu_timer; 2365 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2366 break; 2367 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2368 /* Here we use the RTO of the destination. */ 2369 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2370 #ifdef INVARIANTS 2371 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2372 t_type, inp, stcb, net); 2373 #else 2374 return; 2375 #endif 2376 } 2377 tmr = &net->rxt_timer; 2378 if (net->RTO == 0) { 2379 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2380 } else { 2381 to_ticks = sctp_msecs_to_ticks(net->RTO); 2382 } 2383 break; 2384 case SCTP_TIMER_TYPE_ASCONF: 2385 /* 2386 * Here the timer comes from the stcb but its value is from 2387 * the net's RTO. 2388 */ 2389 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2390 #ifdef INVARIANTS 2391 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2392 t_type, inp, stcb, net); 2393 #else 2394 return; 2395 #endif 2396 } 2397 tmr = &stcb->asoc.asconf_timer; 2398 if (net->RTO == 0) { 2399 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2400 } else { 2401 to_ticks = sctp_msecs_to_ticks(net->RTO); 2402 } 2403 break; 2404 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2405 /* 2406 * Here we use the endpoints shutdown guard timer usually 2407 * about 3 minutes. 2408 */ 2409 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2410 #ifdef INVARIANTS 2411 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2412 t_type, inp, stcb, net); 2413 #else 2414 return; 2415 #endif 2416 } 2417 tmr = &stcb->asoc.shut_guard_timer; 2418 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2419 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2420 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2421 } else { 2422 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2423 } 2424 } else { 2425 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2426 } 2427 break; 2428 case SCTP_TIMER_TYPE_AUTOCLOSE: 2429 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2430 #ifdef INVARIANTS 2431 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2432 t_type, inp, stcb, net); 2433 #else 2434 return; 2435 #endif 2436 } 2437 tmr = &stcb->asoc.autoclose_timer; 2438 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2439 break; 2440 case SCTP_TIMER_TYPE_STRRESET: 2441 /* 2442 * Here the timer comes from the stcb but its value is from 2443 * the net's RTO. 
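 * (Note that although a net must be passed in here so the RTO can be
 * read, sctp_timer_start() stores tmr->net = NULL for this type, which
 * is why the stop table further below lists net as "No" for
 * SCTP_TIMER_TYPE_STRRESET.)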
2444 */ 2445 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2446 #ifdef INVARIANTS 2447 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2448 t_type, inp, stcb, net); 2449 #else 2450 return; 2451 #endif 2452 } 2453 tmr = &stcb->asoc.strreset_timer; 2454 if (net->RTO == 0) { 2455 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2456 } else { 2457 to_ticks = sctp_msecs_to_ticks(net->RTO); 2458 } 2459 break; 2460 case SCTP_TIMER_TYPE_INPKILL: 2461 /* 2462 * The inp is setup to die. We re-use the signature_chage 2463 * timer since that has stopped and we are in the GONE 2464 * state. 2465 */ 2466 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2467 #ifdef INVARIANTS 2468 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2469 t_type, inp, stcb, net); 2470 #else 2471 return; 2472 #endif 2473 } 2474 tmr = &inp->sctp_ep.signature_change; 2475 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2476 break; 2477 case SCTP_TIMER_TYPE_ASOCKILL: 2478 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2479 #ifdef INVARIANTS 2480 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2481 t_type, inp, stcb, net); 2482 #else 2483 return; 2484 #endif 2485 } 2486 tmr = &stcb->asoc.strreset_timer; 2487 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2488 break; 2489 case SCTP_TIMER_TYPE_ADDR_WQ: 2490 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2491 #ifdef INVARIANTS 2492 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2493 t_type, inp, stcb, net); 2494 #else 2495 return; 2496 #endif 2497 } 2498 /* Only 1 tick away :-) */ 2499 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2500 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2501 break; 2502 case SCTP_TIMER_TYPE_PRIM_DELETED: 2503 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2504 #ifdef INVARIANTS 2505 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2506 t_type, inp, stcb, net); 2507 #else 2508 return; 2509 #endif 2510 } 2511 tmr = &stcb->asoc.delete_prim_timer; 2512 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2513 break; 2514 default: 2515 #ifdef INVARIANTS 2516 panic("Unknown timer type %d", t_type); 2517 #else 2518 return; 2519 #endif 2520 } 2521 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2522 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2523 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2524 /* 2525 * We do NOT allow you to have it already running. If it is, 2526 * we leave the current one up unchanged. 2527 */ 2528 SCTPDBG(SCTP_DEBUG_TIMER2, 2529 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2530 t_type, inp, stcb, net); 2531 return; 2532 } 2533 /* At this point we can proceed. */ 2534 if (t_type == SCTP_TIMER_TYPE_SEND) { 2535 stcb->asoc.num_send_timers_up++; 2536 } 2537 tmr->stopped_from = 0; 2538 tmr->type = t_type; 2539 tmr->ep = (void *)inp; 2540 tmr->tcb = (void *)stcb; 2541 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2542 tmr->net = NULL; 2543 } else { 2544 tmr->net = (void *)net; 2545 } 2546 tmr->self = (void *)tmr; 2547 tmr->vnet = (void *)curvnet; 2548 tmr->ticks = sctp_get_tick_count(); 2549 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2550 SCTPDBG(SCTP_DEBUG_TIMER2, 2551 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2552 t_type, to_ticks, inp, stcb, net); 2553 /* 2554 * If this is a newly scheduled callout, as opposed to a 2555 * rescheduled one, increment relevant reference counts. 
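 * (Concretely: an inp reference via SCTP_INP_INCR_REF(), an stcb
 * reference via asoc.refcnt, and a net reference via ref_count. These
 * are dropped again either in sctp_timer_stop() or at the end of
 * sctp_timeout_handler().)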
2556 */ 2557 if (tmr->ep != NULL) { 2558 SCTP_INP_INCR_REF(inp); 2559 } 2560 if (tmr->tcb != NULL) { 2561 atomic_add_int(&stcb->asoc.refcnt, 1); 2562 } 2563 if (tmr->net != NULL) { 2564 atomic_add_int(&net->ref_count, 1); 2565 } 2566 } else { 2567 /* 2568 * This should not happen, since we checked for pending 2569 * above. 2570 */ 2571 SCTPDBG(SCTP_DEBUG_TIMER2, 2572 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2573 t_type, to_ticks, inp, stcb, net); 2574 } 2575 return; 2576 } 2577 2578 /*- 2579 * The following table shows which parameters must be provided 2580 * when calling sctp_timer_stop(). For parameters not being 2581 * provided, NULL must be used. 2582 * 2583 * |Name |inp |stcb|net | 2584 * |-----------------------------|----|----|----| 2585 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2586 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2587 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2588 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2589 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2590 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2591 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2592 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2593 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2594 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2595 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2596 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2597 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2598 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2599 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2600 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2601 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2602 * 2603 */ 2604 2605 void 2606 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2607 struct sctp_nets *net, uint32_t from) 2608 { 2609 struct sctp_timer *tmr; 2610 2611 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2612 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2613 t_type, stcb, stcb->sctp_ep)); 2614 if (stcb != NULL) { 2615 SCTP_TCB_LOCK_ASSERT(stcb); 2616 } else if (inp != NULL) { 2617 SCTP_INP_WLOCK_ASSERT(inp); 2618 } else { 2619 SCTP_WQ_ADDR_LOCK_ASSERT(); 2620 } 2621 tmr = NULL; 2622 switch (t_type) { 2623 case SCTP_TIMER_TYPE_SEND: 2624 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2625 #ifdef INVARIANTS 2626 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2627 t_type, inp, stcb, net); 2628 #else 2629 return; 2630 #endif 2631 } 2632 tmr = &net->rxt_timer; 2633 break; 2634 case SCTP_TIMER_TYPE_INIT: 2635 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2636 #ifdef INVARIANTS 2637 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2638 t_type, inp, stcb, net); 2639 #else 2640 return; 2641 #endif 2642 } 2643 tmr = &net->rxt_timer; 2644 break; 2645 case SCTP_TIMER_TYPE_RECV: 2646 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2647 #ifdef INVARIANTS 2648 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2649 t_type, inp, stcb, net); 2650 #else 2651 return; 2652 #endif 2653 } 2654 tmr = &stcb->asoc.dack_timer; 2655 break; 2656 case SCTP_TIMER_TYPE_SHUTDOWN: 2657 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2658 #ifdef INVARIANTS 2659 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2660 t_type, inp, stcb, net); 2661 #else 2662 return; 2663 #endif 2664 } 2665 tmr = &net->rxt_timer; 2666 break; 2667 case SCTP_TIMER_TYPE_HEARTBEAT: 2668 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2669 #ifdef INVARIANTS 2670 panic("sctp_timer_stop of type %d: inp = 
%p, stcb = %p, net = %p", 2671 t_type, inp, stcb, net); 2672 #else 2673 return; 2674 #endif 2675 } 2676 tmr = &net->hb_timer; 2677 break; 2678 case SCTP_TIMER_TYPE_COOKIE: 2679 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2680 #ifdef INVARIANTS 2681 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2682 t_type, inp, stcb, net); 2683 #else 2684 return; 2685 #endif 2686 } 2687 tmr = &net->rxt_timer; 2688 break; 2689 case SCTP_TIMER_TYPE_NEWCOOKIE: 2690 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2691 #ifdef INVARIANTS 2692 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2693 t_type, inp, stcb, net); 2694 #else 2695 return; 2696 #endif 2697 } 2698 tmr = &inp->sctp_ep.signature_change; 2699 break; 2700 case SCTP_TIMER_TYPE_PATHMTURAISE: 2701 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2702 #ifdef INVARIANTS 2703 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2704 t_type, inp, stcb, net); 2705 #else 2706 return; 2707 #endif 2708 } 2709 tmr = &net->pmtu_timer; 2710 break; 2711 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2712 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2713 #ifdef INVARIANTS 2714 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2715 t_type, inp, stcb, net); 2716 #else 2717 return; 2718 #endif 2719 } 2720 tmr = &net->rxt_timer; 2721 break; 2722 case SCTP_TIMER_TYPE_ASCONF: 2723 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2724 #ifdef INVARIANTS 2725 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2726 t_type, inp, stcb, net); 2727 #else 2728 return; 2729 #endif 2730 } 2731 tmr = &stcb->asoc.asconf_timer; 2732 break; 2733 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2734 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2735 #ifdef INVARIANTS 2736 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2737 t_type, inp, stcb, net); 2738 #else 2739 return; 2740 #endif 2741 } 2742 tmr = &stcb->asoc.shut_guard_timer; 2743 break; 2744 case SCTP_TIMER_TYPE_AUTOCLOSE: 2745 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2746 #ifdef INVARIANTS 2747 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2748 t_type, inp, stcb, net); 2749 #else 2750 return; 2751 #endif 2752 } 2753 tmr = &stcb->asoc.autoclose_timer; 2754 break; 2755 case SCTP_TIMER_TYPE_STRRESET: 2756 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2757 #ifdef INVARIANTS 2758 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2759 t_type, inp, stcb, net); 2760 #else 2761 return; 2762 #endif 2763 } 2764 tmr = &stcb->asoc.strreset_timer; 2765 break; 2766 case SCTP_TIMER_TYPE_INPKILL: 2767 /* 2768 * The inp is setup to die. We re-use the signature_chage 2769 * timer since that has stopped and we are in the GONE 2770 * state. 
2771 */ 2772 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2773 #ifdef INVARIANTS 2774 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2775 t_type, inp, stcb, net); 2776 #else 2777 return; 2778 #endif 2779 } 2780 tmr = &inp->sctp_ep.signature_change; 2781 break; 2782 case SCTP_TIMER_TYPE_ASOCKILL: 2783 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2784 #ifdef INVARIANTS 2785 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2786 t_type, inp, stcb, net); 2787 #else 2788 return; 2789 #endif 2790 } 2791 tmr = &stcb->asoc.strreset_timer; 2792 break; 2793 case SCTP_TIMER_TYPE_ADDR_WQ: 2794 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2795 #ifdef INVARIANTS 2796 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2797 t_type, inp, stcb, net); 2798 #else 2799 return; 2800 #endif 2801 } 2802 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2803 break; 2804 case SCTP_TIMER_TYPE_PRIM_DELETED: 2805 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2806 #ifdef INVARIANTS 2807 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2808 t_type, inp, stcb, net); 2809 #else 2810 return; 2811 #endif 2812 } 2813 tmr = &stcb->asoc.delete_prim_timer; 2814 break; 2815 default: 2816 #ifdef INVARIANTS 2817 panic("Unknown timer type %d", t_type); 2818 #else 2819 return; 2820 #endif 2821 } 2822 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2823 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2824 (tmr->type != t_type)) { 2825 /* 2826 * Ok we have a timer that is under joint use. Cookie timer 2827 * per chance with the SEND timer. We therefore are NOT 2828 * running the timer that the caller wants stopped. So just 2829 * return. 2830 */ 2831 SCTPDBG(SCTP_DEBUG_TIMER2, 2832 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2833 t_type, inp, stcb, net); 2834 return; 2835 } 2836 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2837 stcb->asoc.num_send_timers_up--; 2838 if (stcb->asoc.num_send_timers_up < 0) { 2839 stcb->asoc.num_send_timers_up = 0; 2840 } 2841 } 2842 tmr->self = NULL; 2843 tmr->stopped_from = from; 2844 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2845 KASSERT(tmr->ep == inp, 2846 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2847 t_type, inp, tmr->ep)); 2848 KASSERT(tmr->tcb == stcb, 2849 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2850 t_type, stcb, tmr->tcb)); 2851 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2852 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2853 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2854 t_type, net, tmr->net)); 2855 SCTPDBG(SCTP_DEBUG_TIMER2, 2856 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2857 t_type, inp, stcb, net); 2858 /* 2859 * If the timer was actually stopped, decrement reference 2860 * counts that were incremented in sctp_timer_start(). 2861 */ 2862 if (tmr->ep != NULL) { 2863 SCTP_INP_DECR_REF(inp); 2864 tmr->ep = NULL; 2865 } 2866 if (tmr->tcb != NULL) { 2867 atomic_add_int(&stcb->asoc.refcnt, -1); 2868 tmr->tcb = NULL; 2869 } 2870 if (tmr->net != NULL) { 2871 /* 2872 * Can't use net, since it doesn't work for 2873 * SCTP_TIMER_TYPE_ASCONF. 
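 * (For SCTP_TIMER_TYPE_ASCONF the caller passes net == NULL while the
 * timer still holds the net it was started with, so the stored
 * tmr->net is the pointer whose reference has to be released here.)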
2874 */ 2875 sctp_free_remote_addr((struct sctp_nets *)tmr->net); 2876 tmr->net = NULL; 2877 } 2878 } else { 2879 SCTPDBG(SCTP_DEBUG_TIMER2, 2880 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2881 t_type, inp, stcb, net); 2882 } 2883 return; 2884 } 2885 2886 uint32_t 2887 sctp_calculate_len(struct mbuf *m) 2888 { 2889 uint32_t tlen = 0; 2890 struct mbuf *at; 2891 2892 at = m; 2893 while (at) { 2894 tlen += SCTP_BUF_LEN(at); 2895 at = SCTP_BUF_NEXT(at); 2896 } 2897 return (tlen); 2898 } 2899 2900 void 2901 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2902 struct sctp_association *asoc, uint32_t mtu) 2903 { 2904 /* 2905 * Reset the P-MTU size on this association, this involves changing 2906 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2907 * allow the DF flag to be cleared. 2908 */ 2909 struct sctp_tmit_chunk *chk; 2910 unsigned int eff_mtu, ovh; 2911 2912 asoc->smallest_mtu = mtu; 2913 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2914 ovh = SCTP_MIN_OVERHEAD; 2915 } else { 2916 ovh = SCTP_MIN_V4_OVERHEAD; 2917 } 2918 eff_mtu = mtu - ovh; 2919 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2920 if (chk->send_size > eff_mtu) { 2921 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2922 } 2923 } 2924 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2925 if (chk->send_size > eff_mtu) { 2926 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2927 } 2928 } 2929 } 2930 2931 /* 2932 * Given an association and starting time of the current RTT period, update 2933 * RTO in number of msecs. net should point to the current network. 2934 * Return 1, if an RTO update was performed, return 0 if no update was 2935 * performed due to invalid starting point. 2936 */ 2937 2938 int 2939 sctp_calculate_rto(struct sctp_tcb *stcb, 2940 struct sctp_association *asoc, 2941 struct sctp_nets *net, 2942 struct timeval *old, 2943 int rtt_from_sack) 2944 { 2945 struct timeval now; 2946 uint64_t rtt_us; /* RTT in us */ 2947 int32_t rtt; /* RTT in ms */ 2948 uint32_t new_rto; 2949 int first_measure = 0; 2950 2951 /************************/ 2952 /* 1. calculate new RTT */ 2953 /************************/ 2954 /* get the current time */ 2955 if (stcb->asoc.use_precise_time) { 2956 (void)SCTP_GETPTIME_TIMEVAL(&now); 2957 } else { 2958 (void)SCTP_GETTIME_TIMEVAL(&now); 2959 } 2960 if ((old->tv_sec > now.tv_sec) || 2961 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2962 /* The starting point is in the future. */ 2963 return (0); 2964 } 2965 timevalsub(&now, old); 2966 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2967 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2968 /* The RTT is larger than a sane value. */ 2969 return (0); 2970 } 2971 /* store the current RTT in us */ 2972 net->rtt = rtt_us; 2973 /* compute rtt in ms */ 2974 rtt = (int32_t)(net->rtt / 1000); 2975 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2976 /* 2977 * Tell the CC module that a new update has just occurred 2978 * from a sack 2979 */ 2980 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2981 } 2982 /* 2983 * Do we need to determine the lan? We do this only on sacks i.e. 2984 * RTT being determined from data not non-data (HB/INIT->INITACK). 2985 */ 2986 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2987 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2988 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2989 net->lan_type = SCTP_LAN_INTERNET; 2990 } else { 2991 net->lan_type = SCTP_LAN_LOCAL; 2992 } 2993 } 2994 2995 /***************************/ 2996 /* 2. 
update RTTVAR & SRTT */ 2997 /***************************/ 2998 /*- 2999 * Compute the scaled average lastsa and the 3000 * scaled variance lastsv as described in van Jacobson 3001 * Paper "Congestion Avoidance and Control", Annex A. 3002 * 3003 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 3004 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 3005 */ 3006 if (net->RTO_measured) { 3007 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 3008 net->lastsa += rtt; 3009 if (rtt < 0) { 3010 rtt = -rtt; 3011 } 3012 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 3013 net->lastsv += rtt; 3014 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3015 rto_logging(net, SCTP_LOG_RTTVAR); 3016 } 3017 } else { 3018 /* First RTO measurment */ 3019 net->RTO_measured = 1; 3020 first_measure = 1; 3021 net->lastsa = rtt << SCTP_RTT_SHIFT; 3022 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3023 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3024 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3025 } 3026 } 3027 if (net->lastsv == 0) { 3028 net->lastsv = SCTP_CLOCK_GRANULARITY; 3029 } 3030 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3031 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3032 (stcb->asoc.sat_network_lockout == 0)) { 3033 stcb->asoc.sat_network = 1; 3034 } else if ((!first_measure) && stcb->asoc.sat_network) { 3035 stcb->asoc.sat_network = 0; 3036 stcb->asoc.sat_network_lockout = 1; 3037 } 3038 /* bound it, per C6/C7 in Section 5.3.1 */ 3039 if (new_rto < stcb->asoc.minrto) { 3040 new_rto = stcb->asoc.minrto; 3041 } 3042 if (new_rto > stcb->asoc.maxrto) { 3043 new_rto = stcb->asoc.maxrto; 3044 } 3045 net->RTO = new_rto; 3046 return (1); 3047 } 3048 3049 /* 3050 * return a pointer to a contiguous piece of data from the given mbuf chain 3051 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3052 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 3053 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 3054 */ 3055 caddr_t 3056 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3057 { 3058 uint32_t count; 3059 uint8_t *ptr; 3060 3061 ptr = in_ptr; 3062 if ((off < 0) || (len <= 0)) 3063 return (NULL); 3064 3065 /* find the desired start location */ 3066 while ((m != NULL) && (off > 0)) { 3067 if (off < SCTP_BUF_LEN(m)) 3068 break; 3069 off -= SCTP_BUF_LEN(m); 3070 m = SCTP_BUF_NEXT(m); 3071 } 3072 if (m == NULL) 3073 return (NULL); 3074 3075 /* is the current mbuf large enough (eg. contiguous)? */ 3076 if ((SCTP_BUF_LEN(m) - off) >= len) { 3077 return (mtod(m, caddr_t)+off); 3078 } else { 3079 /* else, it spans more than one mbuf, so save a temp copy... 
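 * (The loop below copies min(SCTP_BUF_LEN(m) - off, len) bytes from
 * each mbuf in turn into the caller-supplied buffer, so in_ptr must be
 * at least len bytes, as noted in the function header.)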
*/ 3080 while ((m != NULL) && (len > 0)) { 3081 count = min(SCTP_BUF_LEN(m) - off, len); 3082 memcpy(ptr, mtod(m, caddr_t)+off, count); 3083 len -= count; 3084 ptr += count; 3085 off = 0; 3086 m = SCTP_BUF_NEXT(m); 3087 } 3088 if ((m == NULL) && (len > 0)) 3089 return (NULL); 3090 else 3091 return ((caddr_t)in_ptr); 3092 } 3093 } 3094 3095 struct sctp_paramhdr * 3096 sctp_get_next_param(struct mbuf *m, 3097 int offset, 3098 struct sctp_paramhdr *pull, 3099 int pull_limit) 3100 { 3101 /* This just provides a typed signature to Peter's Pull routine */ 3102 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3103 (uint8_t *)pull)); 3104 } 3105 3106 struct mbuf * 3107 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3108 { 3109 struct mbuf *m_last; 3110 caddr_t dp; 3111 3112 if (padlen > 3) { 3113 return (NULL); 3114 } 3115 if (padlen <= M_TRAILINGSPACE(m)) { 3116 /* 3117 * The easy way. We hope the majority of the time we hit 3118 * here :) 3119 */ 3120 m_last = m; 3121 } else { 3122 /* Hard way we must grow the mbuf chain */ 3123 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3124 if (m_last == NULL) { 3125 return (NULL); 3126 } 3127 SCTP_BUF_LEN(m_last) = 0; 3128 SCTP_BUF_NEXT(m_last) = NULL; 3129 SCTP_BUF_NEXT(m) = m_last; 3130 } 3131 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3132 SCTP_BUF_LEN(m_last) += padlen; 3133 memset(dp, 0, padlen); 3134 return (m_last); 3135 } 3136 3137 struct mbuf * 3138 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3139 { 3140 /* find the last mbuf in chain and pad it */ 3141 struct mbuf *m_at; 3142 3143 if (last_mbuf != NULL) { 3144 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3145 } else { 3146 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3147 if (SCTP_BUF_NEXT(m_at) == NULL) { 3148 return (sctp_add_pad_tombuf(m_at, padval)); 3149 } 3150 } 3151 } 3152 return (NULL); 3153 } 3154 3155 static void 3156 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3157 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked) 3158 { 3159 struct mbuf *m_notify; 3160 struct sctp_assoc_change *sac; 3161 struct sctp_queued_to_read *control; 3162 unsigned int notif_len; 3163 uint16_t abort_len; 3164 unsigned int i; 3165 3166 if (stcb == NULL) { 3167 return; 3168 } 3169 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3170 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3171 if (abort != NULL) { 3172 abort_len = ntohs(abort->ch.chunk_length); 3173 /* 3174 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3175 * contiguous. 3176 */ 3177 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3178 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3179 } 3180 } else { 3181 abort_len = 0; 3182 } 3183 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3184 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3185 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3186 notif_len += abort_len; 3187 } 3188 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3189 if (m_notify == NULL) { 3190 /* Retry with smaller value. 
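 * (That is, fall back to a notification carrying only the base
 * sctp_assoc_change header, without the optional supported-features
 * or ABORT payload.)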
*/ 3191 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3192 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3193 if (m_notify == NULL) { 3194 goto set_error; 3195 } 3196 } 3197 SCTP_BUF_NEXT(m_notify) = NULL; 3198 sac = mtod(m_notify, struct sctp_assoc_change *); 3199 memset(sac, 0, notif_len); 3200 sac->sac_type = SCTP_ASSOC_CHANGE; 3201 sac->sac_flags = 0; 3202 sac->sac_length = sizeof(struct sctp_assoc_change); 3203 sac->sac_state = state; 3204 sac->sac_error = error; 3205 /* XXX verify these stream counts */ 3206 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3207 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3208 sac->sac_assoc_id = sctp_get_associd(stcb); 3209 if (notif_len > sizeof(struct sctp_assoc_change)) { 3210 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3211 i = 0; 3212 if (stcb->asoc.prsctp_supported == 1) { 3213 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3214 } 3215 if (stcb->asoc.auth_supported == 1) { 3216 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3217 } 3218 if (stcb->asoc.asconf_supported == 1) { 3219 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3220 } 3221 if (stcb->asoc.idata_supported == 1) { 3222 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3223 } 3224 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3225 if (stcb->asoc.reconfig_supported == 1) { 3226 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3227 } 3228 sac->sac_length += i; 3229 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3230 memcpy(sac->sac_info, abort, abort_len); 3231 sac->sac_length += abort_len; 3232 } 3233 } 3234 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3235 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3236 0, 0, stcb->asoc.context, 0, 0, 0, 3237 m_notify); 3238 if (control != NULL) { 3239 control->length = SCTP_BUF_LEN(m_notify); 3240 control->spec_flags = M_NOTIFICATION; 3241 /* not that we need this */ 3242 control->tail_mbuf = m_notify; 3243 sctp_add_to_readq(stcb->sctp_ep, stcb, 3244 control, 3245 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3246 so_locked); 3247 } else { 3248 sctp_m_freem(m_notify); 3249 } 3250 } 3251 /* 3252 * For 1-to-1 style sockets, we send up and error when an ABORT 3253 * comes in. 
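 * (Roughly: an ABORT from the peer maps to ECONNREFUSED while still in
 * COOKIE_WAIT and to ECONNRESET afterwards; a locally generated abort
 * maps to ETIMEDOUT in COOKIE_WAIT or COOKIE_ECHOED and to
 * ECONNABORTED otherwise, as set below.)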
3254 */ 3255 set_error: 3256 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3257 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3258 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3259 SOCK_LOCK(stcb->sctp_socket); 3260 if (from_peer) { 3261 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3262 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3263 stcb->sctp_socket->so_error = ECONNREFUSED; 3264 } else { 3265 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3266 stcb->sctp_socket->so_error = ECONNRESET; 3267 } 3268 } else { 3269 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 3270 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 3271 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3272 stcb->sctp_socket->so_error = ETIMEDOUT; 3273 } else { 3274 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3275 stcb->sctp_socket->so_error = ECONNABORTED; 3276 } 3277 } 3278 SOCK_UNLOCK(stcb->sctp_socket); 3279 } 3280 /* Wake ANY sleepers */ 3281 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3282 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3283 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3284 socantrcvmore(stcb->sctp_socket); 3285 } 3286 sorwakeup(stcb->sctp_socket); 3287 sowwakeup(stcb->sctp_socket); 3288 } 3289 3290 static void 3291 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3292 struct sockaddr *sa, uint32_t error, int so_locked) 3293 { 3294 struct mbuf *m_notify; 3295 struct sctp_paddr_change *spc; 3296 struct sctp_queued_to_read *control; 3297 3298 if ((stcb == NULL) || 3299 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3300 /* event not enabled */ 3301 return; 3302 } 3303 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3304 if (m_notify == NULL) 3305 return; 3306 SCTP_BUF_LEN(m_notify) = 0; 3307 spc = mtod(m_notify, struct sctp_paddr_change *); 3308 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3309 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3310 spc->spc_flags = 0; 3311 spc->spc_length = sizeof(struct sctp_paddr_change); 3312 switch (sa->sa_family) { 3313 #ifdef INET 3314 case AF_INET: 3315 #ifdef INET6 3316 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3317 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3318 (struct sockaddr_in6 *)&spc->spc_aaddr); 3319 } else { 3320 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3321 } 3322 #else 3323 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3324 #endif 3325 break; 3326 #endif 3327 #ifdef INET6 3328 case AF_INET6: 3329 { 3330 struct sockaddr_in6 *sin6; 3331 3332 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3333 3334 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3335 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3336 if (sin6->sin6_scope_id == 0) { 3337 /* recover scope_id for user */ 3338 (void)sa6_recoverscope(sin6); 3339 } else { 3340 /* clear embedded scope_id for user */ 3341 in6_clearscope(&sin6->sin6_addr); 3342 } 3343 } 3344 break; 3345 } 3346 #endif 3347 default: 3348 /* TSNH */ 3349 break; 3350 } 3351 spc->spc_state = state; 3352 spc->spc_error = error; 3353 spc->spc_assoc_id = sctp_get_associd(stcb); 3354 3355 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3356 SCTP_BUF_NEXT(m_notify) = NULL; 3357 3358 /* append to socket */ 3359 control = sctp_build_readq_entry(stcb, 
stcb->asoc.primary_destination, 3360 0, 0, stcb->asoc.context, 0, 0, 0, 3361 m_notify); 3362 if (control == NULL) { 3363 /* no memory */ 3364 sctp_m_freem(m_notify); 3365 return; 3366 } 3367 control->length = SCTP_BUF_LEN(m_notify); 3368 control->spec_flags = M_NOTIFICATION; 3369 /* not that we need this */ 3370 control->tail_mbuf = m_notify; 3371 sctp_add_to_readq(stcb->sctp_ep, stcb, 3372 control, 3373 &stcb->sctp_socket->so_rcv, 1, 3374 SCTP_READ_LOCK_NOT_HELD, 3375 so_locked); 3376 } 3377 3378 static void 3379 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3380 struct sctp_tmit_chunk *chk, int so_locked) 3381 { 3382 struct mbuf *m_notify; 3383 struct sctp_send_failed *ssf; 3384 struct sctp_send_failed_event *ssfe; 3385 struct sctp_queued_to_read *control; 3386 struct sctp_chunkhdr *chkhdr; 3387 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3388 3389 if ((stcb == NULL) || 3390 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3391 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3392 /* event not enabled */ 3393 return; 3394 } 3395 3396 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3397 notifhdr_len = sizeof(struct sctp_send_failed_event); 3398 } else { 3399 notifhdr_len = sizeof(struct sctp_send_failed); 3400 } 3401 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3402 if (m_notify == NULL) 3403 /* no space left */ 3404 return; 3405 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3406 if (stcb->asoc.idata_supported) { 3407 chkhdr_len = sizeof(struct sctp_idata_chunk); 3408 } else { 3409 chkhdr_len = sizeof(struct sctp_data_chunk); 3410 } 3411 /* Use some defaults in case we can't access the chunk header */ 3412 if (chk->send_size >= chkhdr_len) { 3413 payload_len = chk->send_size - chkhdr_len; 3414 } else { 3415 payload_len = 0; 3416 } 3417 padding_len = 0; 3418 if (chk->data != NULL) { 3419 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3420 if (chkhdr != NULL) { 3421 chk_len = ntohs(chkhdr->chunk_length); 3422 if ((chk_len >= chkhdr_len) && 3423 (chk->send_size >= chk_len) && 3424 (chk->send_size - chk_len < 4)) { 3425 padding_len = chk->send_size - chk_len; 3426 payload_len = chk->send_size - chkhdr_len - padding_len; 3427 } 3428 } 3429 } 3430 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3431 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3432 memset(ssfe, 0, notifhdr_len); 3433 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3434 if (sent) { 3435 ssfe->ssfe_flags = SCTP_DATA_SENT; 3436 } else { 3437 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3438 } 3439 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3440 ssfe->ssfe_error = error; 3441 /* not exactly what the user sent in, but should be close :) */ 3442 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3443 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3444 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3445 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3446 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3447 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3448 } else { 3449 ssf = mtod(m_notify, struct sctp_send_failed *); 3450 memset(ssf, 0, notifhdr_len); 3451 ssf->ssf_type = SCTP_SEND_FAILED; 3452 if (sent) { 3453 ssf->ssf_flags = SCTP_DATA_SENT; 3454 } else { 3455 ssf->ssf_flags = SCTP_DATA_UNSENT; 3456 } 3457 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3458 ssf->ssf_error = 
error; 3459 /* not exactly what the user sent in, but should be close :) */ 3460 ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3461 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3462 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3463 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3464 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3465 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3466 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3467 } 3468 if (chk->data != NULL) { 3469 /* Trim off the sctp chunk header (it should be there) */ 3470 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3471 m_adj(chk->data, chkhdr_len); 3472 m_adj(chk->data, -padding_len); 3473 sctp_mbuf_crush(chk->data); 3474 chk->send_size -= (chkhdr_len + padding_len); 3475 } 3476 } 3477 SCTP_BUF_NEXT(m_notify) = chk->data; 3478 /* Steal off the mbuf */ 3479 chk->data = NULL; 3480 /* 3481 * For this case, we check the actual socket buffer, since the assoc 3482 * is going away we don't want to overfill the socket buffer for a 3483 * non-reader 3484 */ 3485 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3486 sctp_m_freem(m_notify); 3487 return; 3488 } 3489 /* append to socket */ 3490 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3491 0, 0, stcb->asoc.context, 0, 0, 0, 3492 m_notify); 3493 if (control == NULL) { 3494 /* no memory */ 3495 sctp_m_freem(m_notify); 3496 return; 3497 } 3498 control->length = SCTP_BUF_LEN(m_notify); 3499 control->spec_flags = M_NOTIFICATION; 3500 /* not that we need this */ 3501 control->tail_mbuf = m_notify; 3502 sctp_add_to_readq(stcb->sctp_ep, stcb, 3503 control, 3504 &stcb->sctp_socket->so_rcv, 1, 3505 SCTP_READ_LOCK_NOT_HELD, 3506 so_locked); 3507 } 3508 3509 static void 3510 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3511 struct sctp_stream_queue_pending *sp, int so_locked) 3512 { 3513 struct mbuf *m_notify; 3514 struct sctp_send_failed *ssf; 3515 struct sctp_send_failed_event *ssfe; 3516 struct sctp_queued_to_read *control; 3517 int notifhdr_len; 3518 3519 if ((stcb == NULL) || 3520 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3521 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3522 /* event not enabled */ 3523 return; 3524 } 3525 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3526 notifhdr_len = sizeof(struct sctp_send_failed_event); 3527 } else { 3528 notifhdr_len = sizeof(struct sctp_send_failed); 3529 } 3530 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3531 if (m_notify == NULL) { 3532 /* no space left */ 3533 return; 3534 } 3535 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3536 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3537 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3538 memset(ssfe, 0, notifhdr_len); 3539 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3540 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3541 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3542 ssfe->ssfe_error = error; 3543 /* not exactly what the user sent in, but should be close :) */ 3544 ssfe->ssfe_info.snd_sid = sp->sid; 3545 if (sp->some_taken) { 3546 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3547 } else { 3548 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3549 } 3550 ssfe->ssfe_info.snd_ppid = sp->ppid; 3551 ssfe->ssfe_info.snd_context = sp->context; 3552 ssfe->ssfe_info.snd_assoc_id = 
sctp_get_associd(stcb); 3553 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3554 } else { 3555 ssf = mtod(m_notify, struct sctp_send_failed *); 3556 memset(ssf, 0, notifhdr_len); 3557 ssf->ssf_type = SCTP_SEND_FAILED; 3558 ssf->ssf_flags = SCTP_DATA_UNSENT; 3559 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3560 ssf->ssf_error = error; 3561 /* not exactly what the user sent in, but should be close :) */ 3562 ssf->ssf_info.sinfo_stream = sp->sid; 3563 ssf->ssf_info.sinfo_ssn = 0; 3564 if (sp->some_taken) { 3565 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3566 } else { 3567 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3568 } 3569 ssf->ssf_info.sinfo_ppid = sp->ppid; 3570 ssf->ssf_info.sinfo_context = sp->context; 3571 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3572 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3573 } 3574 SCTP_BUF_NEXT(m_notify) = sp->data; 3575 3576 /* Steal off the mbuf */ 3577 sp->data = NULL; 3578 /* 3579 * For this case, we check the actual socket buffer, since the assoc 3580 * is going away we don't want to overfill the socket buffer for a 3581 * non-reader 3582 */ 3583 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3584 sctp_m_freem(m_notify); 3585 return; 3586 } 3587 /* append to socket */ 3588 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3589 0, 0, stcb->asoc.context, 0, 0, 0, 3590 m_notify); 3591 if (control == NULL) { 3592 /* no memory */ 3593 sctp_m_freem(m_notify); 3594 return; 3595 } 3596 control->length = SCTP_BUF_LEN(m_notify); 3597 control->spec_flags = M_NOTIFICATION; 3598 /* not that we need this */ 3599 control->tail_mbuf = m_notify; 3600 sctp_add_to_readq(stcb->sctp_ep, stcb, 3601 control, 3602 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3603 } 3604 3605 static void 3606 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3607 { 3608 struct mbuf *m_notify; 3609 struct sctp_adaptation_event *sai; 3610 struct sctp_queued_to_read *control; 3611 3612 if ((stcb == NULL) || 3613 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3614 /* event not enabled */ 3615 return; 3616 } 3617 3618 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3619 if (m_notify == NULL) 3620 /* no space left */ 3621 return; 3622 SCTP_BUF_LEN(m_notify) = 0; 3623 sai = mtod(m_notify, struct sctp_adaptation_event *); 3624 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3625 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3626 sai->sai_flags = 0; 3627 sai->sai_length = sizeof(struct sctp_adaptation_event); 3628 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3629 sai->sai_assoc_id = sctp_get_associd(stcb); 3630 3631 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3632 SCTP_BUF_NEXT(m_notify) = NULL; 3633 3634 /* append to socket */ 3635 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3636 0, 0, stcb->asoc.context, 0, 0, 0, 3637 m_notify); 3638 if (control == NULL) { 3639 /* no memory */ 3640 sctp_m_freem(m_notify); 3641 return; 3642 } 3643 control->length = SCTP_BUF_LEN(m_notify); 3644 control->spec_flags = M_NOTIFICATION; 3645 /* not that we need this */ 3646 control->tail_mbuf = m_notify; 3647 sctp_add_to_readq(stcb->sctp_ep, stcb, 3648 control, 3649 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3650 } 3651 3652 /* This always must be called with the read-queue LOCKED in the INP */ 3653 static void 3654 
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3655 uint32_t val, int so_locked) 3656 { 3657 struct mbuf *m_notify; 3658 struct sctp_pdapi_event *pdapi; 3659 struct sctp_queued_to_read *control; 3660 struct sockbuf *sb; 3661 3662 if ((stcb == NULL) || 3663 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3664 /* event not enabled */ 3665 return; 3666 } 3667 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3668 return; 3669 } 3670 3671 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3672 if (m_notify == NULL) 3673 /* no space left */ 3674 return; 3675 SCTP_BUF_LEN(m_notify) = 0; 3676 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3677 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3678 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3679 pdapi->pdapi_flags = 0; 3680 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3681 pdapi->pdapi_indication = error; 3682 pdapi->pdapi_stream = (val >> 16); 3683 pdapi->pdapi_seq = (val & 0x0000ffff); 3684 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3685 3686 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3687 SCTP_BUF_NEXT(m_notify) = NULL; 3688 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3689 0, 0, stcb->asoc.context, 0, 0, 0, 3690 m_notify); 3691 if (control == NULL) { 3692 /* no memory */ 3693 sctp_m_freem(m_notify); 3694 return; 3695 } 3696 control->length = SCTP_BUF_LEN(m_notify); 3697 control->spec_flags = M_NOTIFICATION; 3698 /* not that we need this */ 3699 control->tail_mbuf = m_notify; 3700 sb = &stcb->sctp_socket->so_rcv; 3701 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3702 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3703 } 3704 sctp_sballoc(stcb, sb, m_notify); 3705 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3706 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3707 } 3708 control->end_added = 1; 3709 if (stcb->asoc.control_pdapi) 3710 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3711 else { 3712 /* we really should not see this case */ 3713 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3714 } 3715 if (stcb->sctp_ep && stcb->sctp_socket) { 3716 /* This should always be the case */ 3717 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3718 } 3719 } 3720 3721 static void 3722 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3723 { 3724 struct mbuf *m_notify; 3725 struct sctp_shutdown_event *sse; 3726 struct sctp_queued_to_read *control; 3727 3728 /* 3729 * For TCP model AND UDP connected sockets we will send an error up 3730 * when an SHUTDOWN completes 3731 */ 3732 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3733 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3734 /* mark socket closed for read/write and wakeup! 
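(Only the send side is actually marked below via socantsendmore(); the receive side stays readable so any queued data and the shutdown notification itself can still be picked up by the application.)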
*/ 3735 socantsendmore(stcb->sctp_socket); 3736 } 3737 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3738 /* event not enabled */ 3739 return; 3740 } 3741 3742 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3743 if (m_notify == NULL) 3744 /* no space left */ 3745 return; 3746 sse = mtod(m_notify, struct sctp_shutdown_event *); 3747 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3748 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3749 sse->sse_flags = 0; 3750 sse->sse_length = sizeof(struct sctp_shutdown_event); 3751 sse->sse_assoc_id = sctp_get_associd(stcb); 3752 3753 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3754 SCTP_BUF_NEXT(m_notify) = NULL; 3755 3756 /* append to socket */ 3757 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3758 0, 0, stcb->asoc.context, 0, 0, 0, 3759 m_notify); 3760 if (control == NULL) { 3761 /* no memory */ 3762 sctp_m_freem(m_notify); 3763 return; 3764 } 3765 control->length = SCTP_BUF_LEN(m_notify); 3766 control->spec_flags = M_NOTIFICATION; 3767 /* not that we need this */ 3768 control->tail_mbuf = m_notify; 3769 sctp_add_to_readq(stcb->sctp_ep, stcb, 3770 control, 3771 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3772 } 3773 3774 static void 3775 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3776 int so_locked) 3777 { 3778 struct mbuf *m_notify; 3779 struct sctp_sender_dry_event *event; 3780 struct sctp_queued_to_read *control; 3781 3782 if ((stcb == NULL) || 3783 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3784 /* event not enabled */ 3785 return; 3786 } 3787 3788 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3789 if (m_notify == NULL) { 3790 /* no space left */ 3791 return; 3792 } 3793 SCTP_BUF_LEN(m_notify) = 0; 3794 event = mtod(m_notify, struct sctp_sender_dry_event *); 3795 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3796 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3797 event->sender_dry_flags = 0; 3798 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3799 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3800 3801 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3802 SCTP_BUF_NEXT(m_notify) = NULL; 3803 3804 /* append to socket */ 3805 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3806 0, 0, stcb->asoc.context, 0, 0, 0, 3807 m_notify); 3808 if (control == NULL) { 3809 /* no memory */ 3810 sctp_m_freem(m_notify); 3811 return; 3812 } 3813 control->length = SCTP_BUF_LEN(m_notify); 3814 control->spec_flags = M_NOTIFICATION; 3815 /* not that we need this */ 3816 control->tail_mbuf = m_notify; 3817 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3818 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3819 } 3820 3821 void 3822 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3823 { 3824 struct mbuf *m_notify; 3825 struct sctp_queued_to_read *control; 3826 struct sctp_stream_change_event *stradd; 3827 3828 if ((stcb == NULL) || 3829 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3830 /* event not enabled */ 3831 return; 3832 } 3833 if ((stcb->asoc.peer_req_out) && flag) { 3834 /* Peer made the request, don't tell the local user */ 3835 stcb->asoc.peer_req_out = 0; 3836 return; 3837 } 3838 stcb->asoc.peer_req_out = 0; 3839 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3840 if (m_notify == NULL) 3841 /* no space left */ 3842 return; 3843 SCTP_BUF_LEN(m_notify) = 0; 3844 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3845 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3846 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3847 stradd->strchange_flags = flag; 3848 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3849 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3850 stradd->strchange_instrms = numberin; 3851 stradd->strchange_outstrms = numberout; 3852 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3853 SCTP_BUF_NEXT(m_notify) = NULL; 3854 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3855 /* no space */ 3856 sctp_m_freem(m_notify); 3857 return; 3858 } 3859 /* append to socket */ 3860 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3861 0, 0, stcb->asoc.context, 0, 0, 0, 3862 m_notify); 3863 if (control == NULL) { 3864 /* no memory */ 3865 sctp_m_freem(m_notify); 3866 return; 3867 } 3868 control->length = SCTP_BUF_LEN(m_notify); 3869 control->spec_flags = M_NOTIFICATION; 3870 /* not that we need this */ 3871 control->tail_mbuf = m_notify; 3872 sctp_add_to_readq(stcb->sctp_ep, stcb, 3873 control, 3874 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3875 } 3876 3877 void 3878 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3879 { 3880 struct mbuf *m_notify; 3881 struct sctp_queued_to_read *control; 3882 struct sctp_assoc_reset_event *strasoc; 3883 3884 if ((stcb == NULL) || 3885 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3886 /* event not enabled */ 3887 return; 3888 } 3889 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3890 if (m_notify == NULL) 3891 /* no space left */ 3892 return; 3893 SCTP_BUF_LEN(m_notify) = 0; 3894 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3895 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3896 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3897 strasoc->assocreset_flags = flag; 3898 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3899 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3900 strasoc->assocreset_local_tsn = sending_tsn; 3901 strasoc->assocreset_remote_tsn = recv_tsn; 3902 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3903 SCTP_BUF_NEXT(m_notify) = NULL; 3904 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3905 /* no space */ 3906 sctp_m_freem(m_notify); 3907 return; 3908 } 3909 /* append to socket */ 3910 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3911 0, 0, stcb->asoc.context, 0, 0, 0, 3912 m_notify); 3913 if (control == NULL) { 3914 /* no memory */ 3915 sctp_m_freem(m_notify); 3916 return; 3917 } 3918 control->length = SCTP_BUF_LEN(m_notify); 3919 control->spec_flags = M_NOTIFICATION; 3920 /* not that we need this */ 3921 control->tail_mbuf = m_notify; 3922 sctp_add_to_readq(stcb->sctp_ep, stcb, 3923 control, 3924 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3925 } 3926 3927 static void 3928 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3929 int number_entries, uint16_t *list, int flag) 3930 { 3931 struct mbuf *m_notify; 3932 struct sctp_queued_to_read 
*control; 3933 struct sctp_stream_reset_event *strreset; 3934 int len; 3935 3936 if ((stcb == NULL) || 3937 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3938 /* event not enabled */ 3939 return; 3940 } 3941 3942 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3943 if (m_notify == NULL) 3944 /* no space left */ 3945 return; 3946 SCTP_BUF_LEN(m_notify) = 0; 3947 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3948 if (len > M_TRAILINGSPACE(m_notify)) { 3949 /* never enough room */ 3950 sctp_m_freem(m_notify); 3951 return; 3952 } 3953 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3954 memset(strreset, 0, len); 3955 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3956 strreset->strreset_flags = flag; 3957 strreset->strreset_length = len; 3958 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3959 if (number_entries) { 3960 int i; 3961 3962 for (i = 0; i < number_entries; i++) { 3963 strreset->strreset_stream_list[i] = ntohs(list[i]); 3964 } 3965 } 3966 SCTP_BUF_LEN(m_notify) = len; 3967 SCTP_BUF_NEXT(m_notify) = NULL; 3968 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3969 /* no space */ 3970 sctp_m_freem(m_notify); 3971 return; 3972 } 3973 /* append to socket */ 3974 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3975 0, 0, stcb->asoc.context, 0, 0, 0, 3976 m_notify); 3977 if (control == NULL) { 3978 /* no memory */ 3979 sctp_m_freem(m_notify); 3980 return; 3981 } 3982 control->length = SCTP_BUF_LEN(m_notify); 3983 control->spec_flags = M_NOTIFICATION; 3984 /* not that we need this */ 3985 control->tail_mbuf = m_notify; 3986 sctp_add_to_readq(stcb->sctp_ep, stcb, 3987 control, 3988 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3989 } 3990 3991 static void 3992 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3993 { 3994 struct mbuf *m_notify; 3995 struct sctp_remote_error *sre; 3996 struct sctp_queued_to_read *control; 3997 unsigned int notif_len; 3998 uint16_t chunk_len; 3999 4000 if ((stcb == NULL) || 4001 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 4002 return; 4003 } 4004 if (chunk != NULL) { 4005 chunk_len = ntohs(chunk->ch.chunk_length); 4006 /* 4007 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 4008 * contiguous. 4009 */ 4010 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 4011 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4012 } 4013 } else { 4014 chunk_len = 0; 4015 } 4016 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4017 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4018 if (m_notify == NULL) { 4019 /* Retry with smaller value. 
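If even the smaller allocation fails, the notification is silently dropped; otherwise only the fixed-size sctp_remote_error header is queued and the offending chunk contents are omitted.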
*/ 4020 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4021 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4022 if (m_notify == NULL) { 4023 return; 4024 } 4025 } 4026 SCTP_BUF_NEXT(m_notify) = NULL; 4027 sre = mtod(m_notify, struct sctp_remote_error *); 4028 memset(sre, 0, notif_len); 4029 sre->sre_type = SCTP_REMOTE_ERROR; 4030 sre->sre_flags = 0; 4031 sre->sre_length = sizeof(struct sctp_remote_error); 4032 sre->sre_error = error; 4033 sre->sre_assoc_id = sctp_get_associd(stcb); 4034 if (notif_len > sizeof(struct sctp_remote_error)) { 4035 memcpy(sre->sre_data, chunk, chunk_len); 4036 sre->sre_length += chunk_len; 4037 } 4038 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4039 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4040 0, 0, stcb->asoc.context, 0, 0, 0, 4041 m_notify); 4042 if (control != NULL) { 4043 control->length = SCTP_BUF_LEN(m_notify); 4044 control->spec_flags = M_NOTIFICATION; 4045 /* not that we need this */ 4046 control->tail_mbuf = m_notify; 4047 sctp_add_to_readq(stcb->sctp_ep, stcb, 4048 control, 4049 &stcb->sctp_socket->so_rcv, 1, 4050 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4051 } else { 4052 sctp_m_freem(m_notify); 4053 } 4054 } 4055 4056 void 4057 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4058 uint32_t error, void *data, int so_locked) 4059 { 4060 if ((stcb == NULL) || 4061 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4062 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4063 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4064 /* If the socket is gone we are out of here */ 4065 return; 4066 } 4067 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4068 return; 4069 } 4070 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4071 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4072 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4073 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4074 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4075 /* Don't report these in front states */ 4076 return; 4077 } 4078 } 4079 switch (notification) { 4080 case SCTP_NOTIFY_ASSOC_UP: 4081 if (stcb->asoc.assoc_up_sent == 0) { 4082 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 4083 stcb->asoc.assoc_up_sent = 1; 4084 } 4085 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4086 sctp_notify_adaptation_layer(stcb); 4087 } 4088 if (stcb->asoc.auth_supported == 0) { 4089 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4090 NULL, so_locked); 4091 } 4092 break; 4093 case SCTP_NOTIFY_ASSOC_DOWN: 4094 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 4095 break; 4096 case SCTP_NOTIFY_INTERFACE_DOWN: 4097 { 4098 struct sctp_nets *net; 4099 4100 net = (struct sctp_nets *)data; 4101 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4102 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4103 break; 4104 } 4105 case SCTP_NOTIFY_INTERFACE_UP: 4106 { 4107 struct sctp_nets *net; 4108 4109 net = (struct sctp_nets *)data; 4110 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4111 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4112 break; 4113 } 4114 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4115 { 4116 struct sctp_nets *net; 4117 4118 net = (struct sctp_nets *)data; 4119 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4120 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4121 break; 4122 } 4123 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4124 
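/* A stream queue entry that never made it onto the wire is being discarded; report it through the send-failed notification path. */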
sctp_notify_send_failed2(stcb, error, 4125 (struct sctp_stream_queue_pending *)data, so_locked); 4126 break; 4127 case SCTP_NOTIFY_SENT_DG_FAIL: 4128 sctp_notify_send_failed(stcb, 1, error, 4129 (struct sctp_tmit_chunk *)data, so_locked); 4130 break; 4131 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4132 sctp_notify_send_failed(stcb, 0, error, 4133 (struct sctp_tmit_chunk *)data, so_locked); 4134 break; 4135 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4136 { 4137 uint32_t val; 4138 4139 val = *((uint32_t *)data); 4140 4141 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4142 break; 4143 } 4144 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4145 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4146 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4147 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked); 4148 } else { 4149 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked); 4150 } 4151 break; 4152 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4153 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4154 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4155 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked); 4156 } else { 4157 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked); 4158 } 4159 break; 4160 case SCTP_NOTIFY_ASSOC_RESTART: 4161 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked); 4162 if (stcb->asoc.auth_supported == 0) { 4163 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4164 NULL, so_locked); 4165 } 4166 break; 4167 case SCTP_NOTIFY_STR_RESET_SEND: 4168 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4169 break; 4170 case SCTP_NOTIFY_STR_RESET_RECV: 4171 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4172 break; 4173 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4174 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4175 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4176 break; 4177 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4178 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4179 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4180 break; 4181 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4182 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4183 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4184 break; 4185 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4186 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4187 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4188 break; 4189 case SCTP_NOTIFY_ASCONF_ADD_IP: 4190 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4191 error, so_locked); 4192 break; 4193 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4194 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4195 error, so_locked); 4196 break; 4197 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4198 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4199 error, so_locked); 4200 break; 4201 case SCTP_NOTIFY_PEER_SHUTDOWN: 4202 sctp_notify_shutdown_event(stcb); 4203 break; 4204 case SCTP_NOTIFY_AUTH_NEW_KEY: 4205 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4206 (uint16_t)(uintptr_t)data, 4207 so_locked); 4208 break; 4209 case SCTP_NOTIFY_AUTH_FREE_KEY: 4210 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4211 (uint16_t)(uintptr_t)data, 4212 so_locked); 4213 break; 4214 case SCTP_NOTIFY_NO_PEER_AUTH: 4215 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4216 
(uint16_t)(uintptr_t)data, 4217 so_locked); 4218 break; 4219 case SCTP_NOTIFY_SENDER_DRY: 4220 sctp_notify_sender_dry_event(stcb, so_locked); 4221 break; 4222 case SCTP_NOTIFY_REMOTE_ERROR: 4223 sctp_notify_remote_error(stcb, error, data); 4224 break; 4225 default: 4226 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4227 __func__, notification, notification); 4228 break; 4229 } /* end switch */ 4230 } 4231 4232 void 4233 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4234 { 4235 struct sctp_association *asoc; 4236 struct sctp_stream_out *outs; 4237 struct sctp_tmit_chunk *chk, *nchk; 4238 struct sctp_stream_queue_pending *sp, *nsp; 4239 int i; 4240 4241 if (stcb == NULL) { 4242 return; 4243 } 4244 asoc = &stcb->asoc; 4245 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4246 /* already being freed */ 4247 return; 4248 } 4249 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4250 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4251 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4252 return; 4253 } 4254 /* now through all the gunk freeing chunks */ 4255 /* sent queue SHOULD be empty */ 4256 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4257 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4258 asoc->sent_queue_cnt--; 4259 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4260 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4261 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4262 #ifdef INVARIANTS 4263 } else { 4264 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4265 #endif 4266 } 4267 } 4268 if (chk->data != NULL) { 4269 sctp_free_bufspace(stcb, asoc, chk, 1); 4270 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4271 error, chk, so_locked); 4272 if (chk->data) { 4273 sctp_m_freem(chk->data); 4274 chk->data = NULL; 4275 } 4276 } 4277 sctp_free_a_chunk(stcb, chk, so_locked); 4278 /* sa_ignore FREED_MEMORY */ 4279 } 4280 /* pending send queue SHOULD be empty */ 4281 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4282 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4283 asoc->send_queue_cnt--; 4284 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4285 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4286 #ifdef INVARIANTS 4287 } else { 4288 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4289 #endif 4290 } 4291 if (chk->data != NULL) { 4292 sctp_free_bufspace(stcb, asoc, chk, 1); 4293 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4294 error, chk, so_locked); 4295 if (chk->data) { 4296 sctp_m_freem(chk->data); 4297 chk->data = NULL; 4298 } 4299 } 4300 sctp_free_a_chunk(stcb, chk, so_locked); 4301 /* sa_ignore FREED_MEMORY */ 4302 } 4303 for (i = 0; i < asoc->streamoutcnt; i++) { 4304 /* For each stream */ 4305 outs = &asoc->strmout[i]; 4306 /* clean up any sends there */ 4307 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4308 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4309 TAILQ_REMOVE(&outs->outqueue, sp, next); 4310 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1); 4311 sctp_free_spbufspace(stcb, asoc, sp); 4312 if (sp->data) { 4313 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4314 error, (void *)sp, so_locked); 4315 if (sp->data) { 4316 sctp_m_freem(sp->data); 4317 sp->data = NULL; 4318 sp->tail_mbuf = NULL; 4319 sp->length = 0; 4320 } 4321 } 4322 if (sp->net) { 4323 sctp_free_remote_addr(sp->net); 4324 sp->net = NULL; 4325 } 4326 /* Free the chunk */ 4327 
sctp_free_a_strmoq(stcb, sp, so_locked); 4328 /* sa_ignore FREED_MEMORY */ 4329 } 4330 } 4331 } 4332 4333 void 4334 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error, 4335 struct sctp_abort_chunk *abort, int so_locked) 4336 { 4337 if (stcb == NULL) { 4338 return; 4339 } 4340 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4341 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4342 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4343 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 4344 } 4345 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4346 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4347 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4348 return; 4349 } 4350 SCTP_TCB_SEND_LOCK(stcb); 4351 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4352 /* Tell them we lost the asoc */ 4353 sctp_report_all_outbound(stcb, error, so_locked); 4354 SCTP_TCB_SEND_UNLOCK(stcb); 4355 if (from_peer) { 4356 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4357 } else { 4358 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4359 } 4360 } 4361 4362 void 4363 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4364 struct mbuf *m, int iphlen, 4365 struct sockaddr *src, struct sockaddr *dst, 4366 struct sctphdr *sh, struct mbuf *op_err, 4367 uint8_t mflowtype, uint32_t mflowid, 4368 uint32_t vrf_id, uint16_t port) 4369 { 4370 uint32_t vtag; 4371 4372 vtag = 0; 4373 if (stcb != NULL) { 4374 vtag = stcb->asoc.peer_vtag; 4375 vrf_id = stcb->asoc.vrf_id; 4376 } 4377 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4378 mflowtype, mflowid, inp->fibnum, 4379 vrf_id, port); 4380 if (stcb != NULL) { 4381 /* We have a TCB to abort, send notification too */ 4382 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 4383 /* Ok, now lets free it */ 4384 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4385 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4386 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4387 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4388 } 4389 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4390 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4391 } 4392 } 4393 #ifdef SCTP_ASOCLOG_OF_TSNS 4394 void 4395 sctp_print_out_track_log(struct sctp_tcb *stcb) 4396 { 4397 #ifdef NOSIY_PRINTS 4398 int i; 4399 4400 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4401 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4402 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4403 SCTP_PRINTF("None rcvd\n"); 4404 goto none_in; 4405 } 4406 if (stcb->asoc.tsn_in_wrapped) { 4407 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4408 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4409 stcb->asoc.in_tsnlog[i].tsn, 4410 stcb->asoc.in_tsnlog[i].strm, 4411 stcb->asoc.in_tsnlog[i].seq, 4412 stcb->asoc.in_tsnlog[i].flgs, 4413 stcb->asoc.in_tsnlog[i].sz); 4414 } 4415 } 4416 if (stcb->asoc.tsn_in_at) { 4417 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4418 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4419 stcb->asoc.in_tsnlog[i].tsn, 4420 stcb->asoc.in_tsnlog[i].strm, 4421 stcb->asoc.in_tsnlog[i].seq, 4422 stcb->asoc.in_tsnlog[i].flgs, 4423 stcb->asoc.in_tsnlog[i].sz); 4424 } 4425 } 4426 none_in: 4427 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4428 if ((stcb->asoc.tsn_out_at == 0) && 4429 (stcb->asoc.tsn_out_wrapped == 0)) { 4430 SCTP_PRINTF("None sent\n"); 4431 } 4432 if 
(stcb->asoc.tsn_out_wrapped) { 4433 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4434 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4435 stcb->asoc.out_tsnlog[i].tsn, 4436 stcb->asoc.out_tsnlog[i].strm, 4437 stcb->asoc.out_tsnlog[i].seq, 4438 stcb->asoc.out_tsnlog[i].flgs, 4439 stcb->asoc.out_tsnlog[i].sz); 4440 } 4441 } 4442 if (stcb->asoc.tsn_out_at) { 4443 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4444 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4445 stcb->asoc.out_tsnlog[i].tsn, 4446 stcb->asoc.out_tsnlog[i].strm, 4447 stcb->asoc.out_tsnlog[i].seq, 4448 stcb->asoc.out_tsnlog[i].flgs, 4449 stcb->asoc.out_tsnlog[i].sz); 4450 } 4451 } 4452 #endif 4453 } 4454 #endif 4455 4456 void 4457 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4458 struct mbuf *op_err, 4459 int so_locked) 4460 { 4461 4462 if (stcb == NULL) { 4463 /* Got to have a TCB */ 4464 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4465 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4466 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4467 SCTP_CALLED_DIRECTLY_NOCMPSET); 4468 } 4469 } 4470 return; 4471 } 4472 /* notify the peer */ 4473 sctp_send_abort_tcb(stcb, op_err, so_locked); 4474 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4475 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4476 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4477 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4478 } 4479 /* notify the ulp */ 4480 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4481 sctp_abort_notification(stcb, 0, 0, NULL, so_locked); 4482 } 4483 /* now free the asoc */ 4484 #ifdef SCTP_ASOCLOG_OF_TSNS 4485 sctp_print_out_track_log(stcb); 4486 #endif 4487 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4488 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4489 } 4490 4491 void 4492 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4493 struct sockaddr *src, struct sockaddr *dst, 4494 struct sctphdr *sh, struct sctp_inpcb *inp, 4495 struct mbuf *cause, 4496 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4497 uint32_t vrf_id, uint16_t port) 4498 { 4499 struct sctp_chunkhdr *ch, chunk_buf; 4500 unsigned int chk_length; 4501 int contains_init_chunk; 4502 4503 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4504 /* Generate a TO address for future reference */ 4505 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4506 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4507 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4508 SCTP_CALLED_DIRECTLY_NOCMPSET); 4509 } 4510 } 4511 contains_init_chunk = 0; 4512 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4513 sizeof(*ch), (uint8_t *)&chunk_buf); 4514 while (ch != NULL) { 4515 chk_length = ntohs(ch->chunk_length); 4516 if (chk_length < sizeof(*ch)) { 4517 /* break to abort land */ 4518 break; 4519 } 4520 switch (ch->chunk_type) { 4521 case SCTP_INIT: 4522 contains_init_chunk = 1; 4523 break; 4524 case SCTP_PACKET_DROPPED: 4525 /* we don't respond to pkt-dropped */ 4526 return; 4527 case SCTP_ABORT_ASSOCIATION: 4528 /* we don't respond with an ABORT to an ABORT */ 4529 return; 4530 case SCTP_SHUTDOWN_COMPLETE: 4531 /* 4532 * we ignore it since we are not waiting for it and 4533 * peer is gone 4534 */ 4535 return; 4536 case SCTP_SHUTDOWN_ACK: 4537 sctp_send_shutdown_complete2(src, dst, sh, 4538 mflowtype, mflowid, fibnum, 4539 vrf_id, port); 4540 return; 4541 default: 4542 break; 4543 } 4544 offset += SCTP_SIZE32(chk_length); 4545 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4546 sizeof(*ch), (uint8_t 
*)&chunk_buf); 4547 } 4548 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4549 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4550 (contains_init_chunk == 0))) { 4551 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4552 mflowtype, mflowid, fibnum, 4553 vrf_id, port); 4554 } 4555 } 4556 4557 /* 4558 * check the inbound datagram to make sure there is not an abort inside it, 4559 * if there is return 1, else return 0. 4560 */ 4561 int 4562 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtag) 4563 { 4564 struct sctp_chunkhdr *ch; 4565 struct sctp_init_chunk *init_chk, chunk_buf; 4566 int offset; 4567 unsigned int chk_length; 4568 4569 offset = iphlen + sizeof(struct sctphdr); 4570 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4571 (uint8_t *)&chunk_buf); 4572 while (ch != NULL) { 4573 chk_length = ntohs(ch->chunk_length); 4574 if (chk_length < sizeof(*ch)) { 4575 /* packet is probably corrupt */ 4576 break; 4577 } 4578 /* we seem to be ok, is it an abort? */ 4579 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4580 /* yep, tell them */ 4581 return (1); 4582 } 4583 if ((ch->chunk_type == SCTP_INITIATION) || 4584 (ch->chunk_type == SCTP_INITIATION_ACK)) { 4585 /* need to update the Vtag */ 4586 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4587 offset, sizeof(struct sctp_init_chunk), (uint8_t *)&chunk_buf); 4588 if (init_chk != NULL) { 4589 *vtag = ntohl(init_chk->init.initiate_tag); 4590 } 4591 } 4592 /* Nope, move to the next chunk */ 4593 offset += SCTP_SIZE32(chk_length); 4594 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4595 sizeof(*ch), (uint8_t *)&chunk_buf); 4596 } 4597 return (0); 4598 } 4599 4600 /* 4601 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4602 * set (i.e. it's 0) so, create this function to compare link local scopes 4603 */ 4604 #ifdef INET6 4605 uint32_t 4606 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4607 { 4608 struct sockaddr_in6 a, b; 4609 4610 /* save copies */ 4611 a = *addr1; 4612 b = *addr2; 4613 4614 if (a.sin6_scope_id == 0) 4615 if (sa6_recoverscope(&a)) { 4616 /* can't get scope, so can't match */ 4617 return (0); 4618 } 4619 if (b.sin6_scope_id == 0) 4620 if (sa6_recoverscope(&b)) { 4621 /* can't get scope, so can't match */ 4622 return (0); 4623 } 4624 if (a.sin6_scope_id != b.sin6_scope_id) 4625 return (0); 4626 4627 return (1); 4628 } 4629 4630 /* 4631 * returns a sockaddr_in6 with embedded scope recovered and removed 4632 */ 4633 struct sockaddr_in6 * 4634 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4635 { 4636 /* check and strip embedded scope junk */ 4637 if (addr->sin6_family == AF_INET6) { 4638 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4639 if (addr->sin6_scope_id == 0) { 4640 *store = *addr; 4641 if (!sa6_recoverscope(store)) { 4642 /* use the recovered scope */ 4643 addr = store; 4644 } 4645 } else { 4646 /* else, return the original "to" addr */ 4647 in6_clearscope(&addr->sin6_addr); 4648 } 4649 } 4650 } 4651 return (addr); 4652 } 4653 #endif 4654 4655 /* 4656 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4657 * if same, 0 if not 4658 */ 4659 int 4660 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4661 { 4662 4663 /* must be valid */ 4664 if (sa1 == NULL || sa2 == NULL) 4665 return (0); 4666 4667 /* must be the same family */ 4668 if (sa1->sa_family != sa2->sa_family) 4669 return (0); 4670 4671 switch (sa1->sa_family) { 4672 #ifdef INET6 4673 case AF_INET6: 4674 { 4675 /* IPv6 addresses */ 4676 struct sockaddr_in6 *sin6_1, *sin6_2; 4677 4678 sin6_1 = (struct sockaddr_in6 *)sa1; 4679 sin6_2 = (struct sockaddr_in6 *)sa2; 4680 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4681 sin6_2)); 4682 } 4683 #endif 4684 #ifdef INET 4685 case AF_INET: 4686 { 4687 /* IPv4 addresses */ 4688 struct sockaddr_in *sin_1, *sin_2; 4689 4690 sin_1 = (struct sockaddr_in *)sa1; 4691 sin_2 = (struct sockaddr_in *)sa2; 4692 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4693 } 4694 #endif 4695 default: 4696 /* we don't do these... */ 4697 return (0); 4698 } 4699 } 4700 4701 void 4702 sctp_print_address(struct sockaddr *sa) 4703 { 4704 #ifdef INET6 4705 char ip6buf[INET6_ADDRSTRLEN]; 4706 #endif 4707 4708 switch (sa->sa_family) { 4709 #ifdef INET6 4710 case AF_INET6: 4711 { 4712 struct sockaddr_in6 *sin6; 4713 4714 sin6 = (struct sockaddr_in6 *)sa; 4715 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4716 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4717 ntohs(sin6->sin6_port), 4718 sin6->sin6_scope_id); 4719 break; 4720 } 4721 #endif 4722 #ifdef INET 4723 case AF_INET: 4724 { 4725 struct sockaddr_in *sin; 4726 unsigned char *p; 4727 4728 sin = (struct sockaddr_in *)sa; 4729 p = (unsigned char *)&sin->sin_addr; 4730 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4731 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4732 break; 4733 } 4734 #endif 4735 default: 4736 SCTP_PRINTF("?\n"); 4737 break; 4738 } 4739 } 4740 4741 void 4742 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4743 struct sctp_inpcb *new_inp, 4744 struct sctp_tcb *stcb, 4745 int waitflags) 4746 { 4747 /* 4748 * go through our old INP and pull off any control structures that 4749 * belong to stcb and move then to the new inp. 4750 */ 4751 struct socket *old_so, *new_so; 4752 struct sctp_queued_to_read *control, *nctl; 4753 struct sctp_readhead tmp_queue; 4754 struct mbuf *m; 4755 int error = 0; 4756 4757 old_so = old_inp->sctp_socket; 4758 new_so = new_inp->sctp_socket; 4759 TAILQ_INIT(&tmp_queue); 4760 error = sblock(&old_so->so_rcv, waitflags); 4761 if (error) { 4762 /* 4763 * Gak, can't get sblock, we have a problem. data will be 4764 * left stranded.. and we don't dare look at it since the 4765 * other thread may be reading something. Oh well, its a 4766 * screwed up app that does a peeloff OR a accept while 4767 * reading from the main socket... actually its only the 4768 * peeloff() case, since I think read will fail on a 4769 * listening socket.. 4770 */ 4771 return; 4772 } 4773 /* lock the socket buffers */ 4774 SCTP_INP_READ_LOCK(old_inp); 4775 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4776 /* Pull off all for out target stcb */ 4777 if (control->stcb == stcb) { 4778 /* remove it we want it */ 4779 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4780 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4781 m = control->data; 4782 while (m) { 4783 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4784 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4785 } 4786 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4787 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4788 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4789 } 4790 m = SCTP_BUF_NEXT(m); 4791 } 4792 } 4793 } 4794 SCTP_INP_READ_UNLOCK(old_inp); 4795 /* Remove the sb-lock on the old socket */ 4796 4797 sbunlock(&old_so->so_rcv); 4798 /* Now we move them over to the new socket buffer */ 4799 SCTP_INP_READ_LOCK(new_inp); 4800 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4801 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4802 m = control->data; 4803 while (m) { 4804 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4805 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4806 } 4807 sctp_sballoc(stcb, &new_so->so_rcv, m); 4808 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4809 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4810 } 4811 m = SCTP_BUF_NEXT(m); 4812 } 4813 } 4814 SCTP_INP_READ_UNLOCK(new_inp); 4815 } 4816 4817 void 4818 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4819 struct sctp_tcb *stcb, 4820 int so_locked 4821 SCTP_UNUSED 4822 ) 4823 { 4824 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4825 sctp_sorwakeup(inp, inp->sctp_socket); 4826 } 4827 } 4828 4829 void 4830 sctp_add_to_readq(struct sctp_inpcb *inp, 4831 struct sctp_tcb *stcb, 4832 struct sctp_queued_to_read *control, 4833 struct sockbuf *sb, 4834 int end, 4835 int inp_read_lock_held, 4836 int so_locked) 4837 { 4838 /* 4839 * Here we must place the control on the end of the socket read 4840 * queue AND increment sb_cc so that select will work properly on 4841 * read. 4842 */ 4843 struct mbuf *m, *prev = NULL; 4844 4845 if (inp == NULL) { 4846 /* Gak, TSNH!! */ 4847 #ifdef INVARIANTS 4848 panic("Gak, inp NULL on add_to_readq"); 4849 #endif 4850 return; 4851 } 4852 if (inp_read_lock_held == 0) 4853 SCTP_INP_READ_LOCK(inp); 4854 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4855 if (!control->on_strm_q) { 4856 sctp_free_remote_addr(control->whoFrom); 4857 if (control->data) { 4858 sctp_m_freem(control->data); 4859 control->data = NULL; 4860 } 4861 sctp_free_a_readq(stcb, control); 4862 } 4863 if (inp_read_lock_held == 0) 4864 SCTP_INP_READ_UNLOCK(inp); 4865 return; 4866 } 4867 if (!(control->spec_flags & M_NOTIFICATION)) { 4868 atomic_add_int(&inp->total_recvs, 1); 4869 if (!control->do_not_ref_stcb) { 4870 atomic_add_int(&stcb->total_recvs, 1); 4871 } 4872 } 4873 m = control->data; 4874 control->held_length = 0; 4875 control->length = 0; 4876 while (m) { 4877 if (SCTP_BUF_LEN(m) == 0) { 4878 /* Skip mbufs with NO length */ 4879 if (prev == NULL) { 4880 /* First one */ 4881 control->data = sctp_m_free(m); 4882 m = control->data; 4883 } else { 4884 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4885 m = SCTP_BUF_NEXT(prev); 4886 } 4887 if (m == NULL) { 4888 control->tail_mbuf = prev; 4889 } 4890 continue; 4891 } 4892 prev = m; 4893 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4894 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4895 } 4896 sctp_sballoc(stcb, sb, m); 4897 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4898 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 4899 } 4900 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4901 m = SCTP_BUF_NEXT(m); 4902 } 4903 if (prev != NULL) { 4904 control->tail_mbuf = prev; 4905 } else { 4906 /* Everything got collapsed out?? */ 4907 if (!control->on_strm_q) { 4908 sctp_free_remote_addr(control->whoFrom); 4909 sctp_free_a_readq(stcb, control); 4910 } 4911 if (inp_read_lock_held == 0) 4912 SCTP_INP_READ_UNLOCK(inp); 4913 return; 4914 } 4915 if (end) { 4916 control->end_added = 1; 4917 } 4918 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4919 control->on_read_q = 1; 4920 if (inp_read_lock_held == 0) 4921 SCTP_INP_READ_UNLOCK(inp); 4922 if (inp && inp->sctp_socket) { 4923 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4924 } 4925 } 4926 4927 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4928 *************ALTERNATE ROUTING CODE 4929 */ 4930 4931 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4932 *************ALTERNATE ROUTING CODE 4933 */ 4934 4935 struct mbuf * 4936 sctp_generate_cause(uint16_t code, char *info) 4937 { 4938 struct mbuf *m; 4939 struct sctp_gen_error_cause *cause; 4940 size_t info_len; 4941 uint16_t len; 4942 4943 if ((code == 0) || (info == NULL)) { 4944 return (NULL); 4945 } 4946 info_len = strlen(info); 4947 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4948 return (NULL); 4949 } 4950 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4951 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4952 if (m != NULL) { 4953 SCTP_BUF_LEN(m) = len; 4954 cause = mtod(m, struct sctp_gen_error_cause *); 4955 cause->code = htons(code); 4956 cause->length = htons(len); 4957 memcpy(cause->info, info, info_len); 4958 } 4959 return (m); 4960 } 4961 4962 struct mbuf * 4963 sctp_generate_no_user_data_cause(uint32_t tsn) 4964 { 4965 struct mbuf *m; 4966 struct sctp_error_no_user_data *no_user_data_cause; 4967 uint16_t len; 4968 4969 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 4970 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4971 if (m != NULL) { 4972 SCTP_BUF_LEN(m) = len; 4973 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 4974 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 4975 no_user_data_cause->cause.length = htons(len); 4976 no_user_data_cause->tsn = htonl(tsn); 4977 } 4978 return (m); 4979 } 4980 4981 #ifdef SCTP_MBCNT_LOGGING 4982 void 4983 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4984 struct sctp_tmit_chunk *tp1, int chk_cnt) 4985 { 4986 if (tp1->data == NULL) { 4987 return; 4988 } 4989 asoc->chunks_on_out_queue -= chk_cnt; 4990 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 4991 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4992 asoc->total_output_queue_size, 4993 tp1->book_size, 4994 0, 4995 tp1->mbcnt); 4996 } 4997 if (asoc->total_output_queue_size >= tp1->book_size) { 4998 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4999 } else { 5000 asoc->total_output_queue_size = 0; 5001 } 5002 5003 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 5004 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 5005 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 5006 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 5007 } else { 5008 stcb->sctp_socket->so_snd.sb_cc = 0; 5009 } 5010 } 5011 } 5012 5013 #endif 5014 5015 int 5016 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5017 uint8_t sent, int so_locked) 
5018 { 5019 struct sctp_stream_out *strq; 5020 struct sctp_tmit_chunk *chk = NULL, *tp2; 5021 struct sctp_stream_queue_pending *sp; 5022 uint32_t mid; 5023 uint16_t sid; 5024 uint8_t foundeom = 0; 5025 int ret_sz = 0; 5026 int notdone; 5027 int do_wakeup_routine = 0; 5028 5029 sid = tp1->rec.data.sid; 5030 mid = tp1->rec.data.mid; 5031 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5032 stcb->asoc.abandoned_sent[0]++; 5033 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5034 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5035 #if defined(SCTP_DETAILED_STR_STATS) 5036 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5037 #endif 5038 } else { 5039 stcb->asoc.abandoned_unsent[0]++; 5040 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5041 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5042 #if defined(SCTP_DETAILED_STR_STATS) 5043 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5044 #endif 5045 } 5046 do { 5047 ret_sz += tp1->book_size; 5048 if (tp1->data != NULL) { 5049 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5050 sctp_flight_size_decrease(tp1); 5051 sctp_total_flight_decrease(stcb, tp1); 5052 } 5053 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5054 stcb->asoc.peers_rwnd += tp1->send_size; 5055 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5056 if (sent) { 5057 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5058 } else { 5059 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5060 } 5061 if (tp1->data) { 5062 sctp_m_freem(tp1->data); 5063 tp1->data = NULL; 5064 } 5065 do_wakeup_routine = 1; 5066 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5067 stcb->asoc.sent_queue_cnt_removeable--; 5068 } 5069 } 5070 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5071 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5072 SCTP_DATA_NOT_FRAG) { 5073 /* not frag'ed we ae done */ 5074 notdone = 0; 5075 foundeom = 1; 5076 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5077 /* end of frag, we are done */ 5078 notdone = 0; 5079 foundeom = 1; 5080 } else { 5081 /* 5082 * Its a begin or middle piece, we must mark all of 5083 * it 5084 */ 5085 notdone = 1; 5086 tp1 = TAILQ_NEXT(tp1, sctp_next); 5087 } 5088 } while (tp1 && notdone); 5089 if (foundeom == 0) { 5090 /* 5091 * The multi-part message was scattered across the send and 5092 * sent queue. 5093 */ 5094 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5095 if ((tp1->rec.data.sid != sid) || 5096 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5097 break; 5098 } 5099 /* 5100 * save to chk in case we have some on stream out 5101 * queue. If so and we have an un-transmitted one we 5102 * don't have to fudge the TSN. 5103 */ 5104 chk = tp1; 5105 ret_sz += tp1->book_size; 5106 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5107 if (sent) { 5108 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5109 } else { 5110 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5111 } 5112 if (tp1->data) { 5113 sctp_m_freem(tp1->data); 5114 tp1->data = NULL; 5115 } 5116 /* No flight involved here book the size to 0 */ 5117 tp1->book_size = 0; 5118 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5119 foundeom = 1; 5120 } 5121 do_wakeup_routine = 1; 5122 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5123 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5124 /* 5125 * on to the sent queue so we can wait for it to be 5126 * passed by. 
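(The chunk was marked SCTP_FORWARD_TSN_SKIP above, so once it sits on the sent queue the PR-SCTP FORWARD-TSN machinery can advance the cumulative TSN past it.)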
5127 */ 5128 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5129 sctp_next); 5130 stcb->asoc.send_queue_cnt--; 5131 stcb->asoc.sent_queue_cnt++; 5132 } 5133 } 5134 if (foundeom == 0) { 5135 /* 5136 * Still no eom found. That means there is stuff left on the 5137 * stream out queue.. yuck. 5138 */ 5139 SCTP_TCB_SEND_LOCK(stcb); 5140 strq = &stcb->asoc.strmout[sid]; 5141 sp = TAILQ_FIRST(&strq->outqueue); 5142 if (sp != NULL) { 5143 sp->discard_rest = 1; 5144 /* 5145 * We may need to put a chunk on the queue that 5146 * holds the TSN that would have been sent with the 5147 * LAST bit. 5148 */ 5149 if (chk == NULL) { 5150 /* Yep, we have to */ 5151 sctp_alloc_a_chunk(stcb, chk); 5152 if (chk == NULL) { 5153 /* 5154 * we are hosed. All we can do is 5155 * nothing.. which will cause an 5156 * abort if the peer is paying 5157 * attention. 5158 */ 5159 goto oh_well; 5160 } 5161 memset(chk, 0, sizeof(*chk)); 5162 chk->rec.data.rcv_flags = 0; 5163 chk->sent = SCTP_FORWARD_TSN_SKIP; 5164 chk->asoc = &stcb->asoc; 5165 if (stcb->asoc.idata_supported == 0) { 5166 if (sp->sinfo_flags & SCTP_UNORDERED) { 5167 chk->rec.data.mid = 0; 5168 } else { 5169 chk->rec.data.mid = strq->next_mid_ordered; 5170 } 5171 } else { 5172 if (sp->sinfo_flags & SCTP_UNORDERED) { 5173 chk->rec.data.mid = strq->next_mid_unordered; 5174 } else { 5175 chk->rec.data.mid = strq->next_mid_ordered; 5176 } 5177 } 5178 chk->rec.data.sid = sp->sid; 5179 chk->rec.data.ppid = sp->ppid; 5180 chk->rec.data.context = sp->context; 5181 chk->flags = sp->act_flags; 5182 chk->whoTo = NULL; 5183 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5184 strq->chunks_on_queues++; 5185 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5186 stcb->asoc.sent_queue_cnt++; 5187 stcb->asoc.pr_sctp_cnt++; 5188 } 5189 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5190 if (sp->sinfo_flags & SCTP_UNORDERED) { 5191 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5192 } 5193 if (stcb->asoc.idata_supported == 0) { 5194 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5195 strq->next_mid_ordered++; 5196 } 5197 } else { 5198 if (sp->sinfo_flags & SCTP_UNORDERED) { 5199 strq->next_mid_unordered++; 5200 } else { 5201 strq->next_mid_ordered++; 5202 } 5203 } 5204 oh_well: 5205 if (sp->data) { 5206 /* 5207 * Pull any data to free up the SB and allow 5208 * sender to "add more" while we will throw 5209 * away :-) 5210 */ 5211 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5212 ret_sz += sp->length; 5213 do_wakeup_routine = 1; 5214 sp->some_taken = 1; 5215 sctp_m_freem(sp->data); 5216 sp->data = NULL; 5217 sp->tail_mbuf = NULL; 5218 sp->length = 0; 5219 } 5220 } 5221 SCTP_TCB_SEND_UNLOCK(stcb); 5222 } 5223 if (do_wakeup_routine) { 5224 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5225 } 5226 return (ret_sz); 5227 } 5228 5229 /* 5230 * checks to see if the given address, sa, is one that is currently known by 5231 * the kernel note: can't distinguish the same address on multiple interfaces 5232 * and doesn't handle multiple addresses with different zone/scope id's note: 5233 * ifa_ifwithaddr() compares the entire sockaddr struct 5234 */ 5235 struct sctp_ifa * 5236 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5237 int holds_lock) 5238 { 5239 struct sctp_laddr *laddr; 5240 5241 if (holds_lock == 0) { 5242 SCTP_INP_RLOCK(inp); 5243 } 5244 5245 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5246 if (laddr->ifa == NULL) 5247 continue; 5248 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5249 continue; 5250 
#ifdef INET 5251 if (addr->sa_family == AF_INET) { 5252 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5253 laddr->ifa->address.sin.sin_addr.s_addr) { 5254 /* found him. */ 5255 break; 5256 } 5257 } 5258 #endif 5259 #ifdef INET6 5260 if (addr->sa_family == AF_INET6) { 5261 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5262 &laddr->ifa->address.sin6)) { 5263 /* found him. */ 5264 break; 5265 } 5266 } 5267 #endif 5268 } 5269 if (holds_lock == 0) { 5270 SCTP_INP_RUNLOCK(inp); 5271 } 5272 if (laddr != NULL) { 5273 return (laddr->ifa); 5274 } else { 5275 return (NULL); 5276 } 5277 } 5278 5279 uint32_t 5280 sctp_get_ifa_hash_val(struct sockaddr *addr) 5281 { 5282 switch (addr->sa_family) { 5283 #ifdef INET 5284 case AF_INET: 5285 { 5286 struct sockaddr_in *sin; 5287 5288 sin = (struct sockaddr_in *)addr; 5289 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5290 } 5291 #endif 5292 #ifdef INET6 5293 case AF_INET6: 5294 { 5295 struct sockaddr_in6 *sin6; 5296 uint32_t hash_of_addr; 5297 5298 sin6 = (struct sockaddr_in6 *)addr; 5299 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5300 sin6->sin6_addr.s6_addr32[1] + 5301 sin6->sin6_addr.s6_addr32[2] + 5302 sin6->sin6_addr.s6_addr32[3]); 5303 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5304 return (hash_of_addr); 5305 } 5306 #endif 5307 default: 5308 break; 5309 } 5310 return (0); 5311 } 5312 5313 struct sctp_ifa * 5314 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5315 { 5316 struct sctp_ifa *sctp_ifap; 5317 struct sctp_vrf *vrf; 5318 struct sctp_ifalist *hash_head; 5319 uint32_t hash_of_addr; 5320 5321 if (holds_lock == 0) { 5322 SCTP_IPI_ADDR_RLOCK(); 5323 } else { 5324 SCTP_IPI_ADDR_LOCK_ASSERT(); 5325 } 5326 5327 vrf = sctp_find_vrf(vrf_id); 5328 if (vrf == NULL) { 5329 if (holds_lock == 0) 5330 SCTP_IPI_ADDR_RUNLOCK(); 5331 return (NULL); 5332 } 5333 5334 hash_of_addr = sctp_get_ifa_hash_val(addr); 5335 5336 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5337 if (hash_head == NULL) { 5338 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5339 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5340 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5341 sctp_print_address(addr); 5342 SCTP_PRINTF("No such bucket for address\n"); 5343 if (holds_lock == 0) 5344 SCTP_IPI_ADDR_RUNLOCK(); 5345 5346 return (NULL); 5347 } 5348 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5349 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5350 continue; 5351 #ifdef INET 5352 if (addr->sa_family == AF_INET) { 5353 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5354 sctp_ifap->address.sin.sin_addr.s_addr) { 5355 /* found him. */ 5356 break; 5357 } 5358 } 5359 #endif 5360 #ifdef INET6 5361 if (addr->sa_family == AF_INET6) { 5362 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5363 &sctp_ifap->address.sin6)) { 5364 /* found him. */ 5365 break; 5366 } 5367 } 5368 #endif 5369 } 5370 if (holds_lock == 0) 5371 SCTP_IPI_ADDR_RUNLOCK(); 5372 return (sctp_ifap); 5373 } 5374 5375 static void 5376 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5377 uint32_t rwnd_req) 5378 { 5379 /* User pulled some data, do we need a rwnd update? 
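A window-update SACK is only sent once at least rwnd_req bytes have been freed since the last report (the caller computes rwnd_req as SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT, floored at SCTP_MIN_RWND); smaller amounts are just remembered for next time.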
*/ 5380 struct epoch_tracker et; 5381 int r_unlocked = 0; 5382 uint32_t dif, rwnd; 5383 struct socket *so = NULL; 5384 5385 if (stcb == NULL) 5386 return; 5387 5388 atomic_add_int(&stcb->asoc.refcnt, 1); 5389 5390 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5391 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5392 /* Pre-check: if we are being freed, no update is needed */ 5393 goto no_lock; 5394 } 5395 SCTP_INP_INCR_REF(stcb->sctp_ep); 5396 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5397 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5398 goto out; 5399 } 5400 so = stcb->sctp_socket; 5401 if (so == NULL) { 5402 goto out; 5403 } 5404 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5405 /* Have you freed enough to make a look worthwhile? */ 5406 *freed_so_far = 0; 5407 /* Yep, it's worth a look and the lock overhead */ 5408 5409 /* Figure out what the rwnd would be */ 5410 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5411 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5412 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5413 } else { 5414 dif = 0; 5415 } 5416 if (dif >= rwnd_req) { 5417 if (hold_rlock) { 5418 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5419 r_unlocked = 1; 5420 } 5421 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5422 /* 5423 * One last check before we possibly send an update. There is 5424 * a race where the association is about to be freed but has 5425 * not yet reached the gate; in that case just skip the report. 5426 */ 5427 goto out; 5428 } 5429 SCTP_TCB_LOCK(stcb); 5430 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5431 /* No reports here */ 5432 SCTP_TCB_UNLOCK(stcb); 5433 goto out; 5434 } 5435 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5436 NET_EPOCH_ENTER(et); 5437 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5438 5439 sctp_chunk_output(stcb->sctp_ep, stcb, 5440 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5441 /* make sure no timer is running */ 5442 NET_EPOCH_EXIT(et); 5443 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5444 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5445 SCTP_TCB_UNLOCK(stcb); 5446 } else { 5447 /* Update how much we have pending */ 5448 stcb->freed_by_sorcv_sincelast = dif; 5449 } 5450 out: 5451 if (so && r_unlocked && hold_rlock) { 5452 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5453 } 5454 5455 SCTP_INP_DECR_REF(stcb->sctp_ep); 5456 no_lock: 5457 atomic_add_int(&stcb->asoc.refcnt, -1); 5458 return; 5459 } 5460 5461 int 5462 sctp_sorecvmsg(struct socket *so, 5463 struct uio *uio, 5464 struct mbuf **mp, 5465 struct sockaddr *from, 5466 int fromlen, 5467 int *msg_flags, 5468 struct sctp_sndrcvinfo *sinfo, 5469 int filling_sinfo) 5470 { 5471 /* 5472 * MSG flags we will look at: MSG_DONTWAIT - non-blocking I/O. 5473 * MSG_PEEK - look, don't touch :-D (only valid with an out mbuf copy; 5474 * with mp=NULL, uio is the copy method to userland). MSG_WAITALL - ??
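 * As a point of reference, here is a minimal userland sketch of how this
 * receive path is typically driven (illustrative only; 'sd' is assumed to
 * be a descriptor for a connected one-to-one style SCTP socket,
 * handle_notification() and consume_message() are placeholder application
 * helpers, and sctp_recvmsg() is the RFC 6458 wrapper from <netinet/sctp.h>):
 *
 *	char buf[2048];
 *	struct sockaddr_in from;
 *	socklen_t fromlen = sizeof(from);
 *	struct sctp_sndrcvinfo sinfo;
 *	int flags = 0;
 *	ssize_t n;
 *
 *	n = sctp_recvmsg(sd, buf, sizeof(buf), (struct sockaddr *)&from,
 *	    &fromlen, &sinfo, &flags);
 *	if (n > 0 && (flags & MSG_NOTIFICATION)) {
 *		handle_notification((union sctp_notification *)buf, n);
 *	} else if (n > 0 && (flags & MSG_EOR)) {
 *		consume_message(buf, n, &sinfo);
 *	}
 *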
5475 * On the way out we may send out any combination of: 5476 * MSG_NOTIFICATION MSG_EOR 5477 * 5478 */ 5479 struct sctp_inpcb *inp = NULL; 5480 ssize_t my_len = 0; 5481 ssize_t cp_len = 0; 5482 int error = 0; 5483 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5484 struct mbuf *m = NULL; 5485 struct sctp_tcb *stcb = NULL; 5486 int wakeup_read_socket = 0; 5487 int freecnt_applied = 0; 5488 int out_flags = 0, in_flags = 0; 5489 int block_allowed = 1; 5490 uint32_t freed_so_far = 0; 5491 ssize_t copied_so_far = 0; 5492 int in_eeor_mode = 0; 5493 int no_rcv_needed = 0; 5494 uint32_t rwnd_req = 0; 5495 int hold_sblock = 0; 5496 int hold_rlock = 0; 5497 ssize_t slen = 0; 5498 uint32_t held_length = 0; 5499 int sockbuf_lock = 0; 5500 5501 if (uio == NULL) { 5502 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5503 return (EINVAL); 5504 } 5505 5506 if (msg_flags) { 5507 in_flags = *msg_flags; 5508 if (in_flags & MSG_PEEK) 5509 SCTP_STAT_INCR(sctps_read_peeks); 5510 } else { 5511 in_flags = 0; 5512 } 5513 slen = uio->uio_resid; 5514 5515 /* Pull in and set up our int flags */ 5516 if (in_flags & MSG_OOB) { 5517 /* Out of band's NOT supported */ 5518 return (EOPNOTSUPP); 5519 } 5520 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5521 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5522 return (EINVAL); 5523 } 5524 if ((in_flags & (MSG_DONTWAIT 5525 | MSG_NBIO 5526 )) || 5527 SCTP_SO_IS_NBIO(so)) { 5528 block_allowed = 0; 5529 } 5530 /* setup the endpoint */ 5531 inp = (struct sctp_inpcb *)so->so_pcb; 5532 if (inp == NULL) { 5533 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5534 return (EFAULT); 5535 } 5536 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5537 /* Must be at least a MTU's worth */ 5538 if (rwnd_req < SCTP_MIN_RWND) 5539 rwnd_req = SCTP_MIN_RWND; 5540 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5541 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5542 sctp_misc_ints(SCTP_SORECV_ENTER, 5543 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5544 } 5545 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5546 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5547 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5548 } 5549 5550 error = sblock(&so->so_rcv, (block_allowed ? 
SBL_WAIT : 0)); 5551 if (error) { 5552 goto release_unlocked; 5553 } 5554 sockbuf_lock = 1; 5555 restart: 5556 5557 restart_nosblocks: 5558 if (hold_sblock == 0) { 5559 SOCKBUF_LOCK(&so->so_rcv); 5560 hold_sblock = 1; 5561 } 5562 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5563 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5564 goto out; 5565 } 5566 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5567 if (so->so_error) { 5568 error = so->so_error; 5569 if ((in_flags & MSG_PEEK) == 0) 5570 so->so_error = 0; 5571 goto out; 5572 } else { 5573 if (so->so_rcv.sb_cc == 0) { 5574 /* indicate EOF */ 5575 error = 0; 5576 goto out; 5577 } 5578 } 5579 } 5580 if (so->so_rcv.sb_cc <= held_length) { 5581 if (so->so_error) { 5582 error = so->so_error; 5583 if ((in_flags & MSG_PEEK) == 0) { 5584 so->so_error = 0; 5585 } 5586 goto out; 5587 } 5588 if ((so->so_rcv.sb_cc == 0) && 5589 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5590 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5591 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5592 /* 5593 * For the active open side, clear flags for 5594 * re-use; a passive open is blocked by 5595 * connect. 5596 */ 5597 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5598 /* 5599 * You were aborted; the passive side 5600 * always hits here. 5601 */ 5602 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5603 error = ECONNRESET; 5604 } 5605 so->so_state &= ~(SS_ISCONNECTING | 5606 SS_ISDISCONNECTING | 5607 SS_ISCONFIRMING | 5608 SS_ISCONNECTED); 5609 if (error == 0) { 5610 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5611 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5612 error = ENOTCONN; 5613 } 5614 } 5615 goto out; 5616 } 5617 } 5618 if (block_allowed) { 5619 error = sbwait(&so->so_rcv); 5620 if (error) { 5621 goto out; 5622 } 5623 held_length = 0; 5624 goto restart_nosblocks; 5625 } else { 5626 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5627 error = EWOULDBLOCK; 5628 goto out; 5629 } 5630 } 5631 if (hold_sblock == 1) { 5632 SOCKBUF_UNLOCK(&so->so_rcv); 5633 hold_sblock = 0; 5634 } 5635 /* we possibly have data we can read */ 5636 /* sa_ignore FREED_MEMORY */ 5637 control = TAILQ_FIRST(&inp->read_queue); 5638 if (control == NULL) { 5639 /* 5640 * This could happen if the appender has done the 5641 * increment but has not yet done the tailq insert onto the 5642 * read_queue. 5643 */ 5644 if (hold_rlock == 0) { 5645 SCTP_INP_READ_LOCK(inp); 5646 } 5647 control = TAILQ_FIRST(&inp->read_queue); 5648 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5649 #ifdef INVARIANTS 5650 panic("Huh, its non zero and nothing on control?"); 5651 #endif 5652 so->so_rcv.sb_cc = 0; 5653 } 5654 SCTP_INP_READ_UNLOCK(inp); 5655 hold_rlock = 0; 5656 goto restart; 5657 } 5658 5659 if ((control->length == 0) && 5660 (control->do_not_ref_stcb)) { 5661 /* 5662 * Clean-up code for a freed assoc that left behind a 5663 * pdapi... maybe a peer in EEOR mode that just closed after 5664 * sending and never indicated an EOR. 5665 */ 5666 if (hold_rlock == 0) { 5667 hold_rlock = 1; 5668 SCTP_INP_READ_LOCK(inp); 5669 } 5670 control->held_length = 0; 5671 if (control->data) { 5672 /* Hmm, there is data here ..
fix it up. */ 5673 struct mbuf *m_tmp; 5674 int cnt = 0; 5675 5676 m_tmp = control->data; 5677 while (m_tmp) { 5678 cnt += SCTP_BUF_LEN(m_tmp); 5679 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5680 control->tail_mbuf = m_tmp; 5681 control->end_added = 1; 5682 } 5683 m_tmp = SCTP_BUF_NEXT(m_tmp); 5684 } 5685 control->length = cnt; 5686 } else { 5687 /* remove it */ 5688 TAILQ_REMOVE(&inp->read_queue, control, next); 5689 /* Add back any hidden data */ 5690 sctp_free_remote_addr(control->whoFrom); 5691 sctp_free_a_readq(stcb, control); 5692 } 5693 if (hold_rlock) { 5694 hold_rlock = 0; 5695 SCTP_INP_READ_UNLOCK(inp); 5696 } 5697 goto restart; 5698 } 5699 if ((control->length == 0) && 5700 (control->end_added == 1)) { 5701 /* 5702 * Do we also need to check for (control->pdapi_aborted == 5703 * 1)? 5704 */ 5705 if (hold_rlock == 0) { 5706 hold_rlock = 1; 5707 SCTP_INP_READ_LOCK(inp); 5708 } 5709 TAILQ_REMOVE(&inp->read_queue, control, next); 5710 if (control->data) { 5711 #ifdef INVARIANTS 5712 panic("control->data not null but control->length == 0"); 5713 #else 5714 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n"); 5715 sctp_m_freem(control->data); 5716 control->data = NULL; 5717 #endif 5718 } 5719 if (control->aux_data) { 5720 sctp_m_free(control->aux_data); 5721 control->aux_data = NULL; 5722 } 5723 #ifdef INVARIANTS 5724 if (control->on_strm_q) { 5725 panic("About to free ctl:%p so:%p and its in %d", 5726 control, so, control->on_strm_q); 5727 } 5728 #endif 5729 sctp_free_remote_addr(control->whoFrom); 5730 sctp_free_a_readq(stcb, control); 5731 if (hold_rlock) { 5732 hold_rlock = 0; 5733 SCTP_INP_READ_UNLOCK(inp); 5734 } 5735 goto restart; 5736 } 5737 if (control->length == 0) { 5738 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5739 (filling_sinfo)) { 5740 /* find a more suitable one than this */ 5741 ctl = TAILQ_NEXT(control, next); 5742 while (ctl) { 5743 if ((ctl->stcb != control->stcb) && (ctl->length) && 5744 (ctl->some_taken || 5745 (ctl->spec_flags & M_NOTIFICATION) || 5746 ((ctl->do_not_ref_stcb == 0) && 5747 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5748 ) { 5749 /*- 5750 * If the next entry has a different TCB and data is 5751 * present, and we have already taken some (pdapi), it is a 5752 * notification, or we can ref the tcb and no delivery has 5753 * started on this stream, we take it. Note we allow a 5754 * notification on a different assoc to be delivered. 5755 */ 5756 control = ctl; 5757 goto found_one; 5758 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5759 (ctl->length) && 5760 ((ctl->some_taken) || 5761 ((ctl->do_not_ref_stcb == 0) && 5762 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5763 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5764 /*- 5765 * If we have the same tcb, data is present, and the stream 5766 * interleave feature is on, then if we have taken some 5767 * (pdapi) or we can refer to that tcb AND we have not 5768 * started a delivery for this stream, we can take it. 5769 * Note we do NOT allow a notification on the same assoc to 5770 * be delivered. 5771 */ 5772 control = ctl; 5773 goto found_one; 5774 } 5775 ctl = TAILQ_NEXT(ctl, next); 5776 } 5777 } 5778 /* 5779 * If we reach here, no suitable replacement is available 5780 * <or> fragment interleave is NOT on. So stuff the sb_cc 5781 * into our held count, and it's time to sleep again.
5782 */ 5783 held_length = so->so_rcv.sb_cc; 5784 control->held_length = so->so_rcv.sb_cc; 5785 goto restart; 5786 } 5787 /* Clear the held length since there is something to read */ 5788 control->held_length = 0; 5789 found_one: 5790 /* 5791 * If we reach here, control has a some data for us to read off. 5792 * Note that stcb COULD be NULL. 5793 */ 5794 if (hold_rlock == 0) { 5795 hold_rlock = 1; 5796 SCTP_INP_READ_LOCK(inp); 5797 } 5798 control->some_taken++; 5799 stcb = control->stcb; 5800 if (stcb) { 5801 if ((control->do_not_ref_stcb == 0) && 5802 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5803 if (freecnt_applied == 0) 5804 stcb = NULL; 5805 } else if (control->do_not_ref_stcb == 0) { 5806 /* you can't free it on me please */ 5807 /* 5808 * The lock on the socket buffer protects us so the 5809 * free code will stop. But since we used the 5810 * socketbuf lock and the sender uses the tcb_lock 5811 * to increment, we need to use the atomic add to 5812 * the refcnt 5813 */ 5814 if (freecnt_applied) { 5815 #ifdef INVARIANTS 5816 panic("refcnt already incremented"); 5817 #else 5818 SCTP_PRINTF("refcnt already incremented?\n"); 5819 #endif 5820 } else { 5821 atomic_add_int(&stcb->asoc.refcnt, 1); 5822 freecnt_applied = 1; 5823 } 5824 /* 5825 * Setup to remember how much we have not yet told 5826 * the peer our rwnd has opened up. Note we grab the 5827 * value from the tcb from last time. Note too that 5828 * sack sending clears this when a sack is sent, 5829 * which is fine. Once we hit the rwnd_req, we then 5830 * will go to the sctp_user_rcvd() that will not 5831 * lock until it KNOWs it MUST send a WUP-SACK. 5832 */ 5833 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5834 stcb->freed_by_sorcv_sincelast = 0; 5835 } 5836 } 5837 if (stcb && 5838 ((control->spec_flags & M_NOTIFICATION) == 0) && 5839 control->do_not_ref_stcb == 0) { 5840 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5841 } 5842 5843 /* First lets get off the sinfo and sockaddr info */ 5844 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5845 sinfo->sinfo_stream = control->sinfo_stream; 5846 sinfo->sinfo_ssn = (uint16_t)control->mid; 5847 sinfo->sinfo_flags = control->sinfo_flags; 5848 sinfo->sinfo_ppid = control->sinfo_ppid; 5849 sinfo->sinfo_context = control->sinfo_context; 5850 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5851 sinfo->sinfo_tsn = control->sinfo_tsn; 5852 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5853 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5854 nxt = TAILQ_NEXT(control, next); 5855 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5856 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5857 struct sctp_extrcvinfo *s_extra; 5858 5859 s_extra = (struct sctp_extrcvinfo *)sinfo; 5860 if ((nxt) && 5861 (nxt->length)) { 5862 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5863 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5864 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5865 } 5866 if (nxt->spec_flags & M_NOTIFICATION) { 5867 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5868 } 5869 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5870 s_extra->serinfo_next_length = nxt->length; 5871 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5872 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5873 if (nxt->tail_mbuf != NULL) { 5874 if (nxt->end_added) { 5875 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5876 } 5877 } 5878 } else { 5879 /* 5880 * we explicitly 0 this, since the memcpy 5881 * got some other things 
beyond the older 5882 * sinfo_ that is on the control's structure 5883 * :-D 5884 */ 5885 nxt = NULL; 5886 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5887 s_extra->serinfo_next_aid = 0; 5888 s_extra->serinfo_next_length = 0; 5889 s_extra->serinfo_next_ppid = 0; 5890 s_extra->serinfo_next_stream = 0; 5891 } 5892 } 5893 /* 5894 * update off the real current cum-ack, if we have an stcb. 5895 */ 5896 if ((control->do_not_ref_stcb == 0) && stcb) 5897 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5898 /* 5899 * mask off the high bits, we keep the actual chunk bits in 5900 * there. 5901 */ 5902 sinfo->sinfo_flags &= 0x00ff; 5903 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5904 sinfo->sinfo_flags |= SCTP_UNORDERED; 5905 } 5906 } 5907 #ifdef SCTP_ASOCLOG_OF_TSNS 5908 { 5909 int index, newindex; 5910 struct sctp_pcbtsn_rlog *entry; 5911 5912 do { 5913 index = inp->readlog_index; 5914 newindex = index + 1; 5915 if (newindex >= SCTP_READ_LOG_SIZE) { 5916 newindex = 0; 5917 } 5918 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5919 entry = &inp->readlog[index]; 5920 entry->vtag = control->sinfo_assoc_id; 5921 entry->strm = control->sinfo_stream; 5922 entry->seq = (uint16_t)control->mid; 5923 entry->sz = control->length; 5924 entry->flgs = control->sinfo_flags; 5925 } 5926 #endif 5927 if ((fromlen > 0) && (from != NULL)) { 5928 union sctp_sockstore store; 5929 size_t len; 5930 5931 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5932 #ifdef INET6 5933 case AF_INET6: 5934 len = sizeof(struct sockaddr_in6); 5935 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5936 store.sin6.sin6_port = control->port_from; 5937 break; 5938 #endif 5939 #ifdef INET 5940 case AF_INET: 5941 #ifdef INET6 5942 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5943 len = sizeof(struct sockaddr_in6); 5944 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5945 &store.sin6); 5946 store.sin6.sin6_port = control->port_from; 5947 } else { 5948 len = sizeof(struct sockaddr_in); 5949 store.sin = control->whoFrom->ro._l_addr.sin; 5950 store.sin.sin_port = control->port_from; 5951 } 5952 #else 5953 len = sizeof(struct sockaddr_in); 5954 store.sin = control->whoFrom->ro._l_addr.sin; 5955 store.sin.sin_port = control->port_from; 5956 #endif 5957 break; 5958 #endif 5959 default: 5960 len = 0; 5961 break; 5962 } 5963 memcpy(from, &store, min((size_t)fromlen, len)); 5964 #ifdef INET6 5965 { 5966 struct sockaddr_in6 lsa6, *from6; 5967 5968 from6 = (struct sockaddr_in6 *)from; 5969 sctp_recover_scope_mac(from6, (&lsa6)); 5970 } 5971 #endif 5972 } 5973 if (hold_rlock) { 5974 SCTP_INP_READ_UNLOCK(inp); 5975 hold_rlock = 0; 5976 } 5977 if (hold_sblock) { 5978 SOCKBUF_UNLOCK(&so->so_rcv); 5979 hold_sblock = 0; 5980 } 5981 /* now copy out what data we can */ 5982 if (mp == NULL) { 5983 /* copy out each mbuf in the chain up to length */ 5984 get_more_data: 5985 m = control->data; 5986 while (m) { 5987 /* Move out all we can */ 5988 cp_len = uio->uio_resid; 5989 my_len = SCTP_BUF_LEN(m); 5990 if (cp_len > my_len) { 5991 /* not enough in this buf */ 5992 cp_len = my_len; 5993 } 5994 if (hold_rlock) { 5995 SCTP_INP_READ_UNLOCK(inp); 5996 hold_rlock = 0; 5997 } 5998 if (cp_len > 0) 5999 error = uiomove(mtod(m, char *), (int)cp_len, uio); 6000 /* re-read */ 6001 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 6002 goto release; 6003 } 6004 6005 if ((control->do_not_ref_stcb == 0) && stcb && 6006 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 6007 no_rcv_needed = 1; 6008 } 6009 if 
(error) { 6010 /* error we are out of here */ 6011 goto release; 6012 } 6013 SCTP_INP_READ_LOCK(inp); 6014 hold_rlock = 1; 6015 if (cp_len == SCTP_BUF_LEN(m)) { 6016 if ((SCTP_BUF_NEXT(m) == NULL) && 6017 (control->end_added)) { 6018 out_flags |= MSG_EOR; 6019 if ((control->do_not_ref_stcb == 0) && 6020 (control->stcb != NULL) && 6021 ((control->spec_flags & M_NOTIFICATION) == 0)) 6022 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6023 } 6024 if (control->spec_flags & M_NOTIFICATION) { 6025 out_flags |= MSG_NOTIFICATION; 6026 } 6027 /* we ate up the mbuf */ 6028 if (in_flags & MSG_PEEK) { 6029 /* just looking */ 6030 m = SCTP_BUF_NEXT(m); 6031 copied_so_far += cp_len; 6032 } else { 6033 /* dispose of the mbuf */ 6034 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6035 sctp_sblog(&so->so_rcv, 6036 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6037 } 6038 sctp_sbfree(control, stcb, &so->so_rcv, m); 6039 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6040 sctp_sblog(&so->so_rcv, 6041 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6042 } 6043 copied_so_far += cp_len; 6044 freed_so_far += (uint32_t)cp_len; 6045 freed_so_far += MSIZE; 6046 atomic_subtract_int(&control->length, cp_len); 6047 control->data = sctp_m_free(m); 6048 m = control->data; 6049 /* 6050 * been through it all, must hold sb 6051 * lock ok to null tail 6052 */ 6053 if (control->data == NULL) { 6054 #ifdef INVARIANTS 6055 if ((control->end_added == 0) || 6056 (TAILQ_NEXT(control, next) == NULL)) { 6057 /* 6058 * If the end is not 6059 * added, OR the 6060 * next is NOT null 6061 * we MUST have the 6062 * lock. 6063 */ 6064 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6065 panic("Hmm we don't own the lock?"); 6066 } 6067 } 6068 #endif 6069 control->tail_mbuf = NULL; 6070 #ifdef INVARIANTS 6071 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6072 panic("end_added, nothing left and no MSG_EOR"); 6073 } 6074 #endif 6075 } 6076 } 6077 } else { 6078 /* Do we need to trim the mbuf? */ 6079 if (control->spec_flags & M_NOTIFICATION) { 6080 out_flags |= MSG_NOTIFICATION; 6081 } 6082 if ((in_flags & MSG_PEEK) == 0) { 6083 SCTP_BUF_RESV_UF(m, cp_len); 6084 SCTP_BUF_LEN(m) -= (int)cp_len; 6085 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6086 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6087 } 6088 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 6089 if ((control->do_not_ref_stcb == 0) && 6090 stcb) { 6091 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 6092 } 6093 copied_so_far += cp_len; 6094 freed_so_far += (uint32_t)cp_len; 6095 freed_so_far += MSIZE; 6096 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6097 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6098 SCTP_LOG_SBRESULT, 0); 6099 } 6100 atomic_subtract_int(&control->length, cp_len); 6101 } else { 6102 copied_so_far += cp_len; 6103 } 6104 } 6105 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6106 break; 6107 } 6108 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6109 (control->do_not_ref_stcb == 0) && 6110 (freed_so_far >= rwnd_req)) { 6111 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6112 } 6113 } /* end while(m) */ 6114 /* 6115 * At this point we have looked at it all and we either have 6116 * a MSG_EOR/or read all the user wants... <OR> 6117 * control->length == 0. 
6118 */ 6119 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6120 /* we are done with this control */ 6121 if (control->length == 0) { 6122 if (control->data) { 6123 #ifdef INVARIANTS 6124 panic("control->data not null at read eor?"); 6125 #else 6126 SCTP_PRINTF("Strange, data left in the control buffer... invariants would panic?\n"); 6127 sctp_m_freem(control->data); 6128 control->data = NULL; 6129 #endif 6130 } 6131 done_with_control: 6132 if (hold_rlock == 0) { 6133 SCTP_INP_READ_LOCK(inp); 6134 hold_rlock = 1; 6135 } 6136 TAILQ_REMOVE(&inp->read_queue, control, next); 6137 /* Add back any hidden data */ 6138 if (control->held_length) { 6139 held_length = 0; 6140 control->held_length = 0; 6141 wakeup_read_socket = 1; 6142 } 6143 if (control->aux_data) { 6144 sctp_m_free(control->aux_data); 6145 control->aux_data = NULL; 6146 } 6147 no_rcv_needed = control->do_not_ref_stcb; 6148 sctp_free_remote_addr(control->whoFrom); 6149 control->data = NULL; 6150 #ifdef INVARIANTS 6151 if (control->on_strm_q) { 6152 panic("About to free ctl:%p so:%p and its in %d", 6153 control, so, control->on_strm_q); 6154 } 6155 #endif 6156 sctp_free_a_readq(stcb, control); 6157 control = NULL; 6158 if ((freed_so_far >= rwnd_req) && 6159 (no_rcv_needed == 0)) 6160 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6161 6162 } else { 6163 /* 6164 * The user did not read all of this 6165 * message; turn off the returned MSG_EOR 6166 * since we are leaving more behind on the 6167 * control to read. 6168 */ 6169 #ifdef INVARIANTS 6170 if (control->end_added && 6171 (control->data == NULL) && 6172 (control->tail_mbuf == NULL)) { 6173 panic("Gak, control->length is corrupt?"); 6174 } 6175 #endif 6176 no_rcv_needed = control->do_not_ref_stcb; 6177 out_flags &= ~MSG_EOR; 6178 } 6179 } 6180 if (out_flags & MSG_EOR) { 6181 goto release; 6182 } 6183 if ((uio->uio_resid == 0) || 6184 ((in_eeor_mode) && 6185 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6186 goto release; 6187 } 6188 /* 6189 * If I hit here, the receiver wants more and this message is 6190 * NOT done (pd-api). So, two questions: can we block? If not, 6191 * we are done. Did the user NOT set MSG_WAITALL? 6192 */ 6193 if (block_allowed == 0) { 6194 goto release; 6195 } 6196 /* 6197 * We need to wait for more data. A few things: - We don't 6198 * sbunlock(), so we don't let someone else read. - We 6199 * must be sure to account for the case where what is added 6200 * is NOT for our control when we wake up. 6201 */ 6202 6203 /* 6204 * Do we need to tell the transport a rwnd update might be 6205 * needed before we go to sleep?
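 * (Only when at least rwnd_req bytes have been freed, the stcb may still be referenced, and no update has been issued already.)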
6206 */ 6207 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6208 ((freed_so_far >= rwnd_req) && 6209 (control->do_not_ref_stcb == 0) && 6210 (no_rcv_needed == 0))) { 6211 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6212 } 6213 wait_some_more: 6214 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6215 goto release; 6216 } 6217 6218 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6219 goto release; 6220 6221 if (hold_rlock == 1) { 6222 SCTP_INP_READ_UNLOCK(inp); 6223 hold_rlock = 0; 6224 } 6225 if (hold_sblock == 0) { 6226 SOCKBUF_LOCK(&so->so_rcv); 6227 hold_sblock = 1; 6228 } 6229 if ((copied_so_far) && (control->length == 0) && 6230 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6231 goto release; 6232 } 6233 if (so->so_rcv.sb_cc <= control->held_length) { 6234 error = sbwait(&so->so_rcv); 6235 if (error) { 6236 goto release; 6237 } 6238 control->held_length = 0; 6239 } 6240 if (hold_sblock) { 6241 SOCKBUF_UNLOCK(&so->so_rcv); 6242 hold_sblock = 0; 6243 } 6244 if (control->length == 0) { 6245 /* still nothing here */ 6246 if (control->end_added == 1) { 6247 /* he aborted, or is done i.e.did a shutdown */ 6248 out_flags |= MSG_EOR; 6249 if (control->pdapi_aborted) { 6250 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6251 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6252 6253 out_flags |= MSG_TRUNC; 6254 } else { 6255 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6256 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6257 } 6258 goto done_with_control; 6259 } 6260 if (so->so_rcv.sb_cc > held_length) { 6261 control->held_length = so->so_rcv.sb_cc; 6262 held_length = 0; 6263 } 6264 goto wait_some_more; 6265 } else if (control->data == NULL) { 6266 /* 6267 * we must re-sync since data is probably being 6268 * added 6269 */ 6270 SCTP_INP_READ_LOCK(inp); 6271 if ((control->length > 0) && (control->data == NULL)) { 6272 /* 6273 * big trouble.. we have the lock and its 6274 * corrupt? 6275 */ 6276 #ifdef INVARIANTS 6277 panic("Impossible data==NULL length !=0"); 6278 #endif 6279 out_flags |= MSG_EOR; 6280 out_flags |= MSG_TRUNC; 6281 control->length = 0; 6282 SCTP_INP_READ_UNLOCK(inp); 6283 goto done_with_control; 6284 } 6285 SCTP_INP_READ_UNLOCK(inp); 6286 /* We will fall around to get more data */ 6287 } 6288 goto get_more_data; 6289 } else { 6290 /*- 6291 * Give caller back the mbuf chain, 6292 * store in uio_resid the length 6293 */ 6294 wakeup_read_socket = 0; 6295 if ((control->end_added == 0) || 6296 (TAILQ_NEXT(control, next) == NULL)) { 6297 /* Need to get rlock */ 6298 if (hold_rlock == 0) { 6299 SCTP_INP_READ_LOCK(inp); 6300 hold_rlock = 1; 6301 } 6302 } 6303 if (control->end_added) { 6304 out_flags |= MSG_EOR; 6305 if ((control->do_not_ref_stcb == 0) && 6306 (control->stcb != NULL) && 6307 ((control->spec_flags & M_NOTIFICATION) == 0)) 6308 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6309 } 6310 if (control->spec_flags & M_NOTIFICATION) { 6311 out_flags |= MSG_NOTIFICATION; 6312 } 6313 uio->uio_resid = control->length; 6314 *mp = control->data; 6315 m = control->data; 6316 while (m) { 6317 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6318 sctp_sblog(&so->so_rcv, 6319 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6320 } 6321 sctp_sbfree(control, stcb, &so->so_rcv, m); 6322 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6323 freed_so_far += MSIZE; 6324 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6325 sctp_sblog(&so->so_rcv, 6326 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6327 } 6328 m = SCTP_BUF_NEXT(m); 6329 } 6330 control->data = control->tail_mbuf = NULL; 6331 control->length = 0; 6332 if (out_flags & MSG_EOR) { 6333 /* Done with this control */ 6334 goto done_with_control; 6335 } 6336 } 6337 release: 6338 if (hold_rlock == 1) { 6339 SCTP_INP_READ_UNLOCK(inp); 6340 hold_rlock = 0; 6341 } 6342 if (hold_sblock == 1) { 6343 SOCKBUF_UNLOCK(&so->so_rcv); 6344 hold_sblock = 0; 6345 } 6346 6347 sbunlock(&so->so_rcv); 6348 sockbuf_lock = 0; 6349 6350 release_unlocked: 6351 if (hold_sblock) { 6352 SOCKBUF_UNLOCK(&so->so_rcv); 6353 hold_sblock = 0; 6354 } 6355 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6356 if ((freed_so_far >= rwnd_req) && 6357 (control && (control->do_not_ref_stcb == 0)) && 6358 (no_rcv_needed == 0)) 6359 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6360 } 6361 out: 6362 if (msg_flags) { 6363 *msg_flags = out_flags; 6364 } 6365 if (((out_flags & MSG_EOR) == 0) && 6366 ((in_flags & MSG_PEEK) == 0) && 6367 (sinfo) && 6368 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6369 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6370 struct sctp_extrcvinfo *s_extra; 6371 6372 s_extra = (struct sctp_extrcvinfo *)sinfo; 6373 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6374 } 6375 if (hold_rlock == 1) { 6376 SCTP_INP_READ_UNLOCK(inp); 6377 } 6378 if (hold_sblock) { 6379 SOCKBUF_UNLOCK(&so->so_rcv); 6380 } 6381 if (sockbuf_lock) { 6382 sbunlock(&so->so_rcv); 6383 } 6384 6385 if (freecnt_applied) { 6386 /* 6387 * The lock on the socket buffer protects us so the free 6388 * code will stop. But since we used the socketbuf lock and 6389 * the sender uses the tcb_lock to increment, we need to use 6390 * the atomic add to the refcnt. 6391 */ 6392 if (stcb == NULL) { 6393 #ifdef INVARIANTS 6394 panic("stcb for refcnt has gone NULL?"); 6395 goto stage_left; 6396 #else 6397 goto stage_left; 6398 #endif 6399 } 6400 /* Save the value back for next time */ 6401 stcb->freed_by_sorcv_sincelast = freed_so_far; 6402 atomic_add_int(&stcb->asoc.refcnt, -1); 6403 } 6404 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6405 if (stcb) { 6406 sctp_misc_ints(SCTP_SORECV_DONE, 6407 freed_so_far, 6408 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6409 stcb->asoc.my_rwnd, 6410 so->so_rcv.sb_cc); 6411 } else { 6412 sctp_misc_ints(SCTP_SORECV_DONE, 6413 freed_so_far, 6414 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6415 0, 6416 so->so_rcv.sb_cc); 6417 } 6418 } 6419 stage_left: 6420 if (wakeup_read_socket) { 6421 sctp_sorwakeup(inp, so); 6422 } 6423 return (error); 6424 } 6425 6426 #ifdef SCTP_MBUF_LOGGING 6427 struct mbuf * 6428 sctp_m_free(struct mbuf *m) 6429 { 6430 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6431 sctp_log_mb(m, SCTP_MBUF_IFREE); 6432 } 6433 return (m_free(m)); 6434 } 6435 6436 void 6437 sctp_m_freem(struct mbuf *mb) 6438 { 6439 while (mb != NULL) 6440 mb = sctp_m_free(mb); 6441 } 6442 6443 #endif 6444 6445 int 6446 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6447 { 6448 /* 6449 * Given a local address. For all associations that holds the 6450 * address, request a peer-set-primary. 
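 * The request is not handled inline: an SCTP_SET_PRIM_ADDR work item is queued on the address workqueue and the ADDR_WQ timer wakes the iterator to process it.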
6451 */ 6452 struct sctp_ifa *ifa; 6453 struct sctp_laddr *wi; 6454 6455 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6456 if (ifa == NULL) { 6457 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6458 return (EADDRNOTAVAIL); 6459 } 6460 /* 6461 * Now that we have the ifa we must awaken the iterator with this 6462 * message. 6463 */ 6464 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6465 if (wi == NULL) { 6466 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6467 return (ENOMEM); 6468 } 6469 /* Now incr the count and int wi structure */ 6470 SCTP_INCR_LADDR_COUNT(); 6471 memset(wi, 0, sizeof(*wi)); 6472 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6473 wi->ifa = ifa; 6474 wi->action = SCTP_SET_PRIM_ADDR; 6475 atomic_add_int(&ifa->refcount, 1); 6476 6477 /* Now add it to the work queue */ 6478 SCTP_WQ_ADDR_LOCK(); 6479 /* 6480 * Should this really be a tailq? As it is we will process the 6481 * newest first :-0 6482 */ 6483 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6484 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6485 (struct sctp_inpcb *)NULL, 6486 (struct sctp_tcb *)NULL, 6487 (struct sctp_nets *)NULL); 6488 SCTP_WQ_ADDR_UNLOCK(); 6489 return (0); 6490 } 6491 6492 int 6493 sctp_soreceive(struct socket *so, 6494 struct sockaddr **psa, 6495 struct uio *uio, 6496 struct mbuf **mp0, 6497 struct mbuf **controlp, 6498 int *flagsp) 6499 { 6500 int error, fromlen; 6501 uint8_t sockbuf[256]; 6502 struct sockaddr *from; 6503 struct sctp_extrcvinfo sinfo; 6504 int filling_sinfo = 1; 6505 int flags; 6506 struct sctp_inpcb *inp; 6507 6508 inp = (struct sctp_inpcb *)so->so_pcb; 6509 /* pickup the assoc we are reading from */ 6510 if (inp == NULL) { 6511 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6512 return (EINVAL); 6513 } 6514 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6515 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6516 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6517 (controlp == NULL)) { 6518 /* user does not want the sndrcv ctl */ 6519 filling_sinfo = 0; 6520 } 6521 if (psa) { 6522 from = (struct sockaddr *)sockbuf; 6523 fromlen = sizeof(sockbuf); 6524 from->sa_len = 0; 6525 } else { 6526 from = NULL; 6527 fromlen = 0; 6528 } 6529 6530 if (filling_sinfo) { 6531 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6532 } 6533 if (flagsp != NULL) { 6534 flags = *flagsp; 6535 } else { 6536 flags = 0; 6537 } 6538 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6539 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6540 if (flagsp != NULL) { 6541 *flagsp = flags; 6542 } 6543 if (controlp != NULL) { 6544 /* copy back the sinfo in a CMSG format */ 6545 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6546 *controlp = sctp_build_ctl_nchunk(inp, 6547 (struct sctp_sndrcvinfo *)&sinfo); 6548 } else { 6549 *controlp = NULL; 6550 } 6551 } 6552 if (psa) { 6553 /* copy back the address info */ 6554 if (from && from->sa_len) { 6555 *psa = sodupsockaddr(from, M_NOWAIT); 6556 } else { 6557 *psa = NULL; 6558 } 6559 } 6560 return (error); 6561 } 6562 6563 int 6564 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6565 int totaddr, int *error) 6566 { 6567 int added = 0; 6568 int i; 6569 struct sctp_inpcb *inp; 6570 struct sockaddr *sa; 6571 size_t incr = 0; 6572 #ifdef INET 6573 struct sockaddr_in *sin; 6574 #endif 6575 #ifdef INET6 6576 struct sockaddr_in6 *sin6; 6577 #endif 6578 6579 sa = addr; 6580 inp = 
stcb->sctp_ep; 6581 *error = 0; 6582 for (i = 0; i < totaddr; i++) { 6583 switch (sa->sa_family) { 6584 #ifdef INET 6585 case AF_INET: 6586 incr = sizeof(struct sockaddr_in); 6587 sin = (struct sockaddr_in *)sa; 6588 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6589 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6590 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6591 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6592 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6593 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6594 *error = EINVAL; 6595 goto out_now; 6596 } 6597 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6598 SCTP_DONOT_SETSCOPE, 6599 SCTP_ADDR_IS_CONFIRMED)) { 6600 /* assoc gone no un-lock */ 6601 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6602 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6603 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6604 *error = ENOBUFS; 6605 goto out_now; 6606 } 6607 added++; 6608 break; 6609 #endif 6610 #ifdef INET6 6611 case AF_INET6: 6612 incr = sizeof(struct sockaddr_in6); 6613 sin6 = (struct sockaddr_in6 *)sa; 6614 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6615 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6616 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6617 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6618 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6619 *error = EINVAL; 6620 goto out_now; 6621 } 6622 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6623 SCTP_DONOT_SETSCOPE, 6624 SCTP_ADDR_IS_CONFIRMED)) { 6625 /* assoc gone no un-lock */ 6626 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6627 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6628 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6629 *error = ENOBUFS; 6630 goto out_now; 6631 } 6632 added++; 6633 break; 6634 #endif 6635 default: 6636 break; 6637 } 6638 sa = (struct sockaddr *)((caddr_t)sa + incr); 6639 } 6640 out_now: 6641 return (added); 6642 } 6643 6644 int 6645 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6646 unsigned int totaddr, 6647 unsigned int *num_v4, unsigned int *num_v6, 6648 unsigned int limit) 6649 { 6650 struct sockaddr *sa; 6651 struct sctp_tcb *stcb; 6652 unsigned int incr, at, i; 6653 6654 at = 0; 6655 sa = addr; 6656 *num_v6 = *num_v4 = 0; 6657 /* account and validate addresses */ 6658 if (totaddr == 0) { 6659 return (EINVAL); 6660 } 6661 for (i = 0; i < totaddr; i++) { 6662 if (at + sizeof(struct sockaddr) > limit) { 6663 return (EINVAL); 6664 } 6665 switch (sa->sa_family) { 6666 #ifdef INET 6667 case AF_INET: 6668 incr = (unsigned int)sizeof(struct sockaddr_in); 6669 if (sa->sa_len != incr) { 6670 return (EINVAL); 6671 } 6672 (*num_v4) += 1; 6673 break; 6674 #endif 6675 #ifdef INET6 6676 case AF_INET6: 6677 { 6678 struct sockaddr_in6 *sin6; 6679 6680 sin6 = (struct sockaddr_in6 *)sa; 6681 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6682 /* Must be non-mapped for connectx */ 6683 return (EINVAL); 6684 } 6685 incr = (unsigned int)sizeof(struct sockaddr_in6); 6686 if (sa->sa_len != incr) { 6687 return (EINVAL); 6688 } 6689 (*num_v6) += 1; 6690 break; 6691 } 6692 #endif 6693 default: 6694 return (EINVAL); 6695 } 6696 if ((at + incr) > limit) { 6697 return (EINVAL); 6698 } 6699 SCTP_INP_INCR_REF(inp); 6700 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6701 if (stcb != NULL) { 6702 SCTP_TCB_UNLOCK(stcb); 6703 return (EALREADY); 6704 } else { 6705 SCTP_INP_DECR_REF(inp); 6706 } 6707 at += incr; 6708 sa = (struct sockaddr *)((caddr_t)sa + 
incr); 6709 } 6710 return (0); 6711 } 6712 6713 /* 6714 * sctp_bindx(ADD) for one address. 6715 * assumes all arguments are valid/checked by caller. 6716 */ 6717 void 6718 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6719 struct sockaddr *sa, uint32_t vrf_id, int *error, 6720 void *p) 6721 { 6722 #if defined(INET) && defined(INET6) 6723 struct sockaddr_in sin; 6724 #endif 6725 #ifdef INET6 6726 struct sockaddr_in6 *sin6; 6727 #endif 6728 #ifdef INET 6729 struct sockaddr_in *sinp; 6730 #endif 6731 struct sockaddr *addr_to_use; 6732 struct sctp_inpcb *lep; 6733 uint16_t port; 6734 6735 /* see if we're bound all already! */ 6736 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6737 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6738 *error = EINVAL; 6739 return; 6740 } 6741 switch (sa->sa_family) { 6742 #ifdef INET6 6743 case AF_INET6: 6744 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6745 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6746 *error = EINVAL; 6747 return; 6748 } 6749 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6750 /* can only bind v6 on PF_INET6 sockets */ 6751 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6752 *error = EINVAL; 6753 return; 6754 } 6755 sin6 = (struct sockaddr_in6 *)sa; 6756 port = sin6->sin6_port; 6757 #ifdef INET 6758 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6759 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6760 SCTP_IPV6_V6ONLY(inp)) { 6761 /* can't bind v4-mapped on PF_INET sockets */ 6762 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6763 *error = EINVAL; 6764 return; 6765 } 6766 in6_sin6_2_sin(&sin, sin6); 6767 addr_to_use = (struct sockaddr *)&sin; 6768 } else { 6769 addr_to_use = sa; 6770 } 6771 #else 6772 addr_to_use = sa; 6773 #endif 6774 break; 6775 #endif 6776 #ifdef INET 6777 case AF_INET: 6778 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6779 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6780 *error = EINVAL; 6781 return; 6782 } 6783 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6784 SCTP_IPV6_V6ONLY(inp)) { 6785 /* can't bind v4 on PF_INET sockets */ 6786 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6787 *error = EINVAL; 6788 return; 6789 } 6790 sinp = (struct sockaddr_in *)sa; 6791 port = sinp->sin_port; 6792 addr_to_use = sa; 6793 break; 6794 #endif 6795 default: 6796 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6797 *error = EINVAL; 6798 return; 6799 } 6800 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6801 if (p == NULL) { 6802 /* Can't get proc for Net/Open BSD */ 6803 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6804 *error = EINVAL; 6805 return; 6806 } 6807 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6808 return; 6809 } 6810 /* Validate the incoming port. */ 6811 if ((port != 0) && (port != inp->sctp_lport)) { 6812 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6813 *error = EINVAL; 6814 return; 6815 } 6816 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6817 if (lep == NULL) { 6818 /* add the address */ 6819 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6820 SCTP_ADD_IP_ADDRESS, vrf_id); 6821 } else { 6822 if (lep != inp) { 6823 *error = EADDRINUSE; 6824 } 6825 SCTP_INP_DECR_REF(lep); 6826 } 6827 } 6828 6829 /* 6830 * sctp_bindx(DELETE) for one address. 6831 * assumes all arguments are valid/checked by caller. 
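 * A v4-mapped IPv6 address is converted to a plain AF_INET address before the request is handed to sctp_addr_mgmt_ep_sa(), mirroring the ADD case above.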
6832 */ 6833 void 6834 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6835 struct sockaddr *sa, uint32_t vrf_id, int *error) 6836 { 6837 struct sockaddr *addr_to_use; 6838 #if defined(INET) && defined(INET6) 6839 struct sockaddr_in6 *sin6; 6840 struct sockaddr_in sin; 6841 #endif 6842 6843 /* see if we're bound all already! */ 6844 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6845 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6846 *error = EINVAL; 6847 return; 6848 } 6849 switch (sa->sa_family) { 6850 #ifdef INET6 6851 case AF_INET6: 6852 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6853 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6854 *error = EINVAL; 6855 return; 6856 } 6857 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6858 /* can only bind v6 on PF_INET6 sockets */ 6859 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6860 *error = EINVAL; 6861 return; 6862 } 6863 #ifdef INET 6864 sin6 = (struct sockaddr_in6 *)sa; 6865 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6866 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6867 SCTP_IPV6_V6ONLY(inp)) { 6868 /* can't bind mapped-v4 on PF_INET sockets */ 6869 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6870 *error = EINVAL; 6871 return; 6872 } 6873 in6_sin6_2_sin(&sin, sin6); 6874 addr_to_use = (struct sockaddr *)&sin; 6875 } else { 6876 addr_to_use = sa; 6877 } 6878 #else 6879 addr_to_use = sa; 6880 #endif 6881 break; 6882 #endif 6883 #ifdef INET 6884 case AF_INET: 6885 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6886 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6887 *error = EINVAL; 6888 return; 6889 } 6890 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6891 SCTP_IPV6_V6ONLY(inp)) { 6892 /* can't bind v4 on PF_INET sockets */ 6893 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6894 *error = EINVAL; 6895 return; 6896 } 6897 addr_to_use = sa; 6898 break; 6899 #endif 6900 default: 6901 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6902 *error = EINVAL; 6903 return; 6904 } 6905 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6906 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6907 vrf_id); 6908 } 6909 6910 /* 6911 * returns the valid local address count for an assoc, taking into account 6912 * all scoping rules 6913 */ 6914 int 6915 sctp_local_addr_count(struct sctp_tcb *stcb) 6916 { 6917 int loopback_scope; 6918 #if defined(INET) 6919 int ipv4_local_scope, ipv4_addr_legal; 6920 #endif 6921 #if defined(INET6) 6922 int local_scope, site_scope, ipv6_addr_legal; 6923 #endif 6924 struct sctp_vrf *vrf; 6925 struct sctp_ifn *sctp_ifn; 6926 struct sctp_ifa *sctp_ifa; 6927 int count = 0; 6928 6929 /* Turn on all the appropriate scopes */ 6930 loopback_scope = stcb->asoc.scope.loopback_scope; 6931 #if defined(INET) 6932 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6933 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6934 #endif 6935 #if defined(INET6) 6936 local_scope = stcb->asoc.scope.local_scope; 6937 site_scope = stcb->asoc.scope.site_scope; 6938 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6939 #endif 6940 SCTP_IPI_ADDR_RLOCK(); 6941 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6942 if (vrf == NULL) { 6943 /* no vrf, no addresses */ 6944 SCTP_IPI_ADDR_RUNLOCK(); 6945 return (0); 6946 } 6947 6948 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6949 /* 6950 * bound all case: go through all ifns on the vrf 6951 */ 6952 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6953 if ((loopback_scope == 0) && 6954 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6955 continue; 6956 } 6957 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6958 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6959 continue; 6960 switch (sctp_ifa->address.sa.sa_family) { 6961 #ifdef INET 6962 case AF_INET: 6963 if (ipv4_addr_legal) { 6964 struct sockaddr_in *sin; 6965 6966 sin = &sctp_ifa->address.sin; 6967 if (sin->sin_addr.s_addr == 0) { 6968 /* 6969 * skip unspecified 6970 * addrs 6971 */ 6972 continue; 6973 } 6974 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 6975 &sin->sin_addr) != 0) { 6976 continue; 6977 } 6978 if ((ipv4_local_scope == 0) && 6979 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 6980 continue; 6981 } 6982 /* count this one */ 6983 count++; 6984 } else { 6985 continue; 6986 } 6987 break; 6988 #endif 6989 #ifdef INET6 6990 case AF_INET6: 6991 if (ipv6_addr_legal) { 6992 struct sockaddr_in6 *sin6; 6993 6994 sin6 = &sctp_ifa->address.sin6; 6995 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 6996 continue; 6997 } 6998 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 6999 &sin6->sin6_addr) != 0) { 7000 continue; 7001 } 7002 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 7003 if (local_scope == 0) 7004 continue; 7005 if (sin6->sin6_scope_id == 0) { 7006 if (sa6_recoverscope(sin6) != 0) 7007 /* 7008 * 7009 * bad 7010 * link 7011 * 7012 * local 7013 * 7014 * address 7015 */ 7016 continue; 7017 } 7018 } 7019 if ((site_scope == 0) && 7020 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7021 continue; 7022 } 7023 /* count this one */ 7024 count++; 7025 } 7026 break; 7027 #endif 7028 default: 7029 /* TSNH */ 7030 break; 7031 } 7032 } 7033 } 7034 } else { 7035 /* 7036 * subset bound case 7037 */ 7038 struct sctp_laddr *laddr; 7039 7040 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7041 sctp_nxt_addr) { 7042 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7043 continue; 7044 } 7045 /* count this one */ 7046 count++; 7047 } 7048 } 7049 SCTP_IPI_ADDR_RUNLOCK(); 7050 return (count); 7051 } 7052 7053 #if defined(SCTP_LOCAL_TRACE_BUF) 7054 7055 void 7056 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7057 { 7058 uint32_t saveindex, newindex; 7059 7060 do { 7061 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7062 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7063 newindex = 1; 7064 } else { 7065 newindex = saveindex + 1; 7066 } 7067 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7068 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7069 saveindex = 0; 7070 } 7071 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7072 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7073 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7074 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7075 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7076 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7077 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7078 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7079 } 7080 7081 #endif 7082 static void 7083 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7084 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7085 { 7086 struct ip *iph; 7087 #ifdef INET6 7088 struct ip6_hdr *ip6; 7089 #endif 7090 struct mbuf *sp, *last; 7091 struct udphdr *uhdr; 7092 uint16_t port; 7093 7094 if ((m->m_flags & M_PKTHDR) == 0) { 7095 /* Can't handle one that is not a pkt hdr */ 7096 goto out; 7097 } 7098 /* Pull the src port */ 7099 iph = mtod(m, struct ip *); 7100 uhdr = (struct udphdr *)((caddr_t)iph + off); 7101 port = uhdr->uh_sport; 7102 /* 7103 * Split out the mbuf chain. Leave the IP header in m, place the 7104 * rest in the sp. 7105 */ 7106 sp = m_split(m, off, M_NOWAIT); 7107 if (sp == NULL) { 7108 /* Gak, drop packet, we can't do a split */ 7109 goto out; 7110 } 7111 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7112 /* Gak, packet can't have an SCTP header in it - too small */ 7113 m_freem(sp); 7114 goto out; 7115 } 7116 /* Now pull up the UDP header and SCTP header together */ 7117 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7118 if (sp == NULL) { 7119 /* Gak pullup failed */ 7120 goto out; 7121 } 7122 /* Trim out the UDP header */ 7123 m_adj(sp, sizeof(struct udphdr)); 7124 7125 /* Now reconstruct the mbuf chain */ 7126 for (last = m; last->m_next; last = last->m_next); 7127 last->m_next = sp; 7128 m->m_pkthdr.len += sp->m_pkthdr.len; 7129 /* 7130 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7131 * checksum and it was valid. Since CSUM_DATA_VALID == 7132 * CSUM_SCTP_VALID this would imply that the HW also verified the 7133 * SCTP checksum. Therefore, clear the bit. 
7134 */ 7135 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7136 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7137 m->m_pkthdr.len, 7138 if_name(m->m_pkthdr.rcvif), 7139 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7140 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7141 iph = mtod(m, struct ip *); 7142 switch (iph->ip_v) { 7143 #ifdef INET 7144 case IPVERSION: 7145 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7146 sctp_input_with_port(m, off, port); 7147 break; 7148 #endif 7149 #ifdef INET6 7150 case IPV6_VERSION >> 4: 7151 ip6 = mtod(m, struct ip6_hdr *); 7152 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7153 sctp6_input_with_port(&m, &off, port); 7154 break; 7155 #endif 7156 default: 7157 goto out; 7158 break; 7159 } 7160 return; 7161 out: 7162 m_freem(m); 7163 } 7164 7165 #ifdef INET 7166 static void 7167 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7168 { 7169 struct ip *outer_ip, *inner_ip; 7170 struct sctphdr *sh; 7171 struct icmp *icmp; 7172 struct udphdr *udp; 7173 struct sctp_inpcb *inp; 7174 struct sctp_tcb *stcb; 7175 struct sctp_nets *net; 7176 struct sctp_init_chunk *ch; 7177 struct sockaddr_in src, dst; 7178 uint8_t type, code; 7179 7180 inner_ip = (struct ip *)vip; 7181 icmp = (struct icmp *)((caddr_t)inner_ip - 7182 (sizeof(struct icmp) - sizeof(struct ip))); 7183 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7184 if (ntohs(outer_ip->ip_len) < 7185 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7186 return; 7187 } 7188 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7189 sh = (struct sctphdr *)(udp + 1); 7190 memset(&src, 0, sizeof(struct sockaddr_in)); 7191 src.sin_family = AF_INET; 7192 src.sin_len = sizeof(struct sockaddr_in); 7193 src.sin_port = sh->src_port; 7194 src.sin_addr = inner_ip->ip_src; 7195 memset(&dst, 0, sizeof(struct sockaddr_in)); 7196 dst.sin_family = AF_INET; 7197 dst.sin_len = sizeof(struct sockaddr_in); 7198 dst.sin_port = sh->dest_port; 7199 dst.sin_addr = inner_ip->ip_dst; 7200 /* 7201 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7202 * holds our local endpoint address. Thus we reverse the dst and the 7203 * src in the lookup. 7204 */ 7205 inp = NULL; 7206 net = NULL; 7207 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7208 (struct sockaddr *)&src, 7209 &inp, &net, 1, 7210 SCTP_DEFAULT_VRFID); 7211 if ((stcb != NULL) && 7212 (net != NULL) && 7213 (inp != NULL)) { 7214 /* Check the UDP port numbers */ 7215 if ((udp->uh_dport != net->port) || 7216 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7217 SCTP_TCB_UNLOCK(stcb); 7218 return; 7219 } 7220 /* Check the verification tag */ 7221 if (ntohl(sh->v_tag) != 0) { 7222 /* 7223 * This must be the verification tag used for 7224 * sending out packets. We don't consider packets 7225 * reflecting the verification tag. 7226 */ 7227 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7228 SCTP_TCB_UNLOCK(stcb); 7229 return; 7230 } 7231 } else { 7232 if (ntohs(outer_ip->ip_len) >= 7233 sizeof(struct ip) + 7234 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7235 /* 7236 * In this case we can check if we got an 7237 * INIT chunk and if the initiate tag 7238 * matches. 
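 * (An INIT is carried in a packet with a zero verification tag, so its initiate_tag is the only field we can match against our own my_vtag.)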
7239 */ 7240 ch = (struct sctp_init_chunk *)(sh + 1); 7241 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7242 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7243 SCTP_TCB_UNLOCK(stcb); 7244 return; 7245 } 7246 } else { 7247 SCTP_TCB_UNLOCK(stcb); 7248 return; 7249 } 7250 } 7251 type = icmp->icmp_type; 7252 code = icmp->icmp_code; 7253 if ((type == ICMP_UNREACH) && 7254 (code == ICMP_UNREACH_PORT)) { 7255 code = ICMP_UNREACH_PROTOCOL; 7256 } 7257 sctp_notify(inp, stcb, net, type, code, 7258 ntohs(inner_ip->ip_len), 7259 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7260 } else { 7261 if ((stcb == NULL) && (inp != NULL)) { 7262 /* reduce ref-count */ 7263 SCTP_INP_WLOCK(inp); 7264 SCTP_INP_DECR_REF(inp); 7265 SCTP_INP_WUNLOCK(inp); 7266 } 7267 if (stcb) { 7268 SCTP_TCB_UNLOCK(stcb); 7269 } 7270 } 7271 return; 7272 } 7273 #endif 7274 7275 #ifdef INET6 7276 static void 7277 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7278 { 7279 struct ip6ctlparam *ip6cp; 7280 struct sctp_inpcb *inp; 7281 struct sctp_tcb *stcb; 7282 struct sctp_nets *net; 7283 struct sctphdr sh; 7284 struct udphdr udp; 7285 struct sockaddr_in6 src, dst; 7286 uint8_t type, code; 7287 7288 ip6cp = (struct ip6ctlparam *)d; 7289 /* 7290 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7291 */ 7292 if (ip6cp->ip6c_m == NULL) { 7293 return; 7294 } 7295 /* 7296 * Check if we can safely examine the ports and the verification tag 7297 * of the SCTP common header. 7298 */ 7299 if (ip6cp->ip6c_m->m_pkthdr.len < 7300 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7301 return; 7302 } 7303 /* Copy out the UDP header. */ 7304 memset(&udp, 0, sizeof(struct udphdr)); 7305 m_copydata(ip6cp->ip6c_m, 7306 ip6cp->ip6c_off, 7307 sizeof(struct udphdr), 7308 (caddr_t)&udp); 7309 /* Copy out the port numbers and the verification tag. */ 7310 memset(&sh, 0, sizeof(struct sctphdr)); 7311 m_copydata(ip6cp->ip6c_m, 7312 ip6cp->ip6c_off + sizeof(struct udphdr), 7313 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7314 (caddr_t)&sh); 7315 memset(&src, 0, sizeof(struct sockaddr_in6)); 7316 src.sin6_family = AF_INET6; 7317 src.sin6_len = sizeof(struct sockaddr_in6); 7318 src.sin6_port = sh.src_port; 7319 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7320 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7321 return; 7322 } 7323 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7324 dst.sin6_family = AF_INET6; 7325 dst.sin6_len = sizeof(struct sockaddr_in6); 7326 dst.sin6_port = sh.dest_port; 7327 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7328 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7329 return; 7330 } 7331 inp = NULL; 7332 net = NULL; 7333 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7334 (struct sockaddr *)&src, 7335 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7336 if ((stcb != NULL) && 7337 (net != NULL) && 7338 (inp != NULL)) { 7339 /* Check the UDP port numbers */ 7340 if ((udp.uh_dport != net->port) || 7341 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7342 SCTP_TCB_UNLOCK(stcb); 7343 return; 7344 } 7345 /* Check the verification tag */ 7346 if (ntohl(sh.v_tag) != 0) { 7347 /* 7348 * This must be the verification tag used for 7349 * sending out packets. We don't consider packets 7350 * reflecting the verification tag. 
7351 */ 7352 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7353 SCTP_TCB_UNLOCK(stcb); 7354 return; 7355 } 7356 } else { 7357 if (ip6cp->ip6c_m->m_pkthdr.len >= 7358 ip6cp->ip6c_off + sizeof(struct udphdr) + 7359 sizeof(struct sctphdr) + 7360 sizeof(struct sctp_chunkhdr) + 7361 offsetof(struct sctp_init, a_rwnd)) { 7362 /* 7363 * In this case we can check if we got an 7364 * INIT chunk and if the initiate tag 7365 * matches. 7366 */ 7367 uint32_t initiate_tag; 7368 uint8_t chunk_type; 7369 7370 m_copydata(ip6cp->ip6c_m, 7371 ip6cp->ip6c_off + 7372 sizeof(struct udphdr) + 7373 sizeof(struct sctphdr), 7374 sizeof(uint8_t), 7375 (caddr_t)&chunk_type); 7376 m_copydata(ip6cp->ip6c_m, 7377 ip6cp->ip6c_off + 7378 sizeof(struct udphdr) + 7379 sizeof(struct sctphdr) + 7380 sizeof(struct sctp_chunkhdr), 7381 sizeof(uint32_t), 7382 (caddr_t)&initiate_tag); 7383 if ((chunk_type != SCTP_INITIATION) || 7384 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7385 SCTP_TCB_UNLOCK(stcb); 7386 return; 7387 } 7388 } else { 7389 SCTP_TCB_UNLOCK(stcb); 7390 return; 7391 } 7392 } 7393 type = ip6cp->ip6c_icmp6->icmp6_type; 7394 code = ip6cp->ip6c_icmp6->icmp6_code; 7395 if ((type == ICMP6_DST_UNREACH) && 7396 (code == ICMP6_DST_UNREACH_NOPORT)) { 7397 type = ICMP6_PARAM_PROB; 7398 code = ICMP6_PARAMPROB_NEXTHEADER; 7399 } 7400 sctp6_notify(inp, stcb, net, type, code, 7401 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7402 } else { 7403 if ((stcb == NULL) && (inp != NULL)) { 7404 /* reduce inp's ref-count */ 7405 SCTP_INP_WLOCK(inp); 7406 SCTP_INP_DECR_REF(inp); 7407 SCTP_INP_WUNLOCK(inp); 7408 } 7409 if (stcb) { 7410 SCTP_TCB_UNLOCK(stcb); 7411 } 7412 } 7413 } 7414 #endif 7415 7416 void 7417 sctp_over_udp_stop(void) 7418 { 7419 /* 7420 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7421 * for writting! 7422 */ 7423 #ifdef INET 7424 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7425 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7426 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7427 } 7428 #endif 7429 #ifdef INET6 7430 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7431 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7432 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7433 } 7434 #endif 7435 } 7436 7437 int 7438 sctp_over_udp_start(void) 7439 { 7440 uint16_t port; 7441 int ret; 7442 #ifdef INET 7443 struct sockaddr_in sin; 7444 #endif 7445 #ifdef INET6 7446 struct sockaddr_in6 sin6; 7447 #endif 7448 /* 7449 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7450 * for writting! 7451 */ 7452 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7453 if (ntohs(port) == 0) { 7454 /* Must have a port set */ 7455 return (EINVAL); 7456 } 7457 #ifdef INET 7458 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7459 /* Already running -- must stop first */ 7460 return (EALREADY); 7461 } 7462 #endif 7463 #ifdef INET6 7464 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7465 /* Already running -- must stop first */ 7466 return (EALREADY); 7467 } 7468 #endif 7469 #ifdef INET 7470 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7471 SOCK_DGRAM, IPPROTO_UDP, 7472 curthread->td_ucred, curthread))) { 7473 sctp_over_udp_stop(); 7474 return (ret); 7475 } 7476 /* Call the special UDP hook. */ 7477 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7478 sctp_recv_udp_tunneled_packet, 7479 sctp_recv_icmp_tunneled_packet, 7480 NULL))) { 7481 sctp_over_udp_stop(); 7482 return (ret); 7483 } 7484 /* Ok, we have a socket, bind it to the port. 

int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}
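
/*
 * A minimal usage sketch (compiled out) of sctp_min_mtu() above; the MTU
 * values and the function name are illustrative only.
 */
#if 0
static void
example_sctp_min_mtu_usage(void)
{
	uint32_t mtu;

	/* Zero means "unknown" and is ignored: this yields 1280. */
	mtu = sctp_min_mtu(1500, 0, 1280);
	/* If all three values are zero, zero is returned: this yields 0. */
	mtu = sctp_min_mtu(0, 0, 0);
	(void)mtu;
}
#endif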

void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}

void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	    (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
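
/*
 * A minimal sketch (compiled out) of how the two helpers above partition
 * asoc.state: sctp_set_state() replaces only the bits inside
 * SCTP_STATE_MASK (the base state), while sctp_add_substate() ORs in flag
 * bits outside the mask, so substates normally survive base-state changes.
 * Both fire the state__change DTrace probe when KDTRACE_HOOKS is enabled.
 * The function name is an illustrative assumption.
 */
#if 0
static void
example_sctp_state_partition(struct sctp_tcb *stcb)
{
	/* Set a substate flag (a bit outside SCTP_STATE_MASK). */
	sctp_add_substate(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	/*
	 * Change the base state.  Substate bits are left alone, except that
	 * sctp_set_state() clears SHUTDOWN_PENDING when one of the
	 * SHUTDOWN_* base states is entered, as coded above.
	 */
	sctp_set_state(stcb, SCTP_STATE_SHUTDOWN_SENT);
}
#endif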