/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.sb.stcb = stcb;
    sctp_clog.x.sb.so_sbcc = sb->sb_cc;
    if (stcb)
        sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
    else
        sctp_clog.x.sb.stcb_sbcc = 0;
    sctp_clog.x.sb.incr = incr;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_SB,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}
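/*
 * Note on the logging helpers in this file: each one fills in the
 * event-specific member of the union inside struct sctp_cwnd_log (sb, close,
 * rto, strlog, nagle, sack, ...) and then hands the same storage to
 * SCTP_CTR6 through the x.misc.log1..log4 overlay, so the four 32-bit words
 * traced via KTR carry the event-specific payload.  All of this is compiled
 * in only when SCTP_LOCAL_TRACE_BUF is defined.
 */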
void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.close.inp = (void *)inp;
    sctp_clog.x.close.sctp_flags = inp->sctp_flags;
    if (stcb) {
        sctp_clog.x.close.stcb = (void *)stcb;
        sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
    } else {
        sctp_clog.x.close.stcb = 0;
        sctp_clog.x.close.state = 0;
    }
    sctp_clog.x.close.loc = loc;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_CLOSE,
        0,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.rto.net = (void *)net;
    sctp_clog.x.rto.rtt = net->rtt / 1000;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RTT,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.strlog.stcb = stcb;
    sctp_clog.x.strlog.n_tsn = tsn;
    sctp_clog.x.strlog.n_sseq = sseq;
    sctp_clog.x.strlog.e_tsn = 0;
    sctp_clog.x.strlog.e_sseq = 0;
    sctp_clog.x.strlog.strm = stream;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_STRM,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.nagle.stcb = (void *)stcb;
    sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
    sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
    sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
    sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_NAGLE,
        action,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.sack.cumack = cumack;
    sctp_clog.x.sack.oldcumack = old_cumack;
    sctp_clog.x.sack.tsn = tsn;
    sctp_clog.x.sack.numGaps = gaps;
    sctp_clog.x.sack.numDups = dups;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_SACK,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.map.base = map;
    sctp_clog.x.map.cum = cum;
    sctp_clog.x.map.high = high;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MAP,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.fr.largest_tsn = biggest_tsn;
    sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
    sctp_clog.x.fr.tsn = tsn;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_FR,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.mb.mp = m;
    sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
    sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
    sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
    if (SCTP_BUF_IS_EXTENDED(m)) {
        sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
        sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
    } else {
        sctp_clog.x.mb.ext = 0;
        sctp_clog.x.mb.refcnt = 0;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MBUF,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
    struct mbuf *mat;

    for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
        sctp_log_mb(mat, from);
    }
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    if (control == NULL) {
        SCTP_PRINTF("Gak log of NULL?\n");
        return;
    }
    sctp_clog.x.strlog.stcb = control->stcb;
    sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
    sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
    sctp_clog.x.strlog.strm = control->sinfo_stream;
    if (poschk != NULL) {
        sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
        sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
    } else {
        sctp_clog.x.strlog.e_tsn = 0;
        sctp_clog.x.strlog.e_sseq = 0;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_STRM,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.cwnd.net = net;
    if (stcb->asoc.send_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_send = 255;
    else
        sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
    if (stcb->asoc.stream_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_str = 255;
    else
        sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

    if (net) {
        sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
        sctp_clog.x.cwnd.inflight = net->flight_size;
        sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
        sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
        sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
    }
    if (SCTP_CWNDLOG_PRESEND == from) {
        sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
    }
    sctp_clog.x.cwnd.cwnd_augment = augment;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_CWND,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    if (inp) {
        sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

    } else {
        sctp_clog.x.lock.sock = (void *)NULL;
    }
    sctp_clog.x.lock.inp = (void *)inp;
    if (stcb) {
        sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
    } else {
        sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
    }
    if (inp) {
        sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
        sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
    } else {
        sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
    }
    sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
    if (inp && (inp->sctp_socket)) {
        sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
        sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
        sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
    } else {
        sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_LOCK_EVENT,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.cwnd.net = net;
    sctp_clog.x.cwnd.cwnd_new_value = error;
    sctp_clog.x.cwnd.inflight = net->flight_size;
    sctp_clog.x.cwnd.cwnd_augment = burst;
    if (stcb->asoc.send_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_send = 255;
    else
        sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
    if (stcb->asoc.stream_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_str = 255;
    else
        sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MAXBURST,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.rwnd.rwnd = peers_rwnd;
    sctp_clog.x.rwnd.send_size = snd_size;
    sctp_clog.x.rwnd.overhead = overhead;
    sctp_clog.x.rwnd.new_rwnd = 0;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RWND,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.rwnd.rwnd = peers_rwnd;
    sctp_clog.x.rwnd.send_size = flight_size;
    sctp_clog.x.rwnd.overhead = overhead;
    sctp_clog.x.rwnd.new_rwnd = a_rwndval;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RWND,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.mbcnt.total_queue_size = total_oq;
    sctp_clog.x.mbcnt.size_change = book;
    sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
    sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MBCNT,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_MISC_EVENT,
        from,
        a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.wake.stcb = (void *)stcb;
    sctp_clog.x.wake.wake_cnt = wake_cnt;
    sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
    sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
    sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

    if (stcb->asoc.stream_queue_cnt < 0xff)
        sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
    else
        sctp_clog.x.wake.stream_qcnt = 0xff;

    if (stcb->asoc.chunks_on_out_queue < 0xff)
        sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
    else
        sctp_clog.x.wake.chunks_on_oque = 0xff;

    sctp_clog.x.wake.sctpflags = 0;
    /* set in the deferred mode stuff */
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
        sctp_clog.x.wake.sctpflags |= 1;
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
        sctp_clog.x.wake.sctpflags |= 2;
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
        sctp_clog.x.wake.sctpflags |= 4;
    /* what about the sb */
    if (stcb->sctp_socket) {
        struct socket *so = stcb->sctp_socket;

        sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
    } else {
        sctp_clog.x.wake.sbflags = 0xff;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_WAKE,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
    sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
    sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
    sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
    sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
    sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
    sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_BLOCK,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
    /* May need to fix this if ktrdump does not work */
    return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static
void
sctp_print_audit_report(void)
{
    int i;
    int cnt;

    cnt = 0;
    for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
        if ((sctp_audit_data[i][0] == 0xe0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if (sctp_audit_data[i][0] == 0xf0) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if ((sctp_audit_data[i][0] == 0xc0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            SCTP_PRINTF("\n");
            cnt = 0;
        }
        SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
            (uint32_t)sctp_audit_data[i][1]);
        cnt++;
        if ((cnt % 14) == 0)
            SCTP_PRINTF("\n");
    }
    for (i = 0; i < sctp_audit_indx; i++) {
        if ((sctp_audit_data[i][0] == 0xe0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if (sctp_audit_data[i][0] == 0xf0) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if ((sctp_audit_data[i][0] == 0xc0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            SCTP_PRINTF("\n");
            cnt = 0;
        }
        SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
            (uint32_t)sctp_audit_data[i][1]);
        cnt++;
        if ((cnt % 14) == 0)
            SCTP_PRINTF("\n");
    }
    SCTP_PRINTF("\n");
}

void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    int resend_cnt, tot_out, rep, tot_book_cnt;
    struct sctp_nets *lnet;
    struct sctp_tmit_chunk *chk;

    sctp_audit_data[sctp_audit_indx][0] = 0xAA;
    sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
    if (inp == NULL) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0x01;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        return;
    }
    if (stcb == NULL) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0x02;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        return;
    }
    sctp_audit_data[sctp_audit_indx][0] = 0xA1;
    sctp_audit_data[sctp_audit_indx][1] =
        (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
    rep = 0;
    tot_book_cnt = 0;
    resend_cnt = tot_out = 0;
    TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
        if (chk->sent == SCTP_DATAGRAM_RESEND) {
            resend_cnt++;
        } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
            tot_out += chk->book_size;
            tot_book_cnt++;
        }
    }
    if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA1;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
            resend_cnt, stcb->asoc.sent_queue_retran_cnt);
        rep = 1;
        stcb->asoc.sent_queue_retran_cnt = resend_cnt;
        sctp_audit_data[sctp_audit_indx][0] = 0xA2;
        sctp_audit_data[sctp_audit_indx][1] =
            (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
    }
    if (tot_out != stcb->asoc.total_flight) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA2;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
            (int)stcb->asoc.total_flight);
        stcb->asoc.total_flight = tot_out;
    }
    if (tot_book_cnt != stcb->asoc.total_flight_count) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA5;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

        stcb->asoc.total_flight_count = tot_book_cnt;
    }
    tot_out = 0;
    TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
        tot_out += lnet->flight_size;
    }
    if (tot_out != stcb->asoc.total_flight) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA3;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("real flight:%d net total was %d\n",
            stcb->asoc.total_flight, tot_out);
        /* now corrective action */
        TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

            tot_out = 0;
            TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
                if ((chk->whoTo == lnet) &&
                    (chk->sent < SCTP_DATAGRAM_RESEND)) {
                    tot_out += chk->book_size;
                }
            }
            if (lnet->flight_size != tot_out) {
                SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
                    (void *)lnet, lnet->flight_size,
                    tot_out);
                lnet->flight_size = tot_out;
            }
        }
    }
    if (rep) {
        sctp_print_audit_report();
    }
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

    sctp_audit_data[sctp_audit_indx][0] = ev;
    sctp_audit_data[sctp_audit_indx][1] = fd;
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code that the time is positive and
 * know that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
    uint64_t temp;
    uint32_t ticks;

    if (hz == 1000) {
        ticks = msecs;
    } else {
        temp = (((uint64_t)msecs * hz) + 999) / 1000;
        if (temp > UINT32_MAX) {
            ticks = UINT32_MAX;
        } else {
            ticks = (uint32_t)temp;
        }
    }
    return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
    uint64_t temp;
    uint32_t msecs;

    if (hz == 1000) {
        msecs = ticks;
    } else {
        temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
        if (temp > UINT32_MAX) {
            msecs = UINT32_MAX;
        } else {
            msecs = (uint32_t)temp;
        }
    }
    return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
    uint64_t temp;
    uint32_t ticks;

    temp = (uint64_t)secs * hz;
    if (temp > UINT32_MAX) {
        ticks = UINT32_MAX;
    } else {
        ticks = (uint32_t)temp;
    }
    return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
    uint64_t temp;
    uint32_t secs;

    temp = ((uint64_t)ticks + (hz - 1)) / hz;
    if (temp > UINT32_MAX) {
        secs = UINT32_MAX;
    } else {
        secs = (uint32_t)temp;
    }
    return (secs);
}
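/*
 * For example, with hz = 250 the round-up above gives
 * sctp_msecs_to_ticks(1) = (1 * 250 + 999) / 1000 = 1 tick and
 * sctp_ticks_to_msecs(1) = (1 * 1000 + 249) / 250 = 4 msecs, so a positive
 * time value never maps to zero ticks (and vice versa).
 */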
/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
    struct sctp_inpcb *inp;
    struct sctp_nets *net;

    inp = stcb->sctp_ep;

    sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
    sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
    sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
    sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
    TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
        sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
        sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
    }
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
    struct sctp_inpcb *inp;
    struct sctp_nets *net;

    inp = stcb->sctp_ep;
    sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
    sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
    if (stop_assoc_kill_timer) {
        sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
    }
    sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
    sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
    sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
    /* Mobility adaptation */
    sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
    TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
        sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
        sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
        sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
        sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
        sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
        sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
    }
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
    68,
    296,
    508,
    512,
    544,
    576,
    1004,
    1492,
    1500,
    1536,
    2000,
    2048,
    4352,
    4464,
    8168,
    17912,
    32000,
    65532
};
/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
    uint32_t i;

    val &= 0xfffffffc;
    if (val <= sctp_mtu_sizes[0]) {
        return (val);
    }
    for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
        if (val <= sctp_mtu_sizes[i]) {
            break;
        }
    }
    KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
        ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
    return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4 smaller
 * or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
    /* select another MTU that is just bigger than this one */
    uint32_t i;

    val &= 0xfffffffc;
    for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
        if (val < sctp_mtu_sizes[i]) {
            KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
                ("sctp_mtu_sizes[%u] not a multiple of 4", i));
            return (sctp_mtu_sizes[i]);
        }
    }
    return (val);
}
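/*
 * For example, sctp_get_prev_mtu(1499) first masks the value down to 1496
 * and then returns 1492, while sctp_get_next_mtu(1500) returns 1536; a value
 * above 65532 is only rounded down to a multiple of 4 and returned as is.
 */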
void
sctp_fill_random_store(struct sctp_pcb *m)
{
    /*
     * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
     * our counter. The result becomes our good random numbers and we
     * then setup to give these out. Note that we do no locking to
     * protect this. This is ok, since if competing folks call this we
     * will get more gobbled gook in the random store which is what we
     * want. There is a danger that two guys will use the same random
     * numbers, but that's ok too since that is random as well :->
     */
    m->store_at = 0;
    (void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
        sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
        sizeof(m->random_counter), (uint8_t *)m->random_store);
    m->random_counter++;
}
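/*
 * sctp_select_initial_TSN() below consumes the random_store filled above
 * four bytes at a time: store_at is advanced with atomic_cmpset_int
 * (retrying on a lost race), and once the offset wraps the store is
 * refilled via sctp_fill_random_store().  As the comment above notes,
 * concurrent callers may occasionally read the same bytes, which is
 * accepted here.
 */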
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
    /*
     * A true implementation should use a random selection process to get
     * the initial stream sequence number, using RFC1750 as a good
     * guideline
     */
    uint32_t x, *xp;
    uint8_t *p;
    int store_at, new_store;

    if (inp->initial_sequence_debug != 0) {
        uint32_t ret;

        ret = inp->initial_sequence_debug;
        inp->initial_sequence_debug++;
        return (ret);
    }
retry:
    store_at = inp->store_at;
    new_store = store_at + sizeof(uint32_t);
    if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
        new_store = 0;
    }
    if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
        goto retry;
    }
    if (new_store == 0) {
        /* Refill the random store */
        sctp_fill_random_store(inp);
    }
    p = &inp->random_store[store_at];
    xp = (uint32_t *)p;
    x = *xp;
    return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
    uint32_t x;
    struct timeval now;

    if (check) {
        (void)SCTP_GETTIME_TIMEVAL(&now);
    }
    for (;;) {
        x = sctp_select_initial_TSN(&inp->sctp_ep);
        if (x == 0) {
            /* we never use 0 */
            continue;
        }
        if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
            break;
        }
    }
    return (x);
}

int32_t
sctp_map_assoc_state(int kernel_state)
{
    int32_t user_state;

    if (kernel_state & SCTP_STATE_WAS_ABORTED) {
        user_state = SCTP_CLOSED;
    } else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
        user_state = SCTP_SHUTDOWN_PENDING;
    } else {
        switch (kernel_state & SCTP_STATE_MASK) {
        case SCTP_STATE_EMPTY:
            user_state = SCTP_CLOSED;
            break;
        case SCTP_STATE_INUSE:
            user_state = SCTP_CLOSED;
            break;
        case SCTP_STATE_COOKIE_WAIT:
            user_state = SCTP_COOKIE_WAIT;
            break;
        case SCTP_STATE_COOKIE_ECHOED:
            user_state = SCTP_COOKIE_ECHOED;
            break;
        case SCTP_STATE_OPEN:
            user_state = SCTP_ESTABLISHED;
            break;
        case SCTP_STATE_SHUTDOWN_SENT:
            user_state = SCTP_SHUTDOWN_SENT;
            break;
        case SCTP_STATE_SHUTDOWN_RECEIVED:
            user_state = SCTP_SHUTDOWN_RECEIVED;
            break;
        case SCTP_STATE_SHUTDOWN_ACK_SENT:
            user_state = SCTP_SHUTDOWN_ACK_SENT;
            break;
        default:
            user_state = SCTP_CLOSED;
            break;
        }
    }
    return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
{
    struct sctp_association *asoc;

    /*
     * Anything set to zero is taken care of by the allocation routine's
     * bzero
     */

    /*
     * Up front select what scoping to apply on addresses I tell my peer
     * Not sure what to do with these right now, we will need to come up
     * with a way to set them. We may need to pass them through from the
     * caller in the sctp_aloc_assoc() function.
     */
    int i;
#if defined(SCTP_DETAILED_STR_STATS)
    int j;
#endif

    asoc = &stcb->asoc;
    /* init all variables to a known value. */
    SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
    asoc->max_burst = inp->sctp_ep.max_burst;
    asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
    asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
    asoc->cookie_life = inp->sctp_ep.def_cookie_life;
    asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
    asoc->ecn_supported = inp->ecn_supported;
    asoc->prsctp_supported = inp->prsctp_supported;
    asoc->auth_supported = inp->auth_supported;
    asoc->asconf_supported = inp->asconf_supported;
    asoc->reconfig_supported = inp->reconfig_supported;
    asoc->nrsack_supported = inp->nrsack_supported;
    asoc->pktdrop_supported = inp->pktdrop_supported;
    asoc->idata_supported = inp->idata_supported;
    asoc->sctp_cmt_pf = (uint8_t)0;
    asoc->sctp_frag_point = inp->sctp_frag_point;
    asoc->sctp_features = inp->sctp_features;
    asoc->default_dscp = inp->sctp_ep.default_dscp;
    asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
    if (inp->sctp_ep.default_flowlabel) {
        asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
    } else {
        if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
            asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
            asoc->default_flowlabel &= 0x000fffff;
            asoc->default_flowlabel |= 0x80000000;
        } else {
            asoc->default_flowlabel = 0;
        }
    }
#endif
    asoc->sb_send_resv = 0;
    if (override_tag) {
        asoc->my_vtag = override_tag;
    } else {
        asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
    }
    /* Get the nonce tags */
    asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
    asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
    asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
    asoc->tsn_in_at = 0;
    asoc->tsn_out_at = 0;
    asoc->tsn_in_wrapped = 0;
    asoc->tsn_out_wrapped = 0;
    asoc->cumack_log_at = 0;
    asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
    asoc->fs_index = 0;
#endif
    asoc->refcnt = 0;
    asoc->assoc_up_sent = 0;
    asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
        sctp_select_initial_TSN(&inp->sctp_ep);
    asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
    /* we are optimistic here */
    asoc->peer_supports_nat = 0;
    asoc->sent_queue_retran_cnt = 0;

    /* for CMT */
    asoc->last_net_cmt_send_started = NULL;

    /* This will need to be adjusted */
    asoc->last_acked_seq = asoc->init_seq_number - 1;
    asoc->advanced_peer_ack_point = asoc->last_acked_seq;
    asoc->asconf_seq_in = asoc->last_acked_seq;

    /* here we are different, we hold the next one we expect */
    asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

    asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
    asoc->initial_rto = inp->sctp_ep.initial_rto;

    asoc->default_mtu = inp->sctp_ep.default_mtu;
    asoc->max_init_times = inp->sctp_ep.max_init_times;
    asoc->max_send_times = inp->sctp_ep.max_send_times;
    asoc->def_net_failure = inp->sctp_ep.def_net_failure;
    asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
    asoc->free_chunk_cnt = 0;

    asoc->iam_blocking = 0;
    asoc->context = inp->sctp_context;
    asoc->local_strreset_support = inp->local_strreset_support;
    asoc->def_send = inp->def_send;
    asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
    asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
    asoc->pr_sctp_cnt = 0;
    asoc->total_output_queue_size = 0;

    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
        asoc->scope.ipv6_addr_legal = 1;
        if (SCTP_IPV6_V6ONLY(inp) == 0) {
            asoc->scope.ipv4_addr_legal = 1;
        } else {
            asoc->scope.ipv4_addr_legal = 0;
        }
    } else {
        asoc->scope.ipv6_addr_legal = 0;
        asoc->scope.ipv4_addr_legal = 1;
    }

    asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
    asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

    asoc->smallest_mtu = inp->sctp_frag_point;
    asoc->minrto = inp->sctp_ep.sctp_minrto;
    asoc->maxrto = inp->sctp_ep.sctp_maxrto;

    asoc->stream_locked_on = 0;
    asoc->ecn_echo_cnt_onq = 0;
    asoc->stream_locked = 0;

    asoc->send_sack = 1;

    LIST_INIT(&asoc->sctp_restricted_addrs);

    TAILQ_INIT(&asoc->nets);
    TAILQ_INIT(&asoc->pending_reply_queue);
    TAILQ_INIT(&asoc->asconf_ack_sent);
    /* Setup to fill the hb random cache at first HB */
    asoc->hb_random_idx = 4;

    asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

    stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
    stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

    stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
    stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

    /*
     * Now the stream parameters, here we allocate space for all streams
     * that we request by default.
     */
    asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
        o_strms;
    SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
        asoc->streamoutcnt * sizeof(struct sctp_stream_out),
        SCTP_M_STRMO);
    if (asoc->strmout == NULL) {
        /* big trouble no memory */
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    for (i = 0; i < asoc->streamoutcnt; i++) {
        /*
         * inbound side must be set to 0xffff, also NOTE when we get
         * the INIT-ACK back (for INIT sender) we MUST reduce the
         * count (streamoutcnt) but first check if we sent to any of
         * the upper streams that were dropped (if some were). Those
         * that were dropped must be notified to the upper layer as
         * failed to send.
         */
        asoc->strmout[i].next_mid_ordered = 0;
        asoc->strmout[i].next_mid_unordered = 0;
        TAILQ_INIT(&asoc->strmout[i].outqueue);
        asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
        for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
            asoc->strmout[i].abandoned_sent[j] = 0;
            asoc->strmout[i].abandoned_unsent[j] = 0;
        }
#else
        asoc->strmout[i].abandoned_sent[0] = 0;
        asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
        asoc->strmout[i].sid = i;
        asoc->strmout[i].last_msg_incomplete = 0;
        asoc->strmout[i].state = SCTP_STREAM_OPENING;
        asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
    }
    asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

    /* Now the mapping array */
    asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
    SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
        SCTP_M_MAP);
    if (asoc->mapping_array == NULL) {
        SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    memset(asoc->mapping_array, 0, asoc->mapping_array_size);
    SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
        SCTP_M_MAP);
    if (asoc->nr_mapping_array == NULL) {
        SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
        SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

    /* Now the init of the other outqueues */
    TAILQ_INIT(&asoc->free_chunks);
    TAILQ_INIT(&asoc->control_send_queue);
    TAILQ_INIT(&asoc->asconf_send_queue);
    TAILQ_INIT(&asoc->send_queue);
    TAILQ_INIT(&asoc->sent_queue);
    TAILQ_INIT(&asoc->resetHead);
    asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
    TAILQ_INIT(&asoc->asconf_queue);
    /* authentication fields */
    asoc->authinfo.random = NULL;
    asoc->authinfo.active_keyid = 0;
    asoc->authinfo.assoc_key = NULL;
    asoc->authinfo.assoc_keyid = 0;
    asoc->authinfo.recv_key = NULL;
    asoc->authinfo.recv_keyid = 0;
    LIST_INIT(&asoc->shared_keys);
    asoc->marked_retrans = 0;
    asoc->port = inp->sctp_ep.port;
    asoc->timoinit = 0;
    asoc->timodata = 0;
    asoc->timosack = 0;
    asoc->timoshutdown = 0;
    asoc->timoheartbeat = 0;
    asoc->timocookie = 0;
    asoc->timoshutdownack = 0;
    (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
    asoc->discontinuity_time = asoc->start_time;
    for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
        asoc->abandoned_unsent[i] = 0;
        asoc->abandoned_sent[i] = 0;
    }
    /*
     * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
     * freed later when the association is freed.
     */
    return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
    unsigned int i, limit;

    SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
        asoc->mapping_array_size,
        asoc->mapping_array_base_tsn,
        asoc->cumulative_tsn,
        asoc->highest_tsn_inside_map,
        asoc->highest_tsn_inside_nr_map);
    for (limit = asoc->mapping_array_size; limit > 1; limit--) {
        if (asoc->mapping_array[limit - 1] != 0) {
            break;
        }
    }
    SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
    for (i = 0; i < limit; i++) {
        SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
    }
    if (limit % 16)
        SCTP_PRINTF("\n");
    for (limit = asoc->mapping_array_size; limit > 1; limit--) {
        if (asoc->nr_mapping_array[limit - 1]) {
            break;
        }
    }
    SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
    for (i = 0; i < limit; i++) {
        SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
    }
    if (limit % 16)
        SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
    /* mapping array needs to grow */
    uint8_t *new_array1, *new_array2;
    uint32_t new_size;

    new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
    SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
    SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
    if ((new_array1 == NULL) || (new_array2 == NULL)) {
        /* can't get more, forget it */
        SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
        if (new_array1) {
            SCTP_FREE(new_array1, SCTP_M_MAP);
        }
        if (new_array2) {
            SCTP_FREE(new_array2, SCTP_M_MAP);
        }
        return (-1);
    }
    memset(new_array1, 0, new_size);
    memset(new_array2, 0, new_size);
    memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
    memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
    SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
    SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
    asoc->mapping_array = new_array1;
    asoc->nr_mapping_array = new_array2;
    asoc->mapping_array_size = new_size;
    return (0);
}
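/*
 * For example, asking sctp_expand_mapping_array() for room for 20 more TSNs
 * grows the arrays by (20 + 7) / 8 = 3 bytes plus SCTP_MAPPING_ARRAY_INCR,
 * and the renegable and non-renegable maps are always reallocated together
 * so they stay the same size.
 */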
static void
sctp_iterator_work(struct sctp_iterator *it)
{
    struct epoch_tracker et;
    struct sctp_inpcb *tinp;
    int iteration_count = 0;
    int inp_skip = 0;
    int first_in = 1;

    NET_EPOCH_ENTER(et);
    SCTP_INP_INFO_RLOCK();
    SCTP_ITERATOR_LOCK();
    sctp_it_ctl.cur_it = it;
    if (it->inp) {
        SCTP_INP_RLOCK(it->inp);
        SCTP_INP_DECR_REF(it->inp);
    }
    if (it->inp == NULL) {
        /* iterator is complete */
done_with_iterator:
        sctp_it_ctl.cur_it = NULL;
        SCTP_ITERATOR_UNLOCK();
        SCTP_INP_INFO_RUNLOCK();
        if (it->function_atend != NULL) {
            (*it->function_atend) (it->pointer, it->val);
        }
        SCTP_FREE(it, SCTP_M_ITER);
        NET_EPOCH_EXIT(et);
        return;
    }
select_a_new_ep:
    if (first_in) {
        first_in = 0;
    } else {
        SCTP_INP_RLOCK(it->inp);
    }
    while (((it->pcb_flags) &&
        ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
        ((it->pcb_features) &&
        ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
        /* endpoint flags or features don't match, so keep looking */
        if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
            SCTP_INP_RUNLOCK(it->inp);
            goto done_with_iterator;
        }
        tinp = it->inp;
        it->inp = LIST_NEXT(it->inp, sctp_list);
        it->stcb = NULL;
        SCTP_INP_RUNLOCK(tinp);
        if (it->inp == NULL) {
            goto done_with_iterator;
        }
        SCTP_INP_RLOCK(it->inp);
    }
    /* now go through each assoc which is in the desired state */
    if (it->done_current_ep == 0) {
        if (it->function_inp != NULL)
            inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
        it->done_current_ep = 1;
    }
    if (it->stcb == NULL) {
        /* run the per instance function */
        it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
    }
    if ((inp_skip) || it->stcb == NULL) {
        if (it->function_inp_end != NULL) {
            inp_skip = (*it->function_inp_end) (it->inp,
                it->pointer,
                it->val);
        }
        SCTP_INP_RUNLOCK(it->inp);
        goto no_stcb;
    }
    while (it->stcb) {
        SCTP_TCB_LOCK(it->stcb);
        if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
            /* not in the right state... keep looking */
            SCTP_TCB_UNLOCK(it->stcb);
            goto next_assoc;
        }
        /* see if we have limited out the iterator loop */
        iteration_count++;
        if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
            /* Pause to let others grab the lock */
            atomic_add_int(&it->stcb->asoc.refcnt, 1);
            SCTP_TCB_UNLOCK(it->stcb);
            SCTP_INP_INCR_REF(it->inp);
            SCTP_INP_RUNLOCK(it->inp);
            SCTP_ITERATOR_UNLOCK();
            SCTP_INP_INFO_RUNLOCK();
            SCTP_INP_INFO_RLOCK();
            SCTP_ITERATOR_LOCK();
            if (sctp_it_ctl.iterator_flags) {
                /* We won't be staying here */
                SCTP_INP_DECR_REF(it->inp);
                atomic_add_int(&it->stcb->asoc.refcnt, -1);
                if (sctp_it_ctl.iterator_flags &
                    SCTP_ITERATOR_STOP_CUR_IT) {
                    sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
                    goto done_with_iterator;
                }
                if (sctp_it_ctl.iterator_flags &
                    SCTP_ITERATOR_STOP_CUR_INP) {
                    sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
                    goto no_stcb;
                }
                /* If we reach here huh? */
                SCTP_PRINTF("Unknown it ctl flag %x\n",
                    sctp_it_ctl.iterator_flags);
                sctp_it_ctl.iterator_flags = 0;
            }
            SCTP_INP_RLOCK(it->inp);
            SCTP_INP_DECR_REF(it->inp);
            SCTP_TCB_LOCK(it->stcb);
            atomic_add_int(&it->stcb->asoc.refcnt, -1);
            iteration_count = 0;
        }
        KASSERT(it->inp == it->stcb->sctp_ep,
            ("%s: stcb %p does not belong to inp %p, but inp %p",
            __func__, it->stcb, it->inp, it->stcb->sctp_ep));

        /* run function on this one */
        (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

        /*
         * we lie here, it really needs to have its own type but
         * first I must verify that this won't affect things :-0
         */
        if (it->no_chunk_output == 0)
            sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

        SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
        it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
        if (it->stcb == NULL) {
            /* Run last function */
            if (it->function_inp_end != NULL) {
                inp_skip = (*it->function_inp_end) (it->inp,
                    it->pointer,
                    it->val);
            }
        }
    }
    SCTP_INP_RUNLOCK(it->inp);
no_stcb:
    /* done with all assocs on this endpoint, move on to next endpoint */
    it->done_current_ep = 0;
    if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
        it->inp = NULL;
    } else {
        it->inp = LIST_NEXT(it->inp, sctp_list);
    }
    it->stcb = NULL;
    if (it->inp == NULL) {
        goto done_with_iterator;
    }
    goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
    struct sctp_iterator *it;

    /* This function is called with the WQ lock in place */
    sctp_it_ctl.iterator_running = 1;
    while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
        /* now lets work on this one */
        TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
        SCTP_IPI_ITERATOR_WQ_UNLOCK();
        CURVNET_SET(it->vn);
        sctp_iterator_work(it);
        CURVNET_RESTORE();
        SCTP_IPI_ITERATOR_WQ_LOCK();
        /* sa_ignore FREED_MEMORY */
    }
    sctp_it_ctl.iterator_running = 0;
    return;
}

static void
sctp_handle_addr_wq(void)
{
    /* deal with the ADDR wq from the rtsock calls */
    struct sctp_laddr *wi, *nwi;
    struct sctp_asconf_iterator *asc;

    SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
        sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
    if (asc == NULL) {
        /* Try later, no memory */
        sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
            (struct sctp_inpcb *)NULL,
            (struct sctp_tcb *)NULL,
            (struct sctp_nets *)NULL);
        return;
    }
    LIST_INIT(&asc->list_of_work);
    asc->cnt = 0;

    LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
        LIST_REMOVE(wi, sctp_nxt_addr);
        LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
        asc->cnt++;
    }

    if (asc->cnt == 0) {
        SCTP_FREE(asc, SCTP_M_ASC_IT);
    } else {
        int ret;

        ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
            sctp_asconf_iterator_stcb,
            NULL, /* No ep end for boundall */
            SCTP_PCB_FLAGS_BOUNDALL,
            SCTP_PCB_ANY_FEATURES,
            SCTP_ASOC_ANY_STATE,
            (void *)asc, 0,
            sctp_asconf_iterator_end, NULL, 0);
        if (ret) {
            SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
            /*
             * Freeing if we are stopping or put back on the
             * addr_wq.
             */
            if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
                sctp_asconf_iterator_end(asc, 0);
            } else {
                LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
                    LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
                }
                SCTP_FREE(asc, SCTP_M_ASC_IT);
            }
        }
    }
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */

void
sctp_timeout_handler(void *t)
{
    struct epoch_tracker et;
    struct timeval tv;
    struct sctp_inpcb *inp;
    struct sctp_tcb *stcb;
    struct sctp_nets *net;
    struct sctp_timer *tmr;
    struct mbuf *op_err;
    int type;
    int i, secret;
    bool did_output, released_asoc_reference;

    /*
     * If inp, stcb or net are not NULL, then references to these were
     * added when the timer was started, and must be released before
     * this function returns.
     */
    tmr = (struct sctp_timer *)t;
    inp = (struct sctp_inpcb *)tmr->ep;
    stcb = (struct sctp_tcb *)tmr->tcb;
    net = (struct sctp_nets *)tmr->net;
    CURVNET_SET((struct vnet *)tmr->vnet);
    did_output = 1;
    released_asoc_reference = false;

#ifdef SCTP_AUDITING_ENABLED
    sctp_audit_log(0xF0, (uint8_t)tmr->type);
    sctp_auditing(3, inp, stcb, net);
#endif

    /* sanity checks... */
    KASSERT(tmr->self == NULL || tmr->self == tmr,
        ("sctp_timeout_handler: tmr->self corrupted"));
    KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
        ("sctp_timeout_handler: invalid timer type %d", tmr->type));
    type = tmr->type;
    KASSERT(stcb == NULL || stcb->sctp_ep == inp,
        ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
        type, stcb, stcb->sctp_ep));
    tmr->stopped_from = 0xa001;
    if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
        SCTPDBG(SCTP_DEBUG_TIMER2,
            "Timer type %d handler exiting due to CLOSED association.\n",
            type);
        goto out_decr;
    }
    tmr->stopped_from = 0xa002;
    SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
    if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
        SCTPDBG(SCTP_DEBUG_TIMER2,
            "Timer type %d handler exiting due to not being active.\n",
            type);
        goto out_decr;
    }

    tmr->stopped_from = 0xa003;
    if (stcb) {
        SCTP_TCB_LOCK(stcb);
        /*
         * Release reference so that association can be freed if
         * necessary below. This is safe now that we have acquired
         * the lock.
         */
        atomic_add_int(&stcb->asoc.refcnt, -1);
        released_asoc_reference = true;
        if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
            ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
            (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
            SCTPDBG(SCTP_DEBUG_TIMER2,
                "Timer type %d handler exiting due to CLOSED association.\n",
                type);
            goto out;
        }
    } else if (inp != NULL) {
        SCTP_INP_WLOCK(inp);
    } else {
        SCTP_WQ_ADDR_LOCK();
    }

    /* Record in stopped_from which timeout occurred. */
    tmr->stopped_from = type;
    NET_EPOCH_ENTER(et);
    /* mark as being serviced now */
    if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
        /*
         * Callout has been rescheduled.
         */
        goto out;
    }
    if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
        /*
         * Not active, so no action.
         */
        goto out;
    }
    SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

    /* call the handler for the appropriate timer type */
    switch (type) {
    case SCTP_TIMER_TYPE_SEND:
        KASSERT(inp != NULL && stcb != NULL && net != NULL,
            ("timeout of type %d: inp = %p, stcb = %p, net = %p",
            type, inp, stcb, net));
        SCTP_STAT_INCR(sctps_timodata);
        stcb->asoc.timodata++;
        stcb->asoc.num_send_timers_up--;
        if (stcb->asoc.num_send_timers_up < 0) {
            stcb->asoc.num_send_timers_up = 0;
        }
        SCTP_TCB_LOCK_ASSERT(stcb);
        if (sctp_t3rxt_timer(inp, stcb, net)) {
            /* no need to unlock on tcb, it's gone */

            goto out_decr;
        }
        SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
        did_output = true;
        if ((stcb->asoc.num_send_timers_up == 0) &&
            (stcb->asoc.sent_queue_cnt > 0)) {
            struct sctp_tmit_chunk *chk;

            /*
             * Safeguard. If there are still chunks on the sent
             * queue but no timers running, something is
             * wrong... so we start a timer on the first chunk
             * on the send queue on whatever net it is sent to.
             */
1838 */ 1839 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1840 if (chk->whoTo != NULL) { 1841 break; 1842 } 1843 } 1844 if (chk != NULL) { 1845 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1846 } 1847 } 1848 break; 1849 case SCTP_TIMER_TYPE_INIT: 1850 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1851 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1852 type, inp, stcb, net)); 1853 SCTP_STAT_INCR(sctps_timoinit); 1854 stcb->asoc.timoinit++; 1855 if (sctp_t1init_timer(inp, stcb, net)) { 1856 /* no need to unlock on tcb its gone */ 1857 goto out_decr; 1858 } 1859 did_output = false; 1860 break; 1861 case SCTP_TIMER_TYPE_RECV: 1862 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1863 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1864 type, inp, stcb, net)); 1865 SCTP_STAT_INCR(sctps_timosack); 1866 stcb->asoc.timosack++; 1867 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1868 #ifdef SCTP_AUDITING_ENABLED 1869 sctp_auditing(4, inp, stcb, NULL); 1870 #endif 1871 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1872 did_output = true; 1873 break; 1874 case SCTP_TIMER_TYPE_SHUTDOWN: 1875 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1876 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1877 type, inp, stcb, net)); 1878 SCTP_STAT_INCR(sctps_timoshutdown); 1879 stcb->asoc.timoshutdown++; 1880 if (sctp_shutdown_timer(inp, stcb, net)) { 1881 /* no need to unlock on tcb its gone */ 1882 goto out_decr; 1883 } 1884 #ifdef SCTP_AUDITING_ENABLED 1885 sctp_auditing(4, inp, stcb, net); 1886 #endif 1887 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1888 did_output = true; 1889 break; 1890 case SCTP_TIMER_TYPE_HEARTBEAT: 1891 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1892 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1893 type, inp, stcb, net)); 1894 SCTP_STAT_INCR(sctps_timoheartbeat); 1895 stcb->asoc.timoheartbeat++; 1896 if (sctp_heartbeat_timer(inp, stcb, net)) { 1897 /* no need to unlock on tcb its gone */ 1898 goto out_decr; 1899 } 1900 #ifdef SCTP_AUDITING_ENABLED 1901 sctp_auditing(4, inp, stcb, net); 1902 #endif 1903 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1904 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1905 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1906 did_output = true; 1907 } else { 1908 did_output = false; 1909 } 1910 break; 1911 case SCTP_TIMER_TYPE_COOKIE: 1912 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1913 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1914 type, inp, stcb, net)); 1915 SCTP_STAT_INCR(sctps_timocookie); 1916 stcb->asoc.timocookie++; 1917 if (sctp_cookie_timer(inp, stcb, net)) { 1918 /* no need to unlock on tcb its gone */ 1919 goto out_decr; 1920 } 1921 #ifdef SCTP_AUDITING_ENABLED 1922 sctp_auditing(4, inp, stcb, net); 1923 #endif 1924 /* 1925 * We consider T3 and Cookie timer pretty much the same with 1926 * respect to where from in chunk_output. 
1927 */ 1928 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1929 did_output = true; 1930 break; 1931 case SCTP_TIMER_TYPE_NEWCOOKIE: 1932 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1933 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1934 type, inp, stcb, net)); 1935 SCTP_STAT_INCR(sctps_timosecret); 1936 (void)SCTP_GETTIME_TIMEVAL(&tv); 1937 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1938 inp->sctp_ep.last_secret_number = 1939 inp->sctp_ep.current_secret_number; 1940 inp->sctp_ep.current_secret_number++; 1941 if (inp->sctp_ep.current_secret_number >= 1942 SCTP_HOW_MANY_SECRETS) { 1943 inp->sctp_ep.current_secret_number = 0; 1944 } 1945 secret = (int)inp->sctp_ep.current_secret_number; 1946 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1947 inp->sctp_ep.secret_key[secret][i] = 1948 sctp_select_initial_TSN(&inp->sctp_ep); 1949 } 1950 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1951 did_output = false; 1952 break; 1953 case SCTP_TIMER_TYPE_PATHMTURAISE: 1954 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1955 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1956 type, inp, stcb, net)); 1957 SCTP_STAT_INCR(sctps_timopathmtu); 1958 sctp_pathmtu_timer(inp, stcb, net); 1959 did_output = false; 1960 break; 1961 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1962 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1963 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1964 type, inp, stcb, net)); 1965 if (sctp_shutdownack_timer(inp, stcb, net)) { 1966 /* no need to unlock on tcb its gone */ 1967 goto out_decr; 1968 } 1969 SCTP_STAT_INCR(sctps_timoshutdownack); 1970 stcb->asoc.timoshutdownack++; 1971 #ifdef SCTP_AUDITING_ENABLED 1972 sctp_auditing(4, inp, stcb, net); 1973 #endif 1974 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1975 did_output = true; 1976 break; 1977 case SCTP_TIMER_TYPE_ASCONF: 1978 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1979 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1980 type, inp, stcb, net)); 1981 SCTP_STAT_INCR(sctps_timoasconf); 1982 if (sctp_asconf_timer(inp, stcb, net)) { 1983 /* no need to unlock on tcb its gone */ 1984 goto out_decr; 1985 } 1986 #ifdef SCTP_AUDITING_ENABLED 1987 sctp_auditing(4, inp, stcb, net); 1988 #endif 1989 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 1990 did_output = true; 1991 break; 1992 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1993 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1994 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1995 type, inp, stcb, net)); 1996 SCTP_STAT_INCR(sctps_timoshutdownguard); 1997 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 1998 "Shutdown guard timer expired"); 1999 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2000 did_output = true; 2001 /* no need to unlock on tcb its gone */ 2002 goto out_decr; 2003 case SCTP_TIMER_TYPE_AUTOCLOSE: 2004 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2005 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2006 type, inp, stcb, net)); 2007 SCTP_STAT_INCR(sctps_timoautoclose); 2008 sctp_autoclose_timer(inp, stcb); 2009 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2010 did_output = true; 2011 break; 2012 case SCTP_TIMER_TYPE_STRRESET: 2013 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2014 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2015 type, inp, stcb, net)); 2016 SCTP_STAT_INCR(sctps_timostrmrst); 2017 if 
(sctp_strreset_timer(inp, stcb)) { 2018 /* no need to unlock on tcb its gone */ 2019 goto out_decr; 2020 } 2021 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2022 did_output = true; 2023 break; 2024 case SCTP_TIMER_TYPE_INPKILL: 2025 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2026 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2027 type, inp, stcb, net)); 2028 SCTP_STAT_INCR(sctps_timoinpkill); 2029 /* 2030 * special case, take away our increment since WE are the 2031 * killer 2032 */ 2033 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2034 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2035 SCTP_INP_DECR_REF(inp); 2036 SCTP_INP_WUNLOCK(inp); 2037 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2038 SCTP_CALLED_FROM_INPKILL_TIMER); 2039 inp = NULL; 2040 goto out_no_decr; 2041 case SCTP_TIMER_TYPE_ASOCKILL: 2042 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2043 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2044 type, inp, stcb, net)); 2045 SCTP_STAT_INCR(sctps_timoassockill); 2046 /* Can we free it yet? */ 2047 SCTP_INP_DECR_REF(inp); 2048 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2049 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2050 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2051 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2052 /* 2053 * free asoc, always unlocks (or destroy's) so prevent 2054 * duplicate unlock or unlock of a free mtx :-0 2055 */ 2056 stcb = NULL; 2057 goto out_no_decr; 2058 case SCTP_TIMER_TYPE_ADDR_WQ: 2059 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2060 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2061 type, inp, stcb, net)); 2062 sctp_handle_addr_wq(); 2063 did_output = true; 2064 break; 2065 case SCTP_TIMER_TYPE_PRIM_DELETED: 2066 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2067 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2068 type, inp, stcb, net)); 2069 SCTP_STAT_INCR(sctps_timodelprim); 2070 sctp_delete_prim_timer(inp, stcb); 2071 did_output = false; 2072 break; 2073 default: 2074 #ifdef INVARIANTS 2075 panic("Unknown timer type %d", type); 2076 #else 2077 did_output = false; 2078 goto out; 2079 #endif 2080 } 2081 #ifdef SCTP_AUDITING_ENABLED 2082 sctp_audit_log(0xF1, (uint8_t)type); 2083 if (inp != NULL) 2084 sctp_auditing(5, inp, stcb, net); 2085 #endif 2086 if (did_output && (stcb != NULL)) { 2087 /* 2088 * Now we need to clean up the control chunk chain if an 2089 * ECNE is on it. It must be marked as UNSENT again so next 2090 * call will continue to send it until such time that we get 2091 * a CWR, to remove it. It is, however, less likely that we 2092 * will find a ecn echo on the chain though. 2093 */ 2094 sctp_fix_ecn_echo(&stcb->asoc); 2095 } 2096 out: 2097 if (stcb != NULL) { 2098 SCTP_TCB_UNLOCK(stcb); 2099 } else if (inp != NULL) { 2100 SCTP_INP_WUNLOCK(inp); 2101 } else { 2102 SCTP_WQ_ADDR_UNLOCK(); 2103 } 2104 2105 out_decr: 2106 /* These reference counts were incremented in sctp_timer_start(). */ 2107 if (inp != NULL) { 2108 SCTP_INP_DECR_REF(inp); 2109 } 2110 if ((stcb != NULL) && !released_asoc_reference) { 2111 atomic_add_int(&stcb->asoc.refcnt, -1); 2112 } 2113 if (net != NULL) { 2114 sctp_free_remote_addr(net); 2115 } 2116 out_no_decr: 2117 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2118 CURVNET_RESTORE(); 2119 NET_EPOCH_EXIT(et); 2120 } 2121 2122 /*- 2123 * The following table shows which parameters must be provided 2124 * when calling sctp_timer_start(). 
For parameters not being 2125 * provided, NULL must be used. 2126 * 2127 * |Name |inp |stcb|net | 2128 * |-----------------------------|----|----|----| 2129 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2130 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2131 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2132 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2133 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2134 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2135 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2136 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2137 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2138 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2139 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2140 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2141 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2142 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2143 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2144 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2145 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2146 * 2147 */ 2148 2149 void 2150 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2151 struct sctp_nets *net) 2152 { 2153 struct sctp_timer *tmr; 2154 uint32_t to_ticks; 2155 uint32_t rndval, jitter; 2156 2157 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2158 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2159 t_type, stcb, stcb->sctp_ep)); 2160 tmr = NULL; 2161 to_ticks = 0; 2162 if (stcb != NULL) { 2163 SCTP_TCB_LOCK_ASSERT(stcb); 2164 } else if (inp != NULL) { 2165 SCTP_INP_WLOCK_ASSERT(inp); 2166 } else { 2167 SCTP_WQ_ADDR_LOCK_ASSERT(); 2168 } 2169 if (stcb != NULL) { 2170 /* 2171 * Don't restart timer on association that's about to be 2172 * killed. 2173 */ 2174 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2175 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2176 SCTPDBG(SCTP_DEBUG_TIMER2, 2177 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2178 t_type, inp, stcb, net); 2179 return; 2180 } 2181 /* Don't restart timer on net that's been removed. */ 2182 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2183 SCTPDBG(SCTP_DEBUG_TIMER2, 2184 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2185 t_type, inp, stcb, net); 2186 return; 2187 } 2188 } 2189 switch (t_type) { 2190 case SCTP_TIMER_TYPE_SEND: 2191 /* Here we use the RTO timer. */ 2192 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2193 #ifdef INVARIANTS 2194 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2195 t_type, inp, stcb, net); 2196 #else 2197 return; 2198 #endif 2199 } 2200 tmr = &net->rxt_timer; 2201 if (net->RTO == 0) { 2202 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2203 } else { 2204 to_ticks = sctp_msecs_to_ticks(net->RTO); 2205 } 2206 break; 2207 case SCTP_TIMER_TYPE_INIT: 2208 /* 2209 * Here we use the INIT timer default usually about 1 2210 * second. 2211 */ 2212 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2213 #ifdef INVARIANTS 2214 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2215 t_type, inp, stcb, net); 2216 #else 2217 return; 2218 #endif 2219 } 2220 tmr = &net->rxt_timer; 2221 if (net->RTO == 0) { 2222 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2223 } else { 2224 to_ticks = sctp_msecs_to_ticks(net->RTO); 2225 } 2226 break; 2227 case SCTP_TIMER_TYPE_RECV: 2228 /* 2229 * Here we use the Delayed-Ack timer value from the inp, 2230 * ususually about 200ms. 
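 * As a rough worked illustration (numbers here are assumptions, not
 * values taken from this code): with delayed_ack = 200 ms and a kernel
 * clock of hz = 1000, sctp_msecs_to_ticks(200) comes out to roughly
 * 200 ticks, so the delayed-SACK callout fires about 200 ms after it
 * is armed.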
2231 */ 2232 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2233 #ifdef INVARIANTS 2234 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2235 t_type, inp, stcb, net); 2236 #else 2237 return; 2238 #endif 2239 } 2240 tmr = &stcb->asoc.dack_timer; 2241 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2242 break; 2243 case SCTP_TIMER_TYPE_SHUTDOWN: 2244 /* Here we use the RTO of the destination. */ 2245 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2246 #ifdef INVARIANTS 2247 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2248 t_type, inp, stcb, net); 2249 #else 2250 return; 2251 #endif 2252 } 2253 tmr = &net->rxt_timer; 2254 if (net->RTO == 0) { 2255 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2256 } else { 2257 to_ticks = sctp_msecs_to_ticks(net->RTO); 2258 } 2259 break; 2260 case SCTP_TIMER_TYPE_HEARTBEAT: 2261 /* 2262 * The net is used here so that we can add in the RTO. Even 2263 * though we use a different timer. We also add the HB timer 2264 * PLUS a random jitter. 2265 */ 2266 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2267 #ifdef INVARIANTS 2268 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2269 t_type, inp, stcb, net); 2270 #else 2271 return; 2272 #endif 2273 } 2274 if ((net->dest_state & SCTP_ADDR_NOHB) && 2275 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 2276 SCTPDBG(SCTP_DEBUG_TIMER2, 2277 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2278 t_type, inp, stcb, net); 2279 return; 2280 } 2281 tmr = &net->hb_timer; 2282 if (net->RTO == 0) { 2283 to_ticks = stcb->asoc.initial_rto; 2284 } else { 2285 to_ticks = net->RTO; 2286 } 2287 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2288 jitter = rndval % to_ticks; 2289 if (jitter >= (to_ticks >> 1)) { 2290 to_ticks = to_ticks + (jitter - (to_ticks >> 1)); 2291 } else { 2292 to_ticks = to_ticks - jitter; 2293 } 2294 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2295 !(net->dest_state & SCTP_ADDR_PF)) { 2296 to_ticks += net->heart_beat_delay; 2297 } 2298 /* 2299 * Now we must convert the to_ticks that are now in ms to 2300 * ticks. 2301 */ 2302 to_ticks = sctp_msecs_to_ticks(to_ticks); 2303 break; 2304 case SCTP_TIMER_TYPE_COOKIE: 2305 /* 2306 * Here we can use the RTO timer from the network since one 2307 * RTT was complete. If a retransmission happened then we 2308 * will be using the RTO initial value. 2309 */ 2310 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2311 #ifdef INVARIANTS 2312 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2313 t_type, inp, stcb, net); 2314 #else 2315 return; 2316 #endif 2317 } 2318 tmr = &net->rxt_timer; 2319 if (net->RTO == 0) { 2320 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2321 } else { 2322 to_ticks = sctp_msecs_to_ticks(net->RTO); 2323 } 2324 break; 2325 case SCTP_TIMER_TYPE_NEWCOOKIE: 2326 /* 2327 * Nothing needed but the endpoint here ususually about 60 2328 * minutes. 2329 */ 2330 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2331 #ifdef INVARIANTS 2332 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2333 t_type, inp, stcb, net); 2334 #else 2335 return; 2336 #endif 2337 } 2338 tmr = &inp->sctp_ep.signature_change; 2339 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2340 break; 2341 case SCTP_TIMER_TYPE_PATHMTURAISE: 2342 /* 2343 * Here we use the value found in the EP for PMTUD, 2344 * ususually about 10 minutes. 
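 * Purely as an illustration: a 10 minute interval corresponds to about
 * 600 * hz ticks (600000 ticks if hz = 1000, which is an assumption
 * rather than anything this file dictates), since sctp_timeoutticks[]
 * already stores tick counts rather than milliseconds.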
2345 */ 2346 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2347 #ifdef INVARIANTS 2348 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2349 t_type, inp, stcb, net); 2350 #else 2351 return; 2352 #endif 2353 } 2354 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2355 SCTPDBG(SCTP_DEBUG_TIMER2, 2356 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2357 t_type, inp, stcb, net); 2358 return; 2359 } 2360 tmr = &net->pmtu_timer; 2361 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2362 break; 2363 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2364 /* Here we use the RTO of the destination. */ 2365 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2366 #ifdef INVARIANTS 2367 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2368 t_type, inp, stcb, net); 2369 #else 2370 return; 2371 #endif 2372 } 2373 tmr = &net->rxt_timer; 2374 if (net->RTO == 0) { 2375 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2376 } else { 2377 to_ticks = sctp_msecs_to_ticks(net->RTO); 2378 } 2379 break; 2380 case SCTP_TIMER_TYPE_ASCONF: 2381 /* 2382 * Here the timer comes from the stcb but its value is from 2383 * the net's RTO. 2384 */ 2385 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2386 #ifdef INVARIANTS 2387 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2388 t_type, inp, stcb, net); 2389 #else 2390 return; 2391 #endif 2392 } 2393 tmr = &stcb->asoc.asconf_timer; 2394 if (net->RTO == 0) { 2395 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2396 } else { 2397 to_ticks = sctp_msecs_to_ticks(net->RTO); 2398 } 2399 break; 2400 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2401 /* 2402 * Here we use the endpoints shutdown guard timer usually 2403 * about 3 minutes. 2404 */ 2405 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2406 #ifdef INVARIANTS 2407 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2408 t_type, inp, stcb, net); 2409 #else 2410 return; 2411 #endif 2412 } 2413 tmr = &stcb->asoc.shut_guard_timer; 2414 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2415 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2416 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2417 } else { 2418 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2419 } 2420 } else { 2421 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2422 } 2423 break; 2424 case SCTP_TIMER_TYPE_AUTOCLOSE: 2425 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2426 #ifdef INVARIANTS 2427 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2428 t_type, inp, stcb, net); 2429 #else 2430 return; 2431 #endif 2432 } 2433 tmr = &stcb->asoc.autoclose_timer; 2434 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2435 break; 2436 case SCTP_TIMER_TYPE_STRRESET: 2437 /* 2438 * Here the timer comes from the stcb but its value is from 2439 * the net's RTO. 2440 */ 2441 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2442 #ifdef INVARIANTS 2443 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2444 t_type, inp, stcb, net); 2445 #else 2446 return; 2447 #endif 2448 } 2449 tmr = &stcb->asoc.strreset_timer; 2450 if (net->RTO == 0) { 2451 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2452 } else { 2453 to_ticks = sctp_msecs_to_ticks(net->RTO); 2454 } 2455 break; 2456 case SCTP_TIMER_TYPE_INPKILL: 2457 /* 2458 * The inp is setup to die. We re-use the signature_chage 2459 * timer since that has stopped and we are in the GONE 2460 * state. 
2461 */ 2462 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2463 #ifdef INVARIANTS 2464 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2465 t_type, inp, stcb, net); 2466 #else 2467 return; 2468 #endif 2469 } 2470 tmr = &inp->sctp_ep.signature_change; 2471 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2472 break; 2473 case SCTP_TIMER_TYPE_ASOCKILL: 2474 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2475 #ifdef INVARIANTS 2476 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2477 t_type, inp, stcb, net); 2478 #else 2479 return; 2480 #endif 2481 } 2482 tmr = &stcb->asoc.strreset_timer; 2483 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2484 break; 2485 case SCTP_TIMER_TYPE_ADDR_WQ: 2486 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2487 #ifdef INVARIANTS 2488 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2489 t_type, inp, stcb, net); 2490 #else 2491 return; 2492 #endif 2493 } 2494 /* Only 1 tick away :-) */ 2495 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2496 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2497 break; 2498 case SCTP_TIMER_TYPE_PRIM_DELETED: 2499 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2500 #ifdef INVARIANTS 2501 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2502 t_type, inp, stcb, net); 2503 #else 2504 return; 2505 #endif 2506 } 2507 tmr = &stcb->asoc.delete_prim_timer; 2508 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2509 break; 2510 default: 2511 #ifdef INVARIANTS 2512 panic("Unknown timer type %d", t_type); 2513 #else 2514 return; 2515 #endif 2516 } 2517 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2518 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2519 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2520 /* 2521 * We do NOT allow you to have it already running. If it is, 2522 * we leave the current one up unchanged. 2523 */ 2524 SCTPDBG(SCTP_DEBUG_TIMER2, 2525 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2526 t_type, inp, stcb, net); 2527 return; 2528 } 2529 /* At this point we can proceed. */ 2530 if (t_type == SCTP_TIMER_TYPE_SEND) { 2531 stcb->asoc.num_send_timers_up++; 2532 } 2533 tmr->stopped_from = 0; 2534 tmr->type = t_type; 2535 tmr->ep = (void *)inp; 2536 tmr->tcb = (void *)stcb; 2537 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2538 tmr->net = NULL; 2539 } else { 2540 tmr->net = (void *)net; 2541 } 2542 tmr->self = (void *)tmr; 2543 tmr->vnet = (void *)curvnet; 2544 tmr->ticks = sctp_get_tick_count(); 2545 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2546 SCTPDBG(SCTP_DEBUG_TIMER2, 2547 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2548 t_type, to_ticks, inp, stcb, net); 2549 /* 2550 * If this is a newly scheduled callout, as opposed to a 2551 * rescheduled one, increment relevant reference counts. 2552 */ 2553 if (tmr->ep != NULL) { 2554 SCTP_INP_INCR_REF(inp); 2555 } 2556 if (tmr->tcb != NULL) { 2557 atomic_add_int(&stcb->asoc.refcnt, 1); 2558 } 2559 if (tmr->net != NULL) { 2560 atomic_add_int(&net->ref_count, 1); 2561 } 2562 } else { 2563 /* 2564 * This should not happen, since we checked for pending 2565 * above. 2566 */ 2567 SCTPDBG(SCTP_DEBUG_TIMER2, 2568 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2569 t_type, to_ticks, inp, stcb, net); 2570 } 2571 return; 2572 } 2573 2574 /*- 2575 * The following table shows which parameters must be provided 2576 * when calling sctp_timer_stop(). 
For parameters not being 2577 * provided, NULL must be used. 2578 * 2579 * |Name |inp |stcb|net | 2580 * |-----------------------------|----|----|----| 2581 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2582 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2583 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2584 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2585 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2586 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2587 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2588 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2589 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2590 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2591 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2592 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2593 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2594 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2595 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2596 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2597 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2598 * 2599 */ 2600 2601 void 2602 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2603 struct sctp_nets *net, uint32_t from) 2604 { 2605 struct sctp_timer *tmr; 2606 2607 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2608 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2609 t_type, stcb, stcb->sctp_ep)); 2610 if (stcb != NULL) { 2611 SCTP_TCB_LOCK_ASSERT(stcb); 2612 } else if (inp != NULL) { 2613 SCTP_INP_WLOCK_ASSERT(inp); 2614 } else { 2615 SCTP_WQ_ADDR_LOCK_ASSERT(); 2616 } 2617 tmr = NULL; 2618 switch (t_type) { 2619 case SCTP_TIMER_TYPE_SEND: 2620 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2621 #ifdef INVARIANTS 2622 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2623 t_type, inp, stcb, net); 2624 #else 2625 return; 2626 #endif 2627 } 2628 tmr = &net->rxt_timer; 2629 break; 2630 case SCTP_TIMER_TYPE_INIT: 2631 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2632 #ifdef INVARIANTS 2633 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2634 t_type, inp, stcb, net); 2635 #else 2636 return; 2637 #endif 2638 } 2639 tmr = &net->rxt_timer; 2640 break; 2641 case SCTP_TIMER_TYPE_RECV: 2642 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2643 #ifdef INVARIANTS 2644 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2645 t_type, inp, stcb, net); 2646 #else 2647 return; 2648 #endif 2649 } 2650 tmr = &stcb->asoc.dack_timer; 2651 break; 2652 case SCTP_TIMER_TYPE_SHUTDOWN: 2653 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2654 #ifdef INVARIANTS 2655 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2656 t_type, inp, stcb, net); 2657 #else 2658 return; 2659 #endif 2660 } 2661 tmr = &net->rxt_timer; 2662 break; 2663 case SCTP_TIMER_TYPE_HEARTBEAT: 2664 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2665 #ifdef INVARIANTS 2666 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2667 t_type, inp, stcb, net); 2668 #else 2669 return; 2670 #endif 2671 } 2672 tmr = &net->hb_timer; 2673 break; 2674 case SCTP_TIMER_TYPE_COOKIE: 2675 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2676 #ifdef INVARIANTS 2677 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2678 t_type, inp, stcb, net); 2679 #else 2680 return; 2681 #endif 2682 } 2683 tmr = &net->rxt_timer; 2684 break; 2685 case SCTP_TIMER_TYPE_NEWCOOKIE: 2686 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2687 #ifdef INVARIANTS 2688 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, 
net = %p", 2689 t_type, inp, stcb, net); 2690 #else 2691 return; 2692 #endif 2693 } 2694 tmr = &inp->sctp_ep.signature_change; 2695 break; 2696 case SCTP_TIMER_TYPE_PATHMTURAISE: 2697 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2698 #ifdef INVARIANTS 2699 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2700 t_type, inp, stcb, net); 2701 #else 2702 return; 2703 #endif 2704 } 2705 tmr = &net->pmtu_timer; 2706 break; 2707 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2708 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2709 #ifdef INVARIANTS 2710 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2711 t_type, inp, stcb, net); 2712 #else 2713 return; 2714 #endif 2715 } 2716 tmr = &net->rxt_timer; 2717 break; 2718 case SCTP_TIMER_TYPE_ASCONF: 2719 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2720 #ifdef INVARIANTS 2721 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2722 t_type, inp, stcb, net); 2723 #else 2724 return; 2725 #endif 2726 } 2727 tmr = &stcb->asoc.asconf_timer; 2728 break; 2729 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2730 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2731 #ifdef INVARIANTS 2732 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2733 t_type, inp, stcb, net); 2734 #else 2735 return; 2736 #endif 2737 } 2738 tmr = &stcb->asoc.shut_guard_timer; 2739 break; 2740 case SCTP_TIMER_TYPE_AUTOCLOSE: 2741 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2742 #ifdef INVARIANTS 2743 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2744 t_type, inp, stcb, net); 2745 #else 2746 return; 2747 #endif 2748 } 2749 tmr = &stcb->asoc.autoclose_timer; 2750 break; 2751 case SCTP_TIMER_TYPE_STRRESET: 2752 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2753 #ifdef INVARIANTS 2754 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2755 t_type, inp, stcb, net); 2756 #else 2757 return; 2758 #endif 2759 } 2760 tmr = &stcb->asoc.strreset_timer; 2761 break; 2762 case SCTP_TIMER_TYPE_INPKILL: 2763 /* 2764 * The inp is setup to die. We re-use the signature_chage 2765 * timer since that has stopped and we are in the GONE 2766 * state. 
2767 */ 2768 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2769 #ifdef INVARIANTS 2770 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2771 t_type, inp, stcb, net); 2772 #else 2773 return; 2774 #endif 2775 } 2776 tmr = &inp->sctp_ep.signature_change; 2777 break; 2778 case SCTP_TIMER_TYPE_ASOCKILL: 2779 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2780 #ifdef INVARIANTS 2781 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2782 t_type, inp, stcb, net); 2783 #else 2784 return; 2785 #endif 2786 } 2787 tmr = &stcb->asoc.strreset_timer; 2788 break; 2789 case SCTP_TIMER_TYPE_ADDR_WQ: 2790 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2791 #ifdef INVARIANTS 2792 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2793 t_type, inp, stcb, net); 2794 #else 2795 return; 2796 #endif 2797 } 2798 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2799 break; 2800 case SCTP_TIMER_TYPE_PRIM_DELETED: 2801 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2802 #ifdef INVARIANTS 2803 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2804 t_type, inp, stcb, net); 2805 #else 2806 return; 2807 #endif 2808 } 2809 tmr = &stcb->asoc.delete_prim_timer; 2810 break; 2811 default: 2812 #ifdef INVARIANTS 2813 panic("Unknown timer type %d", t_type); 2814 #else 2815 return; 2816 #endif 2817 } 2818 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2819 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2820 (tmr->type != t_type)) { 2821 /* 2822 * Ok we have a timer that is under joint use. Cookie timer 2823 * per chance with the SEND timer. We therefore are NOT 2824 * running the timer that the caller wants stopped. So just 2825 * return. 2826 */ 2827 SCTPDBG(SCTP_DEBUG_TIMER2, 2828 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2829 t_type, inp, stcb, net); 2830 return; 2831 } 2832 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2833 stcb->asoc.num_send_timers_up--; 2834 if (stcb->asoc.num_send_timers_up < 0) { 2835 stcb->asoc.num_send_timers_up = 0; 2836 } 2837 } 2838 tmr->self = NULL; 2839 tmr->stopped_from = from; 2840 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2841 KASSERT(tmr->ep == inp, 2842 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2843 t_type, inp, tmr->ep)); 2844 KASSERT(tmr->tcb == stcb, 2845 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2846 t_type, stcb, tmr->tcb)); 2847 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2848 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2849 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2850 t_type, net, tmr->net)); 2851 SCTPDBG(SCTP_DEBUG_TIMER2, 2852 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2853 t_type, inp, stcb, net); 2854 /* 2855 * If the timer was actually stopped, decrement reference 2856 * counts that were incremented in sctp_timer_start(). 2857 */ 2858 if (tmr->ep != NULL) { 2859 SCTP_INP_DECR_REF(inp); 2860 tmr->ep = NULL; 2861 } 2862 if (tmr->tcb != NULL) { 2863 atomic_add_int(&stcb->asoc.refcnt, -1); 2864 tmr->tcb = NULL; 2865 } 2866 if (tmr->net != NULL) { 2867 /* 2868 * Can't use net, since it doesn't work for 2869 * SCTP_TIMER_TYPE_ASCONF. 
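 * Per the start/stop tables above, sctp_timer_start() is given a net
 * for ASCONF while sctp_timer_stop() is called with net == NULL, so
 * the reference that must be released is the one recorded in
 * tmr->net, not the caller's argument.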
2870 */ 2871 sctp_free_remote_addr((struct sctp_nets *)tmr->net); 2872 tmr->net = NULL; 2873 } 2874 } else { 2875 SCTPDBG(SCTP_DEBUG_TIMER2, 2876 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2877 t_type, inp, stcb, net); 2878 } 2879 return; 2880 } 2881 2882 uint32_t 2883 sctp_calculate_len(struct mbuf *m) 2884 { 2885 uint32_t tlen = 0; 2886 struct mbuf *at; 2887 2888 at = m; 2889 while (at) { 2890 tlen += SCTP_BUF_LEN(at); 2891 at = SCTP_BUF_NEXT(at); 2892 } 2893 return (tlen); 2894 } 2895 2896 void 2897 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2898 struct sctp_association *asoc, uint32_t mtu) 2899 { 2900 /* 2901 * Reset the P-MTU size on this association, this involves changing 2902 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2903 * allow the DF flag to be cleared. 2904 */ 2905 struct sctp_tmit_chunk *chk; 2906 unsigned int eff_mtu, ovh; 2907 2908 asoc->smallest_mtu = mtu; 2909 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2910 ovh = SCTP_MIN_OVERHEAD; 2911 } else { 2912 ovh = SCTP_MIN_V4_OVERHEAD; 2913 } 2914 eff_mtu = mtu - ovh; 2915 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2916 if (chk->send_size > eff_mtu) { 2917 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2918 } 2919 } 2920 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2921 if (chk->send_size > eff_mtu) { 2922 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2923 } 2924 } 2925 } 2926 2927 2928 /* 2929 * Given an association and starting time of the current RTT period, update 2930 * RTO in number of msecs. net should point to the current network. 2931 * Return 1, if an RTO update was performed, return 0 if no update was 2932 * performed due to invalid starting point. 2933 */ 2934 2935 int 2936 sctp_calculate_rto(struct sctp_tcb *stcb, 2937 struct sctp_association *asoc, 2938 struct sctp_nets *net, 2939 struct timeval *old, 2940 int rtt_from_sack) 2941 { 2942 struct timeval now; 2943 uint64_t rtt_us; /* RTT in us */ 2944 int32_t rtt; /* RTT in ms */ 2945 uint32_t new_rto; 2946 int first_measure = 0; 2947 2948 /************************/ 2949 /* 1. calculate new RTT */ 2950 /************************/ 2951 /* get the current time */ 2952 if (stcb->asoc.use_precise_time) { 2953 (void)SCTP_GETPTIME_TIMEVAL(&now); 2954 } else { 2955 (void)SCTP_GETTIME_TIMEVAL(&now); 2956 } 2957 if ((old->tv_sec > now.tv_sec) || 2958 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2959 /* The starting point is in the future. */ 2960 return (0); 2961 } 2962 timevalsub(&now, old); 2963 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2964 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2965 /* The RTT is larger than a sane value. */ 2966 return (0); 2967 } 2968 /* store the current RTT in us */ 2969 net->rtt = rtt_us; 2970 /* compute rtt in ms */ 2971 rtt = (int32_t)(net->rtt / 1000); 2972 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2973 /* 2974 * Tell the CC module that a new update has just occurred 2975 * from a sack 2976 */ 2977 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2978 } 2979 /* 2980 * Do we need to determine the lan? We do this only on sacks i.e. 2981 * RTT being determined from data not non-data (HB/INIT->INITACK). 
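 * Since net->rtt was stored in microseconds just above,
 * SCTP_LOCAL_LAN_RTT acts as a microsecond threshold: measured RTTs
 * above it classify the path as SCTP_LAN_INTERNET, anything at or
 * below it as SCTP_LAN_LOCAL.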
2982 */ 2983 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2984 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2985 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2986 net->lan_type = SCTP_LAN_INTERNET; 2987 } else { 2988 net->lan_type = SCTP_LAN_LOCAL; 2989 } 2990 } 2991 2992 /***************************/ 2993 /* 2. update RTTVAR & SRTT */ 2994 /***************************/ 2995 /*- 2996 * Compute the scaled average lastsa and the 2997 * scaled variance lastsv as described in Van Jacobson's 2998 * paper "Congestion Avoidance and Control", Annex A. 2999 * 3000 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 3001 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 3002 */ 3003 if (net->RTO_measured) { 3004 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 3005 net->lastsa += rtt; 3006 if (rtt < 0) { 3007 rtt = -rtt; 3008 } 3009 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 3010 net->lastsv += rtt; 3011 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3012 rto_logging(net, SCTP_LOG_RTTVAR); 3013 } 3014 } else { 3015 /* First RTO measurement */ 3016 net->RTO_measured = 1; 3017 first_measure = 1; 3018 net->lastsa = rtt << SCTP_RTT_SHIFT; 3019 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3020 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3021 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3022 } 3023 } 3024 if (net->lastsv == 0) { 3025 net->lastsv = SCTP_CLOCK_GRANULARITY; 3026 } 3027 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3028 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3029 (stcb->asoc.sat_network_lockout == 0)) { 3030 stcb->asoc.sat_network = 1; 3031 } else if ((!first_measure) && stcb->asoc.sat_network) { 3032 stcb->asoc.sat_network = 0; 3033 stcb->asoc.sat_network_lockout = 1; 3034 } 3035 /* bound it, per C6/C7 in Section 5.3.1 */ 3036 if (new_rto < stcb->asoc.minrto) { 3037 new_rto = stcb->asoc.minrto; 3038 } 3039 if (new_rto > stcb->asoc.maxrto) { 3040 new_rto = stcb->asoc.maxrto; 3041 } 3042 net->RTO = new_rto; 3043 return (1); 3044 } 3045 3046 /* 3047 * Return a pointer to a contiguous piece of data from the given mbuf chain, 3048 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3049 * one mbuf, a copy is made at 'in_ptr'. The caller must ensure that the buffer 3050 * is at least 'len' bytes. Returns NULL if there aren't 'len' bytes in the chain. 3051 */ 3052 caddr_t 3053 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3054 { 3055 uint32_t count; 3056 uint8_t *ptr; 3057 3058 ptr = in_ptr; 3059 if ((off < 0) || (len <= 0)) 3060 return (NULL); 3061 3062 /* find the desired start location */ 3063 while ((m != NULL) && (off > 0)) { 3064 if (off < SCTP_BUF_LEN(m)) 3065 break; 3066 off -= SCTP_BUF_LEN(m); 3067 m = SCTP_BUF_NEXT(m); 3068 } 3069 if (m == NULL) 3070 return (NULL); 3071 3072 /* is the current mbuf large enough (i.e., contiguous)? */ 3073 if ((SCTP_BUF_LEN(m) - off) >= len) { 3074 return (mtod(m, caddr_t)+off); 3075 } else { 3076 /* else, it spans more than one mbuf, so save a temp copy...
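 * Hypothetical example (values chosen only for illustration): with
 * off = 10, len = 8 and a first mbuf holding 12 bytes, the loop below
 * copies the last 2 bytes of that mbuf and, assuming the next mbuf
 * holds at least 6 bytes, the first 6 bytes of it into the caller's
 * buffer, then returns in_ptr rather than a pointer into the chain.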
*/ 3077 while ((m != NULL) && (len > 0)) { 3078 count = min(SCTP_BUF_LEN(m) - off, len); 3079 memcpy(ptr, mtod(m, caddr_t)+off, count); 3080 len -= count; 3081 ptr += count; 3082 off = 0; 3083 m = SCTP_BUF_NEXT(m); 3084 } 3085 if ((m == NULL) && (len > 0)) 3086 return (NULL); 3087 else 3088 return ((caddr_t)in_ptr); 3089 } 3090 } 3091 3092 3093 3094 struct sctp_paramhdr * 3095 sctp_get_next_param(struct mbuf *m, 3096 int offset, 3097 struct sctp_paramhdr *pull, 3098 int pull_limit) 3099 { 3100 /* This just provides a typed signature to Peter's Pull routine */ 3101 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3102 (uint8_t *)pull)); 3103 } 3104 3105 3106 struct mbuf * 3107 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3108 { 3109 struct mbuf *m_last; 3110 caddr_t dp; 3111 3112 if (padlen > 3) { 3113 return (NULL); 3114 } 3115 if (padlen <= M_TRAILINGSPACE(m)) { 3116 /* 3117 * The easy way. We hope the majority of the time we hit 3118 * here :) 3119 */ 3120 m_last = m; 3121 } else { 3122 /* Hard way we must grow the mbuf chain */ 3123 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3124 if (m_last == NULL) { 3125 return (NULL); 3126 } 3127 SCTP_BUF_LEN(m_last) = 0; 3128 SCTP_BUF_NEXT(m_last) = NULL; 3129 SCTP_BUF_NEXT(m) = m_last; 3130 } 3131 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3132 SCTP_BUF_LEN(m_last) += padlen; 3133 memset(dp, 0, padlen); 3134 return (m_last); 3135 } 3136 3137 struct mbuf * 3138 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3139 { 3140 /* find the last mbuf in chain and pad it */ 3141 struct mbuf *m_at; 3142 3143 if (last_mbuf != NULL) { 3144 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3145 } else { 3146 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3147 if (SCTP_BUF_NEXT(m_at) == NULL) { 3148 return (sctp_add_pad_tombuf(m_at, padval)); 3149 } 3150 } 3151 } 3152 return (NULL); 3153 } 3154 3155 static void 3156 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3157 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked) 3158 { 3159 struct mbuf *m_notify; 3160 struct sctp_assoc_change *sac; 3161 struct sctp_queued_to_read *control; 3162 unsigned int notif_len; 3163 uint16_t abort_len; 3164 unsigned int i; 3165 3166 if (stcb == NULL) { 3167 return; 3168 } 3169 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3170 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3171 if (abort != NULL) { 3172 abort_len = ntohs(abort->ch.chunk_length); 3173 /* 3174 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3175 * contiguous. 3176 */ 3177 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3178 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3179 } 3180 } else { 3181 abort_len = 0; 3182 } 3183 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3184 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3185 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3186 notif_len += abort_len; 3187 } 3188 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3189 if (m_notify == NULL) { 3190 /* Retry with smaller value. 
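 * That is, fall back to an mbuf that only holds the fixed
 * sctp_assoc_change header and skip the optional feature list or
 * ABORT payload that did not fit.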
*/ 3191 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3192 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3193 if (m_notify == NULL) { 3194 goto set_error; 3195 } 3196 } 3197 SCTP_BUF_NEXT(m_notify) = NULL; 3198 sac = mtod(m_notify, struct sctp_assoc_change *); 3199 memset(sac, 0, notif_len); 3200 sac->sac_type = SCTP_ASSOC_CHANGE; 3201 sac->sac_flags = 0; 3202 sac->sac_length = sizeof(struct sctp_assoc_change); 3203 sac->sac_state = state; 3204 sac->sac_error = error; 3205 /* XXX verify these stream counts */ 3206 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3207 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3208 sac->sac_assoc_id = sctp_get_associd(stcb); 3209 if (notif_len > sizeof(struct sctp_assoc_change)) { 3210 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3211 i = 0; 3212 if (stcb->asoc.prsctp_supported == 1) { 3213 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3214 } 3215 if (stcb->asoc.auth_supported == 1) { 3216 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3217 } 3218 if (stcb->asoc.asconf_supported == 1) { 3219 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3220 } 3221 if (stcb->asoc.idata_supported == 1) { 3222 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3223 } 3224 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3225 if (stcb->asoc.reconfig_supported == 1) { 3226 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3227 } 3228 sac->sac_length += i; 3229 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3230 memcpy(sac->sac_info, abort, abort_len); 3231 sac->sac_length += abort_len; 3232 } 3233 } 3234 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3235 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3236 0, 0, stcb->asoc.context, 0, 0, 0, 3237 m_notify); 3238 if (control != NULL) { 3239 control->length = SCTP_BUF_LEN(m_notify); 3240 control->spec_flags = M_NOTIFICATION; 3241 /* not that we need this */ 3242 control->tail_mbuf = m_notify; 3243 sctp_add_to_readq(stcb->sctp_ep, stcb, 3244 control, 3245 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3246 so_locked); 3247 } else { 3248 sctp_m_freem(m_notify); 3249 } 3250 } 3251 /* 3252 * For 1-to-1 style sockets, we send up and error when an ABORT 3253 * comes in. 
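 * The so_error values set below follow the usual TCP-like mapping: a
 * peer abort while still in COOKIE_WAIT becomes ECONNREFUSED, a peer
 * abort in any later state becomes ECONNRESET, a local teardown in
 * COOKIE_WAIT/COOKIE_ECHOED becomes ETIMEDOUT, and any other local
 * teardown becomes ECONNABORTED.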
3254 */ 3255 set_error: 3256 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3257 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3258 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3259 SOCK_LOCK(stcb->sctp_socket); 3260 if (from_peer) { 3261 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3262 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3263 stcb->sctp_socket->so_error = ECONNREFUSED; 3264 } else { 3265 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3266 stcb->sctp_socket->so_error = ECONNRESET; 3267 } 3268 } else { 3269 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 3270 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 3271 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3272 stcb->sctp_socket->so_error = ETIMEDOUT; 3273 } else { 3274 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3275 stcb->sctp_socket->so_error = ECONNABORTED; 3276 } 3277 } 3278 SOCK_UNLOCK(stcb->sctp_socket); 3279 } 3280 /* Wake ANY sleepers */ 3281 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3282 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3283 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3284 socantrcvmore(stcb->sctp_socket); 3285 } 3286 sorwakeup(stcb->sctp_socket); 3287 sowwakeup(stcb->sctp_socket); 3288 } 3289 3290 static void 3291 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3292 struct sockaddr *sa, uint32_t error, int so_locked) 3293 { 3294 struct mbuf *m_notify; 3295 struct sctp_paddr_change *spc; 3296 struct sctp_queued_to_read *control; 3297 3298 if ((stcb == NULL) || 3299 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3300 /* event not enabled */ 3301 return; 3302 } 3303 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3304 if (m_notify == NULL) 3305 return; 3306 SCTP_BUF_LEN(m_notify) = 0; 3307 spc = mtod(m_notify, struct sctp_paddr_change *); 3308 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3309 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3310 spc->spc_flags = 0; 3311 spc->spc_length = sizeof(struct sctp_paddr_change); 3312 switch (sa->sa_family) { 3313 #ifdef INET 3314 case AF_INET: 3315 #ifdef INET6 3316 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3317 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3318 (struct sockaddr_in6 *)&spc->spc_aaddr); 3319 } else { 3320 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3321 } 3322 #else 3323 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3324 #endif 3325 break; 3326 #endif 3327 #ifdef INET6 3328 case AF_INET6: 3329 { 3330 struct sockaddr_in6 *sin6; 3331 3332 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3333 3334 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3335 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3336 if (sin6->sin6_scope_id == 0) { 3337 /* recover scope_id for user */ 3338 (void)sa6_recoverscope(sin6); 3339 } else { 3340 /* clear embedded scope_id for user */ 3341 in6_clearscope(&sin6->sin6_addr); 3342 } 3343 } 3344 break; 3345 } 3346 #endif 3347 default: 3348 /* TSNH */ 3349 break; 3350 } 3351 spc->spc_state = state; 3352 spc->spc_error = error; 3353 spc->spc_assoc_id = sctp_get_associd(stcb); 3354 3355 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3356 SCTP_BUF_NEXT(m_notify) = NULL; 3357 3358 /* append to socket */ 3359 control = sctp_build_readq_entry(stcb, 
stcb->asoc.primary_destination, 3360 0, 0, stcb->asoc.context, 0, 0, 0, 3361 m_notify); 3362 if (control == NULL) { 3363 /* no memory */ 3364 sctp_m_freem(m_notify); 3365 return; 3366 } 3367 control->length = SCTP_BUF_LEN(m_notify); 3368 control->spec_flags = M_NOTIFICATION; 3369 /* not that we need this */ 3370 control->tail_mbuf = m_notify; 3371 sctp_add_to_readq(stcb->sctp_ep, stcb, 3372 control, 3373 &stcb->sctp_socket->so_rcv, 1, 3374 SCTP_READ_LOCK_NOT_HELD, 3375 so_locked); 3376 } 3377 3378 3379 static void 3380 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3381 struct sctp_tmit_chunk *chk, int so_locked) 3382 { 3383 struct mbuf *m_notify; 3384 struct sctp_send_failed *ssf; 3385 struct sctp_send_failed_event *ssfe; 3386 struct sctp_queued_to_read *control; 3387 struct sctp_chunkhdr *chkhdr; 3388 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3389 3390 if ((stcb == NULL) || 3391 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3392 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3393 /* event not enabled */ 3394 return; 3395 } 3396 3397 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3398 notifhdr_len = sizeof(struct sctp_send_failed_event); 3399 } else { 3400 notifhdr_len = sizeof(struct sctp_send_failed); 3401 } 3402 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3403 if (m_notify == NULL) 3404 /* no space left */ 3405 return; 3406 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3407 if (stcb->asoc.idata_supported) { 3408 chkhdr_len = sizeof(struct sctp_idata_chunk); 3409 } else { 3410 chkhdr_len = sizeof(struct sctp_data_chunk); 3411 } 3412 /* Use some defaults in case we can't access the chunk header */ 3413 if (chk->send_size >= chkhdr_len) { 3414 payload_len = chk->send_size - chkhdr_len; 3415 } else { 3416 payload_len = 0; 3417 } 3418 padding_len = 0; 3419 if (chk->data != NULL) { 3420 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3421 if (chkhdr != NULL) { 3422 chk_len = ntohs(chkhdr->chunk_length); 3423 if ((chk_len >= chkhdr_len) && 3424 (chk->send_size >= chk_len) && 3425 (chk->send_size - chk_len < 4)) { 3426 padding_len = chk->send_size - chk_len; 3427 payload_len = chk->send_size - chkhdr_len - padding_len; 3428 } 3429 } 3430 } 3431 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3432 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3433 memset(ssfe, 0, notifhdr_len); 3434 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3435 if (sent) { 3436 ssfe->ssfe_flags = SCTP_DATA_SENT; 3437 } else { 3438 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3439 } 3440 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3441 ssfe->ssfe_error = error; 3442 /* not exactly what the user sent in, but should be close :) */ 3443 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3444 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3445 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3446 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3447 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3448 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3449 } else { 3450 ssf = mtod(m_notify, struct sctp_send_failed *); 3451 memset(ssf, 0, notifhdr_len); 3452 ssf->ssf_type = SCTP_SEND_FAILED; 3453 if (sent) { 3454 ssf->ssf_flags = SCTP_DATA_SENT; 3455 } else { 3456 ssf->ssf_flags = SCTP_DATA_UNSENT; 3457 } 3458 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3459 ssf->ssf_error 
= error; 3460 /* not exactly what the user sent in, but should be close :) */ 3461 ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3462 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3463 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3464 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3465 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3466 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3467 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3468 } 3469 if (chk->data != NULL) { 3470 /* Trim off the sctp chunk header (it should be there) */ 3471 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3472 m_adj(chk->data, chkhdr_len); 3473 m_adj(chk->data, -padding_len); 3474 sctp_mbuf_crush(chk->data); 3475 chk->send_size -= (chkhdr_len + padding_len); 3476 } 3477 } 3478 SCTP_BUF_NEXT(m_notify) = chk->data; 3479 /* Steal off the mbuf */ 3480 chk->data = NULL; 3481 /* 3482 * For this case, we check the actual socket buffer, since the assoc 3483 * is going away we don't want to overfill the socket buffer for a 3484 * non-reader 3485 */ 3486 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3487 sctp_m_freem(m_notify); 3488 return; 3489 } 3490 /* append to socket */ 3491 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3492 0, 0, stcb->asoc.context, 0, 0, 0, 3493 m_notify); 3494 if (control == NULL) { 3495 /* no memory */ 3496 sctp_m_freem(m_notify); 3497 return; 3498 } 3499 control->length = SCTP_BUF_LEN(m_notify); 3500 control->spec_flags = M_NOTIFICATION; 3501 /* not that we need this */ 3502 control->tail_mbuf = m_notify; 3503 sctp_add_to_readq(stcb->sctp_ep, stcb, 3504 control, 3505 &stcb->sctp_socket->so_rcv, 1, 3506 SCTP_READ_LOCK_NOT_HELD, 3507 so_locked); 3508 } 3509 3510 3511 static void 3512 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3513 struct sctp_stream_queue_pending *sp, int so_locked) 3514 { 3515 struct mbuf *m_notify; 3516 struct sctp_send_failed *ssf; 3517 struct sctp_send_failed_event *ssfe; 3518 struct sctp_queued_to_read *control; 3519 int notifhdr_len; 3520 3521 if ((stcb == NULL) || 3522 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3523 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3524 /* event not enabled */ 3525 return; 3526 } 3527 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3528 notifhdr_len = sizeof(struct sctp_send_failed_event); 3529 } else { 3530 notifhdr_len = sizeof(struct sctp_send_failed); 3531 } 3532 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3533 if (m_notify == NULL) { 3534 /* no space left */ 3535 return; 3536 } 3537 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3538 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3539 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3540 memset(ssfe, 0, notifhdr_len); 3541 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3542 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3543 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3544 ssfe->ssfe_error = error; 3545 /* not exactly what the user sent in, but should be close :) */ 3546 ssfe->ssfe_info.snd_sid = sp->sid; 3547 if (sp->some_taken) { 3548 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3549 } else { 3550 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3551 } 3552 ssfe->ssfe_info.snd_ppid = sp->ppid; 3553 ssfe->ssfe_info.snd_context = sp->context; 3554 ssfe->ssfe_info.snd_assoc_id = 
sctp_get_associd(stcb); 3555 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3556 } else { 3557 ssf = mtod(m_notify, struct sctp_send_failed *); 3558 memset(ssf, 0, notifhdr_len); 3559 ssf->ssf_type = SCTP_SEND_FAILED; 3560 ssf->ssf_flags = SCTP_DATA_UNSENT; 3561 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3562 ssf->ssf_error = error; 3563 /* not exactly what the user sent in, but should be close :) */ 3564 ssf->ssf_info.sinfo_stream = sp->sid; 3565 ssf->ssf_info.sinfo_ssn = 0; 3566 if (sp->some_taken) { 3567 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3568 } else { 3569 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3570 } 3571 ssf->ssf_info.sinfo_ppid = sp->ppid; 3572 ssf->ssf_info.sinfo_context = sp->context; 3573 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3574 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3575 } 3576 SCTP_BUF_NEXT(m_notify) = sp->data; 3577 3578 /* Steal off the mbuf */ 3579 sp->data = NULL; 3580 /* 3581 * For this case, we check the actual socket buffer, since the assoc 3582 * is going away we don't want to overfill the socket buffer for a 3583 * non-reader 3584 */ 3585 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3586 sctp_m_freem(m_notify); 3587 return; 3588 } 3589 /* append to socket */ 3590 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3591 0, 0, stcb->asoc.context, 0, 0, 0, 3592 m_notify); 3593 if (control == NULL) { 3594 /* no memory */ 3595 sctp_m_freem(m_notify); 3596 return; 3597 } 3598 control->length = SCTP_BUF_LEN(m_notify); 3599 control->spec_flags = M_NOTIFICATION; 3600 /* not that we need this */ 3601 control->tail_mbuf = m_notify; 3602 sctp_add_to_readq(stcb->sctp_ep, stcb, 3603 control, 3604 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3605 } 3606 3607 3608 3609 static void 3610 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3611 { 3612 struct mbuf *m_notify; 3613 struct sctp_adaptation_event *sai; 3614 struct sctp_queued_to_read *control; 3615 3616 if ((stcb == NULL) || 3617 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3618 /* event not enabled */ 3619 return; 3620 } 3621 3622 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3623 if (m_notify == NULL) 3624 /* no space left */ 3625 return; 3626 SCTP_BUF_LEN(m_notify) = 0; 3627 sai = mtod(m_notify, struct sctp_adaptation_event *); 3628 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3629 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3630 sai->sai_flags = 0; 3631 sai->sai_length = sizeof(struct sctp_adaptation_event); 3632 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3633 sai->sai_assoc_id = sctp_get_associd(stcb); 3634 3635 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3636 SCTP_BUF_NEXT(m_notify) = NULL; 3637 3638 /* append to socket */ 3639 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3640 0, 0, stcb->asoc.context, 0, 0, 0, 3641 m_notify); 3642 if (control == NULL) { 3643 /* no memory */ 3644 sctp_m_freem(m_notify); 3645 return; 3646 } 3647 control->length = SCTP_BUF_LEN(m_notify); 3648 control->spec_flags = M_NOTIFICATION; 3649 /* not that we need this */ 3650 control->tail_mbuf = m_notify; 3651 sctp_add_to_readq(stcb->sctp_ep, stcb, 3652 control, 3653 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3654 } 3655 3656 /* This always must be called with the read-queue LOCKED in the INP */ 3657 static void 3658 
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3659 uint32_t val, int so_locked) 3660 { 3661 struct mbuf *m_notify; 3662 struct sctp_pdapi_event *pdapi; 3663 struct sctp_queued_to_read *control; 3664 struct sockbuf *sb; 3665 3666 if ((stcb == NULL) || 3667 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3668 /* event not enabled */ 3669 return; 3670 } 3671 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3672 return; 3673 } 3674 3675 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3676 if (m_notify == NULL) 3677 /* no space left */ 3678 return; 3679 SCTP_BUF_LEN(m_notify) = 0; 3680 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3681 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3682 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3683 pdapi->pdapi_flags = 0; 3684 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3685 pdapi->pdapi_indication = error; 3686 pdapi->pdapi_stream = (val >> 16); 3687 pdapi->pdapi_seq = (val & 0x0000ffff); 3688 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3689 3690 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3691 SCTP_BUF_NEXT(m_notify) = NULL; 3692 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3693 0, 0, stcb->asoc.context, 0, 0, 0, 3694 m_notify); 3695 if (control == NULL) { 3696 /* no memory */ 3697 sctp_m_freem(m_notify); 3698 return; 3699 } 3700 control->length = SCTP_BUF_LEN(m_notify); 3701 control->spec_flags = M_NOTIFICATION; 3702 /* not that we need this */ 3703 control->tail_mbuf = m_notify; 3704 sb = &stcb->sctp_socket->so_rcv; 3705 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3706 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3707 } 3708 sctp_sballoc(stcb, sb, m_notify); 3709 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3710 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3711 } 3712 control->end_added = 1; 3713 if (stcb->asoc.control_pdapi) 3714 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3715 else { 3716 /* we really should not see this case */ 3717 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3718 } 3719 if (stcb->sctp_ep && stcb->sctp_socket) { 3720 /* This should always be the case */ 3721 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3722 } 3723 } 3724 3725 static void 3726 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3727 { 3728 struct mbuf *m_notify; 3729 struct sctp_shutdown_event *sse; 3730 struct sctp_queued_to_read *control; 3731 3732 /* 3733 * For TCP model AND UDP connected sockets we will send an error up 3734 * when an SHUTDOWN completes 3735 */ 3736 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3737 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3738 /* mark socket closed for read/write and wakeup! 
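Note: only the send side is actually marked closed here (socantsendmore()); the receive side stays readable so queued data and this notification can still be drained.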
*/ 3739 socantsendmore(stcb->sctp_socket); 3740 } 3741 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3742 /* event not enabled */ 3743 return; 3744 } 3745 3746 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3747 if (m_notify == NULL) 3748 /* no space left */ 3749 return; 3750 sse = mtod(m_notify, struct sctp_shutdown_event *); 3751 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3752 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3753 sse->sse_flags = 0; 3754 sse->sse_length = sizeof(struct sctp_shutdown_event); 3755 sse->sse_assoc_id = sctp_get_associd(stcb); 3756 3757 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3758 SCTP_BUF_NEXT(m_notify) = NULL; 3759 3760 /* append to socket */ 3761 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3762 0, 0, stcb->asoc.context, 0, 0, 0, 3763 m_notify); 3764 if (control == NULL) { 3765 /* no memory */ 3766 sctp_m_freem(m_notify); 3767 return; 3768 } 3769 control->length = SCTP_BUF_LEN(m_notify); 3770 control->spec_flags = M_NOTIFICATION; 3771 /* not that we need this */ 3772 control->tail_mbuf = m_notify; 3773 sctp_add_to_readq(stcb->sctp_ep, stcb, 3774 control, 3775 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3776 } 3777 3778 static void 3779 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3780 int so_locked) 3781 { 3782 struct mbuf *m_notify; 3783 struct sctp_sender_dry_event *event; 3784 struct sctp_queued_to_read *control; 3785 3786 if ((stcb == NULL) || 3787 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3788 /* event not enabled */ 3789 return; 3790 } 3791 3792 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3793 if (m_notify == NULL) { 3794 /* no space left */ 3795 return; 3796 } 3797 SCTP_BUF_LEN(m_notify) = 0; 3798 event = mtod(m_notify, struct sctp_sender_dry_event *); 3799 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3800 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3801 event->sender_dry_flags = 0; 3802 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3803 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3804 3805 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3806 SCTP_BUF_NEXT(m_notify) = NULL; 3807 3808 /* append to socket */ 3809 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3810 0, 0, stcb->asoc.context, 0, 0, 0, 3811 m_notify); 3812 if (control == NULL) { 3813 /* no memory */ 3814 sctp_m_freem(m_notify); 3815 return; 3816 } 3817 control->length = SCTP_BUF_LEN(m_notify); 3818 control->spec_flags = M_NOTIFICATION; 3819 /* not that we need this */ 3820 control->tail_mbuf = m_notify; 3821 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3822 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3823 } 3824 3825 3826 void 3827 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3828 { 3829 struct mbuf *m_notify; 3830 struct sctp_queued_to_read *control; 3831 struct sctp_stream_change_event *stradd; 3832 3833 if ((stcb == NULL) || 3834 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3835 /* event not enabled */ 3836 return; 3837 } 3838 if ((stcb->asoc.peer_req_out) && flag) { 3839 /* Peer made the request, don't tell the local user */ 3840 stcb->asoc.peer_req_out = 0; 3841 return; 3842 } 3843 stcb->asoc.peer_req_out = 0; 3844 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3845 if (m_notify == NULL) 3846 /* no space left */ 3847 return; 3848 SCTP_BUF_LEN(m_notify) = 0; 3849 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3850 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3851 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3852 stradd->strchange_flags = flag; 3853 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3854 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3855 stradd->strchange_instrms = numberin; 3856 stradd->strchange_outstrms = numberout; 3857 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3858 SCTP_BUF_NEXT(m_notify) = NULL; 3859 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3860 /* no space */ 3861 sctp_m_freem(m_notify); 3862 return; 3863 } 3864 /* append to socket */ 3865 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3866 0, 0, stcb->asoc.context, 0, 0, 0, 3867 m_notify); 3868 if (control == NULL) { 3869 /* no memory */ 3870 sctp_m_freem(m_notify); 3871 return; 3872 } 3873 control->length = SCTP_BUF_LEN(m_notify); 3874 control->spec_flags = M_NOTIFICATION; 3875 /* not that we need this */ 3876 control->tail_mbuf = m_notify; 3877 sctp_add_to_readq(stcb->sctp_ep, stcb, 3878 control, 3879 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3880 } 3881 3882 void 3883 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3884 { 3885 struct mbuf *m_notify; 3886 struct sctp_queued_to_read *control; 3887 struct sctp_assoc_reset_event *strasoc; 3888 3889 if ((stcb == NULL) || 3890 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3891 /* event not enabled */ 3892 return; 3893 } 3894 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3895 if (m_notify == NULL) 3896 /* no space left */ 3897 return; 3898 SCTP_BUF_LEN(m_notify) = 0; 3899 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3900 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3901 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3902 strasoc->assocreset_flags = flag; 3903 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3904 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3905 strasoc->assocreset_local_tsn = sending_tsn; 3906 strasoc->assocreset_remote_tsn = recv_tsn; 3907 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3908 SCTP_BUF_NEXT(m_notify) = NULL; 3909 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3910 /* no space */ 3911 sctp_m_freem(m_notify); 3912 return; 3913 } 3914 /* append to socket */ 3915 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3916 0, 0, stcb->asoc.context, 0, 0, 0, 3917 m_notify); 3918 if (control == NULL) { 3919 /* no memory */ 3920 sctp_m_freem(m_notify); 3921 return; 3922 } 3923 control->length = SCTP_BUF_LEN(m_notify); 3924 control->spec_flags = M_NOTIFICATION; 3925 /* not that we need this */ 3926 control->tail_mbuf = m_notify; 3927 sctp_add_to_readq(stcb->sctp_ep, stcb, 3928 control, 3929 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3930 } 3931 3932 3933 3934 static void 3935 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3936 int number_entries, uint16_t *list, int flag) 3937 { 3938 struct mbuf *m_notify; 3939 struct 
sctp_queued_to_read *control; 3940 struct sctp_stream_reset_event *strreset; 3941 int len; 3942 3943 if ((stcb == NULL) || 3944 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3945 /* event not enabled */ 3946 return; 3947 } 3948 3949 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3950 if (m_notify == NULL) 3951 /* no space left */ 3952 return; 3953 SCTP_BUF_LEN(m_notify) = 0; 3954 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3955 if (len > M_TRAILINGSPACE(m_notify)) { 3956 /* never enough room */ 3957 sctp_m_freem(m_notify); 3958 return; 3959 } 3960 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3961 memset(strreset, 0, len); 3962 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3963 strreset->strreset_flags = flag; 3964 strreset->strreset_length = len; 3965 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3966 if (number_entries) { 3967 int i; 3968 3969 for (i = 0; i < number_entries; i++) { 3970 strreset->strreset_stream_list[i] = ntohs(list[i]); 3971 } 3972 } 3973 SCTP_BUF_LEN(m_notify) = len; 3974 SCTP_BUF_NEXT(m_notify) = NULL; 3975 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3976 /* no space */ 3977 sctp_m_freem(m_notify); 3978 return; 3979 } 3980 /* append to socket */ 3981 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3982 0, 0, stcb->asoc.context, 0, 0, 0, 3983 m_notify); 3984 if (control == NULL) { 3985 /* no memory */ 3986 sctp_m_freem(m_notify); 3987 return; 3988 } 3989 control->length = SCTP_BUF_LEN(m_notify); 3990 control->spec_flags = M_NOTIFICATION; 3991 /* not that we need this */ 3992 control->tail_mbuf = m_notify; 3993 sctp_add_to_readq(stcb->sctp_ep, stcb, 3994 control, 3995 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3996 } 3997 3998 3999 static void 4000 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 4001 { 4002 struct mbuf *m_notify; 4003 struct sctp_remote_error *sre; 4004 struct sctp_queued_to_read *control; 4005 unsigned int notif_len; 4006 uint16_t chunk_len; 4007 4008 if ((stcb == NULL) || 4009 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 4010 return; 4011 } 4012 if (chunk != NULL) { 4013 chunk_len = ntohs(chunk->ch.chunk_length); 4014 /* 4015 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 4016 * contiguous. 4017 */ 4018 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 4019 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4020 } 4021 } else { 4022 chunk_len = 0; 4023 } 4024 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4025 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4026 if (m_notify == NULL) { 4027 /* Retry with smaller value. 
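If even the base size cannot be allocated we give up; on the retry only the fixed-size sctp_remote_error header fits, so the cause data from the peer is dropped.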
*/ 4028 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4029 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4030 if (m_notify == NULL) { 4031 return; 4032 } 4033 } 4034 SCTP_BUF_NEXT(m_notify) = NULL; 4035 sre = mtod(m_notify, struct sctp_remote_error *); 4036 memset(sre, 0, notif_len); 4037 sre->sre_type = SCTP_REMOTE_ERROR; 4038 sre->sre_flags = 0; 4039 sre->sre_length = sizeof(struct sctp_remote_error); 4040 sre->sre_error = error; 4041 sre->sre_assoc_id = sctp_get_associd(stcb); 4042 if (notif_len > sizeof(struct sctp_remote_error)) { 4043 memcpy(sre->sre_data, chunk, chunk_len); 4044 sre->sre_length += chunk_len; 4045 } 4046 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4047 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4048 0, 0, stcb->asoc.context, 0, 0, 0, 4049 m_notify); 4050 if (control != NULL) { 4051 control->length = SCTP_BUF_LEN(m_notify); 4052 control->spec_flags = M_NOTIFICATION; 4053 /* not that we need this */ 4054 control->tail_mbuf = m_notify; 4055 sctp_add_to_readq(stcb->sctp_ep, stcb, 4056 control, 4057 &stcb->sctp_socket->so_rcv, 1, 4058 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4059 } else { 4060 sctp_m_freem(m_notify); 4061 } 4062 } 4063 4064 4065 void 4066 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4067 uint32_t error, void *data, int so_locked) 4068 { 4069 if ((stcb == NULL) || 4070 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4071 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4072 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4073 /* If the socket is gone we are out of here */ 4074 return; 4075 } 4076 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4077 return; 4078 } 4079 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4080 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4081 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4082 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4083 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4084 /* Don't report these in front states */ 4085 return; 4086 } 4087 } 4088 switch (notification) { 4089 case SCTP_NOTIFY_ASSOC_UP: 4090 if (stcb->asoc.assoc_up_sent == 0) { 4091 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 4092 stcb->asoc.assoc_up_sent = 1; 4093 } 4094 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4095 sctp_notify_adaptation_layer(stcb); 4096 } 4097 if (stcb->asoc.auth_supported == 0) { 4098 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4099 NULL, so_locked); 4100 } 4101 break; 4102 case SCTP_NOTIFY_ASSOC_DOWN: 4103 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 4104 break; 4105 case SCTP_NOTIFY_INTERFACE_DOWN: 4106 { 4107 struct sctp_nets *net; 4108 4109 net = (struct sctp_nets *)data; 4110 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4111 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4112 break; 4113 } 4114 case SCTP_NOTIFY_INTERFACE_UP: 4115 { 4116 struct sctp_nets *net; 4117 4118 net = (struct sctp_nets *)data; 4119 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4120 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4121 break; 4122 } 4123 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4124 { 4125 struct sctp_nets *net; 4126 4127 net = (struct sctp_nets *)data; 4128 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4129 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4130 break; 4131 } 4132 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4133 
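/* data is a stream_queue_pending entry that never became a chunk; report it as an unsent message */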
sctp_notify_send_failed2(stcb, error, 4134 (struct sctp_stream_queue_pending *)data, so_locked); 4135 break; 4136 case SCTP_NOTIFY_SENT_DG_FAIL: 4137 sctp_notify_send_failed(stcb, 1, error, 4138 (struct sctp_tmit_chunk *)data, so_locked); 4139 break; 4140 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4141 sctp_notify_send_failed(stcb, 0, error, 4142 (struct sctp_tmit_chunk *)data, so_locked); 4143 break; 4144 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4145 { 4146 uint32_t val; 4147 4148 val = *((uint32_t *)data); 4149 4150 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4151 break; 4152 } 4153 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4154 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4155 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4156 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked); 4157 } else { 4158 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked); 4159 } 4160 break; 4161 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4162 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4163 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4164 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked); 4165 } else { 4166 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked); 4167 } 4168 break; 4169 case SCTP_NOTIFY_ASSOC_RESTART: 4170 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked); 4171 if (stcb->asoc.auth_supported == 0) { 4172 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4173 NULL, so_locked); 4174 } 4175 break; 4176 case SCTP_NOTIFY_STR_RESET_SEND: 4177 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4178 break; 4179 case SCTP_NOTIFY_STR_RESET_RECV: 4180 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4181 break; 4182 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4183 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4184 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4185 break; 4186 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4187 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4188 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4189 break; 4190 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4191 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4192 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4193 break; 4194 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4195 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4196 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4197 break; 4198 case SCTP_NOTIFY_ASCONF_ADD_IP: 4199 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4200 error, so_locked); 4201 break; 4202 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4203 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4204 error, so_locked); 4205 break; 4206 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4207 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4208 error, so_locked); 4209 break; 4210 case SCTP_NOTIFY_PEER_SHUTDOWN: 4211 sctp_notify_shutdown_event(stcb); 4212 break; 4213 case SCTP_NOTIFY_AUTH_NEW_KEY: 4214 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4215 (uint16_t)(uintptr_t)data, 4216 so_locked); 4217 break; 4218 case SCTP_NOTIFY_AUTH_FREE_KEY: 4219 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4220 (uint16_t)(uintptr_t)data, 4221 so_locked); 4222 break; 4223 case SCTP_NOTIFY_NO_PEER_AUTH: 4224 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4225 
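/* key number is carried through the data pointer */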
(uint16_t)(uintptr_t)data, 4226 so_locked); 4227 break; 4228 case SCTP_NOTIFY_SENDER_DRY: 4229 sctp_notify_sender_dry_event(stcb, so_locked); 4230 break; 4231 case SCTP_NOTIFY_REMOTE_ERROR: 4232 sctp_notify_remote_error(stcb, error, data); 4233 break; 4234 default: 4235 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4236 __func__, notification, notification); 4237 break; 4238 } /* end switch */ 4239 } 4240 4241 void 4242 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked) 4243 { 4244 struct sctp_association *asoc; 4245 struct sctp_stream_out *outs; 4246 struct sctp_tmit_chunk *chk, *nchk; 4247 struct sctp_stream_queue_pending *sp, *nsp; 4248 int i; 4249 4250 if (stcb == NULL) { 4251 return; 4252 } 4253 asoc = &stcb->asoc; 4254 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4255 /* already being freed */ 4256 return; 4257 } 4258 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4259 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4260 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4261 return; 4262 } 4263 /* now through all the gunk freeing chunks */ 4264 if (holds_lock == 0) { 4265 SCTP_TCB_SEND_LOCK(stcb); 4266 } 4267 /* sent queue SHOULD be empty */ 4268 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4269 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4270 asoc->sent_queue_cnt--; 4271 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4272 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4273 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4274 #ifdef INVARIANTS 4275 } else { 4276 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4277 #endif 4278 } 4279 } 4280 if (chk->data != NULL) { 4281 sctp_free_bufspace(stcb, asoc, chk, 1); 4282 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4283 error, chk, so_locked); 4284 if (chk->data) { 4285 sctp_m_freem(chk->data); 4286 chk->data = NULL; 4287 } 4288 } 4289 sctp_free_a_chunk(stcb, chk, so_locked); 4290 /* sa_ignore FREED_MEMORY */ 4291 } 4292 /* pending send queue SHOULD be empty */ 4293 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4294 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4295 asoc->send_queue_cnt--; 4296 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4297 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4298 #ifdef INVARIANTS 4299 } else { 4300 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4301 #endif 4302 } 4303 if (chk->data != NULL) { 4304 sctp_free_bufspace(stcb, asoc, chk, 1); 4305 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4306 error, chk, so_locked); 4307 if (chk->data) { 4308 sctp_m_freem(chk->data); 4309 chk->data = NULL; 4310 } 4311 } 4312 sctp_free_a_chunk(stcb, chk, so_locked); 4313 /* sa_ignore FREED_MEMORY */ 4314 } 4315 for (i = 0; i < asoc->streamoutcnt; i++) { 4316 /* For each stream */ 4317 outs = &asoc->strmout[i]; 4318 /* clean up any sends there */ 4319 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4320 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4321 TAILQ_REMOVE(&outs->outqueue, sp, next); 4322 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1); 4323 sctp_free_spbufspace(stcb, asoc, sp); 4324 if (sp->data) { 4325 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4326 error, (void *)sp, so_locked); 4327 if (sp->data) { 4328 sctp_m_freem(sp->data); 4329 sp->data = NULL; 4330 sp->tail_mbuf = NULL; 4331 sp->length = 0; 4332 } 4333 } 4334 if (sp->net) { 4335 
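/* drop the queued message's reference on its destination address */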
sctp_free_remote_addr(sp->net); 4336 sp->net = NULL; 4337 } 4338 /* Free the chunk */ 4339 sctp_free_a_strmoq(stcb, sp, so_locked); 4340 /* sa_ignore FREED_MEMORY */ 4341 } 4342 } 4343 4344 if (holds_lock == 0) { 4345 SCTP_TCB_SEND_UNLOCK(stcb); 4346 } 4347 } 4348 4349 void 4350 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error, 4351 struct sctp_abort_chunk *abort, int so_locked) 4352 { 4353 if (stcb == NULL) { 4354 return; 4355 } 4356 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4357 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4358 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4359 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 4360 } 4361 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4362 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4363 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4364 return; 4365 } 4366 /* Tell them we lost the asoc */ 4367 sctp_report_all_outbound(stcb, error, 0, so_locked); 4368 if (from_peer) { 4369 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4370 } else { 4371 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4372 } 4373 } 4374 4375 void 4376 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4377 struct mbuf *m, int iphlen, 4378 struct sockaddr *src, struct sockaddr *dst, 4379 struct sctphdr *sh, struct mbuf *op_err, 4380 uint8_t mflowtype, uint32_t mflowid, 4381 uint32_t vrf_id, uint16_t port) 4382 { 4383 uint32_t vtag; 4384 4385 vtag = 0; 4386 if (stcb != NULL) { 4387 vtag = stcb->asoc.peer_vtag; 4388 vrf_id = stcb->asoc.vrf_id; 4389 } 4390 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4391 mflowtype, mflowid, inp->fibnum, 4392 vrf_id, port); 4393 if (stcb != NULL) { 4394 /* We have a TCB to abort, send notification too */ 4395 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 4396 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4397 /* Ok, now lets free it */ 4398 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4399 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4400 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4401 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4402 } 4403 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4404 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4405 } 4406 } 4407 #ifdef SCTP_ASOCLOG_OF_TSNS 4408 void 4409 sctp_print_out_track_log(struct sctp_tcb *stcb) 4410 { 4411 #ifdef NOSIY_PRINTS 4412 int i; 4413 4414 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4415 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4416 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4417 SCTP_PRINTF("None rcvd\n"); 4418 goto none_in; 4419 } 4420 if (stcb->asoc.tsn_in_wrapped) { 4421 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4422 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4423 stcb->asoc.in_tsnlog[i].tsn, 4424 stcb->asoc.in_tsnlog[i].strm, 4425 stcb->asoc.in_tsnlog[i].seq, 4426 stcb->asoc.in_tsnlog[i].flgs, 4427 stcb->asoc.in_tsnlog[i].sz); 4428 } 4429 } 4430 if (stcb->asoc.tsn_in_at) { 4431 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4432 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4433 stcb->asoc.in_tsnlog[i].tsn, 4434 stcb->asoc.in_tsnlog[i].strm, 4435 stcb->asoc.in_tsnlog[i].seq, 4436 stcb->asoc.in_tsnlog[i].flgs, 4437 stcb->asoc.in_tsnlog[i].sz); 4438 } 4439 } 4440 none_in: 4441 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4442 if ((stcb->asoc.tsn_out_at == 0) && 4443 
(stcb->asoc.tsn_out_wrapped == 0)) { 4444 SCTP_PRINTF("None sent\n"); 4445 } 4446 if (stcb->asoc.tsn_out_wrapped) { 4447 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4448 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4449 stcb->asoc.out_tsnlog[i].tsn, 4450 stcb->asoc.out_tsnlog[i].strm, 4451 stcb->asoc.out_tsnlog[i].seq, 4452 stcb->asoc.out_tsnlog[i].flgs, 4453 stcb->asoc.out_tsnlog[i].sz); 4454 } 4455 } 4456 if (stcb->asoc.tsn_out_at) { 4457 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4458 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4459 stcb->asoc.out_tsnlog[i].tsn, 4460 stcb->asoc.out_tsnlog[i].strm, 4461 stcb->asoc.out_tsnlog[i].seq, 4462 stcb->asoc.out_tsnlog[i].flgs, 4463 stcb->asoc.out_tsnlog[i].sz); 4464 } 4465 } 4466 #endif 4467 } 4468 #endif 4469 4470 void 4471 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4472 struct mbuf *op_err, 4473 int so_locked) 4474 { 4475 4476 if (stcb == NULL) { 4477 /* Got to have a TCB */ 4478 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4479 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4480 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4481 SCTP_CALLED_DIRECTLY_NOCMPSET); 4482 } 4483 } 4484 return; 4485 } else { 4486 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4487 } 4488 /* notify the peer */ 4489 sctp_send_abort_tcb(stcb, op_err, so_locked); 4490 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4491 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4492 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4493 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4494 } 4495 /* notify the ulp */ 4496 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4497 sctp_abort_notification(stcb, 0, 0, NULL, so_locked); 4498 } 4499 /* now free the asoc */ 4500 #ifdef SCTP_ASOCLOG_OF_TSNS 4501 sctp_print_out_track_log(stcb); 4502 #endif 4503 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4504 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4505 } 4506 4507 void 4508 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4509 struct sockaddr *src, struct sockaddr *dst, 4510 struct sctphdr *sh, struct sctp_inpcb *inp, 4511 struct mbuf *cause, 4512 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4513 uint32_t vrf_id, uint16_t port) 4514 { 4515 struct sctp_chunkhdr *ch, chunk_buf; 4516 unsigned int chk_length; 4517 int contains_init_chunk; 4518 4519 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4520 /* Generate a TO address for future reference */ 4521 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4522 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4523 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4524 SCTP_CALLED_DIRECTLY_NOCMPSET); 4525 } 4526 } 4527 contains_init_chunk = 0; 4528 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4529 sizeof(*ch), (uint8_t *)&chunk_buf); 4530 while (ch != NULL) { 4531 chk_length = ntohs(ch->chunk_length); 4532 if (chk_length < sizeof(*ch)) { 4533 /* break to abort land */ 4534 break; 4535 } 4536 switch (ch->chunk_type) { 4537 case SCTP_INIT: 4538 contains_init_chunk = 1; 4539 break; 4540 case SCTP_PACKET_DROPPED: 4541 /* we don't respond to pkt-dropped */ 4542 return; 4543 case SCTP_ABORT_ASSOCIATION: 4544 /* we don't respond with an ABORT to an ABORT */ 4545 return; 4546 case SCTP_SHUTDOWN_COMPLETE: 4547 /* 4548 * we ignore it since we are not waiting for it and 4549 * peer is gone 4550 */ 4551 return; 4552 case SCTP_SHUTDOWN_ACK: 4553 sctp_send_shutdown_complete2(src, dst, sh, 4554 mflowtype, mflowid, fibnum, 4555 vrf_id, port); 4556 return; 4557 
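/* any other chunk type just falls through; the scan continues and an ABORT may be sent below, subject to the blackhole sysctl */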
default: 4558 break; 4559 } 4560 offset += SCTP_SIZE32(chk_length); 4561 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4562 sizeof(*ch), (uint8_t *)&chunk_buf); 4563 } 4564 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4565 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4566 (contains_init_chunk == 0))) { 4567 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4568 mflowtype, mflowid, fibnum, 4569 vrf_id, port); 4570 } 4571 } 4572 4573 /* 4574 * check the inbound datagram to make sure there is not an abort inside it, 4575 * if there is return 1, else return 0. 4576 */ 4577 int 4578 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill) 4579 { 4580 struct sctp_chunkhdr *ch; 4581 struct sctp_init_chunk *init_chk, chunk_buf; 4582 int offset; 4583 unsigned int chk_length; 4584 4585 offset = iphlen + sizeof(struct sctphdr); 4586 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4587 (uint8_t *)&chunk_buf); 4588 while (ch != NULL) { 4589 chk_length = ntohs(ch->chunk_length); 4590 if (chk_length < sizeof(*ch)) { 4591 /* packet is probably corrupt */ 4592 break; 4593 } 4594 /* we seem to be ok, is it an abort? */ 4595 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4596 /* yep, tell them */ 4597 return (1); 4598 } 4599 if (ch->chunk_type == SCTP_INITIATION) { 4600 /* need to update the Vtag */ 4601 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4602 offset, sizeof(*init_chk), (uint8_t *)&chunk_buf); 4603 if (init_chk != NULL) { 4604 *vtagfill = ntohl(init_chk->init.initiate_tag); 4605 } 4606 } 4607 /* Nope, move to the next chunk */ 4608 offset += SCTP_SIZE32(chk_length); 4609 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4610 sizeof(*ch), (uint8_t *)&chunk_buf); 4611 } 4612 return (0); 4613 } 4614 4615 /* 4616 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4617 * set (i.e. it's 0) so, create this function to compare link local scopes 4618 */ 4619 #ifdef INET6 4620 uint32_t 4621 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4622 { 4623 struct sockaddr_in6 a, b; 4624 4625 /* save copies */ 4626 a = *addr1; 4627 b = *addr2; 4628 4629 if (a.sin6_scope_id == 0) 4630 if (sa6_recoverscope(&a)) { 4631 /* can't get scope, so can't match */ 4632 return (0); 4633 } 4634 if (b.sin6_scope_id == 0) 4635 if (sa6_recoverscope(&b)) { 4636 /* can't get scope, so can't match */ 4637 return (0); 4638 } 4639 if (a.sin6_scope_id != b.sin6_scope_id) 4640 return (0); 4641 4642 return (1); 4643 } 4644 4645 /* 4646 * returns a sockaddr_in6 with embedded scope recovered and removed 4647 */ 4648 struct sockaddr_in6 * 4649 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4650 { 4651 /* check and strip embedded scope junk */ 4652 if (addr->sin6_family == AF_INET6) { 4653 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4654 if (addr->sin6_scope_id == 0) { 4655 *store = *addr; 4656 if (!sa6_recoverscope(store)) { 4657 /* use the recovered scope */ 4658 addr = store; 4659 } 4660 } else { 4661 /* else, return the original "to" addr */ 4662 in6_clearscope(&addr->sin6_addr); 4663 } 4664 } 4665 } 4666 return (addr); 4667 } 4668 #endif 4669 4670 /* 4671 * are the two addresses the same? 
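* (only the network addresses themselves are compared; port numbers are ignored)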
currently a "scopeless" check returns: 1 4672 * if same, 0 if not 4673 */ 4674 int 4675 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4676 { 4677 4678 /* must be valid */ 4679 if (sa1 == NULL || sa2 == NULL) 4680 return (0); 4681 4682 /* must be the same family */ 4683 if (sa1->sa_family != sa2->sa_family) 4684 return (0); 4685 4686 switch (sa1->sa_family) { 4687 #ifdef INET6 4688 case AF_INET6: 4689 { 4690 /* IPv6 addresses */ 4691 struct sockaddr_in6 *sin6_1, *sin6_2; 4692 4693 sin6_1 = (struct sockaddr_in6 *)sa1; 4694 sin6_2 = (struct sockaddr_in6 *)sa2; 4695 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4696 sin6_2)); 4697 } 4698 #endif 4699 #ifdef INET 4700 case AF_INET: 4701 { 4702 /* IPv4 addresses */ 4703 struct sockaddr_in *sin_1, *sin_2; 4704 4705 sin_1 = (struct sockaddr_in *)sa1; 4706 sin_2 = (struct sockaddr_in *)sa2; 4707 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4708 } 4709 #endif 4710 default: 4711 /* we don't do these... */ 4712 return (0); 4713 } 4714 } 4715 4716 void 4717 sctp_print_address(struct sockaddr *sa) 4718 { 4719 #ifdef INET6 4720 char ip6buf[INET6_ADDRSTRLEN]; 4721 #endif 4722 4723 switch (sa->sa_family) { 4724 #ifdef INET6 4725 case AF_INET6: 4726 { 4727 struct sockaddr_in6 *sin6; 4728 4729 sin6 = (struct sockaddr_in6 *)sa; 4730 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4731 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4732 ntohs(sin6->sin6_port), 4733 sin6->sin6_scope_id); 4734 break; 4735 } 4736 #endif 4737 #ifdef INET 4738 case AF_INET: 4739 { 4740 struct sockaddr_in *sin; 4741 unsigned char *p; 4742 4743 sin = (struct sockaddr_in *)sa; 4744 p = (unsigned char *)&sin->sin_addr; 4745 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4746 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4747 break; 4748 } 4749 #endif 4750 default: 4751 SCTP_PRINTF("?\n"); 4752 break; 4753 } 4754 } 4755 4756 void 4757 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4758 struct sctp_inpcb *new_inp, 4759 struct sctp_tcb *stcb, 4760 int waitflags) 4761 { 4762 /* 4763 * go through our old INP and pull off any control structures that 4764 * belong to stcb and move then to the new inp. 4765 */ 4766 struct socket *old_so, *new_so; 4767 struct sctp_queued_to_read *control, *nctl; 4768 struct sctp_readhead tmp_queue; 4769 struct mbuf *m; 4770 int error = 0; 4771 4772 old_so = old_inp->sctp_socket; 4773 new_so = new_inp->sctp_socket; 4774 TAILQ_INIT(&tmp_queue); 4775 error = sblock(&old_so->so_rcv, waitflags); 4776 if (error) { 4777 /* 4778 * Gak, can't get sblock, we have a problem. data will be 4779 * left stranded.. and we don't dare look at it since the 4780 * other thread may be reading something. Oh well, its a 4781 * screwed up app that does a peeloff OR a accept while 4782 * reading from the main socket... actually its only the 4783 * peeloff() case, since I think read will fail on a 4784 * listening socket.. 4785 */ 4786 return; 4787 } 4788 /* lock the socket buffers */ 4789 SCTP_INP_READ_LOCK(old_inp); 4790 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4791 /* Pull off all for out target stcb */ 4792 if (control->stcb == stcb) { 4793 /* remove it we want it */ 4794 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4795 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4796 m = control->data; 4797 while (m) { 4798 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4799 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4800 } 4801 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4802 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4803 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4804 } 4805 m = SCTP_BUF_NEXT(m); 4806 } 4807 } 4808 } 4809 SCTP_INP_READ_UNLOCK(old_inp); 4810 /* Remove the sb-lock on the old socket */ 4811 4812 sbunlock(&old_so->so_rcv); 4813 /* Now we move them over to the new socket buffer */ 4814 SCTP_INP_READ_LOCK(new_inp); 4815 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4816 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4817 m = control->data; 4818 while (m) { 4819 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4820 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4821 } 4822 sctp_sballoc(stcb, &new_so->so_rcv, m); 4823 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4824 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4825 } 4826 m = SCTP_BUF_NEXT(m); 4827 } 4828 } 4829 SCTP_INP_READ_UNLOCK(new_inp); 4830 } 4831 4832 void 4833 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4834 struct sctp_tcb *stcb, 4835 int so_locked 4836 SCTP_UNUSED 4837 ) 4838 { 4839 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4840 sctp_sorwakeup(inp, inp->sctp_socket); 4841 } 4842 } 4843 4844 void 4845 sctp_add_to_readq(struct sctp_inpcb *inp, 4846 struct sctp_tcb *stcb, 4847 struct sctp_queued_to_read *control, 4848 struct sockbuf *sb, 4849 int end, 4850 int inp_read_lock_held, 4851 int so_locked) 4852 { 4853 /* 4854 * Here we must place the control on the end of the socket read 4855 * queue AND increment sb_cc so that select will work properly on 4856 * read. 4857 */ 4858 struct mbuf *m, *prev = NULL; 4859 4860 if (inp == NULL) { 4861 /* Gak, TSNH!! */ 4862 #ifdef INVARIANTS 4863 panic("Gak, inp NULL on add_to_readq"); 4864 #endif 4865 return; 4866 } 4867 if (inp_read_lock_held == 0) 4868 SCTP_INP_READ_LOCK(inp); 4869 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4870 if (!control->on_strm_q) { 4871 sctp_free_remote_addr(control->whoFrom); 4872 if (control->data) { 4873 sctp_m_freem(control->data); 4874 control->data = NULL; 4875 } 4876 sctp_free_a_readq(stcb, control); 4877 } 4878 if (inp_read_lock_held == 0) 4879 SCTP_INP_READ_UNLOCK(inp); 4880 return; 4881 } 4882 if (!(control->spec_flags & M_NOTIFICATION)) { 4883 atomic_add_int(&inp->total_recvs, 1); 4884 if (!control->do_not_ref_stcb) { 4885 atomic_add_int(&stcb->total_recvs, 1); 4886 } 4887 } 4888 m = control->data; 4889 control->held_length = 0; 4890 control->length = 0; 4891 while (m) { 4892 if (SCTP_BUF_LEN(m) == 0) { 4893 /* Skip mbufs with NO length */ 4894 if (prev == NULL) { 4895 /* First one */ 4896 control->data = sctp_m_free(m); 4897 m = control->data; 4898 } else { 4899 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4900 m = SCTP_BUF_NEXT(prev); 4901 } 4902 if (m == NULL) { 4903 control->tail_mbuf = prev; 4904 } 4905 continue; 4906 } 4907 prev = m; 4908 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4909 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4910 } 4911 sctp_sballoc(stcb, sb, m); 4912 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4913 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 4914 } 4915 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4916 m = SCTP_BUF_NEXT(m); 4917 } 4918 if (prev != NULL) { 4919 control->tail_mbuf = prev; 4920 } else { 4921 /* Everything got collapsed out?? */ 4922 if (!control->on_strm_q) { 4923 sctp_free_remote_addr(control->whoFrom); 4924 sctp_free_a_readq(stcb, control); 4925 } 4926 if (inp_read_lock_held == 0) 4927 SCTP_INP_READ_UNLOCK(inp); 4928 return; 4929 } 4930 if (end) { 4931 control->end_added = 1; 4932 } 4933 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4934 control->on_read_q = 1; 4935 if (inp_read_lock_held == 0) 4936 SCTP_INP_READ_UNLOCK(inp); 4937 if (inp && inp->sctp_socket) { 4938 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4939 } 4940 } 4941 4942 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4943 *************ALTERNATE ROUTING CODE 4944 */ 4945 4946 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4947 *************ALTERNATE ROUTING CODE 4948 */ 4949 4950 struct mbuf * 4951 sctp_generate_cause(uint16_t code, char *info) 4952 { 4953 struct mbuf *m; 4954 struct sctp_gen_error_cause *cause; 4955 size_t info_len; 4956 uint16_t len; 4957 4958 if ((code == 0) || (info == NULL)) { 4959 return (NULL); 4960 } 4961 info_len = strlen(info); 4962 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4963 return (NULL); 4964 } 4965 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4966 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4967 if (m != NULL) { 4968 SCTP_BUF_LEN(m) = len; 4969 cause = mtod(m, struct sctp_gen_error_cause *); 4970 cause->code = htons(code); 4971 cause->length = htons(len); 4972 memcpy(cause->info, info, info_len); 4973 } 4974 return (m); 4975 } 4976 4977 struct mbuf * 4978 sctp_generate_no_user_data_cause(uint32_t tsn) 4979 { 4980 struct mbuf *m; 4981 struct sctp_error_no_user_data *no_user_data_cause; 4982 uint16_t len; 4983 4984 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 4985 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4986 if (m != NULL) { 4987 SCTP_BUF_LEN(m) = len; 4988 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 4989 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 4990 no_user_data_cause->cause.length = htons(len); 4991 no_user_data_cause->tsn = htonl(tsn); 4992 } 4993 return (m); 4994 } 4995 4996 #ifdef SCTP_MBCNT_LOGGING 4997 void 4998 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4999 struct sctp_tmit_chunk *tp1, int chk_cnt) 5000 { 5001 if (tp1->data == NULL) { 5002 return; 5003 } 5004 asoc->chunks_on_out_queue -= chk_cnt; 5005 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 5006 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 5007 asoc->total_output_queue_size, 5008 tp1->book_size, 5009 0, 5010 tp1->mbcnt); 5011 } 5012 if (asoc->total_output_queue_size >= tp1->book_size) { 5013 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 5014 } else { 5015 asoc->total_output_queue_size = 0; 5016 } 5017 5018 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 5019 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 5020 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 5021 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 5022 } else { 5023 stcb->sctp_socket->so_snd.sb_cc = 0; 5024 5025 } 5026 } 5027 } 5028 5029 #endif 5030 5031 int 5032 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5033 uint8_t sent, int 
so_locked) 5034 { 5035 struct sctp_stream_out *strq; 5036 struct sctp_tmit_chunk *chk = NULL, *tp2; 5037 struct sctp_stream_queue_pending *sp; 5038 uint32_t mid; 5039 uint16_t sid; 5040 uint8_t foundeom = 0; 5041 int ret_sz = 0; 5042 int notdone; 5043 int do_wakeup_routine = 0; 5044 5045 sid = tp1->rec.data.sid; 5046 mid = tp1->rec.data.mid; 5047 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5048 stcb->asoc.abandoned_sent[0]++; 5049 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5050 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5051 #if defined(SCTP_DETAILED_STR_STATS) 5052 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5053 #endif 5054 } else { 5055 stcb->asoc.abandoned_unsent[0]++; 5056 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5057 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5058 #if defined(SCTP_DETAILED_STR_STATS) 5059 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5060 #endif 5061 } 5062 do { 5063 ret_sz += tp1->book_size; 5064 if (tp1->data != NULL) { 5065 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5066 sctp_flight_size_decrease(tp1); 5067 sctp_total_flight_decrease(stcb, tp1); 5068 } 5069 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5070 stcb->asoc.peers_rwnd += tp1->send_size; 5071 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5072 if (sent) { 5073 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5074 } else { 5075 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5076 } 5077 if (tp1->data) { 5078 sctp_m_freem(tp1->data); 5079 tp1->data = NULL; 5080 } 5081 do_wakeup_routine = 1; 5082 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5083 stcb->asoc.sent_queue_cnt_removeable--; 5084 } 5085 } 5086 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5087 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5088 SCTP_DATA_NOT_FRAG) { 5089 /* not frag'ed we ae done */ 5090 notdone = 0; 5091 foundeom = 1; 5092 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5093 /* end of frag, we are done */ 5094 notdone = 0; 5095 foundeom = 1; 5096 } else { 5097 /* 5098 * Its a begin or middle piece, we must mark all of 5099 * it 5100 */ 5101 notdone = 1; 5102 tp1 = TAILQ_NEXT(tp1, sctp_next); 5103 } 5104 } while (tp1 && notdone); 5105 if (foundeom == 0) { 5106 /* 5107 * The multi-part message was scattered across the send and 5108 * sent queue. 5109 */ 5110 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5111 if ((tp1->rec.data.sid != sid) || 5112 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5113 break; 5114 } 5115 /* 5116 * save to chk in case we have some on stream out 5117 * queue. If so and we have an un-transmitted one we 5118 * don't have to fudge the TSN. 5119 */ 5120 chk = tp1; 5121 ret_sz += tp1->book_size; 5122 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5123 if (sent) { 5124 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5125 } else { 5126 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5127 } 5128 if (tp1->data) { 5129 sctp_m_freem(tp1->data); 5130 tp1->data = NULL; 5131 } 5132 /* No flight involved here book the size to 0 */ 5133 tp1->book_size = 0; 5134 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5135 foundeom = 1; 5136 } 5137 do_wakeup_routine = 1; 5138 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5139 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5140 /* 5141 * on to the sent queue so we can wait for it to be 5142 * passed by. 
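* Marking it SCTP_FORWARD_TSN_SKIP lets the FORWARD-TSN machinery advance the cumulative TSN past this abandoned fragment.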
5143 */ 5144 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5145 sctp_next); 5146 stcb->asoc.send_queue_cnt--; 5147 stcb->asoc.sent_queue_cnt++; 5148 } 5149 } 5150 if (foundeom == 0) { 5151 /* 5152 * Still no eom found. That means there is stuff left on the 5153 * stream out queue.. yuck. 5154 */ 5155 SCTP_TCB_SEND_LOCK(stcb); 5156 strq = &stcb->asoc.strmout[sid]; 5157 sp = TAILQ_FIRST(&strq->outqueue); 5158 if (sp != NULL) { 5159 sp->discard_rest = 1; 5160 /* 5161 * We may need to put a chunk on the queue that 5162 * holds the TSN that would have been sent with the 5163 * LAST bit. 5164 */ 5165 if (chk == NULL) { 5166 /* Yep, we have to */ 5167 sctp_alloc_a_chunk(stcb, chk); 5168 if (chk == NULL) { 5169 /* 5170 * we are hosed. All we can do is 5171 * nothing.. which will cause an 5172 * abort if the peer is paying 5173 * attention. 5174 */ 5175 goto oh_well; 5176 } 5177 memset(chk, 0, sizeof(*chk)); 5178 chk->rec.data.rcv_flags = 0; 5179 chk->sent = SCTP_FORWARD_TSN_SKIP; 5180 chk->asoc = &stcb->asoc; 5181 if (stcb->asoc.idata_supported == 0) { 5182 if (sp->sinfo_flags & SCTP_UNORDERED) { 5183 chk->rec.data.mid = 0; 5184 } else { 5185 chk->rec.data.mid = strq->next_mid_ordered; 5186 } 5187 } else { 5188 if (sp->sinfo_flags & SCTP_UNORDERED) { 5189 chk->rec.data.mid = strq->next_mid_unordered; 5190 } else { 5191 chk->rec.data.mid = strq->next_mid_ordered; 5192 } 5193 } 5194 chk->rec.data.sid = sp->sid; 5195 chk->rec.data.ppid = sp->ppid; 5196 chk->rec.data.context = sp->context; 5197 chk->flags = sp->act_flags; 5198 chk->whoTo = NULL; 5199 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5200 strq->chunks_on_queues++; 5201 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5202 stcb->asoc.sent_queue_cnt++; 5203 stcb->asoc.pr_sctp_cnt++; 5204 } 5205 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5206 if (sp->sinfo_flags & SCTP_UNORDERED) { 5207 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5208 } 5209 if (stcb->asoc.idata_supported == 0) { 5210 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5211 strq->next_mid_ordered++; 5212 } 5213 } else { 5214 if (sp->sinfo_flags & SCTP_UNORDERED) { 5215 strq->next_mid_unordered++; 5216 } else { 5217 strq->next_mid_ordered++; 5218 } 5219 } 5220 oh_well: 5221 if (sp->data) { 5222 /* 5223 * Pull any data to free up the SB and allow 5224 * sender to "add more" while we will throw 5225 * away :-) 5226 */ 5227 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5228 ret_sz += sp->length; 5229 do_wakeup_routine = 1; 5230 sp->some_taken = 1; 5231 sctp_m_freem(sp->data); 5232 sp->data = NULL; 5233 sp->tail_mbuf = NULL; 5234 sp->length = 0; 5235 } 5236 } 5237 SCTP_TCB_SEND_UNLOCK(stcb); 5238 } 5239 if (do_wakeup_routine) { 5240 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5241 } 5242 return (ret_sz); 5243 } 5244 5245 /* 5246 * checks to see if the given address, sa, is one that is currently known by 5247 * the kernel note: can't distinguish the same address on multiple interfaces 5248 * and doesn't handle multiple addresses with different zone/scope id's note: 5249 * ifa_ifwithaddr() compares the entire sockaddr struct 5250 */ 5251 struct sctp_ifa * 5252 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5253 int holds_lock) 5254 { 5255 struct sctp_laddr *laddr; 5256 5257 if (holds_lock == 0) { 5258 SCTP_INP_RLOCK(inp); 5259 } 5260 5261 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5262 if (laddr->ifa == NULL) 5263 continue; 5264 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5265 continue; 5266 
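/* families match; compare the actual address bytes for the supported families */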
#ifdef INET 5267 if (addr->sa_family == AF_INET) { 5268 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5269 laddr->ifa->address.sin.sin_addr.s_addr) { 5270 /* found him. */ 5271 break; 5272 } 5273 } 5274 #endif 5275 #ifdef INET6 5276 if (addr->sa_family == AF_INET6) { 5277 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5278 &laddr->ifa->address.sin6)) { 5279 /* found him. */ 5280 break; 5281 } 5282 } 5283 #endif 5284 } 5285 if (holds_lock == 0) { 5286 SCTP_INP_RUNLOCK(inp); 5287 } 5288 if (laddr != NULL) { 5289 return (laddr->ifa); 5290 } else { 5291 return (NULL); 5292 } 5293 } 5294 5295 uint32_t 5296 sctp_get_ifa_hash_val(struct sockaddr *addr) 5297 { 5298 switch (addr->sa_family) { 5299 #ifdef INET 5300 case AF_INET: 5301 { 5302 struct sockaddr_in *sin; 5303 5304 sin = (struct sockaddr_in *)addr; 5305 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5306 } 5307 #endif 5308 #ifdef INET6 5309 case AF_INET6: 5310 { 5311 struct sockaddr_in6 *sin6; 5312 uint32_t hash_of_addr; 5313 5314 sin6 = (struct sockaddr_in6 *)addr; 5315 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5316 sin6->sin6_addr.s6_addr32[1] + 5317 sin6->sin6_addr.s6_addr32[2] + 5318 sin6->sin6_addr.s6_addr32[3]); 5319 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5320 return (hash_of_addr); 5321 } 5322 #endif 5323 default: 5324 break; 5325 } 5326 return (0); 5327 } 5328 5329 struct sctp_ifa * 5330 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5331 { 5332 struct sctp_ifa *sctp_ifap; 5333 struct sctp_vrf *vrf; 5334 struct sctp_ifalist *hash_head; 5335 uint32_t hash_of_addr; 5336 5337 if (holds_lock == 0) { 5338 SCTP_IPI_ADDR_RLOCK(); 5339 } else { 5340 SCTP_IPI_ADDR_LOCK_ASSERT(); 5341 } 5342 5343 vrf = sctp_find_vrf(vrf_id); 5344 if (vrf == NULL) { 5345 if (holds_lock == 0) 5346 SCTP_IPI_ADDR_RUNLOCK(); 5347 return (NULL); 5348 } 5349 5350 hash_of_addr = sctp_get_ifa_hash_val(addr); 5351 5352 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5353 if (hash_head == NULL) { 5354 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5355 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5356 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5357 sctp_print_address(addr); 5358 SCTP_PRINTF("No such bucket for address\n"); 5359 if (holds_lock == 0) 5360 SCTP_IPI_ADDR_RUNLOCK(); 5361 5362 return (NULL); 5363 } 5364 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5365 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5366 continue; 5367 #ifdef INET 5368 if (addr->sa_family == AF_INET) { 5369 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5370 sctp_ifap->address.sin.sin_addr.s_addr) { 5371 /* found him. */ 5372 break; 5373 } 5374 } 5375 #endif 5376 #ifdef INET6 5377 if (addr->sa_family == AF_INET6) { 5378 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5379 &sctp_ifap->address.sin6)) { 5380 /* found him. */ 5381 break; 5382 } 5383 } 5384 #endif 5385 } 5386 if (holds_lock == 0) 5387 SCTP_IPI_ADDR_RUNLOCK(); 5388 return (sctp_ifap); 5389 } 5390 5391 static void 5392 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5393 uint32_t rwnd_req) 5394 { 5395 /* User pulled some data, do we need a rwnd update? 
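Only worth a window-update SACK once at least rwnd_req bytes of receive window have opened up since the last report.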
*/ 5396 struct epoch_tracker et; 5397 int r_unlocked = 0; 5398 uint32_t dif, rwnd; 5399 struct socket *so = NULL; 5400 5401 if (stcb == NULL) 5402 return; 5403 5404 atomic_add_int(&stcb->asoc.refcnt, 1); 5405 5406 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5407 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5408 /* Pre-check If we are freeing no update */ 5409 goto no_lock; 5410 } 5411 SCTP_INP_INCR_REF(stcb->sctp_ep); 5412 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5413 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5414 goto out; 5415 } 5416 so = stcb->sctp_socket; 5417 if (so == NULL) { 5418 goto out; 5419 } 5420 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5421 /* Have you have freed enough to look */ 5422 *freed_so_far = 0; 5423 /* Yep, its worth a look and the lock overhead */ 5424 5425 /* Figure out what the rwnd would be */ 5426 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5427 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5428 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5429 } else { 5430 dif = 0; 5431 } 5432 if (dif >= rwnd_req) { 5433 if (hold_rlock) { 5434 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5435 r_unlocked = 1; 5436 } 5437 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5438 /* 5439 * One last check before we allow the guy possibly 5440 * to get in. There is a race, where the guy has not 5441 * reached the gate. In that case 5442 */ 5443 goto out; 5444 } 5445 SCTP_TCB_LOCK(stcb); 5446 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5447 /* No reports here */ 5448 SCTP_TCB_UNLOCK(stcb); 5449 goto out; 5450 } 5451 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5452 NET_EPOCH_ENTER(et); 5453 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5454 5455 sctp_chunk_output(stcb->sctp_ep, stcb, 5456 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5457 /* make sure no timer is running */ 5458 NET_EPOCH_EXIT(et); 5459 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5460 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5461 SCTP_TCB_UNLOCK(stcb); 5462 } else { 5463 /* Update how much we have pending */ 5464 stcb->freed_by_sorcv_sincelast = dif; 5465 } 5466 out: 5467 if (so && r_unlocked && hold_rlock) { 5468 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5469 } 5470 5471 SCTP_INP_DECR_REF(stcb->sctp_ep); 5472 no_lock: 5473 atomic_add_int(&stcb->asoc.refcnt, -1); 5474 return; 5475 } 5476 5477 int 5478 sctp_sorecvmsg(struct socket *so, 5479 struct uio *uio, 5480 struct mbuf **mp, 5481 struct sockaddr *from, 5482 int fromlen, 5483 int *msg_flags, 5484 struct sctp_sndrcvinfo *sinfo, 5485 int filling_sinfo) 5486 { 5487 /* 5488 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5489 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5490 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
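* (MSG_OOB is rejected with EOPNOTSUPP, and MSG_PEEK combined with an mbuf-chain return (mp != NULL) is rejected with EINVAL; see the checks below.)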
5491 * On the way out we may send out any combination of: 5492 * MSG_NOTIFICATION MSG_EOR 5493 * 5494 */ 5495 struct sctp_inpcb *inp = NULL; 5496 ssize_t my_len = 0; 5497 ssize_t cp_len = 0; 5498 int error = 0; 5499 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5500 struct mbuf *m = NULL; 5501 struct sctp_tcb *stcb = NULL; 5502 int wakeup_read_socket = 0; 5503 int freecnt_applied = 0; 5504 int out_flags = 0, in_flags = 0; 5505 int block_allowed = 1; 5506 uint32_t freed_so_far = 0; 5507 ssize_t copied_so_far = 0; 5508 int in_eeor_mode = 0; 5509 int no_rcv_needed = 0; 5510 uint32_t rwnd_req = 0; 5511 int hold_sblock = 0; 5512 int hold_rlock = 0; 5513 ssize_t slen = 0; 5514 uint32_t held_length = 0; 5515 int sockbuf_lock = 0; 5516 5517 if (uio == NULL) { 5518 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5519 return (EINVAL); 5520 } 5521 5522 if (msg_flags) { 5523 in_flags = *msg_flags; 5524 if (in_flags & MSG_PEEK) 5525 SCTP_STAT_INCR(sctps_read_peeks); 5526 } else { 5527 in_flags = 0; 5528 } 5529 slen = uio->uio_resid; 5530 5531 /* Pull in and set up our int flags */ 5532 if (in_flags & MSG_OOB) { 5533 /* Out of band's NOT supported */ 5534 return (EOPNOTSUPP); 5535 } 5536 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5537 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5538 return (EINVAL); 5539 } 5540 if ((in_flags & (MSG_DONTWAIT 5541 | MSG_NBIO 5542 )) || 5543 SCTP_SO_IS_NBIO(so)) { 5544 block_allowed = 0; 5545 } 5546 /* setup the endpoint */ 5547 inp = (struct sctp_inpcb *)so->so_pcb; 5548 if (inp == NULL) { 5549 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5550 return (EFAULT); 5551 } 5552 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5553 /* Must be at least a MTU's worth */ 5554 if (rwnd_req < SCTP_MIN_RWND) 5555 rwnd_req = SCTP_MIN_RWND; 5556 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5557 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5558 sctp_misc_ints(SCTP_SORECV_ENTER, 5559 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5560 } 5561 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5562 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5563 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5564 } 5565 5566 5567 error = sblock(&so->so_rcv, (block_allowed ? 
SBL_WAIT : 0)); 5568 if (error) { 5569 goto release_unlocked; 5570 } 5571 sockbuf_lock = 1; 5572 restart: 5573 5574 restart_nosblocks: 5575 if (hold_sblock == 0) { 5576 SOCKBUF_LOCK(&so->so_rcv); 5577 hold_sblock = 1; 5578 } 5579 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5580 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5581 goto out; 5582 } 5583 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5584 if (so->so_error) { 5585 error = so->so_error; 5586 if ((in_flags & MSG_PEEK) == 0) 5587 so->so_error = 0; 5588 goto out; 5589 } else { 5590 if (so->so_rcv.sb_cc == 0) { 5591 /* indicate EOF */ 5592 error = 0; 5593 goto out; 5594 } 5595 } 5596 } 5597 if (so->so_rcv.sb_cc <= held_length) { 5598 if (so->so_error) { 5599 error = so->so_error; 5600 if ((in_flags & MSG_PEEK) == 0) { 5601 so->so_error = 0; 5602 } 5603 goto out; 5604 } 5605 if ((so->so_rcv.sb_cc == 0) && 5606 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5607 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5608 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5609 /* 5610 * For active open side clear flags for 5611 * re-use passive open is blocked by 5612 * connect. 5613 */ 5614 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5615 /* 5616 * You were aborted, passive side 5617 * always hits here 5618 */ 5619 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5620 error = ECONNRESET; 5621 } 5622 so->so_state &= ~(SS_ISCONNECTING | 5623 SS_ISDISCONNECTING | 5624 SS_ISCONFIRMING | 5625 SS_ISCONNECTED); 5626 if (error == 0) { 5627 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5628 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5629 error = ENOTCONN; 5630 } 5631 } 5632 goto out; 5633 } 5634 } 5635 if (block_allowed) { 5636 error = sbwait(&so->so_rcv); 5637 if (error) { 5638 goto out; 5639 } 5640 held_length = 0; 5641 goto restart_nosblocks; 5642 } else { 5643 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5644 error = EWOULDBLOCK; 5645 goto out; 5646 } 5647 } 5648 if (hold_sblock == 1) { 5649 SOCKBUF_UNLOCK(&so->so_rcv); 5650 hold_sblock = 0; 5651 } 5652 /* we possibly have data we can read */ 5653 /* sa_ignore FREED_MEMORY */ 5654 control = TAILQ_FIRST(&inp->read_queue); 5655 if (control == NULL) { 5656 /* 5657 * This could be happening since the appender did the 5658 * increment but as not yet did the tailq insert onto the 5659 * read_queue 5660 */ 5661 if (hold_rlock == 0) { 5662 SCTP_INP_READ_LOCK(inp); 5663 } 5664 control = TAILQ_FIRST(&inp->read_queue); 5665 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5666 #ifdef INVARIANTS 5667 panic("Huh, its non zero and nothing on control?"); 5668 #endif 5669 so->so_rcv.sb_cc = 0; 5670 } 5671 SCTP_INP_READ_UNLOCK(inp); 5672 hold_rlock = 0; 5673 goto restart; 5674 } 5675 5676 if ((control->length == 0) && 5677 (control->do_not_ref_stcb)) { 5678 /* 5679 * Clean up code for freeing assoc that left behind a 5680 * pdapi.. maybe a peer in EEOR that just closed after 5681 * sending and never indicated a EOR. 5682 */ 5683 if (hold_rlock == 0) { 5684 hold_rlock = 1; 5685 SCTP_INP_READ_LOCK(inp); 5686 } 5687 control->held_length = 0; 5688 if (control->data) { 5689 /* Hmm there is data here .. 
			   fix */
			struct mbuf *m_tmp;
			int cnt = 0;

			m_tmp = control->data;
			while (m_tmp) {
				cnt += SCTP_BUF_LEN(m_tmp);
				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
					control->tail_mbuf = m_tmp;
					control->end_added = 1;
				}
				m_tmp = SCTP_BUF_NEXT(m_tmp);
			}
			control->length = cnt;
		} else {
			/* remove it */
			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->end_added == 1)) {
		/*
		 * Do we also need to check for (control->pdapi_aborted ==
		 * 1)?
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		TAILQ_REMOVE(&inp->read_queue, control, next);
		if (control->data) {
#ifdef INVARIANTS
			panic("control->data not null but control->length == 0");
#else
			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
			sctp_m_freem(control->data);
			control->data = NULL;
#endif
		}
		if (control->aux_data) {
			sctp_m_free(control->aux_data);
			control->aux_data = NULL;
		}
#ifdef INVARIANTS
		if (control->on_strm_q) {
			panic("About to free ctl:%p so:%p and its in %d",
			    control, so, control->on_strm_q);
		}
#endif
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if (control->length == 0) {
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
		    (filling_sinfo)) {
			/* find a more suitable one than this */
			ctl = TAILQ_NEXT(control, next);
			while (ctl) {
				if ((ctl->stcb != control->stcb) && (ctl->length) &&
				    (ctl->some_taken ||
				    (ctl->spec_flags & M_NOTIFICATION) ||
				    ((ctl->do_not_ref_stcb == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present. If we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery has started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered.
					 */
					control = ctl;
					goto found_one;
				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
				    (ctl->length) &&
				    ((ctl->some_taken) ||
				    ((ctl->do_not_ref_stcb == 0) &&
				    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to that tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notification on the same assoc to
					 * be delivered.
					 */
					control = ctl;
					goto found_one;
				}
				ctl = TAILQ_NEXT(ctl, next);
			}
		}
		/*
		 * If we reach here, no suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into our held count, and it's time to sleep again.
5799 */ 5800 held_length = so->so_rcv.sb_cc; 5801 control->held_length = so->so_rcv.sb_cc; 5802 goto restart; 5803 } 5804 /* Clear the held length since there is something to read */ 5805 control->held_length = 0; 5806 found_one: 5807 /* 5808 * If we reach here, control has a some data for us to read off. 5809 * Note that stcb COULD be NULL. 5810 */ 5811 if (hold_rlock == 0) { 5812 hold_rlock = 1; 5813 SCTP_INP_READ_LOCK(inp); 5814 } 5815 control->some_taken++; 5816 stcb = control->stcb; 5817 if (stcb) { 5818 if ((control->do_not_ref_stcb == 0) && 5819 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5820 if (freecnt_applied == 0) 5821 stcb = NULL; 5822 } else if (control->do_not_ref_stcb == 0) { 5823 /* you can't free it on me please */ 5824 /* 5825 * The lock on the socket buffer protects us so the 5826 * free code will stop. But since we used the 5827 * socketbuf lock and the sender uses the tcb_lock 5828 * to increment, we need to use the atomic add to 5829 * the refcnt 5830 */ 5831 if (freecnt_applied) { 5832 #ifdef INVARIANTS 5833 panic("refcnt already incremented"); 5834 #else 5835 SCTP_PRINTF("refcnt already incremented?\n"); 5836 #endif 5837 } else { 5838 atomic_add_int(&stcb->asoc.refcnt, 1); 5839 freecnt_applied = 1; 5840 } 5841 /* 5842 * Setup to remember how much we have not yet told 5843 * the peer our rwnd has opened up. Note we grab the 5844 * value from the tcb from last time. Note too that 5845 * sack sending clears this when a sack is sent, 5846 * which is fine. Once we hit the rwnd_req, we then 5847 * will go to the sctp_user_rcvd() that will not 5848 * lock until it KNOWs it MUST send a WUP-SACK. 5849 */ 5850 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5851 stcb->freed_by_sorcv_sincelast = 0; 5852 } 5853 } 5854 if (stcb && 5855 ((control->spec_flags & M_NOTIFICATION) == 0) && 5856 control->do_not_ref_stcb == 0) { 5857 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5858 } 5859 5860 /* First lets get off the sinfo and sockaddr info */ 5861 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5862 sinfo->sinfo_stream = control->sinfo_stream; 5863 sinfo->sinfo_ssn = (uint16_t)control->mid; 5864 sinfo->sinfo_flags = control->sinfo_flags; 5865 sinfo->sinfo_ppid = control->sinfo_ppid; 5866 sinfo->sinfo_context = control->sinfo_context; 5867 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5868 sinfo->sinfo_tsn = control->sinfo_tsn; 5869 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5870 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5871 nxt = TAILQ_NEXT(control, next); 5872 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5873 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5874 struct sctp_extrcvinfo *s_extra; 5875 5876 s_extra = (struct sctp_extrcvinfo *)sinfo; 5877 if ((nxt) && 5878 (nxt->length)) { 5879 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5880 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5881 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5882 } 5883 if (nxt->spec_flags & M_NOTIFICATION) { 5884 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5885 } 5886 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5887 s_extra->serinfo_next_length = nxt->length; 5888 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5889 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5890 if (nxt->tail_mbuf != NULL) { 5891 if (nxt->end_added) { 5892 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5893 } 5894 } 5895 } else { 5896 /* 5897 * we explicitly 0 this, since the memcpy 5898 * got some other things 
beyond the older 5899 * sinfo_ that is on the control's structure 5900 * :-D 5901 */ 5902 nxt = NULL; 5903 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5904 s_extra->serinfo_next_aid = 0; 5905 s_extra->serinfo_next_length = 0; 5906 s_extra->serinfo_next_ppid = 0; 5907 s_extra->serinfo_next_stream = 0; 5908 } 5909 } 5910 /* 5911 * update off the real current cum-ack, if we have an stcb. 5912 */ 5913 if ((control->do_not_ref_stcb == 0) && stcb) 5914 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5915 /* 5916 * mask off the high bits, we keep the actual chunk bits in 5917 * there. 5918 */ 5919 sinfo->sinfo_flags &= 0x00ff; 5920 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5921 sinfo->sinfo_flags |= SCTP_UNORDERED; 5922 } 5923 } 5924 #ifdef SCTP_ASOCLOG_OF_TSNS 5925 { 5926 int index, newindex; 5927 struct sctp_pcbtsn_rlog *entry; 5928 5929 do { 5930 index = inp->readlog_index; 5931 newindex = index + 1; 5932 if (newindex >= SCTP_READ_LOG_SIZE) { 5933 newindex = 0; 5934 } 5935 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5936 entry = &inp->readlog[index]; 5937 entry->vtag = control->sinfo_assoc_id; 5938 entry->strm = control->sinfo_stream; 5939 entry->seq = (uint16_t)control->mid; 5940 entry->sz = control->length; 5941 entry->flgs = control->sinfo_flags; 5942 } 5943 #endif 5944 if ((fromlen > 0) && (from != NULL)) { 5945 union sctp_sockstore store; 5946 size_t len; 5947 5948 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5949 #ifdef INET6 5950 case AF_INET6: 5951 len = sizeof(struct sockaddr_in6); 5952 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5953 store.sin6.sin6_port = control->port_from; 5954 break; 5955 #endif 5956 #ifdef INET 5957 case AF_INET: 5958 #ifdef INET6 5959 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5960 len = sizeof(struct sockaddr_in6); 5961 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5962 &store.sin6); 5963 store.sin6.sin6_port = control->port_from; 5964 } else { 5965 len = sizeof(struct sockaddr_in); 5966 store.sin = control->whoFrom->ro._l_addr.sin; 5967 store.sin.sin_port = control->port_from; 5968 } 5969 #else 5970 len = sizeof(struct sockaddr_in); 5971 store.sin = control->whoFrom->ro._l_addr.sin; 5972 store.sin.sin_port = control->port_from; 5973 #endif 5974 break; 5975 #endif 5976 default: 5977 len = 0; 5978 break; 5979 } 5980 memcpy(from, &store, min((size_t)fromlen, len)); 5981 #ifdef INET6 5982 { 5983 struct sockaddr_in6 lsa6, *from6; 5984 5985 from6 = (struct sockaddr_in6 *)from; 5986 sctp_recover_scope_mac(from6, (&lsa6)); 5987 } 5988 #endif 5989 } 5990 if (hold_rlock) { 5991 SCTP_INP_READ_UNLOCK(inp); 5992 hold_rlock = 0; 5993 } 5994 if (hold_sblock) { 5995 SOCKBUF_UNLOCK(&so->so_rcv); 5996 hold_sblock = 0; 5997 } 5998 /* now copy out what data we can */ 5999 if (mp == NULL) { 6000 /* copy out each mbuf in the chain up to length */ 6001 get_more_data: 6002 m = control->data; 6003 while (m) { 6004 /* Move out all we can */ 6005 cp_len = uio->uio_resid; 6006 my_len = SCTP_BUF_LEN(m); 6007 if (cp_len > my_len) { 6008 /* not enough in this buf */ 6009 cp_len = my_len; 6010 } 6011 if (hold_rlock) { 6012 SCTP_INP_READ_UNLOCK(inp); 6013 hold_rlock = 0; 6014 } 6015 if (cp_len > 0) 6016 error = uiomove(mtod(m, char *), (int)cp_len, uio); 6017 /* re-read */ 6018 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 6019 goto release; 6020 } 6021 6022 if ((control->do_not_ref_stcb == 0) && stcb && 6023 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 6024 no_rcv_needed = 1; 6025 } 6026 if 
(error) { 6027 /* error we are out of here */ 6028 goto release; 6029 } 6030 SCTP_INP_READ_LOCK(inp); 6031 hold_rlock = 1; 6032 if (cp_len == SCTP_BUF_LEN(m)) { 6033 if ((SCTP_BUF_NEXT(m) == NULL) && 6034 (control->end_added)) { 6035 out_flags |= MSG_EOR; 6036 if ((control->do_not_ref_stcb == 0) && 6037 (control->stcb != NULL) && 6038 ((control->spec_flags & M_NOTIFICATION) == 0)) 6039 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6040 } 6041 if (control->spec_flags & M_NOTIFICATION) { 6042 out_flags |= MSG_NOTIFICATION; 6043 } 6044 /* we ate up the mbuf */ 6045 if (in_flags & MSG_PEEK) { 6046 /* just looking */ 6047 m = SCTP_BUF_NEXT(m); 6048 copied_so_far += cp_len; 6049 } else { 6050 /* dispose of the mbuf */ 6051 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6052 sctp_sblog(&so->so_rcv, 6053 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6054 } 6055 sctp_sbfree(control, stcb, &so->so_rcv, m); 6056 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6057 sctp_sblog(&so->so_rcv, 6058 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6059 } 6060 copied_so_far += cp_len; 6061 freed_so_far += (uint32_t)cp_len; 6062 freed_so_far += MSIZE; 6063 atomic_subtract_int(&control->length, cp_len); 6064 control->data = sctp_m_free(m); 6065 m = control->data; 6066 /* 6067 * been through it all, must hold sb 6068 * lock ok to null tail 6069 */ 6070 if (control->data == NULL) { 6071 #ifdef INVARIANTS 6072 if ((control->end_added == 0) || 6073 (TAILQ_NEXT(control, next) == NULL)) { 6074 /* 6075 * If the end is not 6076 * added, OR the 6077 * next is NOT null 6078 * we MUST have the 6079 * lock. 6080 */ 6081 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6082 panic("Hmm we don't own the lock?"); 6083 } 6084 } 6085 #endif 6086 control->tail_mbuf = NULL; 6087 #ifdef INVARIANTS 6088 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6089 panic("end_added, nothing left and no MSG_EOR"); 6090 } 6091 #endif 6092 } 6093 } 6094 } else { 6095 /* Do we need to trim the mbuf? */ 6096 if (control->spec_flags & M_NOTIFICATION) { 6097 out_flags |= MSG_NOTIFICATION; 6098 } 6099 if ((in_flags & MSG_PEEK) == 0) { 6100 SCTP_BUF_RESV_UF(m, cp_len); 6101 SCTP_BUF_LEN(m) -= (int)cp_len; 6102 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6103 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6104 } 6105 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 6106 if ((control->do_not_ref_stcb == 0) && 6107 stcb) { 6108 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 6109 } 6110 copied_so_far += cp_len; 6111 freed_so_far += (uint32_t)cp_len; 6112 freed_so_far += MSIZE; 6113 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6114 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6115 SCTP_LOG_SBRESULT, 0); 6116 } 6117 atomic_subtract_int(&control->length, cp_len); 6118 } else { 6119 copied_so_far += cp_len; 6120 } 6121 } 6122 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6123 break; 6124 } 6125 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6126 (control->do_not_ref_stcb == 0) && 6127 (freed_so_far >= rwnd_req)) { 6128 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6129 } 6130 } /* end while(m) */ 6131 /* 6132 * At this point we have looked at it all and we either have 6133 * a MSG_EOR/or read all the user wants... <OR> 6134 * control->length == 0. 
		 */
		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
			/* we are done with this control */
			if (control->length == 0) {
				if (control->data) {
#ifdef INVARIANTS
					panic("control->data not null at read eor?");
#else
					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
					sctp_m_freem(control->data);
					control->data = NULL;
#endif
				}
	done_with_control:
				if (hold_rlock == 0) {
					SCTP_INP_READ_LOCK(inp);
					hold_rlock = 1;
				}
				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hidden data */
				if (control->held_length) {
					held_length = 0;
					control->held_length = 0;
					wakeup_read_socket = 1;
				}
				if (control->aux_data) {
					sctp_m_free(control->aux_data);
					control->aux_data = NULL;
				}
				no_rcv_needed = control->do_not_ref_stcb;
				sctp_free_remote_addr(control->whoFrom);
				control->data = NULL;
#ifdef INVARIANTS
				if (control->on_strm_q) {
					panic("About to free ctl:%p so:%p and its in %d",
					    control, so, control->on_strm_q);
				}
#endif
				sctp_free_a_readq(stcb, control);
				control = NULL;
				if ((freed_so_far >= rwnd_req) &&
				    (no_rcv_needed == 0))
					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);

			} else {
				/*
				 * The user did not read all of this
				 * message, turn off the returned MSG_EOR
				 * since we are leaving more behind on the
				 * control to read.
				 */
#ifdef INVARIANTS
				if (control->end_added &&
				    (control->data == NULL) &&
				    (control->tail_mbuf == NULL)) {
					panic("Gak, control->length is corrupt?");
				}
#endif
				no_rcv_needed = control->do_not_ref_stcb;
				out_flags &= ~MSG_EOR;
			}
		}
		if (out_flags & MSG_EOR) {
			goto release;
		}
		if ((uio->uio_resid == 0) ||
		    ((in_eeor_mode) &&
		    (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
			goto release;
		}
		/*
		 * If I hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions: can we block? If not,
		 * we are done. Did the user NOT set MSG_WAITALL?
		 */
		if (block_allowed == 0) {
			goto release;
		}
		/*
		 * We need to wait for more data. A few things: - We don't
		 * sbunlock() so we don't get someone else reading. - We
		 * must be sure to account for the case where what is added
		 * is NOT to our control when we wake up.
		 */

		/*
		 * Do we need to tell the transport a rwnd update might be
		 * needed before we go to sleep?
6223 */ 6224 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6225 ((freed_so_far >= rwnd_req) && 6226 (control->do_not_ref_stcb == 0) && 6227 (no_rcv_needed == 0))) { 6228 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6229 } 6230 wait_some_more: 6231 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6232 goto release; 6233 } 6234 6235 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6236 goto release; 6237 6238 if (hold_rlock == 1) { 6239 SCTP_INP_READ_UNLOCK(inp); 6240 hold_rlock = 0; 6241 } 6242 if (hold_sblock == 0) { 6243 SOCKBUF_LOCK(&so->so_rcv); 6244 hold_sblock = 1; 6245 } 6246 if ((copied_so_far) && (control->length == 0) && 6247 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6248 goto release; 6249 } 6250 if (so->so_rcv.sb_cc <= control->held_length) { 6251 error = sbwait(&so->so_rcv); 6252 if (error) { 6253 goto release; 6254 } 6255 control->held_length = 0; 6256 } 6257 if (hold_sblock) { 6258 SOCKBUF_UNLOCK(&so->so_rcv); 6259 hold_sblock = 0; 6260 } 6261 if (control->length == 0) { 6262 /* still nothing here */ 6263 if (control->end_added == 1) { 6264 /* he aborted, or is done i.e.did a shutdown */ 6265 out_flags |= MSG_EOR; 6266 if (control->pdapi_aborted) { 6267 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6268 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6269 6270 out_flags |= MSG_TRUNC; 6271 } else { 6272 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6273 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6274 } 6275 goto done_with_control; 6276 } 6277 if (so->so_rcv.sb_cc > held_length) { 6278 control->held_length = so->so_rcv.sb_cc; 6279 held_length = 0; 6280 } 6281 goto wait_some_more; 6282 } else if (control->data == NULL) { 6283 /* 6284 * we must re-sync since data is probably being 6285 * added 6286 */ 6287 SCTP_INP_READ_LOCK(inp); 6288 if ((control->length > 0) && (control->data == NULL)) { 6289 /* 6290 * big trouble.. we have the lock and its 6291 * corrupt? 6292 */ 6293 #ifdef INVARIANTS 6294 panic("Impossible data==NULL length !=0"); 6295 #endif 6296 out_flags |= MSG_EOR; 6297 out_flags |= MSG_TRUNC; 6298 control->length = 0; 6299 SCTP_INP_READ_UNLOCK(inp); 6300 goto done_with_control; 6301 } 6302 SCTP_INP_READ_UNLOCK(inp); 6303 /* We will fall around to get more data */ 6304 } 6305 goto get_more_data; 6306 } else { 6307 /*- 6308 * Give caller back the mbuf chain, 6309 * store in uio_resid the length 6310 */ 6311 wakeup_read_socket = 0; 6312 if ((control->end_added == 0) || 6313 (TAILQ_NEXT(control, next) == NULL)) { 6314 /* Need to get rlock */ 6315 if (hold_rlock == 0) { 6316 SCTP_INP_READ_LOCK(inp); 6317 hold_rlock = 1; 6318 } 6319 } 6320 if (control->end_added) { 6321 out_flags |= MSG_EOR; 6322 if ((control->do_not_ref_stcb == 0) && 6323 (control->stcb != NULL) && 6324 ((control->spec_flags & M_NOTIFICATION) == 0)) 6325 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6326 } 6327 if (control->spec_flags & M_NOTIFICATION) { 6328 out_flags |= MSG_NOTIFICATION; 6329 } 6330 uio->uio_resid = control->length; 6331 *mp = control->data; 6332 m = control->data; 6333 while (m) { 6334 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6335 sctp_sblog(&so->so_rcv, 6336 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6337 } 6338 sctp_sbfree(control, stcb, &so->so_rcv, m); 6339 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6340 freed_so_far += MSIZE; 6341 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6342 sctp_sblog(&so->so_rcv, 6343 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6344 } 6345 m = SCTP_BUF_NEXT(m); 6346 } 6347 control->data = control->tail_mbuf = NULL; 6348 control->length = 0; 6349 if (out_flags & MSG_EOR) { 6350 /* Done with this control */ 6351 goto done_with_control; 6352 } 6353 } 6354 release: 6355 if (hold_rlock == 1) { 6356 SCTP_INP_READ_UNLOCK(inp); 6357 hold_rlock = 0; 6358 } 6359 if (hold_sblock == 1) { 6360 SOCKBUF_UNLOCK(&so->so_rcv); 6361 hold_sblock = 0; 6362 } 6363 6364 sbunlock(&so->so_rcv); 6365 sockbuf_lock = 0; 6366 6367 release_unlocked: 6368 if (hold_sblock) { 6369 SOCKBUF_UNLOCK(&so->so_rcv); 6370 hold_sblock = 0; 6371 } 6372 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6373 if ((freed_so_far >= rwnd_req) && 6374 (control && (control->do_not_ref_stcb == 0)) && 6375 (no_rcv_needed == 0)) 6376 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6377 } 6378 out: 6379 if (msg_flags) { 6380 *msg_flags = out_flags; 6381 } 6382 if (((out_flags & MSG_EOR) == 0) && 6383 ((in_flags & MSG_PEEK) == 0) && 6384 (sinfo) && 6385 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6386 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6387 struct sctp_extrcvinfo *s_extra; 6388 6389 s_extra = (struct sctp_extrcvinfo *)sinfo; 6390 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6391 } 6392 if (hold_rlock == 1) { 6393 SCTP_INP_READ_UNLOCK(inp); 6394 } 6395 if (hold_sblock) { 6396 SOCKBUF_UNLOCK(&so->so_rcv); 6397 } 6398 if (sockbuf_lock) { 6399 sbunlock(&so->so_rcv); 6400 } 6401 6402 if (freecnt_applied) { 6403 /* 6404 * The lock on the socket buffer protects us so the free 6405 * code will stop. But since we used the socketbuf lock and 6406 * the sender uses the tcb_lock to increment, we need to use 6407 * the atomic add to the refcnt. 6408 */ 6409 if (stcb == NULL) { 6410 #ifdef INVARIANTS 6411 panic("stcb for refcnt has gone NULL?"); 6412 goto stage_left; 6413 #else 6414 goto stage_left; 6415 #endif 6416 } 6417 /* Save the value back for next time */ 6418 stcb->freed_by_sorcv_sincelast = freed_so_far; 6419 atomic_add_int(&stcb->asoc.refcnt, -1); 6420 } 6421 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6422 if (stcb) { 6423 sctp_misc_ints(SCTP_SORECV_DONE, 6424 freed_so_far, 6425 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6426 stcb->asoc.my_rwnd, 6427 so->so_rcv.sb_cc); 6428 } else { 6429 sctp_misc_ints(SCTP_SORECV_DONE, 6430 freed_so_far, 6431 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6432 0, 6433 so->so_rcv.sb_cc); 6434 } 6435 } 6436 stage_left: 6437 if (wakeup_read_socket) { 6438 sctp_sorwakeup(inp, so); 6439 } 6440 return (error); 6441 } 6442 6443 6444 #ifdef SCTP_MBUF_LOGGING 6445 struct mbuf * 6446 sctp_m_free(struct mbuf *m) 6447 { 6448 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6449 sctp_log_mb(m, SCTP_MBUF_IFREE); 6450 } 6451 return (m_free(m)); 6452 } 6453 6454 void 6455 sctp_m_freem(struct mbuf *mb) 6456 { 6457 while (mb != NULL) 6458 mb = sctp_m_free(mb); 6459 } 6460 6461 #endif 6462 6463 int 6464 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6465 { 6466 /* 6467 * Given a local address. For all associations that holds the 6468 * address, request a peer-set-primary. 
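 *
 * In outline (as read from the code below, not a separate contract): the
 * address is resolved to its sctp_ifa, a sctp_laddr work item is allocated
 * and tagged with SCTP_SET_PRIM_ADDR, the item is placed on the address
 * work queue, and the SCTP_TIMER_TYPE_ADDR_WQ timer is started so the
 * iterator is awakened to process the request asynchronously.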
6469 */ 6470 struct sctp_ifa *ifa; 6471 struct sctp_laddr *wi; 6472 6473 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6474 if (ifa == NULL) { 6475 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6476 return (EADDRNOTAVAIL); 6477 } 6478 /* 6479 * Now that we have the ifa we must awaken the iterator with this 6480 * message. 6481 */ 6482 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6483 if (wi == NULL) { 6484 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6485 return (ENOMEM); 6486 } 6487 /* Now incr the count and int wi structure */ 6488 SCTP_INCR_LADDR_COUNT(); 6489 memset(wi, 0, sizeof(*wi)); 6490 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6491 wi->ifa = ifa; 6492 wi->action = SCTP_SET_PRIM_ADDR; 6493 atomic_add_int(&ifa->refcount, 1); 6494 6495 /* Now add it to the work queue */ 6496 SCTP_WQ_ADDR_LOCK(); 6497 /* 6498 * Should this really be a tailq? As it is we will process the 6499 * newest first :-0 6500 */ 6501 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6502 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6503 (struct sctp_inpcb *)NULL, 6504 (struct sctp_tcb *)NULL, 6505 (struct sctp_nets *)NULL); 6506 SCTP_WQ_ADDR_UNLOCK(); 6507 return (0); 6508 } 6509 6510 6511 int 6512 sctp_soreceive(struct socket *so, 6513 struct sockaddr **psa, 6514 struct uio *uio, 6515 struct mbuf **mp0, 6516 struct mbuf **controlp, 6517 int *flagsp) 6518 { 6519 int error, fromlen; 6520 uint8_t sockbuf[256]; 6521 struct sockaddr *from; 6522 struct sctp_extrcvinfo sinfo; 6523 int filling_sinfo = 1; 6524 int flags; 6525 struct sctp_inpcb *inp; 6526 6527 inp = (struct sctp_inpcb *)so->so_pcb; 6528 /* pickup the assoc we are reading from */ 6529 if (inp == NULL) { 6530 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6531 return (EINVAL); 6532 } 6533 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6534 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6535 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6536 (controlp == NULL)) { 6537 /* user does not want the sndrcv ctl */ 6538 filling_sinfo = 0; 6539 } 6540 if (psa) { 6541 from = (struct sockaddr *)sockbuf; 6542 fromlen = sizeof(sockbuf); 6543 from->sa_len = 0; 6544 } else { 6545 from = NULL; 6546 fromlen = 0; 6547 } 6548 6549 if (filling_sinfo) { 6550 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6551 } 6552 if (flagsp != NULL) { 6553 flags = *flagsp; 6554 } else { 6555 flags = 0; 6556 } 6557 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6558 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6559 if (flagsp != NULL) { 6560 *flagsp = flags; 6561 } 6562 if (controlp != NULL) { 6563 /* copy back the sinfo in a CMSG format */ 6564 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6565 *controlp = sctp_build_ctl_nchunk(inp, 6566 (struct sctp_sndrcvinfo *)&sinfo); 6567 } else { 6568 *controlp = NULL; 6569 } 6570 } 6571 if (psa) { 6572 /* copy back the address info */ 6573 if (from && from->sa_len) { 6574 *psa = sodupsockaddr(from, M_NOWAIT); 6575 } else { 6576 *psa = NULL; 6577 } 6578 } 6579 return (error); 6580 } 6581 6582 6583 6584 6585 6586 int 6587 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6588 int totaddr, int *error) 6589 { 6590 int added = 0; 6591 int i; 6592 struct sctp_inpcb *inp; 6593 struct sockaddr *sa; 6594 size_t incr = 0; 6595 #ifdef INET 6596 struct sockaddr_in *sin; 6597 #endif 6598 #ifdef INET6 6599 struct sockaddr_in6 *sin6; 6600 #endif 6601 
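	/*
	 * Illustrative note (an assumption read from the loop below, not a
	 * documented contract): 'addr' points at 'totaddr' sockaddrs packed
	 * back to back, so the cursor advances by the size of the current
	 * entry, e.g.
	 *
	 *	addr -> [sockaddr_in][sockaddr_in6][sockaddr_in] ...
	 *
	 * Each usable entry is handed to sctp_add_remote_addr(); on an
	 * invalid address or a failed add the association is freed and
	 * *error is set before returning.
	 */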
6602 sa = addr; 6603 inp = stcb->sctp_ep; 6604 *error = 0; 6605 for (i = 0; i < totaddr; i++) { 6606 switch (sa->sa_family) { 6607 #ifdef INET 6608 case AF_INET: 6609 incr = sizeof(struct sockaddr_in); 6610 sin = (struct sockaddr_in *)sa; 6611 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6612 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6613 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6614 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6615 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6616 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6617 *error = EINVAL; 6618 goto out_now; 6619 } 6620 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6621 SCTP_DONOT_SETSCOPE, 6622 SCTP_ADDR_IS_CONFIRMED)) { 6623 /* assoc gone no un-lock */ 6624 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6625 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6626 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6627 *error = ENOBUFS; 6628 goto out_now; 6629 } 6630 added++; 6631 break; 6632 #endif 6633 #ifdef INET6 6634 case AF_INET6: 6635 incr = sizeof(struct sockaddr_in6); 6636 sin6 = (struct sockaddr_in6 *)sa; 6637 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6638 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6639 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6640 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6641 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6642 *error = EINVAL; 6643 goto out_now; 6644 } 6645 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6646 SCTP_DONOT_SETSCOPE, 6647 SCTP_ADDR_IS_CONFIRMED)) { 6648 /* assoc gone no un-lock */ 6649 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6650 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6651 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6652 *error = ENOBUFS; 6653 goto out_now; 6654 } 6655 added++; 6656 break; 6657 #endif 6658 default: 6659 break; 6660 } 6661 sa = (struct sockaddr *)((caddr_t)sa + incr); 6662 } 6663 out_now: 6664 return (added); 6665 } 6666 6667 int 6668 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6669 unsigned int totaddr, 6670 unsigned int *num_v4, unsigned int *num_v6, 6671 unsigned int limit) 6672 { 6673 struct sockaddr *sa; 6674 struct sctp_tcb *stcb; 6675 unsigned int incr, at, i; 6676 6677 at = 0; 6678 sa = addr; 6679 *num_v6 = *num_v4 = 0; 6680 /* account and validate addresses */ 6681 if (totaddr == 0) { 6682 return (EINVAL); 6683 } 6684 for (i = 0; i < totaddr; i++) { 6685 if (at + sizeof(struct sockaddr) > limit) { 6686 return (EINVAL); 6687 } 6688 switch (sa->sa_family) { 6689 #ifdef INET 6690 case AF_INET: 6691 incr = (unsigned int)sizeof(struct sockaddr_in); 6692 if (sa->sa_len != incr) { 6693 return (EINVAL); 6694 } 6695 (*num_v4) += 1; 6696 break; 6697 #endif 6698 #ifdef INET6 6699 case AF_INET6: 6700 { 6701 struct sockaddr_in6 *sin6; 6702 6703 sin6 = (struct sockaddr_in6 *)sa; 6704 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6705 /* Must be non-mapped for connectx */ 6706 return (EINVAL); 6707 } 6708 incr = (unsigned int)sizeof(struct sockaddr_in6); 6709 if (sa->sa_len != incr) { 6710 return (EINVAL); 6711 } 6712 (*num_v6) += 1; 6713 break; 6714 } 6715 #endif 6716 default: 6717 return (EINVAL); 6718 } 6719 if ((at + incr) > limit) { 6720 return (EINVAL); 6721 } 6722 SCTP_INP_INCR_REF(inp); 6723 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6724 if (stcb != NULL) { 6725 SCTP_TCB_UNLOCK(stcb); 6726 return (EALREADY); 6727 } else { 6728 SCTP_INP_DECR_REF(inp); 6729 } 6730 at += incr; 6731 sa = (struct 
sockaddr *)((caddr_t)sa + incr); 6732 } 6733 return (0); 6734 } 6735 6736 /* 6737 * sctp_bindx(ADD) for one address. 6738 * assumes all arguments are valid/checked by caller. 6739 */ 6740 void 6741 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6742 struct sockaddr *sa, uint32_t vrf_id, int *error, 6743 void *p) 6744 { 6745 #if defined(INET) && defined(INET6) 6746 struct sockaddr_in sin; 6747 #endif 6748 #ifdef INET6 6749 struct sockaddr_in6 *sin6; 6750 #endif 6751 #ifdef INET 6752 struct sockaddr_in *sinp; 6753 #endif 6754 struct sockaddr *addr_to_use; 6755 struct sctp_inpcb *lep; 6756 uint16_t port; 6757 6758 /* see if we're bound all already! */ 6759 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6760 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6761 *error = EINVAL; 6762 return; 6763 } 6764 switch (sa->sa_family) { 6765 #ifdef INET6 6766 case AF_INET6: 6767 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6768 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6769 *error = EINVAL; 6770 return; 6771 } 6772 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6773 /* can only bind v6 on PF_INET6 sockets */ 6774 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6775 *error = EINVAL; 6776 return; 6777 } 6778 sin6 = (struct sockaddr_in6 *)sa; 6779 port = sin6->sin6_port; 6780 #ifdef INET 6781 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6782 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6783 SCTP_IPV6_V6ONLY(inp)) { 6784 /* can't bind v4-mapped on PF_INET sockets */ 6785 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6786 *error = EINVAL; 6787 return; 6788 } 6789 in6_sin6_2_sin(&sin, sin6); 6790 addr_to_use = (struct sockaddr *)&sin; 6791 } else { 6792 addr_to_use = sa; 6793 } 6794 #else 6795 addr_to_use = sa; 6796 #endif 6797 break; 6798 #endif 6799 #ifdef INET 6800 case AF_INET: 6801 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6802 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6803 *error = EINVAL; 6804 return; 6805 } 6806 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6807 SCTP_IPV6_V6ONLY(inp)) { 6808 /* can't bind v4 on PF_INET sockets */ 6809 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6810 *error = EINVAL; 6811 return; 6812 } 6813 sinp = (struct sockaddr_in *)sa; 6814 port = sinp->sin_port; 6815 addr_to_use = sa; 6816 break; 6817 #endif 6818 default: 6819 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6820 *error = EINVAL; 6821 return; 6822 } 6823 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6824 if (p == NULL) { 6825 /* Can't get proc for Net/Open BSD */ 6826 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6827 *error = EINVAL; 6828 return; 6829 } 6830 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6831 return; 6832 } 6833 /* Validate the incoming port. */ 6834 if ((port != 0) && (port != inp->sctp_lport)) { 6835 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6836 *error = EINVAL; 6837 return; 6838 } 6839 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6840 if (lep == NULL) { 6841 /* add the address */ 6842 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6843 SCTP_ADD_IP_ADDRESS, vrf_id); 6844 } else { 6845 if (lep != inp) { 6846 *error = EADDRINUSE; 6847 } 6848 SCTP_INP_DECR_REF(lep); 6849 } 6850 } 6851 6852 /* 6853 * sctp_bindx(DELETE) for one address. 6854 * assumes all arguments are valid/checked by caller. 
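 * As in the ADD path above, a v4-mapped IPv6 address is first converted to
 * a plain IPv4 sockaddr (when INET is configured); the removal itself is
 * performed by sctp_addr_mgmt_ep_sa() with SCTP_DEL_IP_ADDRESS.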
6855 */ 6856 void 6857 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6858 struct sockaddr *sa, uint32_t vrf_id, int *error) 6859 { 6860 struct sockaddr *addr_to_use; 6861 #if defined(INET) && defined(INET6) 6862 struct sockaddr_in6 *sin6; 6863 struct sockaddr_in sin; 6864 #endif 6865 6866 /* see if we're bound all already! */ 6867 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6868 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6869 *error = EINVAL; 6870 return; 6871 } 6872 switch (sa->sa_family) { 6873 #ifdef INET6 6874 case AF_INET6: 6875 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6876 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6877 *error = EINVAL; 6878 return; 6879 } 6880 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6881 /* can only bind v6 on PF_INET6 sockets */ 6882 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6883 *error = EINVAL; 6884 return; 6885 } 6886 #ifdef INET 6887 sin6 = (struct sockaddr_in6 *)sa; 6888 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6889 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6890 SCTP_IPV6_V6ONLY(inp)) { 6891 /* can't bind mapped-v4 on PF_INET sockets */ 6892 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6893 *error = EINVAL; 6894 return; 6895 } 6896 in6_sin6_2_sin(&sin, sin6); 6897 addr_to_use = (struct sockaddr *)&sin; 6898 } else { 6899 addr_to_use = sa; 6900 } 6901 #else 6902 addr_to_use = sa; 6903 #endif 6904 break; 6905 #endif 6906 #ifdef INET 6907 case AF_INET: 6908 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6909 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6910 *error = EINVAL; 6911 return; 6912 } 6913 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6914 SCTP_IPV6_V6ONLY(inp)) { 6915 /* can't bind v4 on PF_INET sockets */ 6916 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6917 *error = EINVAL; 6918 return; 6919 } 6920 addr_to_use = sa; 6921 break; 6922 #endif 6923 default: 6924 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6925 *error = EINVAL; 6926 return; 6927 } 6928 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6929 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6930 vrf_id); 6931 } 6932 6933 /* 6934 * returns the valid local address count for an assoc, taking into account 6935 * all scoping rules 6936 */ 6937 int 6938 sctp_local_addr_count(struct sctp_tcb *stcb) 6939 { 6940 int loopback_scope; 6941 #if defined(INET) 6942 int ipv4_local_scope, ipv4_addr_legal; 6943 #endif 6944 #if defined(INET6) 6945 int local_scope, site_scope, ipv6_addr_legal; 6946 #endif 6947 struct sctp_vrf *vrf; 6948 struct sctp_ifn *sctp_ifn; 6949 struct sctp_ifa *sctp_ifa; 6950 int count = 0; 6951 6952 /* Turn on all the appropriate scopes */ 6953 loopback_scope = stcb->asoc.scope.loopback_scope; 6954 #if defined(INET) 6955 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6956 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6957 #endif 6958 #if defined(INET6) 6959 local_scope = stcb->asoc.scope.local_scope; 6960 site_scope = stcb->asoc.scope.site_scope; 6961 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6962 #endif 6963 SCTP_IPI_ADDR_RLOCK(); 6964 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6965 if (vrf == NULL) { 6966 /* no vrf, no addresses */ 6967 SCTP_IPI_ADDR_RUNLOCK(); 6968 return (0); 6969 } 6970 6971 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6972 /* 6973 * bound all case: go through all ifns on the vrf 6974 */ 6975 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6976 if ((loopback_scope == 0) && 6977 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6978 continue; 6979 } 6980 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6981 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6982 continue; 6983 switch (sctp_ifa->address.sa.sa_family) { 6984 #ifdef INET 6985 case AF_INET: 6986 if (ipv4_addr_legal) { 6987 struct sockaddr_in *sin; 6988 6989 sin = &sctp_ifa->address.sin; 6990 if (sin->sin_addr.s_addr == 0) { 6991 /* 6992 * skip unspecified 6993 * addrs 6994 */ 6995 continue; 6996 } 6997 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 6998 &sin->sin_addr) != 0) { 6999 continue; 7000 } 7001 if ((ipv4_local_scope == 0) && 7002 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 7003 continue; 7004 } 7005 /* count this one */ 7006 count++; 7007 } else { 7008 continue; 7009 } 7010 break; 7011 #endif 7012 #ifdef INET6 7013 case AF_INET6: 7014 if (ipv6_addr_legal) { 7015 struct sockaddr_in6 *sin6; 7016 7017 sin6 = &sctp_ifa->address.sin6; 7018 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 7019 continue; 7020 } 7021 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 7022 &sin6->sin6_addr) != 0) { 7023 continue; 7024 } 7025 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 7026 if (local_scope == 0) 7027 continue; 7028 if (sin6->sin6_scope_id == 0) { 7029 if (sa6_recoverscope(sin6) != 0) 7030 /* 7031 * 7032 * bad 7033 * link 7034 * 7035 * local 7036 * 7037 * address 7038 */ 7039 continue; 7040 } 7041 } 7042 if ((site_scope == 0) && 7043 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7044 continue; 7045 } 7046 /* count this one */ 7047 count++; 7048 } 7049 break; 7050 #endif 7051 default: 7052 /* TSNH */ 7053 break; 7054 } 7055 } 7056 } 7057 } else { 7058 /* 7059 * subset bound case 7060 */ 7061 struct sctp_laddr *laddr; 7062 7063 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7064 sctp_nxt_addr) { 7065 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7066 continue; 7067 } 7068 /* count this one */ 7069 count++; 7070 } 7071 } 7072 SCTP_IPI_ADDR_RUNLOCK(); 7073 return (count); 7074 } 7075 7076 #if defined(SCTP_LOCAL_TRACE_BUF) 7077 7078 void 7079 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7080 { 7081 uint32_t saveindex, newindex; 7082 7083 do { 7084 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7085 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7086 newindex = 1; 7087 } else { 7088 newindex = saveindex + 1; 7089 } 7090 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7091 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7092 saveindex = 0; 7093 } 7094 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7095 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7096 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7097 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7098 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7099 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7100 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7101 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7102 } 7103 7104 #endif 7105 static void 7106 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7107 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7108 { 7109 struct ip *iph; 7110 #ifdef INET6 7111 struct ip6_hdr *ip6; 7112 #endif 7113 struct mbuf *sp, *last; 7114 struct udphdr *uhdr; 7115 uint16_t port; 7116 7117 if ((m->m_flags & M_PKTHDR) == 0) { 7118 /* Can't handle one that is not a pkt hdr */ 7119 goto out; 7120 } 7121 /* Pull the src port */ 7122 iph = mtod(m, struct ip *); 7123 uhdr = (struct udphdr *)((caddr_t)iph + off); 7124 port = uhdr->uh_sport; 7125 /* 7126 * Split out the mbuf chain. Leave the IP header in m, place the 7127 * rest in the sp. 7128 */ 7129 sp = m_split(m, off, M_NOWAIT); 7130 if (sp == NULL) { 7131 /* Gak, drop packet, we can't do a split */ 7132 goto out; 7133 } 7134 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7135 /* Gak, packet can't have an SCTP header in it - too small */ 7136 m_freem(sp); 7137 goto out; 7138 } 7139 /* Now pull up the UDP header and SCTP header together */ 7140 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7141 if (sp == NULL) { 7142 /* Gak pullup failed */ 7143 goto out; 7144 } 7145 /* Trim out the UDP header */ 7146 m_adj(sp, sizeof(struct udphdr)); 7147 7148 /* Now reconstruct the mbuf chain */ 7149 for (last = m; last->m_next; last = last->m_next); 7150 last->m_next = sp; 7151 m->m_pkthdr.len += sp->m_pkthdr.len; 7152 /* 7153 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7154 * checksum and it was valid. Since CSUM_DATA_VALID == 7155 * CSUM_SCTP_VALID this would imply that the HW also verified the 7156 * SCTP checksum. Therefore, clear the bit. 
7157 */ 7158 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7159 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7160 m->m_pkthdr.len, 7161 if_name(m->m_pkthdr.rcvif), 7162 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7163 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7164 iph = mtod(m, struct ip *); 7165 switch (iph->ip_v) { 7166 #ifdef INET 7167 case IPVERSION: 7168 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7169 sctp_input_with_port(m, off, port); 7170 break; 7171 #endif 7172 #ifdef INET6 7173 case IPV6_VERSION >> 4: 7174 ip6 = mtod(m, struct ip6_hdr *); 7175 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7176 sctp6_input_with_port(&m, &off, port); 7177 break; 7178 #endif 7179 default: 7180 goto out; 7181 break; 7182 } 7183 return; 7184 out: 7185 m_freem(m); 7186 } 7187 7188 #ifdef INET 7189 static void 7190 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7191 { 7192 struct ip *outer_ip, *inner_ip; 7193 struct sctphdr *sh; 7194 struct icmp *icmp; 7195 struct udphdr *udp; 7196 struct sctp_inpcb *inp; 7197 struct sctp_tcb *stcb; 7198 struct sctp_nets *net; 7199 struct sctp_init_chunk *ch; 7200 struct sockaddr_in src, dst; 7201 uint8_t type, code; 7202 7203 inner_ip = (struct ip *)vip; 7204 icmp = (struct icmp *)((caddr_t)inner_ip - 7205 (sizeof(struct icmp) - sizeof(struct ip))); 7206 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7207 if (ntohs(outer_ip->ip_len) < 7208 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7209 return; 7210 } 7211 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7212 sh = (struct sctphdr *)(udp + 1); 7213 memset(&src, 0, sizeof(struct sockaddr_in)); 7214 src.sin_family = AF_INET; 7215 src.sin_len = sizeof(struct sockaddr_in); 7216 src.sin_port = sh->src_port; 7217 src.sin_addr = inner_ip->ip_src; 7218 memset(&dst, 0, sizeof(struct sockaddr_in)); 7219 dst.sin_family = AF_INET; 7220 dst.sin_len = sizeof(struct sockaddr_in); 7221 dst.sin_port = sh->dest_port; 7222 dst.sin_addr = inner_ip->ip_dst; 7223 /* 7224 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7225 * holds our local endpoint address. Thus we reverse the dst and the 7226 * src in the lookup. 7227 */ 7228 inp = NULL; 7229 net = NULL; 7230 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7231 (struct sockaddr *)&src, 7232 &inp, &net, 1, 7233 SCTP_DEFAULT_VRFID); 7234 if ((stcb != NULL) && 7235 (net != NULL) && 7236 (inp != NULL)) { 7237 /* Check the UDP port numbers */ 7238 if ((udp->uh_dport != net->port) || 7239 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7240 SCTP_TCB_UNLOCK(stcb); 7241 return; 7242 } 7243 /* Check the verification tag */ 7244 if (ntohl(sh->v_tag) != 0) { 7245 /* 7246 * This must be the verification tag used for 7247 * sending out packets. We don't consider packets 7248 * reflecting the verification tag. 7249 */ 7250 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7251 SCTP_TCB_UNLOCK(stcb); 7252 return; 7253 } 7254 } else { 7255 if (ntohs(outer_ip->ip_len) >= 7256 sizeof(struct ip) + 7257 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7258 /* 7259 * In this case we can check if we got an 7260 * INIT chunk and if the initiate tag 7261 * matches. 
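				 * (The length check above requires the outer
				 * IP header, the 8-byte ICMP header, the
				 * quoted inner IP header, the 8-byte UDP
				 * header and 20 more bytes: the 12-byte SCTP
				 * common header, the 4-byte chunk header and
				 * the 4-byte initiate tag examined below.)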
7262 */ 7263 ch = (struct sctp_init_chunk *)(sh + 1); 7264 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7265 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7266 SCTP_TCB_UNLOCK(stcb); 7267 return; 7268 } 7269 } else { 7270 SCTP_TCB_UNLOCK(stcb); 7271 return; 7272 } 7273 } 7274 type = icmp->icmp_type; 7275 code = icmp->icmp_code; 7276 if ((type == ICMP_UNREACH) && 7277 (code == ICMP_UNREACH_PORT)) { 7278 code = ICMP_UNREACH_PROTOCOL; 7279 } 7280 sctp_notify(inp, stcb, net, type, code, 7281 ntohs(inner_ip->ip_len), 7282 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7283 } else { 7284 if ((stcb == NULL) && (inp != NULL)) { 7285 /* reduce ref-count */ 7286 SCTP_INP_WLOCK(inp); 7287 SCTP_INP_DECR_REF(inp); 7288 SCTP_INP_WUNLOCK(inp); 7289 } 7290 if (stcb) { 7291 SCTP_TCB_UNLOCK(stcb); 7292 } 7293 } 7294 return; 7295 } 7296 #endif 7297 7298 #ifdef INET6 7299 static void 7300 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7301 { 7302 struct ip6ctlparam *ip6cp; 7303 struct sctp_inpcb *inp; 7304 struct sctp_tcb *stcb; 7305 struct sctp_nets *net; 7306 struct sctphdr sh; 7307 struct udphdr udp; 7308 struct sockaddr_in6 src, dst; 7309 uint8_t type, code; 7310 7311 ip6cp = (struct ip6ctlparam *)d; 7312 /* 7313 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7314 */ 7315 if (ip6cp->ip6c_m == NULL) { 7316 return; 7317 } 7318 /* 7319 * Check if we can safely examine the ports and the verification tag 7320 * of the SCTP common header. 7321 */ 7322 if (ip6cp->ip6c_m->m_pkthdr.len < 7323 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7324 return; 7325 } 7326 /* Copy out the UDP header. */ 7327 memset(&udp, 0, sizeof(struct udphdr)); 7328 m_copydata(ip6cp->ip6c_m, 7329 ip6cp->ip6c_off, 7330 sizeof(struct udphdr), 7331 (caddr_t)&udp); 7332 /* Copy out the port numbers and the verification tag. */ 7333 memset(&sh, 0, sizeof(struct sctphdr)); 7334 m_copydata(ip6cp->ip6c_m, 7335 ip6cp->ip6c_off + sizeof(struct udphdr), 7336 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7337 (caddr_t)&sh); 7338 memset(&src, 0, sizeof(struct sockaddr_in6)); 7339 src.sin6_family = AF_INET6; 7340 src.sin6_len = sizeof(struct sockaddr_in6); 7341 src.sin6_port = sh.src_port; 7342 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7343 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7344 return; 7345 } 7346 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7347 dst.sin6_family = AF_INET6; 7348 dst.sin6_len = sizeof(struct sockaddr_in6); 7349 dst.sin6_port = sh.dest_port; 7350 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7351 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7352 return; 7353 } 7354 inp = NULL; 7355 net = NULL; 7356 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7357 (struct sockaddr *)&src, 7358 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7359 if ((stcb != NULL) && 7360 (net != NULL) && 7361 (inp != NULL)) { 7362 /* Check the UDP port numbers */ 7363 if ((udp.uh_dport != net->port) || 7364 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7365 SCTP_TCB_UNLOCK(stcb); 7366 return; 7367 } 7368 /* Check the verification tag */ 7369 if (ntohl(sh.v_tag) != 0) { 7370 /* 7371 * This must be the verification tag used for 7372 * sending out packets. We don't consider packets 7373 * reflecting the verification tag. 
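			 * (A verification tag of zero is only expected on a
			 * packet carrying an INIT chunk; that case is handled
			 * in the else branch below by matching the quoted
			 * initiate tag against our own vtag.)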
7374 */ 7375 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7376 SCTP_TCB_UNLOCK(stcb); 7377 return; 7378 } 7379 } else { 7380 if (ip6cp->ip6c_m->m_pkthdr.len >= 7381 ip6cp->ip6c_off + sizeof(struct udphdr) + 7382 sizeof(struct sctphdr) + 7383 sizeof(struct sctp_chunkhdr) + 7384 offsetof(struct sctp_init, a_rwnd)) { 7385 /* 7386 * In this case we can check if we got an 7387 * INIT chunk and if the initiate tag 7388 * matches. 7389 */ 7390 uint32_t initiate_tag; 7391 uint8_t chunk_type; 7392 7393 m_copydata(ip6cp->ip6c_m, 7394 ip6cp->ip6c_off + 7395 sizeof(struct udphdr) + 7396 sizeof(struct sctphdr), 7397 sizeof(uint8_t), 7398 (caddr_t)&chunk_type); 7399 m_copydata(ip6cp->ip6c_m, 7400 ip6cp->ip6c_off + 7401 sizeof(struct udphdr) + 7402 sizeof(struct sctphdr) + 7403 sizeof(struct sctp_chunkhdr), 7404 sizeof(uint32_t), 7405 (caddr_t)&initiate_tag); 7406 if ((chunk_type != SCTP_INITIATION) || 7407 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7408 SCTP_TCB_UNLOCK(stcb); 7409 return; 7410 } 7411 } else { 7412 SCTP_TCB_UNLOCK(stcb); 7413 return; 7414 } 7415 } 7416 type = ip6cp->ip6c_icmp6->icmp6_type; 7417 code = ip6cp->ip6c_icmp6->icmp6_code; 7418 if ((type == ICMP6_DST_UNREACH) && 7419 (code == ICMP6_DST_UNREACH_NOPORT)) { 7420 type = ICMP6_PARAM_PROB; 7421 code = ICMP6_PARAMPROB_NEXTHEADER; 7422 } 7423 sctp6_notify(inp, stcb, net, type, code, 7424 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7425 } else { 7426 if ((stcb == NULL) && (inp != NULL)) { 7427 /* reduce inp's ref-count */ 7428 SCTP_INP_WLOCK(inp); 7429 SCTP_INP_DECR_REF(inp); 7430 SCTP_INP_WUNLOCK(inp); 7431 } 7432 if (stcb) { 7433 SCTP_TCB_UNLOCK(stcb); 7434 } 7435 } 7436 } 7437 #endif 7438 7439 void 7440 sctp_over_udp_stop(void) 7441 { 7442 /* 7443 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7444 * for writting! 7445 */ 7446 #ifdef INET 7447 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7448 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7449 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7450 } 7451 #endif 7452 #ifdef INET6 7453 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7454 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7455 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7456 } 7457 #endif 7458 } 7459 7460 int 7461 sctp_over_udp_start(void) 7462 { 7463 uint16_t port; 7464 int ret; 7465 #ifdef INET 7466 struct sockaddr_in sin; 7467 #endif 7468 #ifdef INET6 7469 struct sockaddr_in6 sin6; 7470 #endif 7471 /* 7472 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7473 * for writting! 7474 */ 7475 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7476 if (ntohs(port) == 0) { 7477 /* Must have a port set */ 7478 return (EINVAL); 7479 } 7480 #ifdef INET 7481 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7482 /* Already running -- must stop first */ 7483 return (EALREADY); 7484 } 7485 #endif 7486 #ifdef INET6 7487 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7488 /* Already running -- must stop first */ 7489 return (EALREADY); 7490 } 7491 #endif 7492 #ifdef INET 7493 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7494 SOCK_DGRAM, IPPROTO_UDP, 7495 curthread->td_ucred, curthread))) { 7496 sctp_over_udp_stop(); 7497 return (ret); 7498 } 7499 /* Call the special UDP hook. */ 7500 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7501 sctp_recv_udp_tunneled_packet, 7502 sctp_recv_icmp_tunneled_packet, 7503 NULL))) { 7504 sctp_over_udp_stop(); 7505 return (ret); 7506 } 7507 /* Ok, we have a socket, bind it to the port. 

int
sctp_over_udp_start(void)
{
    uint16_t port;
    int ret;
#ifdef INET
    struct sockaddr_in sin;
#endif
#ifdef INET6
    struct sockaddr_in6 sin6;
#endif
    /*
     * This function assumes the sysctl caller holds
     * sctp_sysctl_info_lock() for writing.
     */
    port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
    if (ntohs(port) == 0) {
        /* Must have a port set */
        return (EINVAL);
    }
#ifdef INET
    if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
        /* Already running -- must stop first */
        return (EALREADY);
    }
#endif
#ifdef INET6
    if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
        /* Already running -- must stop first */
        return (EALREADY);
    }
#endif
#ifdef INET
    if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
        SOCK_DGRAM, IPPROTO_UDP,
        curthread->td_ucred, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Call the special UDP hook. */
    if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
        sctp_recv_udp_tunneled_packet,
        sctp_recv_icmp_tunneled_packet,
        NULL))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Ok, we have a socket, bind it to the port. */
    memset(&sin, 0, sizeof(struct sockaddr_in));
    sin.sin_len = sizeof(struct sockaddr_in);
    sin.sin_family = AF_INET;
    sin.sin_port = htons(port);
    if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
        (struct sockaddr *)&sin, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
#endif
#ifdef INET6
    if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
        SOCK_DGRAM, IPPROTO_UDP,
        curthread->td_ucred, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Call the special UDP hook. */
    if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
        sctp_recv_udp_tunneled_packet,
        sctp_recv_icmp6_tunneled_packet,
        NULL))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Ok, we have a socket, bind it to the port. */
    memset(&sin6, 0, sizeof(struct sockaddr_in6));
    sin6.sin6_len = sizeof(struct sockaddr_in6);
    sin6.sin6_family = AF_INET6;
    sin6.sin6_port = htons(port);
    if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
        (struct sockaddr *)&sin6, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
#endif
    return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
    if (mtu1 > 0) {
        if (mtu2 > 0) {
            if (mtu3 > 0) {
                return (min(mtu1, min(mtu2, mtu3)));
            } else {
                return (min(mtu1, mtu2));
            }
        } else {
            if (mtu3 > 0) {
                return (min(mtu1, mtu3));
            } else {
                return (mtu1);
            }
        }
    } else {
        if (mtu2 > 0) {
            if (mtu3 > 0) {
                return (min(mtu2, mtu3));
            } else {
                return (mtu2);
            }
        } else {
            return (mtu3);
        }
    }
}

void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
    struct in_conninfo inc;

    memset(&inc, 0, sizeof(struct in_conninfo));
    inc.inc_fibnum = fibnum;
    switch (addr->sa.sa_family) {
#ifdef INET
    case AF_INET:
        inc.inc_faddr = addr->sin.sin_addr;
        break;
#endif
#ifdef INET6
    case AF_INET6:
        inc.inc_flags |= INC_ISIPV6;
        inc.inc6_faddr = addr->sin6.sin6_addr;
        break;
#endif
    default:
        return;
    }
    tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
    struct in_conninfo inc;

    memset(&inc, 0, sizeof(struct in_conninfo));
    inc.inc_fibnum = fibnum;
    switch (addr->sa.sa_family) {
#ifdef INET
    case AF_INET:
        inc.inc_faddr = addr->sin.sin_addr;
        break;
#endif
#ifdef INET6
    case AF_INET6:
        inc.inc_flags |= INC_ISIPV6;
        inc.inc6_faddr = addr->sin6.sin6_addr;
        break;
#endif
    default:
        return (0);
    }
    return ((uint32_t)tcp_hc_getmtu(&inc));
}
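
/*
 * Usage sketch (illustrative only, not from the original sources): callers
 * that need a path MTU can combine the host-cache value with other candidate
 * MTUs through sctp_min_mtu(), which treats zero as "unknown":
 *
 *    uint32_t hc_mtu, mtu;
 *
 *    hc_mtu = sctp_hc_get_mtu(&net->ro._l_addr, fibnum);
 *    mtu = sctp_min_mtu(hc_mtu, route_mtu, interface_mtu);
 *    if (mtu == 0) {
 *        mtu = default_mtu;
 *    }
 *
 * Here fibnum, route_mtu, interface_mtu and default_mtu stand in for values
 * the real callers obtain elsewhere; sctp_min_mtu(0, 0, 0) returns 0, so a
 * fallback is still needed when nothing is known.
 */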

void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
    int old_state = stcb->asoc.state;
#endif

    KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
        ("sctp_set_state: Can't set substate (new_state = %x)",
        new_state));
    stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
    if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
        (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
        (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
        SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
    }
#if defined(KDTRACE_HOOKS)
    if (((old_state & SCTP_STATE_MASK) != new_state) &&
        !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
          (new_state == SCTP_STATE_INUSE))) {
        SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
    }
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
    int old_state = stcb->asoc.state;
#endif

    KASSERT((substate & SCTP_STATE_MASK) == 0,
        ("sctp_add_substate: Can't set state (substate = %x)",
        substate));
    stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
    if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
        ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
        ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
        ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
        SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
    }
#endif
}
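
/*
 * Illustrative note (not part of the original sources): sctp_set_state()
 * replaces the main state bits and, for the three shutdown states, clears
 * the SHUTDOWN_PENDING substate, while sctp_add_substate() only ORs
 * substate flags in. A shutdown sequence therefore looks roughly like
 *
 *    sctp_add_substate(stcb, SCTP_STATE_SHUTDOWN_PENDING);
 *    ...
 *    sctp_set_state(stcb, SCTP_STATE_SHUTDOWN_SENT);
 *
 * where the second call implicitly drops the pending flag again.
 * sctp_set_state() fires the state__change DTrace probe when the masked
 * state really changes (except for the initial EMPTY to INUSE transition),
 * and sctp_add_substate() fires it when the ABOUT_TO_BE_FREED or
 * SHUTDOWN_PENDING flag is newly set.
 */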