/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];
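
/*
 * The logging helpers below record protocol events in the kernel KTR
 * trace buffer via SCTP_CTR6() when the stack is built with
 * SCTP_LOCAL_TRACE_BUF; without that option they compile to empty
 * functions.
 */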

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.sb.stcb = stcb;
    sctp_clog.x.sb.so_sbcc = sb->sb_cc;
    if (stcb)
        sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
    else
        sctp_clog.x.sb.stcb_sbcc = 0;
    sctp_clog.x.sb.incr = incr;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_SB, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.close.inp = (void *)inp;
    sctp_clog.x.close.sctp_flags = inp->sctp_flags;
    if (stcb) {
        sctp_clog.x.close.stcb = (void *)stcb;
        sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
    } else {
        sctp_clog.x.close.stcb = 0;
        sctp_clog.x.close.state = 0;
    }
    sctp_clog.x.close.loc = loc;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_CLOSE, 0,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.rto.net = (void *)net;
    sctp_clog.x.rto.rtt = net->rtt / 1000;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RTT, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.strlog.stcb = stcb;
    sctp_clog.x.strlog.n_tsn = tsn;
    sctp_clog.x.strlog.n_sseq = sseq;
    sctp_clog.x.strlog.e_tsn = 0;
    sctp_clog.x.strlog.e_sseq = 0;
    sctp_clog.x.strlog.strm = stream;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_STRM, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.nagle.stcb = (void *)stcb;
    sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
    sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
    sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
    sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_NAGLE, action,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.sack.cumack = cumack;
    sctp_clog.x.sack.oldcumack = old_cumack;
    sctp_clog.x.sack.tsn = tsn;
    sctp_clog.x.sack.numGaps = gaps;
    sctp_clog.x.sack.numDups = dups;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_SACK, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.map.base = map;
    sctp_clog.x.map.cum = cum;
    sctp_clog.x.map.high = high;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MAP, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.fr.largest_tsn = biggest_tsn;
    sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
    sctp_clog.x.fr.tsn = tsn;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_FR, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.mb.mp = m;
    sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
    sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
    sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
    if (SCTP_BUF_IS_EXTENDED(m)) {
        sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
        sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
    } else {
        sctp_clog.x.mb.ext = 0;
        sctp_clog.x.mb.refcnt = 0;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MBUF, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
    struct mbuf *mat;

    for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
        sctp_log_mb(mat, from);
    }
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    if (control == NULL) {
        SCTP_PRINTF("Gak log of NULL?\n");
        return;
    }
    sctp_clog.x.strlog.stcb = control->stcb;
    sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
    sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
    sctp_clog.x.strlog.strm = control->sinfo_stream;
    if (poschk != NULL) {
        sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
        sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
    } else {
        sctp_clog.x.strlog.e_tsn = 0;
        sctp_clog.x.strlog.e_sseq = 0;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_STRM, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.cwnd.net = net;
    if (stcb->asoc.send_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_send = 255;
    else
        sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
    if (stcb->asoc.stream_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_str = 255;
    else
        sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

    if (net) {
        sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
        sctp_clog.x.cwnd.inflight = net->flight_size;
        sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
        sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
        sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
    }
    if (SCTP_CWNDLOG_PRESEND == from) {
        sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
    }
    sctp_clog.x.cwnd.cwnd_augment = augment;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_CWND, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    if (inp) {
        sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
    } else {
        sctp_clog.x.lock.sock = (void *)NULL;
    }
    sctp_clog.x.lock.inp = (void *)inp;
    if (stcb) {
        sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
    } else {
        sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
    }
    if (inp) {
        sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
        sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
    } else {
        sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
    }
    sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
    if (inp && (inp->sctp_socket)) {
        sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
        sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
        sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
    } else {
        sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_LOCK_EVENT, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.cwnd.net = net;
    sctp_clog.x.cwnd.cwnd_new_value = error;
    sctp_clog.x.cwnd.inflight = net->flight_size;
    sctp_clog.x.cwnd.cwnd_augment = burst;
    if (stcb->asoc.send_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_send = 255;
    else
        sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
    if (stcb->asoc.stream_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_str = 255;
    else
        sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MAXBURST, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.rwnd.rwnd = peers_rwnd;
    sctp_clog.x.rwnd.send_size = snd_size;
    sctp_clog.x.rwnd.overhead = overhead;
    sctp_clog.x.rwnd.new_rwnd = 0;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RWND, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.rwnd.rwnd = peers_rwnd;
    sctp_clog.x.rwnd.send_size = flight_size;
    sctp_clog.x.rwnd.overhead = overhead;
    sctp_clog.x.rwnd.new_rwnd = a_rwndval;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RWND, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.mbcnt.total_queue_size = total_oq;
    sctp_clog.x.mbcnt.size_change = book;
    sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
    sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MBCNT, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_MISC_EVENT, from,
        a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.wake.stcb = (void *)stcb;
    sctp_clog.x.wake.wake_cnt = wake_cnt;
    sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
    sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
    sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

    if (stcb->asoc.stream_queue_cnt < 0xff)
        sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
    else
        sctp_clog.x.wake.stream_qcnt = 0xff;

    if (stcb->asoc.chunks_on_out_queue < 0xff)
        sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
    else
        sctp_clog.x.wake.chunks_on_oque = 0xff;

    sctp_clog.x.wake.sctpflags = 0;
    /* set in the deferred mode stuff */
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
        sctp_clog.x.wake.sctpflags |= 1;
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
        sctp_clog.x.wake.sctpflags |= 2;
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
        sctp_clog.x.wake.sctpflags |= 4;
    /* what about the sb */
    if (stcb->sctp_socket) {
        struct socket *so = stcb->sctp_socket;

        sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
    } else {
        sctp_clog.x.wake.sbflags = 0xff;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_WAKE, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
    sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
    sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
    sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
    sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
    sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
    sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_BLOCK, from,
        sctp_clog.x.misc.log1, sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3, sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
    /* May need to fix this if ktrdump does not work */
    return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static
void
sctp_print_audit_report(void)
{
    int i;
    int cnt;

    cnt = 0;
    for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
        if ((sctp_audit_data[i][0] == 0xe0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if (sctp_audit_data[i][0] == 0xf0) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if ((sctp_audit_data[i][0] == 0xc0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            SCTP_PRINTF("\n");
            cnt = 0;
        }
        SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
            (uint32_t)sctp_audit_data[i][1]);
        cnt++;
        if ((cnt % 14) == 0)
            SCTP_PRINTF("\n");
    }
    for (i = 0; i < sctp_audit_indx; i++) {
        if ((sctp_audit_data[i][0] == 0xe0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if (sctp_audit_data[i][0] == 0xf0) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if ((sctp_audit_data[i][0] == 0xc0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            SCTP_PRINTF("\n");
            cnt = 0;
        }
        SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
            (uint32_t)sctp_audit_data[i][1]);
        cnt++;
        if ((cnt % 14) == 0)
            SCTP_PRINTF("\n");
    }
    SCTP_PRINTF("\n");
}
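
/*
 * sctp_auditing() cross-checks the association's cached counters
 * (sent_queue_retran_cnt, total_flight, total_flight_count and each
 * net's flight_size) against what is actually sitting on the sent
 * queue, corrects any divergence, and records the mismatch as an 0xAF
 * audit entry whose second byte identifies the failing check.
 */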
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    int resend_cnt, tot_out, rep, tot_book_cnt;
    struct sctp_nets *lnet;
    struct sctp_tmit_chunk *chk;

    sctp_audit_data[sctp_audit_indx][0] = 0xAA;
    sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
    if (inp == NULL) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0x01;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        return;
    }
    if (stcb == NULL) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0x02;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        return;
    }
    sctp_audit_data[sctp_audit_indx][0] = 0xA1;
    sctp_audit_data[sctp_audit_indx][1] =
        (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
    rep = 0;
    tot_book_cnt = 0;
    resend_cnt = tot_out = 0;
    TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
        if (chk->sent == SCTP_DATAGRAM_RESEND) {
            resend_cnt++;
        } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
            tot_out += chk->book_size;
            tot_book_cnt++;
        }
    }
    if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA1;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
            resend_cnt, stcb->asoc.sent_queue_retran_cnt);
        rep = 1;
        stcb->asoc.sent_queue_retran_cnt = resend_cnt;
        sctp_audit_data[sctp_audit_indx][0] = 0xA2;
        sctp_audit_data[sctp_audit_indx][1] =
            (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
    }
    if (tot_out != stcb->asoc.total_flight) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA2;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
            (int)stcb->asoc.total_flight);
        stcb->asoc.total_flight = tot_out;
    }
    if (tot_book_cnt != stcb->asoc.total_flight_count) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA5;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

        stcb->asoc.total_flight_count = tot_book_cnt;
    }
    tot_out = 0;
    TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
        tot_out += lnet->flight_size;
    }
    if (tot_out != stcb->asoc.total_flight) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA3;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("real flight:%d net total was %d\n",
            stcb->asoc.total_flight, tot_out);
        /* now corrective action */
        TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
            tot_out = 0;
            TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
                if ((chk->whoTo == lnet) &&
                    (chk->sent < SCTP_DATAGRAM_RESEND)) {
                    tot_out += chk->book_size;
                }
            }
            if (lnet->flight_size != tot_out) {
                SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
                    (void *)lnet, lnet->flight_size, tot_out);
                lnet->flight_size = tot_out;
            }
        }
    }
    if (rep) {
        sctp_print_audit_report();
    }
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

    sctp_audit_data[sctp_audit_indx][0] = ev;
    sctp_audit_data[sctp_audit_indx][1] = fd;
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code the time to be positive and
 * know that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
    uint64_t temp;
    uint32_t ticks;

    if (hz == 1000) {
        ticks = msecs;
    } else {
        temp = (((uint64_t)msecs * hz) + 999) / 1000;
        if (temp > UINT32_MAX) {
            ticks = UINT32_MAX;
        } else {
            ticks = (uint32_t)temp;
        }
    }
    return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
    uint64_t temp;
    uint32_t msecs;

    if (hz == 1000) {
        msecs = ticks;
    } else {
        temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
        if (temp > UINT32_MAX) {
            msecs = UINT32_MAX;
        } else {
            msecs = (uint32_t)temp;
        }
    }
    return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
    uint64_t temp;
    uint32_t ticks;

    temp = (uint64_t)secs * hz;
    if (temp > UINT32_MAX) {
        ticks = UINT32_MAX;
    } else {
        ticks = (uint32_t)temp;
    }
    return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
    uint64_t temp;
    uint32_t secs;

    temp = ((uint64_t)ticks + (hz - 1)) / hz;
    if (temp > UINT32_MAX) {
        secs = UINT32_MAX;
    } else {
        secs = (uint32_t)temp;
    }
    return (secs);
}
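
/*
 * Worked example of the rounding above, assuming hz = 250 (a value
 * chosen only for illustration): sctp_msecs_to_ticks(1) yields
 * (1 * 250 + 999) / 1000 = 1 tick, and sctp_ticks_to_msecs(1) yields
 * (1 * 1000 + 249) / 250 = 4 msecs. Rounding up guarantees that a
 * positive input never maps to zero.
 */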

/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
    struct sctp_inpcb *inp;
    struct sctp_nets *net;

    inp = stcb->sctp_ep;

    sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
    sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
    sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
    sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
    TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
        sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
        sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
    }
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
    struct sctp_inpcb *inp;
    struct sctp_nets *net;

    inp = stcb->sctp_ep;
    sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
    sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
    if (stop_assoc_kill_timer) {
        sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
    }
    sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
    sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
    sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
    /* Mobility adaptation */
    sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
    TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
        sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
        sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
        sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
        sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
        sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
        sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
    }
}

/*
 * A list of sizes based on typical MTUs, used only if the next hop size is
 * not returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
    68,
    296,
    508,
    512,
    544,
    576,
    1004,
    1492,
    1500,
    1536,
    2000,
    2048,
    4352,
    4464,
    8168,
    17912,
    32000,
    65532
};

/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
    uint32_t i;

    val &= 0xfffffffc;
    if (val <= sctp_mtu_sizes[0]) {
        return (val);
    }
    for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
        if (val <= sctp_mtu_sizes[i]) {
            break;
        }
    }
    KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
        ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
    return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4
 * smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
    /* select another MTU that is just bigger than this one */
    uint32_t i;

    val &= 0xfffffffc;
    for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
        if (val < sctp_mtu_sizes[i]) {
            KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
                ("sctp_mtu_sizes[%u] not a multiple of 4", i));
            return (sctp_mtu_sizes[i]);
        }
    }
    return (val);
}
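
/*
 * For example, using the table above: sctp_get_prev_mtu(1499) masks the
 * value down to 1496 and returns 1492, while sctp_get_next_mtu(1500)
 * returns 1536. Values below 68 or above 65532 simply come back rounded
 * down to a multiple of 4.
 */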

void
sctp_fill_random_store(struct sctp_pcb *m)
{
    /*
     * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
     * our counter. The result becomes our good random numbers and we
     * then setup to give these out. Note that we do no locking to
     * protect this. This is ok, since if competing folks call this we
     * will get more gobbled gook in the random store which is what we
     * want. There is a danger that two guys will use the same random
     * numbers, but that's ok too since that is random as well :->
     */
    m->store_at = 0;
    (void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
        sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
        sizeof(m->random_counter), (uint8_t *)m->random_store);
    m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
    /*
     * A true implementation should use a random selection process to
     * get the initial stream sequence number, using RFC1750 as a good
     * guideline
     */
    uint32_t x, *xp;
    uint8_t *p;
    int store_at, new_store;

    if (inp->initial_sequence_debug != 0) {
        uint32_t ret;

        ret = inp->initial_sequence_debug;
        inp->initial_sequence_debug++;
        return (ret);
    }
retry:
    store_at = inp->store_at;
    new_store = store_at + sizeof(uint32_t);
    if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
        new_store = 0;
    }
    if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
        goto retry;
    }
    if (new_store == 0) {
        /* Refill the random store */
        sctp_fill_random_store(inp);
    }
    p = &inp->random_store[store_at];
    xp = (uint32_t *)p;
    x = *xp;
    return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
    uint32_t x;
    struct timeval now;

    if (check) {
        (void)SCTP_GETTIME_TIMEVAL(&now);
    }
    for (;;) {
        x = sctp_select_initial_TSN(&inp->sctp_ep);
        if (x == 0) {
            /* we never use 0 */
            continue;
        }
        if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
            break;
        }
    }
    return (x);
}
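
/*
 * Note that sctp_select_a_tag() never returns 0 (not a valid
 * verification tag) and, when 'check' is set, keeps drawing values
 * until sctp_is_vtag_good() accepts one for the given port pair.
 */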

int32_t
sctp_map_assoc_state(int kernel_state)
{
    int32_t user_state;

    if (kernel_state & SCTP_STATE_WAS_ABORTED) {
        user_state = SCTP_CLOSED;
    } else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
        user_state = SCTP_SHUTDOWN_PENDING;
    } else {
        switch (kernel_state & SCTP_STATE_MASK) {
        case SCTP_STATE_EMPTY:
            user_state = SCTP_CLOSED;
            break;
        case SCTP_STATE_INUSE:
            user_state = SCTP_CLOSED;
            break;
        case SCTP_STATE_COOKIE_WAIT:
            user_state = SCTP_COOKIE_WAIT;
            break;
        case SCTP_STATE_COOKIE_ECHOED:
            user_state = SCTP_COOKIE_ECHOED;
            break;
        case SCTP_STATE_OPEN:
            user_state = SCTP_ESTABLISHED;
            break;
        case SCTP_STATE_SHUTDOWN_SENT:
            user_state = SCTP_SHUTDOWN_SENT;
            break;
        case SCTP_STATE_SHUTDOWN_RECEIVED:
            user_state = SCTP_SHUTDOWN_RECEIVED;
            break;
        case SCTP_STATE_SHUTDOWN_ACK_SENT:
            user_state = SCTP_SHUTDOWN_ACK_SENT;
            break;
        default:
            user_state = SCTP_CLOSED;
            break;
        }
    }
    return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
{
    struct sctp_association *asoc;

    /*
     * Anything set to zero is taken care of by the allocation routine's
     * bzero
     */

    /*
     * Up front select what scoping to apply on addresses I tell my peer.
     * Not sure what to do with these right now, we will need to come up
     * with a way to set them. We may need to pass them through from the
     * caller in the sctp_aloc_assoc() function.
     */
    int i;
#if defined(SCTP_DETAILED_STR_STATS)
    int j;
#endif

    asoc = &stcb->asoc;
    /* init all variables to a known value. */
    SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
    asoc->max_burst = inp->sctp_ep.max_burst;
    asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
    asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
    asoc->cookie_life = inp->sctp_ep.def_cookie_life;
    asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
    asoc->ecn_supported = inp->ecn_supported;
    asoc->prsctp_supported = inp->prsctp_supported;
    asoc->auth_supported = inp->auth_supported;
    asoc->asconf_supported = inp->asconf_supported;
    asoc->reconfig_supported = inp->reconfig_supported;
    asoc->nrsack_supported = inp->nrsack_supported;
    asoc->pktdrop_supported = inp->pktdrop_supported;
    asoc->idata_supported = inp->idata_supported;
    asoc->sctp_cmt_pf = (uint8_t)0;
    asoc->sctp_frag_point = inp->sctp_frag_point;
    asoc->sctp_features = inp->sctp_features;
    asoc->default_dscp = inp->sctp_ep.default_dscp;
    asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
    if (inp->sctp_ep.default_flowlabel) {
        asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
    } else {
        if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
            asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
            asoc->default_flowlabel &= 0x000fffff;
            asoc->default_flowlabel |= 0x80000000;
        } else {
            asoc->default_flowlabel = 0;
        }
    }
#endif
    asoc->sb_send_resv = 0;
    if (override_tag) {
        asoc->my_vtag = override_tag;
    } else {
        asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
    }
    /* Get the nonce tags */
    asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
    asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
    asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
    asoc->tsn_in_at = 0;
    asoc->tsn_out_at = 0;
    asoc->tsn_in_wrapped = 0;
    asoc->tsn_out_wrapped = 0;
    asoc->cumack_log_at = 0;
    asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
    asoc->fs_index = 0;
#endif
    asoc->refcnt = 0;
    asoc->assoc_up_sent = 0;
    asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
        sctp_select_initial_TSN(&inp->sctp_ep);
    asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
    /* we are optimistic here */
    asoc->peer_supports_nat = 0;
    asoc->sent_queue_retran_cnt = 0;

    /* for CMT */
    asoc->last_net_cmt_send_started = NULL;

    /* This will need to be adjusted */
    asoc->last_acked_seq = asoc->init_seq_number - 1;
    asoc->advanced_peer_ack_point = asoc->last_acked_seq;
    asoc->asconf_seq_in = asoc->last_acked_seq;

    /* here we are different, we hold the next one we expect */
    asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

    asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
    asoc->initial_rto = inp->sctp_ep.initial_rto;

    asoc->default_mtu = inp->sctp_ep.default_mtu;
    asoc->max_init_times = inp->sctp_ep.max_init_times;
    asoc->max_send_times = inp->sctp_ep.max_send_times;
    asoc->def_net_failure = inp->sctp_ep.def_net_failure;
    asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
    asoc->free_chunk_cnt = 0;

    asoc->iam_blocking = 0;
    asoc->context = inp->sctp_context;
    asoc->local_strreset_support = inp->local_strreset_support;
    asoc->def_send = inp->def_send;
    asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
    asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
    asoc->pr_sctp_cnt = 0;
    asoc->total_output_queue_size = 0;

    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
        asoc->scope.ipv6_addr_legal = 1;
        if (SCTP_IPV6_V6ONLY(inp) == 0) {
            asoc->scope.ipv4_addr_legal = 1;
        } else {
            asoc->scope.ipv4_addr_legal = 0;
        }
    } else {
        asoc->scope.ipv6_addr_legal = 0;
        asoc->scope.ipv4_addr_legal = 1;
    }

    asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
    asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

    asoc->smallest_mtu = inp->sctp_frag_point;
    asoc->minrto = inp->sctp_ep.sctp_minrto;
    asoc->maxrto = inp->sctp_ep.sctp_maxrto;

    asoc->stream_locked_on = 0;
    asoc->ecn_echo_cnt_onq = 0;
    asoc->stream_locked = 0;

    asoc->send_sack = 1;

    LIST_INIT(&asoc->sctp_restricted_addrs);

    TAILQ_INIT(&asoc->nets);
    TAILQ_INIT(&asoc->pending_reply_queue);
    TAILQ_INIT(&asoc->asconf_ack_sent);
    /* Setup to fill the hb random cache at first HB */
    asoc->hb_random_idx = 4;

    asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

    stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
    stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

    stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
    stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

    /*
     * Now the stream parameters, here we allocate space for all streams
     * that we request by default.
     */
    asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
        o_strms;
    SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
        asoc->streamoutcnt * sizeof(struct sctp_stream_out),
        SCTP_M_STRMO);
    if (asoc->strmout == NULL) {
        /* big trouble no memory */
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    for (i = 0; i < asoc->streamoutcnt; i++) {
        /*
         * The inbound side must be set to 0xffff. Also NOTE that when
         * we get the INIT-ACK back (for the INIT sender) we MUST
         * reduce the count (streamoutcnt), but first check if we sent
         * to any of the upper streams that were dropped (if some
         * were). Those that were dropped must be reported to the
         * upper layer as failed to send.
         */
        TAILQ_INIT(&asoc->strmout[i].outqueue);
        asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
        asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
        for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
            asoc->strmout[i].abandoned_sent[j] = 0;
            asoc->strmout[i].abandoned_unsent[j] = 0;
        }
#else
        asoc->strmout[i].abandoned_sent[0] = 0;
        asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
        asoc->strmout[i].next_mid_ordered = 0;
        asoc->strmout[i].next_mid_unordered = 0;
        asoc->strmout[i].sid = i;
        asoc->strmout[i].last_msg_incomplete = 0;
        asoc->strmout[i].state = SCTP_STREAM_OPENING;
    }
    asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

    /* Now the mapping array */
    asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
    SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
        SCTP_M_MAP);
    if (asoc->mapping_array == NULL) {
        SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    memset(asoc->mapping_array, 0, asoc->mapping_array_size);
    SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
        SCTP_M_MAP);
    if (asoc->nr_mapping_array == NULL) {
        SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
        SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

    /* Now the init of the other outqueues */
    TAILQ_INIT(&asoc->free_chunks);
    TAILQ_INIT(&asoc->control_send_queue);
    TAILQ_INIT(&asoc->asconf_send_queue);
    TAILQ_INIT(&asoc->send_queue);
    TAILQ_INIT(&asoc->sent_queue);
    TAILQ_INIT(&asoc->resetHead);
    asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
    TAILQ_INIT(&asoc->asconf_queue);
    /* authentication fields */
    asoc->authinfo.random = NULL;
    asoc->authinfo.active_keyid = 0;
    asoc->authinfo.assoc_key = NULL;
    asoc->authinfo.assoc_keyid = 0;
    asoc->authinfo.recv_key = NULL;
    asoc->authinfo.recv_keyid = 0;
    LIST_INIT(&asoc->shared_keys);
    asoc->marked_retrans = 0;
    asoc->port = inp->sctp_ep.port;
    asoc->timoinit = 0;
    asoc->timodata = 0;
    asoc->timosack = 0;
    asoc->timoshutdown = 0;
    asoc->timoheartbeat = 0;
    asoc->timocookie = 0;
    asoc->timoshutdownack = 0;
    (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
    asoc->discontinuity_time = asoc->start_time;
    for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
        asoc->abandoned_unsent[i] = 0;
        asoc->abandoned_sent[i] = 0;
    }
    /*
     * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
     * freed later when the association is freed.
     */
    return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
    unsigned int i, limit;

    SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
        asoc->mapping_array_size,
        asoc->mapping_array_base_tsn,
        asoc->cumulative_tsn,
        asoc->highest_tsn_inside_map,
        asoc->highest_tsn_inside_nr_map);
    for (limit = asoc->mapping_array_size; limit > 1; limit--) {
        if (asoc->mapping_array[limit - 1] != 0) {
            break;
        }
    }
    SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
    for (i = 0; i < limit; i++) {
        SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
    }
    if (limit % 16)
        SCTP_PRINTF("\n");
    for (limit = asoc->mapping_array_size; limit > 1; limit--) {
        if (asoc->nr_mapping_array[limit - 1]) {
            break;
        }
    }
    SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
    for (i = 0; i < limit; i++) {
        SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
    }
    if (limit % 16)
        SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
    /* mapping array needs to grow */
    uint8_t *new_array1, *new_array2;
    uint32_t new_size;

    new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
    SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
    SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
    if ((new_array1 == NULL) || (new_array2 == NULL)) {
        /* can't get more, forget it */
        SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
        if (new_array1) {
            SCTP_FREE(new_array1, SCTP_M_MAP);
        }
        if (new_array2) {
            SCTP_FREE(new_array2, SCTP_M_MAP);
        }
        return (-1);
    }
    memset(new_array1, 0, new_size);
    memset(new_array2, 0, new_size);
    memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
    memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
    SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
    SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
    asoc->mapping_array = new_array1;
    asoc->nr_mapping_array = new_array2;
    asoc->mapping_array_size = new_size;
    return (0);
}
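
/*
 * Example of the sizing rule above, with illustrative numbers: growing
 * an array of asoc->mapping_array_size = 16 bytes to cover needed = 100
 * more TSNs yields 16 + (100 + 7) / 8 + SCTP_MAPPING_ARRAY_INCR bytes,
 * i.e. 13 extra bytes of map plus one increment of slack.
 */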

static void
sctp_iterator_work(struct sctp_iterator *it)
{
    struct epoch_tracker et;
    struct sctp_inpcb *tinp;
    int iteration_count = 0;
    int inp_skip = 0;
    int first_in = 1;

    NET_EPOCH_ENTER(et);
    SCTP_INP_INFO_RLOCK();
    SCTP_ITERATOR_LOCK();
    sctp_it_ctl.cur_it = it;
    if (it->inp) {
        SCTP_INP_RLOCK(it->inp);
        SCTP_INP_DECR_REF(it->inp);
    }
    if (it->inp == NULL) {
        /* iterator is complete */
done_with_iterator:
        sctp_it_ctl.cur_it = NULL;
        SCTP_ITERATOR_UNLOCK();
        SCTP_INP_INFO_RUNLOCK();
        if (it->function_atend != NULL) {
            (*it->function_atend) (it->pointer, it->val);
        }
        SCTP_FREE(it, SCTP_M_ITER);
        NET_EPOCH_EXIT(et);
        return;
    }
select_a_new_ep:
    if (first_in) {
        first_in = 0;
    } else {
        SCTP_INP_RLOCK(it->inp);
    }
    while (((it->pcb_flags) &&
        ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
        ((it->pcb_features) &&
        ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
        /* endpoint flags or features don't match, so keep looking */
        if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
            SCTP_INP_RUNLOCK(it->inp);
            goto done_with_iterator;
        }
        tinp = it->inp;
        it->inp = LIST_NEXT(it->inp, sctp_list);
        it->stcb = NULL;
        SCTP_INP_RUNLOCK(tinp);
        if (it->inp == NULL) {
            goto done_with_iterator;
        }
        SCTP_INP_RLOCK(it->inp);
    }
    /* now go through each assoc which is in the desired state */
    if (it->done_current_ep == 0) {
        if (it->function_inp != NULL)
            inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
        it->done_current_ep = 1;
    }
    if (it->stcb == NULL) {
        /* run the per instance function */
        it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
    }
    if ((inp_skip) || it->stcb == NULL) {
        if (it->function_inp_end != NULL) {
            inp_skip = (*it->function_inp_end) (it->inp,
                it->pointer, it->val);
        }
        SCTP_INP_RUNLOCK(it->inp);
        goto no_stcb;
    }
    while (it->stcb) {
        SCTP_TCB_LOCK(it->stcb);
        if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
            /* not in the right state... keep looking */
            SCTP_TCB_UNLOCK(it->stcb);
            goto next_assoc;
        }
        /* see if we have limited out the iterator loop */
        iteration_count++;
        if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
            /* Pause to let others grab the lock */
            atomic_add_int(&it->stcb->asoc.refcnt, 1);
            SCTP_TCB_UNLOCK(it->stcb);
            SCTP_INP_INCR_REF(it->inp);
            SCTP_INP_RUNLOCK(it->inp);
            SCTP_ITERATOR_UNLOCK();
            SCTP_INP_INFO_RUNLOCK();
            SCTP_INP_INFO_RLOCK();
            SCTP_ITERATOR_LOCK();
            if (sctp_it_ctl.iterator_flags) {
                /* We won't be staying here */
                SCTP_INP_DECR_REF(it->inp);
                atomic_add_int(&it->stcb->asoc.refcnt, -1);
                if (sctp_it_ctl.iterator_flags &
                    SCTP_ITERATOR_STOP_CUR_IT) {
                    sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
                    goto done_with_iterator;
                }
                if (sctp_it_ctl.iterator_flags &
                    SCTP_ITERATOR_STOP_CUR_INP) {
                    sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
                    goto no_stcb;
                }
                /* We should not reach here with any other flag set. */
                SCTP_PRINTF("Unknown it ctl flag %x\n",
                    sctp_it_ctl.iterator_flags);
                sctp_it_ctl.iterator_flags = 0;
            }
            SCTP_INP_RLOCK(it->inp);
            SCTP_INP_DECR_REF(it->inp);
            SCTP_TCB_LOCK(it->stcb);
            atomic_add_int(&it->stcb->asoc.refcnt, -1);
            iteration_count = 0;
        }
        KASSERT(it->inp == it->stcb->sctp_ep,
            ("%s: stcb %p does not belong to inp %p, but inp %p",
            __func__, it->stcb, it->inp, it->stcb->sctp_ep));

        /* run function on this one */
        (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

        /*
         * we lie here, it really needs to have its own type but
         * first I must verify that this won't affect things :-0
         */
        if (it->no_chunk_output == 0)
            sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

        SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
        it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
        if (it->stcb == NULL) {
            /* Run last function */
            if (it->function_inp_end != NULL) {
                inp_skip = (*it->function_inp_end) (it->inp,
                    it->pointer, it->val);
            }
        }
    }
    SCTP_INP_RUNLOCK(it->inp);
no_stcb:
    /* done with all assocs on this endpoint, move on to next endpoint */
    it->done_current_ep = 0;
    if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
        it->inp = NULL;
    } else {
        it->inp = LIST_NEXT(it->inp, sctp_list);
    }
    it->stcb = NULL;
    if (it->inp == NULL) {
        goto done_with_iterator;
    }
    goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
    struct sctp_iterator *it;

    /* This function is called with the WQ lock in place */
    sctp_it_ctl.iterator_running = 1;
    while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
        /* now let's work on this one */
        TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
        SCTP_IPI_ITERATOR_WQ_UNLOCK();
        CURVNET_SET(it->vn);
        sctp_iterator_work(it);
        CURVNET_RESTORE();
        SCTP_IPI_ITERATOR_WQ_LOCK();
        /* sa_ignore FREED_MEMORY */
    }
    sctp_it_ctl.iterator_running = 0;
    return;
}

static void
sctp_handle_addr_wq(void)
{
    /* deal with the ADDR wq from the rtsock calls */
    struct sctp_laddr *wi, *nwi;
    struct sctp_asconf_iterator *asc;

    SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
        sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
    if (asc == NULL) {
        /* Try later, no memory */
        sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
            (struct sctp_inpcb *)NULL,
            (struct sctp_tcb *)NULL,
            (struct sctp_nets *)NULL);
        return;
    }
    LIST_INIT(&asc->list_of_work);
    asc->cnt = 0;

    LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
        LIST_REMOVE(wi, sctp_nxt_addr);
        LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
        asc->cnt++;
    }

    if (asc->cnt == 0) {
        SCTP_FREE(asc, SCTP_M_ASC_IT);
    } else {
        int ret;

        ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
            sctp_asconf_iterator_stcb,
            NULL, /* No ep end for boundall */
            SCTP_PCB_FLAGS_BOUNDALL,
            SCTP_PCB_ANY_FEATURES,
            SCTP_ASOC_ANY_STATE,
            (void *)asc, 0,
            sctp_asconf_iterator_end, NULL, 0);
        if (ret) {
            SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
            /*
             * Free it if we are stopping, or put the work items
             * back on the addr_wq.
             */
            if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
                sctp_asconf_iterator_end(asc, 0);
            } else {
                LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
                    LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
                }
                SCTP_FREE(asc, SCTP_M_ASC_IT);
            }
        }
    }
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */
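
/*
 * Reading the table: SCTP_TIMER_TYPE_HEARTBEAT, for example, runs off
 * net->hb_timer and therefore stores inp, stcb and net, whereas
 * SCTP_TIMER_TYPE_NEWCOOKIE runs off inp->sctp_ep.signature_change and
 * stores only the inp. sctp_timeout_handler() below releases exactly
 * the references that were stored when the timer was started.
 */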
*/ 1736 KASSERT(tmr->self == NULL || tmr->self == tmr, 1737 ("sctp_timeout_handler: tmr->self corrupted")); 1738 KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type), 1739 ("sctp_timeout_handler: invalid timer type %d", tmr->type)); 1740 type = tmr->type; 1741 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 1742 ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p", 1743 type, stcb, stcb->sctp_ep)); 1744 tmr->stopped_from = 0xa001; 1745 if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) { 1746 SCTPDBG(SCTP_DEBUG_TIMER2, 1747 "Timer type %d handler exiting due to CLOSED association.\n", 1748 type); 1749 goto out_decr; 1750 } 1751 tmr->stopped_from = 0xa002; 1752 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type); 1753 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1754 SCTPDBG(SCTP_DEBUG_TIMER2, 1755 "Timer type %d handler exiting due to not being active.\n", 1756 type); 1757 goto out_decr; 1758 } 1759 1760 tmr->stopped_from = 0xa003; 1761 if (stcb) { 1762 SCTP_TCB_LOCK(stcb); 1763 /* 1764 * Release reference so that association can be freed if 1765 * necessary below. This is safe now that we have acquired 1766 * the lock. 1767 */ 1768 atomic_add_int(&stcb->asoc.refcnt, -1); 1769 released_asoc_reference = true; 1770 if ((type != SCTP_TIMER_TYPE_ASOCKILL) && 1771 ((stcb->asoc.state == SCTP_STATE_EMPTY) || 1772 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) { 1773 SCTPDBG(SCTP_DEBUG_TIMER2, 1774 "Timer type %d handler exiting due to CLOSED association.\n", 1775 type); 1776 goto out; 1777 } 1778 } else if (inp != NULL) { 1779 SCTP_INP_WLOCK(inp); 1780 } else { 1781 SCTP_WQ_ADDR_LOCK(); 1782 } 1783 1784 /* Record in stopped_from which timeout occurred. */ 1785 tmr->stopped_from = type; 1786 /* mark as being serviced now */ 1787 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 1788 /* 1789 * Callout has been rescheduled. 1790 */ 1791 goto out; 1792 } 1793 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1794 /* 1795 * Not active, so no action. 1796 */ 1797 goto out; 1798 } 1799 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer); 1800 1801 /* call the handler for the appropriate timer type */ 1802 switch (type) { 1803 case SCTP_TIMER_TYPE_SEND: 1804 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1805 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1806 type, inp, stcb, net)); 1807 SCTP_STAT_INCR(sctps_timodata); 1808 stcb->asoc.timodata++; 1809 stcb->asoc.num_send_timers_up--; 1810 if (stcb->asoc.num_send_timers_up < 0) { 1811 stcb->asoc.num_send_timers_up = 0; 1812 } 1813 SCTP_TCB_LOCK_ASSERT(stcb); 1814 if (sctp_t3rxt_timer(inp, stcb, net)) { 1815 /* no need to unlock on tcb its gone */ 1816 1817 goto out_decr; 1818 } 1819 SCTP_TCB_LOCK_ASSERT(stcb); 1820 #ifdef SCTP_AUDITING_ENABLED 1821 sctp_auditing(4, inp, stcb, net); 1822 #endif 1823 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1824 did_output = true; 1825 if ((stcb->asoc.num_send_timers_up == 0) && 1826 (stcb->asoc.sent_queue_cnt > 0)) { 1827 struct sctp_tmit_chunk *chk; 1828 1829 /* 1830 * Safeguard. If there on some on the sent queue 1831 * somewhere but no timers running something is 1832 * wrong... so we start a timer on the first chunk 1833 * on the send queue on whatever net it is sent to. 
1834 */ 1835 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1836 if (chk->whoTo != NULL) { 1837 break; 1838 } 1839 } 1840 if (chk != NULL) { 1841 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1842 } 1843 } 1844 break; 1845 case SCTP_TIMER_TYPE_INIT: 1846 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1847 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1848 type, inp, stcb, net)); 1849 SCTP_STAT_INCR(sctps_timoinit); 1850 stcb->asoc.timoinit++; 1851 if (sctp_t1init_timer(inp, stcb, net)) { 1852 /* no need to unlock on tcb its gone */ 1853 goto out_decr; 1854 } 1855 did_output = false; 1856 break; 1857 case SCTP_TIMER_TYPE_RECV: 1858 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1859 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1860 type, inp, stcb, net)); 1861 SCTP_STAT_INCR(sctps_timosack); 1862 stcb->asoc.timosack++; 1863 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1864 #ifdef SCTP_AUDITING_ENABLED 1865 sctp_auditing(4, inp, stcb, NULL); 1866 #endif 1867 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1868 did_output = true; 1869 break; 1870 case SCTP_TIMER_TYPE_SHUTDOWN: 1871 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1872 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1873 type, inp, stcb, net)); 1874 SCTP_STAT_INCR(sctps_timoshutdown); 1875 stcb->asoc.timoshutdown++; 1876 if (sctp_shutdown_timer(inp, stcb, net)) { 1877 /* no need to unlock on tcb its gone */ 1878 goto out_decr; 1879 } 1880 #ifdef SCTP_AUDITING_ENABLED 1881 sctp_auditing(4, inp, stcb, net); 1882 #endif 1883 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1884 did_output = true; 1885 break; 1886 case SCTP_TIMER_TYPE_HEARTBEAT: 1887 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1888 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1889 type, inp, stcb, net)); 1890 SCTP_STAT_INCR(sctps_timoheartbeat); 1891 stcb->asoc.timoheartbeat++; 1892 if (sctp_heartbeat_timer(inp, stcb, net)) { 1893 /* no need to unlock on tcb its gone */ 1894 goto out_decr; 1895 } 1896 #ifdef SCTP_AUDITING_ENABLED 1897 sctp_auditing(4, inp, stcb, net); 1898 #endif 1899 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1900 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1901 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1902 did_output = true; 1903 } else { 1904 did_output = false; 1905 } 1906 break; 1907 case SCTP_TIMER_TYPE_COOKIE: 1908 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1909 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1910 type, inp, stcb, net)); 1911 SCTP_STAT_INCR(sctps_timocookie); 1912 stcb->asoc.timocookie++; 1913 if (sctp_cookie_timer(inp, stcb, net)) { 1914 /* no need to unlock on tcb its gone */ 1915 goto out_decr; 1916 } 1917 #ifdef SCTP_AUDITING_ENABLED 1918 sctp_auditing(4, inp, stcb, net); 1919 #endif 1920 /* 1921 * We consider T3 and Cookie timer pretty much the same with 1922 * respect to where from in chunk_output. 
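 * That is why SCTP_OUTPUT_FROM_T3 is passed to sctp_chunk_output()
 * below rather than a cookie specific value.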
1923 */ 1924 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1925 did_output = true; 1926 break; 1927 case SCTP_TIMER_TYPE_NEWCOOKIE: 1928 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1929 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1930 type, inp, stcb, net)); 1931 SCTP_STAT_INCR(sctps_timosecret); 1932 (void)SCTP_GETTIME_TIMEVAL(&tv); 1933 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1934 inp->sctp_ep.last_secret_number = 1935 inp->sctp_ep.current_secret_number; 1936 inp->sctp_ep.current_secret_number++; 1937 if (inp->sctp_ep.current_secret_number >= 1938 SCTP_HOW_MANY_SECRETS) { 1939 inp->sctp_ep.current_secret_number = 0; 1940 } 1941 secret = (int)inp->sctp_ep.current_secret_number; 1942 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1943 inp->sctp_ep.secret_key[secret][i] = 1944 sctp_select_initial_TSN(&inp->sctp_ep); 1945 } 1946 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1947 did_output = false; 1948 break; 1949 case SCTP_TIMER_TYPE_PATHMTURAISE: 1950 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1951 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1952 type, inp, stcb, net)); 1953 SCTP_STAT_INCR(sctps_timopathmtu); 1954 sctp_pathmtu_timer(inp, stcb, net); 1955 did_output = false; 1956 break; 1957 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1958 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1959 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1960 type, inp, stcb, net)); 1961 if (sctp_shutdownack_timer(inp, stcb, net)) { 1962 /* no need to unlock on tcb its gone */ 1963 goto out_decr; 1964 } 1965 SCTP_STAT_INCR(sctps_timoshutdownack); 1966 stcb->asoc.timoshutdownack++; 1967 #ifdef SCTP_AUDITING_ENABLED 1968 sctp_auditing(4, inp, stcb, net); 1969 #endif 1970 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1971 did_output = true; 1972 break; 1973 case SCTP_TIMER_TYPE_ASCONF: 1974 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1975 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1976 type, inp, stcb, net)); 1977 SCTP_STAT_INCR(sctps_timoasconf); 1978 if (sctp_asconf_timer(inp, stcb, net)) { 1979 /* no need to unlock on tcb its gone */ 1980 goto out_decr; 1981 } 1982 #ifdef SCTP_AUDITING_ENABLED 1983 sctp_auditing(4, inp, stcb, net); 1984 #endif 1985 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 1986 did_output = true; 1987 break; 1988 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1989 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1990 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1991 type, inp, stcb, net)); 1992 SCTP_STAT_INCR(sctps_timoshutdownguard); 1993 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 1994 "Shutdown guard timer expired"); 1995 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 1996 /* no need to unlock on tcb its gone */ 1997 goto out_decr; 1998 case SCTP_TIMER_TYPE_AUTOCLOSE: 1999 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2000 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2001 type, inp, stcb, net)); 2002 SCTP_STAT_INCR(sctps_timoautoclose); 2003 sctp_autoclose_timer(inp, stcb); 2004 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2005 did_output = true; 2006 break; 2007 case SCTP_TIMER_TYPE_STRRESET: 2008 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2009 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2010 type, inp, stcb, net)); 2011 SCTP_STAT_INCR(sctps_timostrmrst); 2012 if (sctp_strreset_timer(inp, stcb)) { 
2013 /* no need to unlock on tcb its gone */ 2014 goto out_decr; 2015 } 2016 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2017 did_output = true; 2018 break; 2019 case SCTP_TIMER_TYPE_INPKILL: 2020 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2021 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2022 type, inp, stcb, net)); 2023 SCTP_STAT_INCR(sctps_timoinpkill); 2024 /* 2025 * special case, take away our increment since WE are the 2026 * killer 2027 */ 2028 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2029 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2030 SCTP_INP_DECR_REF(inp); 2031 SCTP_INP_WUNLOCK(inp); 2032 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2033 SCTP_CALLED_FROM_INPKILL_TIMER); 2034 inp = NULL; 2035 goto out_decr; 2036 case SCTP_TIMER_TYPE_ASOCKILL: 2037 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2038 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2039 type, inp, stcb, net)); 2040 SCTP_STAT_INCR(sctps_timoassockill); 2041 /* Can we free it yet? */ 2042 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2043 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2044 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2045 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2046 /* 2047 * free asoc, always unlocks (or destroy's) so prevent 2048 * duplicate unlock or unlock of a free mtx :-0 2049 */ 2050 stcb = NULL; 2051 goto out_decr; 2052 case SCTP_TIMER_TYPE_ADDR_WQ: 2053 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2054 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2055 type, inp, stcb, net)); 2056 sctp_handle_addr_wq(); 2057 did_output = true; 2058 break; 2059 case SCTP_TIMER_TYPE_PRIM_DELETED: 2060 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2061 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2062 type, inp, stcb, net)); 2063 SCTP_STAT_INCR(sctps_timodelprim); 2064 sctp_delete_prim_timer(inp, stcb); 2065 did_output = false; 2066 break; 2067 default: 2068 #ifdef INVARIANTS 2069 panic("Unknown timer type %d", type); 2070 #else 2071 goto out; 2072 #endif 2073 } 2074 #ifdef SCTP_AUDITING_ENABLED 2075 sctp_audit_log(0xF1, (uint8_t)type); 2076 if (inp != NULL) 2077 sctp_auditing(5, inp, stcb, net); 2078 #endif 2079 if (did_output && (stcb != NULL)) { 2080 /* 2081 * Now we need to clean up the control chunk chain if an 2082 * ECNE is on it. It must be marked as UNSENT again so next 2083 * call will continue to send it until such time that we get 2084 * a CWR, to remove it. It is, however, less likely that we 2085 * will find a ecn echo on the chain though. 2086 */ 2087 sctp_fix_ecn_echo(&stcb->asoc); 2088 } 2089 out: 2090 if (stcb != NULL) { 2091 SCTP_TCB_UNLOCK(stcb); 2092 } else if (inp != NULL) { 2093 SCTP_INP_WUNLOCK(inp); 2094 } else { 2095 SCTP_WQ_ADDR_UNLOCK(); 2096 } 2097 2098 out_decr: 2099 /* These reference counts were incremented in sctp_timer_start(). */ 2100 if (inp != NULL) { 2101 SCTP_INP_DECR_REF(inp); 2102 } 2103 if ((stcb != NULL) && !released_asoc_reference) { 2104 atomic_add_int(&stcb->asoc.refcnt, -1); 2105 } 2106 if (net != NULL) { 2107 sctp_free_remote_addr(net); 2108 } 2109 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2110 CURVNET_RESTORE(); 2111 NET_EPOCH_EXIT(et); 2112 } 2113 2114 /*- 2115 * The following table shows which parameters must be provided 2116 * when calling sctp_timer_start(). For parameters not being 2117 * provided, NULL must be used. 
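 * For example (illustrative), arming the retransmission timer for a
 * destination is done as
 *     sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
 * while the address work queue timer takes no pointers at all:
 *     sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, NULL, NULL, NULL);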
2118 * 2119 * |Name |inp |stcb|net | 2120 * |-----------------------------|----|----|----| 2121 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2122 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2123 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2124 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2125 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2126 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2127 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2128 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2129 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2130 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2131 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2132 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2133 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2134 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2135 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2136 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2137 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2138 * 2139 */ 2140 2141 void 2142 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2143 struct sctp_nets *net) 2144 { 2145 struct sctp_timer *tmr; 2146 uint32_t to_ticks; 2147 uint32_t rndval, jitter; 2148 2149 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2150 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2151 t_type, stcb, stcb->sctp_ep)); 2152 tmr = NULL; 2153 if (stcb != NULL) { 2154 SCTP_TCB_LOCK_ASSERT(stcb); 2155 } else if (inp != NULL) { 2156 SCTP_INP_WLOCK_ASSERT(inp); 2157 } else { 2158 SCTP_WQ_ADDR_LOCK_ASSERT(); 2159 } 2160 if (stcb != NULL) { 2161 /* 2162 * Don't restart timer on association that's about to be 2163 * killed. 2164 */ 2165 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2166 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2167 SCTPDBG(SCTP_DEBUG_TIMER2, 2168 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2169 t_type, inp, stcb, net); 2170 return; 2171 } 2172 /* Don't restart timer on net that's been removed. */ 2173 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2174 SCTPDBG(SCTP_DEBUG_TIMER2, 2175 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2176 t_type, inp, stcb, net); 2177 return; 2178 } 2179 } 2180 switch (t_type) { 2181 case SCTP_TIMER_TYPE_SEND: 2182 /* Here we use the RTO timer. */ 2183 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2184 #ifdef INVARIANTS 2185 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2186 t_type, inp, stcb, net); 2187 #else 2188 return; 2189 #endif 2190 } 2191 tmr = &net->rxt_timer; 2192 if (net->RTO == 0) { 2193 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2194 } else { 2195 to_ticks = sctp_msecs_to_ticks(net->RTO); 2196 } 2197 break; 2198 case SCTP_TIMER_TYPE_INIT: 2199 /* 2200 * Here we use the INIT timer default usually about 1 2201 * second. 2202 */ 2203 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2204 #ifdef INVARIANTS 2205 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2206 t_type, inp, stcb, net); 2207 #else 2208 return; 2209 #endif 2210 } 2211 tmr = &net->rxt_timer; 2212 if (net->RTO == 0) { 2213 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2214 } else { 2215 to_ticks = sctp_msecs_to_ticks(net->RTO); 2216 } 2217 break; 2218 case SCTP_TIMER_TYPE_RECV: 2219 /* 2220 * Here we use the Delayed-Ack timer value from the inp, 2221 * ususually about 200ms. 
2222 */ 2223 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2224 #ifdef INVARIANTS 2225 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2226 t_type, inp, stcb, net); 2227 #else 2228 return; 2229 #endif 2230 } 2231 tmr = &stcb->asoc.dack_timer; 2232 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2233 break; 2234 case SCTP_TIMER_TYPE_SHUTDOWN: 2235 /* Here we use the RTO of the destination. */ 2236 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2237 #ifdef INVARIANTS 2238 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2239 t_type, inp, stcb, net); 2240 #else 2241 return; 2242 #endif 2243 } 2244 tmr = &net->rxt_timer; 2245 if (net->RTO == 0) { 2246 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2247 } else { 2248 to_ticks = sctp_msecs_to_ticks(net->RTO); 2249 } 2250 break; 2251 case SCTP_TIMER_TYPE_HEARTBEAT: 2252 /* 2253 * The net is used here so that we can add in the RTO. Even 2254 * though we use a different timer. We also add the HB timer 2255 * PLUS a random jitter. 2256 */ 2257 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2258 #ifdef INVARIANTS 2259 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2260 t_type, inp, stcb, net); 2261 #else 2262 return; 2263 #endif 2264 } 2265 if ((net->dest_state & SCTP_ADDR_NOHB) && 2266 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 2267 SCTPDBG(SCTP_DEBUG_TIMER2, 2268 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2269 t_type, inp, stcb, net); 2270 return; 2271 } 2272 tmr = &net->hb_timer; 2273 if (net->RTO == 0) { 2274 to_ticks = stcb->asoc.initial_rto; 2275 } else { 2276 to_ticks = net->RTO; 2277 } 2278 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2279 jitter = rndval % to_ticks; 2280 if (jitter >= (to_ticks >> 1)) { 2281 to_ticks = to_ticks + (jitter - (to_ticks >> 1)); 2282 } else { 2283 to_ticks = to_ticks - jitter; 2284 } 2285 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2286 !(net->dest_state & SCTP_ADDR_PF)) { 2287 to_ticks += net->heart_beat_delay; 2288 } 2289 /* 2290 * Now we must convert the to_ticks that are now in ms to 2291 * ticks. 2292 */ 2293 to_ticks = sctp_msecs_to_ticks(to_ticks); 2294 break; 2295 case SCTP_TIMER_TYPE_COOKIE: 2296 /* 2297 * Here we can use the RTO timer from the network since one 2298 * RTT was complete. If a retransmission happened then we 2299 * will be using the RTO initial value. 2300 */ 2301 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2302 #ifdef INVARIANTS 2303 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2304 t_type, inp, stcb, net); 2305 #else 2306 return; 2307 #endif 2308 } 2309 tmr = &net->rxt_timer; 2310 if (net->RTO == 0) { 2311 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2312 } else { 2313 to_ticks = sctp_msecs_to_ticks(net->RTO); 2314 } 2315 break; 2316 case SCTP_TIMER_TYPE_NEWCOOKIE: 2317 /* 2318 * Nothing needed but the endpoint here ususually about 60 2319 * minutes. 2320 */ 2321 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2322 #ifdef INVARIANTS 2323 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2324 t_type, inp, stcb, net); 2325 #else 2326 return; 2327 #endif 2328 } 2329 tmr = &inp->sctp_ep.signature_change; 2330 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2331 break; 2332 case SCTP_TIMER_TYPE_PATHMTURAISE: 2333 /* 2334 * Here we use the value found in the EP for PMTUD, 2335 * ususually about 10 minutes. 
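 * The timer is not started below for a destination that has
 * SCTP_ADDR_NO_PMTUD set.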
2336 */ 2337 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2338 #ifdef INVARIANTS 2339 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2340 t_type, inp, stcb, net); 2341 #else 2342 return; 2343 #endif 2344 } 2345 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2346 SCTPDBG(SCTP_DEBUG_TIMER2, 2347 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2348 t_type, inp, stcb, net); 2349 return; 2350 } 2351 tmr = &net->pmtu_timer; 2352 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2353 break; 2354 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2355 /* Here we use the RTO of the destination. */ 2356 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2357 #ifdef INVARIANTS 2358 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2359 t_type, inp, stcb, net); 2360 #else 2361 return; 2362 #endif 2363 } 2364 tmr = &net->rxt_timer; 2365 if (net->RTO == 0) { 2366 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2367 } else { 2368 to_ticks = sctp_msecs_to_ticks(net->RTO); 2369 } 2370 break; 2371 case SCTP_TIMER_TYPE_ASCONF: 2372 /* 2373 * Here the timer comes from the stcb but its value is from 2374 * the net's RTO. 2375 */ 2376 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2377 #ifdef INVARIANTS 2378 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2379 t_type, inp, stcb, net); 2380 #else 2381 return; 2382 #endif 2383 } 2384 tmr = &stcb->asoc.asconf_timer; 2385 if (net->RTO == 0) { 2386 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2387 } else { 2388 to_ticks = sctp_msecs_to_ticks(net->RTO); 2389 } 2390 break; 2391 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2392 /* 2393 * Here we use the endpoints shutdown guard timer usually 2394 * about 3 minutes. 2395 */ 2396 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2397 #ifdef INVARIANTS 2398 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2399 t_type, inp, stcb, net); 2400 #else 2401 return; 2402 #endif 2403 } 2404 tmr = &stcb->asoc.shut_guard_timer; 2405 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2406 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2407 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2408 } else { 2409 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2410 } 2411 } else { 2412 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2413 } 2414 break; 2415 case SCTP_TIMER_TYPE_AUTOCLOSE: 2416 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2417 #ifdef INVARIANTS 2418 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2419 t_type, inp, stcb, net); 2420 #else 2421 return; 2422 #endif 2423 } 2424 tmr = &stcb->asoc.autoclose_timer; 2425 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2426 break; 2427 case SCTP_TIMER_TYPE_STRRESET: 2428 /* 2429 * Here the timer comes from the stcb but its value is from 2430 * the net's RTO. 2431 */ 2432 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2433 #ifdef INVARIANTS 2434 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2435 t_type, inp, stcb, net); 2436 #else 2437 return; 2438 #endif 2439 } 2440 tmr = &stcb->asoc.strreset_timer; 2441 if (net->RTO == 0) { 2442 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2443 } else { 2444 to_ticks = sctp_msecs_to_ticks(net->RTO); 2445 } 2446 break; 2447 case SCTP_TIMER_TYPE_INPKILL: 2448 /* 2449 * The inp is setup to die. We re-use the signature_chage 2450 * timer since that has stopped and we are in the GONE 2451 * state. 
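 * (The NEWCOOKIE timer and the INPKILL timer share
 * inp->sctp_ep.signature_change; once the endpoint is GONE no new
 * cookie secrets are generated, so that callout slot is free. The
 * timeout used below is SCTP_INP_KILL_TIMEOUT milliseconds.)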
2452 */ 2453 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2454 #ifdef INVARIANTS 2455 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2456 t_type, inp, stcb, net); 2457 #else 2458 return; 2459 #endif 2460 } 2461 tmr = &inp->sctp_ep.signature_change; 2462 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2463 break; 2464 case SCTP_TIMER_TYPE_ASOCKILL: 2465 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2466 #ifdef INVARIANTS 2467 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2468 t_type, inp, stcb, net); 2469 #else 2470 return; 2471 #endif 2472 } 2473 tmr = &stcb->asoc.strreset_timer; 2474 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2475 break; 2476 case SCTP_TIMER_TYPE_ADDR_WQ: 2477 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2478 #ifdef INVARIANTS 2479 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2480 t_type, inp, stcb, net); 2481 #else 2482 return; 2483 #endif 2484 } 2485 /* Only 1 tick away :-) */ 2486 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2487 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2488 break; 2489 case SCTP_TIMER_TYPE_PRIM_DELETED: 2490 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2491 #ifdef INVARIANTS 2492 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2493 t_type, inp, stcb, net); 2494 #else 2495 return; 2496 #endif 2497 } 2498 tmr = &stcb->asoc.delete_prim_timer; 2499 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2500 break; 2501 default: 2502 #ifdef INVARIANTS 2503 panic("Unknown timer type %d", t_type); 2504 #else 2505 return; 2506 #endif 2507 } 2508 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2509 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2510 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2511 /* 2512 * We do NOT allow you to have it already running. If it is, 2513 * we leave the current one up unchanged. 2514 */ 2515 SCTPDBG(SCTP_DEBUG_TIMER2, 2516 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2517 t_type, inp, stcb, net); 2518 return; 2519 } 2520 /* At this point we can proceed. */ 2521 if (t_type == SCTP_TIMER_TYPE_SEND) { 2522 stcb->asoc.num_send_timers_up++; 2523 } 2524 tmr->stopped_from = 0; 2525 tmr->type = t_type; 2526 tmr->ep = (void *)inp; 2527 tmr->tcb = (void *)stcb; 2528 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2529 tmr->net = NULL; 2530 } else { 2531 tmr->net = (void *)net; 2532 } 2533 tmr->self = (void *)tmr; 2534 tmr->vnet = (void *)curvnet; 2535 tmr->ticks = sctp_get_tick_count(); 2536 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2537 SCTPDBG(SCTP_DEBUG_TIMER2, 2538 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2539 t_type, to_ticks, inp, stcb, net); 2540 /* 2541 * If this is a newly scheduled callout, as opposed to a 2542 * rescheduled one, increment relevant reference counts. 2543 */ 2544 if (tmr->ep != NULL) { 2545 SCTP_INP_INCR_REF(inp); 2546 } 2547 if (tmr->tcb != NULL) { 2548 atomic_add_int(&stcb->asoc.refcnt, 1); 2549 } 2550 if (tmr->net != NULL) { 2551 atomic_add_int(&net->ref_count, 1); 2552 } 2553 } else { 2554 /* 2555 * This should not happen, since we checked for pending 2556 * above. 2557 */ 2558 SCTPDBG(SCTP_DEBUG_TIMER2, 2559 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2560 t_type, to_ticks, inp, stcb, net); 2561 } 2562 return; 2563 } 2564 2565 /*- 2566 * The following table shows which parameters must be provided 2567 * when calling sctp_timer_stop(). 
For parameters not being 2568 * provided, NULL must be used. 2569 * 2570 * |Name |inp |stcb|net | 2571 * |-----------------------------|----|----|----| 2572 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2573 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2574 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2575 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2576 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2577 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2578 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2579 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2580 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2581 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2582 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2583 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2584 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2585 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2586 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2587 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2588 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2589 * 2590 */ 2591 2592 void 2593 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2594 struct sctp_nets *net, uint32_t from) 2595 { 2596 struct sctp_timer *tmr; 2597 2598 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2599 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2600 t_type, stcb, stcb->sctp_ep)); 2601 if (stcb != NULL) { 2602 SCTP_TCB_LOCK_ASSERT(stcb); 2603 } else if (inp != NULL) { 2604 SCTP_INP_WLOCK_ASSERT(inp); 2605 } else { 2606 SCTP_WQ_ADDR_LOCK_ASSERT(); 2607 } 2608 tmr = NULL; 2609 switch (t_type) { 2610 case SCTP_TIMER_TYPE_SEND: 2611 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2612 #ifdef INVARIANTS 2613 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2614 t_type, inp, stcb, net); 2615 #else 2616 return; 2617 #endif 2618 } 2619 tmr = &net->rxt_timer; 2620 break; 2621 case SCTP_TIMER_TYPE_INIT: 2622 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2623 #ifdef INVARIANTS 2624 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2625 t_type, inp, stcb, net); 2626 #else 2627 return; 2628 #endif 2629 } 2630 tmr = &net->rxt_timer; 2631 break; 2632 case SCTP_TIMER_TYPE_RECV: 2633 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2634 #ifdef INVARIANTS 2635 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2636 t_type, inp, stcb, net); 2637 #else 2638 return; 2639 #endif 2640 } 2641 tmr = &stcb->asoc.dack_timer; 2642 break; 2643 case SCTP_TIMER_TYPE_SHUTDOWN: 2644 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2645 #ifdef INVARIANTS 2646 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2647 t_type, inp, stcb, net); 2648 #else 2649 return; 2650 #endif 2651 } 2652 tmr = &net->rxt_timer; 2653 break; 2654 case SCTP_TIMER_TYPE_HEARTBEAT: 2655 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2656 #ifdef INVARIANTS 2657 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2658 t_type, inp, stcb, net); 2659 #else 2660 return; 2661 #endif 2662 } 2663 tmr = &net->hb_timer; 2664 break; 2665 case SCTP_TIMER_TYPE_COOKIE: 2666 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2667 #ifdef INVARIANTS 2668 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2669 t_type, inp, stcb, net); 2670 #else 2671 return; 2672 #endif 2673 } 2674 tmr = &net->rxt_timer; 2675 break; 2676 case SCTP_TIMER_TYPE_NEWCOOKIE: 2677 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2678 #ifdef INVARIANTS 2679 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, 
net = %p", 2680 t_type, inp, stcb, net); 2681 #else 2682 return; 2683 #endif 2684 } 2685 tmr = &inp->sctp_ep.signature_change; 2686 break; 2687 case SCTP_TIMER_TYPE_PATHMTURAISE: 2688 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2689 #ifdef INVARIANTS 2690 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2691 t_type, inp, stcb, net); 2692 #else 2693 return; 2694 #endif 2695 } 2696 tmr = &net->pmtu_timer; 2697 break; 2698 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2699 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2700 #ifdef INVARIANTS 2701 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2702 t_type, inp, stcb, net); 2703 #else 2704 return; 2705 #endif 2706 } 2707 tmr = &net->rxt_timer; 2708 break; 2709 case SCTP_TIMER_TYPE_ASCONF: 2710 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2711 #ifdef INVARIANTS 2712 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2713 t_type, inp, stcb, net); 2714 #else 2715 return; 2716 #endif 2717 } 2718 tmr = &stcb->asoc.asconf_timer; 2719 break; 2720 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2721 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2722 #ifdef INVARIANTS 2723 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2724 t_type, inp, stcb, net); 2725 #else 2726 return; 2727 #endif 2728 } 2729 tmr = &stcb->asoc.shut_guard_timer; 2730 break; 2731 case SCTP_TIMER_TYPE_AUTOCLOSE: 2732 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2733 #ifdef INVARIANTS 2734 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2735 t_type, inp, stcb, net); 2736 #else 2737 return; 2738 #endif 2739 } 2740 tmr = &stcb->asoc.autoclose_timer; 2741 break; 2742 case SCTP_TIMER_TYPE_STRRESET: 2743 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2744 #ifdef INVARIANTS 2745 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2746 t_type, inp, stcb, net); 2747 #else 2748 return; 2749 #endif 2750 } 2751 tmr = &stcb->asoc.strreset_timer; 2752 break; 2753 case SCTP_TIMER_TYPE_INPKILL: 2754 /* 2755 * The inp is setup to die. We re-use the signature_chage 2756 * timer since that has stopped and we are in the GONE 2757 * state. 
2758 */ 2759 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2760 #ifdef INVARIANTS 2761 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2762 t_type, inp, stcb, net); 2763 #else 2764 return; 2765 #endif 2766 } 2767 tmr = &inp->sctp_ep.signature_change; 2768 break; 2769 case SCTP_TIMER_TYPE_ASOCKILL: 2770 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2771 #ifdef INVARIANTS 2772 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2773 t_type, inp, stcb, net); 2774 #else 2775 return; 2776 #endif 2777 } 2778 tmr = &stcb->asoc.strreset_timer; 2779 break; 2780 case SCTP_TIMER_TYPE_ADDR_WQ: 2781 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2782 #ifdef INVARIANTS 2783 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2784 t_type, inp, stcb, net); 2785 #else 2786 return; 2787 #endif 2788 } 2789 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2790 break; 2791 case SCTP_TIMER_TYPE_PRIM_DELETED: 2792 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2793 #ifdef INVARIANTS 2794 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2795 t_type, inp, stcb, net); 2796 #else 2797 return; 2798 #endif 2799 } 2800 tmr = &stcb->asoc.delete_prim_timer; 2801 break; 2802 default: 2803 #ifdef INVARIANTS 2804 panic("Unknown timer type %d", t_type); 2805 #else 2806 return; 2807 #endif 2808 } 2809 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2810 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2811 (tmr->type != t_type)) { 2812 /* 2813 * Ok we have a timer that is under joint use. Cookie timer 2814 * per chance with the SEND timer. We therefore are NOT 2815 * running the timer that the caller wants stopped. So just 2816 * return. 2817 */ 2818 SCTPDBG(SCTP_DEBUG_TIMER2, 2819 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2820 t_type, inp, stcb, net); 2821 return; 2822 } 2823 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2824 stcb->asoc.num_send_timers_up--; 2825 if (stcb->asoc.num_send_timers_up < 0) { 2826 stcb->asoc.num_send_timers_up = 0; 2827 } 2828 } 2829 tmr->self = NULL; 2830 tmr->stopped_from = from; 2831 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2832 KASSERT(tmr->ep == inp, 2833 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2834 t_type, inp, tmr->ep)); 2835 KASSERT(tmr->tcb == stcb, 2836 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2837 t_type, stcb, tmr->tcb)); 2838 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2839 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2840 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2841 t_type, net, tmr->net)); 2842 SCTPDBG(SCTP_DEBUG_TIMER2, 2843 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2844 t_type, inp, stcb, net); 2845 /* 2846 * If the timer was actually stopped, decrement reference 2847 * counts that were incremented in sctp_timer_start(). 2848 */ 2849 if (tmr->ep != NULL) { 2850 SCTP_INP_DECR_REF(inp); 2851 tmr->ep = NULL; 2852 } 2853 if (tmr->tcb != NULL) { 2854 atomic_add_int(&stcb->asoc.refcnt, -1); 2855 tmr->tcb = NULL; 2856 } 2857 if (tmr->net != NULL) { 2858 /* 2859 * Can't use net, since it doesn't work for 2860 * SCTP_TIMER_TYPE_ASCONF. 
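 * An ASCONF timer is stopped with net == NULL (see the table above),
 * while tmr->net still holds the destination whose reference was taken
 * in sctp_timer_start(), so the stored pointer is what must be
 * released here.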
2861 */ 2862 sctp_free_remote_addr((struct sctp_nets *)tmr->net); 2863 tmr->net = NULL; 2864 } 2865 } else { 2866 SCTPDBG(SCTP_DEBUG_TIMER2, 2867 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2868 t_type, inp, stcb, net); 2869 } 2870 return; 2871 } 2872 2873 uint32_t 2874 sctp_calculate_len(struct mbuf *m) 2875 { 2876 uint32_t tlen = 0; 2877 struct mbuf *at; 2878 2879 at = m; 2880 while (at) { 2881 tlen += SCTP_BUF_LEN(at); 2882 at = SCTP_BUF_NEXT(at); 2883 } 2884 return (tlen); 2885 } 2886 2887 void 2888 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2889 struct sctp_association *asoc, uint32_t mtu) 2890 { 2891 /* 2892 * Reset the P-MTU size on this association, this involves changing 2893 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2894 * allow the DF flag to be cleared. 2895 */ 2896 struct sctp_tmit_chunk *chk; 2897 unsigned int eff_mtu, ovh; 2898 2899 asoc->smallest_mtu = mtu; 2900 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2901 ovh = SCTP_MIN_OVERHEAD; 2902 } else { 2903 ovh = SCTP_MIN_V4_OVERHEAD; 2904 } 2905 eff_mtu = mtu - ovh; 2906 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2907 if (chk->send_size > eff_mtu) { 2908 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2909 } 2910 } 2911 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2912 if (chk->send_size > eff_mtu) { 2913 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2914 } 2915 } 2916 } 2917 2918 /* 2919 * Given an association and starting time of the current RTT period, update 2920 * RTO in number of msecs. net should point to the current network. 2921 * Return 1, if an RTO update was performed, return 0 if no update was 2922 * performed due to invalid starting point. 2923 */ 2924 2925 int 2926 sctp_calculate_rto(struct sctp_tcb *stcb, 2927 struct sctp_association *asoc, 2928 struct sctp_nets *net, 2929 struct timeval *old, 2930 int rtt_from_sack) 2931 { 2932 struct timeval now; 2933 uint64_t rtt_us; /* RTT in us */ 2934 int32_t rtt; /* RTT in ms */ 2935 uint32_t new_rto; 2936 int first_measure = 0; 2937 2938 /************************/ 2939 /* 1. calculate new RTT */ 2940 /************************/ 2941 /* get the current time */ 2942 if (stcb->asoc.use_precise_time) { 2943 (void)SCTP_GETPTIME_TIMEVAL(&now); 2944 } else { 2945 (void)SCTP_GETTIME_TIMEVAL(&now); 2946 } 2947 if ((old->tv_sec > now.tv_sec) || 2948 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2949 /* The starting point is in the future. */ 2950 return (0); 2951 } 2952 timevalsub(&now, old); 2953 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2954 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2955 /* The RTT is larger than a sane value. */ 2956 return (0); 2957 } 2958 /* store the current RTT in us */ 2959 net->rtt = rtt_us; 2960 /* compute rtt in ms */ 2961 rtt = (int32_t)(net->rtt / 1000); 2962 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2963 /* 2964 * Tell the CC module that a new update has just occurred 2965 * from a sack 2966 */ 2967 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2968 } 2969 /* 2970 * Do we need to determine the lan? We do this only on sacks i.e. 2971 * RTT being determined from data not non-data (HB/INIT->INITACK). 2972 */ 2973 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2974 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2975 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2976 net->lan_type = SCTP_LAN_INTERNET; 2977 } else { 2978 net->lan_type = SCTP_LAN_LOCAL; 2979 } 2980 } 2981 2982 /***************************/ 2983 /* 2. 
update RTTVAR & SRTT */ 2984 /***************************/ 2985 /*- 2986 * Compute the scaled average lastsa and the 2987 * scaled variance lastsv as described in van Jacobson 2988 * Paper "Congestion Avoidance and Control", Annex A. 2989 * 2990 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2991 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2992 */ 2993 if (net->RTO_measured) { 2994 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2995 net->lastsa += rtt; 2996 if (rtt < 0) { 2997 rtt = -rtt; 2998 } 2999 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 3000 net->lastsv += rtt; 3001 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3002 rto_logging(net, SCTP_LOG_RTTVAR); 3003 } 3004 } else { 3005 /* First RTO measurment */ 3006 net->RTO_measured = 1; 3007 first_measure = 1; 3008 net->lastsa = rtt << SCTP_RTT_SHIFT; 3009 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3010 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3011 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3012 } 3013 } 3014 if (net->lastsv == 0) { 3015 net->lastsv = SCTP_CLOCK_GRANULARITY; 3016 } 3017 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3018 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3019 (stcb->asoc.sat_network_lockout == 0)) { 3020 stcb->asoc.sat_network = 1; 3021 } else if ((!first_measure) && stcb->asoc.sat_network) { 3022 stcb->asoc.sat_network = 0; 3023 stcb->asoc.sat_network_lockout = 1; 3024 } 3025 /* bound it, per C6/C7 in Section 5.3.1 */ 3026 if (new_rto < stcb->asoc.minrto) { 3027 new_rto = stcb->asoc.minrto; 3028 } 3029 if (new_rto > stcb->asoc.maxrto) { 3030 new_rto = stcb->asoc.maxrto; 3031 } 3032 net->RTO = new_rto; 3033 return (1); 3034 } 3035 3036 /* 3037 * return a pointer to a contiguous piece of data from the given mbuf chain 3038 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3039 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 3040 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 3041 */ 3042 caddr_t 3043 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3044 { 3045 uint32_t count; 3046 uint8_t *ptr; 3047 3048 ptr = in_ptr; 3049 if ((off < 0) || (len <= 0)) 3050 return (NULL); 3051 3052 /* find the desired start location */ 3053 while ((m != NULL) && (off > 0)) { 3054 if (off < SCTP_BUF_LEN(m)) 3055 break; 3056 off -= SCTP_BUF_LEN(m); 3057 m = SCTP_BUF_NEXT(m); 3058 } 3059 if (m == NULL) 3060 return (NULL); 3061 3062 /* is the current mbuf large enough (eg. contiguous)? */ 3063 if ((SCTP_BUF_LEN(m) - off) >= len) { 3064 return (mtod(m, caddr_t)+off); 3065 } else { 3066 /* else, it spans more than one mbuf, so save a temp copy... 
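 * The loop below copies up to 'len' bytes from successive mbufs into
 * the caller supplied buffer and returns that buffer instead of a
 * pointer into the chain.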
*/ 3067 while ((m != NULL) && (len > 0)) { 3068 count = min(SCTP_BUF_LEN(m) - off, len); 3069 memcpy(ptr, mtod(m, caddr_t)+off, count); 3070 len -= count; 3071 ptr += count; 3072 off = 0; 3073 m = SCTP_BUF_NEXT(m); 3074 } 3075 if ((m == NULL) && (len > 0)) 3076 return (NULL); 3077 else 3078 return ((caddr_t)in_ptr); 3079 } 3080 } 3081 3082 struct sctp_paramhdr * 3083 sctp_get_next_param(struct mbuf *m, 3084 int offset, 3085 struct sctp_paramhdr *pull, 3086 int pull_limit) 3087 { 3088 /* This just provides a typed signature to Peter's Pull routine */ 3089 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3090 (uint8_t *)pull)); 3091 } 3092 3093 struct mbuf * 3094 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3095 { 3096 struct mbuf *m_last; 3097 caddr_t dp; 3098 3099 if (padlen > 3) { 3100 return (NULL); 3101 } 3102 if (padlen <= M_TRAILINGSPACE(m)) { 3103 /* 3104 * The easy way. We hope the majority of the time we hit 3105 * here :) 3106 */ 3107 m_last = m; 3108 } else { 3109 /* Hard way we must grow the mbuf chain */ 3110 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3111 if (m_last == NULL) { 3112 return (NULL); 3113 } 3114 SCTP_BUF_LEN(m_last) = 0; 3115 SCTP_BUF_NEXT(m_last) = NULL; 3116 SCTP_BUF_NEXT(m) = m_last; 3117 } 3118 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3119 SCTP_BUF_LEN(m_last) += padlen; 3120 memset(dp, 0, padlen); 3121 return (m_last); 3122 } 3123 3124 struct mbuf * 3125 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3126 { 3127 /* find the last mbuf in chain and pad it */ 3128 struct mbuf *m_at; 3129 3130 if (last_mbuf != NULL) { 3131 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3132 } else { 3133 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3134 if (SCTP_BUF_NEXT(m_at) == NULL) { 3135 return (sctp_add_pad_tombuf(m_at, padval)); 3136 } 3137 } 3138 } 3139 return (NULL); 3140 } 3141 3142 static void 3143 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3144 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked) 3145 { 3146 struct mbuf *m_notify; 3147 struct sctp_assoc_change *sac; 3148 struct sctp_queued_to_read *control; 3149 unsigned int notif_len; 3150 uint16_t abort_len; 3151 unsigned int i; 3152 3153 if (stcb == NULL) { 3154 return; 3155 } 3156 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3157 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3158 if (abort != NULL) { 3159 abort_len = ntohs(abort->ch.chunk_length); 3160 /* 3161 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3162 * contiguous. 3163 */ 3164 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3165 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3166 } 3167 } else { 3168 abort_len = 0; 3169 } 3170 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3171 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3172 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3173 notif_len += abort_len; 3174 } 3175 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3176 if (m_notify == NULL) { 3177 /* Retry with smaller value. 
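 * That is, fall back to a plain struct sctp_assoc_change without the
 * optional feature list or ABORT payload.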
*/ 3178 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3179 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3180 if (m_notify == NULL) { 3181 goto set_error; 3182 } 3183 } 3184 SCTP_BUF_NEXT(m_notify) = NULL; 3185 sac = mtod(m_notify, struct sctp_assoc_change *); 3186 memset(sac, 0, notif_len); 3187 sac->sac_type = SCTP_ASSOC_CHANGE; 3188 sac->sac_flags = 0; 3189 sac->sac_length = sizeof(struct sctp_assoc_change); 3190 sac->sac_state = state; 3191 sac->sac_error = error; 3192 /* XXX verify these stream counts */ 3193 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3194 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3195 sac->sac_assoc_id = sctp_get_associd(stcb); 3196 if (notif_len > sizeof(struct sctp_assoc_change)) { 3197 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3198 i = 0; 3199 if (stcb->asoc.prsctp_supported == 1) { 3200 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3201 } 3202 if (stcb->asoc.auth_supported == 1) { 3203 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3204 } 3205 if (stcb->asoc.asconf_supported == 1) { 3206 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3207 } 3208 if (stcb->asoc.idata_supported == 1) { 3209 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3210 } 3211 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3212 if (stcb->asoc.reconfig_supported == 1) { 3213 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3214 } 3215 sac->sac_length += i; 3216 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3217 memcpy(sac->sac_info, abort, abort_len); 3218 sac->sac_length += abort_len; 3219 } 3220 } 3221 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3222 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3223 0, 0, stcb->asoc.context, 0, 0, 0, 3224 m_notify); 3225 if (control != NULL) { 3226 control->length = SCTP_BUF_LEN(m_notify); 3227 control->spec_flags = M_NOTIFICATION; 3228 /* not that we need this */ 3229 control->tail_mbuf = m_notify; 3230 sctp_add_to_readq(stcb->sctp_ep, stcb, 3231 control, 3232 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3233 so_locked); 3234 } else { 3235 sctp_m_freem(m_notify); 3236 } 3237 } 3238 /* 3239 * For 1-to-1 style sockets, we send up and error when an ABORT 3240 * comes in. 
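 * The errno set below depends on the state: ECONNREFUSED for an ABORT
 * from the peer while still in COOKIE_WAIT, ECONNRESET for a peer
 * ABORT in any later state, ETIMEDOUT for a local failure while still
 * handshaking (COOKIE_WAIT/COOKIE_ECHOED), and ECONNABORTED otherwise.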
3241 */ 3242 set_error: 3243 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3244 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3245 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3246 SOCK_LOCK(stcb->sctp_socket); 3247 if (from_peer) { 3248 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3249 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3250 stcb->sctp_socket->so_error = ECONNREFUSED; 3251 } else { 3252 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3253 stcb->sctp_socket->so_error = ECONNRESET; 3254 } 3255 } else { 3256 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 3257 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 3258 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3259 stcb->sctp_socket->so_error = ETIMEDOUT; 3260 } else { 3261 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3262 stcb->sctp_socket->so_error = ECONNABORTED; 3263 } 3264 } 3265 SOCK_UNLOCK(stcb->sctp_socket); 3266 } 3267 /* Wake ANY sleepers */ 3268 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3269 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3270 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3271 socantrcvmore(stcb->sctp_socket); 3272 } 3273 sorwakeup(stcb->sctp_socket); 3274 sowwakeup(stcb->sctp_socket); 3275 } 3276 3277 static void 3278 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3279 struct sockaddr *sa, uint32_t error, int so_locked) 3280 { 3281 struct mbuf *m_notify; 3282 struct sctp_paddr_change *spc; 3283 struct sctp_queued_to_read *control; 3284 3285 if ((stcb == NULL) || 3286 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3287 /* event not enabled */ 3288 return; 3289 } 3290 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3291 if (m_notify == NULL) 3292 return; 3293 SCTP_BUF_LEN(m_notify) = 0; 3294 spc = mtod(m_notify, struct sctp_paddr_change *); 3295 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3296 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3297 spc->spc_flags = 0; 3298 spc->spc_length = sizeof(struct sctp_paddr_change); 3299 switch (sa->sa_family) { 3300 #ifdef INET 3301 case AF_INET: 3302 #ifdef INET6 3303 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3304 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3305 (struct sockaddr_in6 *)&spc->spc_aaddr); 3306 } else { 3307 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3308 } 3309 #else 3310 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3311 #endif 3312 break; 3313 #endif 3314 #ifdef INET6 3315 case AF_INET6: 3316 { 3317 struct sockaddr_in6 *sin6; 3318 3319 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3320 3321 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3322 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3323 if (sin6->sin6_scope_id == 0) { 3324 /* recover scope_id for user */ 3325 (void)sa6_recoverscope(sin6); 3326 } else { 3327 /* clear embedded scope_id for user */ 3328 in6_clearscope(&sin6->sin6_addr); 3329 } 3330 } 3331 break; 3332 } 3333 #endif 3334 default: 3335 /* TSNH */ 3336 break; 3337 } 3338 spc->spc_state = state; 3339 spc->spc_error = error; 3340 spc->spc_assoc_id = sctp_get_associd(stcb); 3341 3342 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3343 SCTP_BUF_NEXT(m_notify) = NULL; 3344 3345 /* append to socket */ 3346 control = sctp_build_readq_entry(stcb, 
stcb->asoc.primary_destination, 3347 0, 0, stcb->asoc.context, 0, 0, 0, 3348 m_notify); 3349 if (control == NULL) { 3350 /* no memory */ 3351 sctp_m_freem(m_notify); 3352 return; 3353 } 3354 control->length = SCTP_BUF_LEN(m_notify); 3355 control->spec_flags = M_NOTIFICATION; 3356 /* not that we need this */ 3357 control->tail_mbuf = m_notify; 3358 sctp_add_to_readq(stcb->sctp_ep, stcb, 3359 control, 3360 &stcb->sctp_socket->so_rcv, 1, 3361 SCTP_READ_LOCK_NOT_HELD, 3362 so_locked); 3363 } 3364 3365 static void 3366 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3367 struct sctp_tmit_chunk *chk, int so_locked) 3368 { 3369 struct mbuf *m_notify; 3370 struct sctp_send_failed *ssf; 3371 struct sctp_send_failed_event *ssfe; 3372 struct sctp_queued_to_read *control; 3373 struct sctp_chunkhdr *chkhdr; 3374 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3375 3376 if ((stcb == NULL) || 3377 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3378 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3379 /* event not enabled */ 3380 return; 3381 } 3382 3383 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3384 notifhdr_len = sizeof(struct sctp_send_failed_event); 3385 } else { 3386 notifhdr_len = sizeof(struct sctp_send_failed); 3387 } 3388 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3389 if (m_notify == NULL) 3390 /* no space left */ 3391 return; 3392 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3393 if (stcb->asoc.idata_supported) { 3394 chkhdr_len = sizeof(struct sctp_idata_chunk); 3395 } else { 3396 chkhdr_len = sizeof(struct sctp_data_chunk); 3397 } 3398 /* Use some defaults in case we can't access the chunk header */ 3399 if (chk->send_size >= chkhdr_len) { 3400 payload_len = chk->send_size - chkhdr_len; 3401 } else { 3402 payload_len = 0; 3403 } 3404 padding_len = 0; 3405 if (chk->data != NULL) { 3406 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3407 if (chkhdr != NULL) { 3408 chk_len = ntohs(chkhdr->chunk_length); 3409 if ((chk_len >= chkhdr_len) && 3410 (chk->send_size >= chk_len) && 3411 (chk->send_size - chk_len < 4)) { 3412 padding_len = chk->send_size - chk_len; 3413 payload_len = chk->send_size - chkhdr_len - padding_len; 3414 } 3415 } 3416 } 3417 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3418 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3419 memset(ssfe, 0, notifhdr_len); 3420 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3421 if (sent) { 3422 ssfe->ssfe_flags = SCTP_DATA_SENT; 3423 } else { 3424 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3425 } 3426 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3427 ssfe->ssfe_error = error; 3428 /* not exactly what the user sent in, but should be close :) */ 3429 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3430 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3431 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3432 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3433 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3434 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3435 } else { 3436 ssf = mtod(m_notify, struct sctp_send_failed *); 3437 memset(ssf, 0, notifhdr_len); 3438 ssf->ssf_type = SCTP_SEND_FAILED; 3439 if (sent) { 3440 ssf->ssf_flags = SCTP_DATA_SENT; 3441 } else { 3442 ssf->ssf_flags = SCTP_DATA_UNSENT; 3443 } 3444 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3445 ssf->ssf_error = 
error; 3446 /* not exactly what the user sent in, but should be close :) */ 3447 ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3448 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3449 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3450 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3451 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3452 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3453 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3454 } 3455 if (chk->data != NULL) { 3456 /* Trim off the sctp chunk header (it should be there) */ 3457 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3458 m_adj(chk->data, chkhdr_len); 3459 m_adj(chk->data, -padding_len); 3460 sctp_mbuf_crush(chk->data); 3461 chk->send_size -= (chkhdr_len + padding_len); 3462 } 3463 } 3464 SCTP_BUF_NEXT(m_notify) = chk->data; 3465 /* Steal off the mbuf */ 3466 chk->data = NULL; 3467 /* 3468 * For this case, we check the actual socket buffer, since the assoc 3469 * is going away we don't want to overfill the socket buffer for a 3470 * non-reader 3471 */ 3472 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3473 sctp_m_freem(m_notify); 3474 return; 3475 } 3476 /* append to socket */ 3477 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3478 0, 0, stcb->asoc.context, 0, 0, 0, 3479 m_notify); 3480 if (control == NULL) { 3481 /* no memory */ 3482 sctp_m_freem(m_notify); 3483 return; 3484 } 3485 control->length = SCTP_BUF_LEN(m_notify); 3486 control->spec_flags = M_NOTIFICATION; 3487 /* not that we need this */ 3488 control->tail_mbuf = m_notify; 3489 sctp_add_to_readq(stcb->sctp_ep, stcb, 3490 control, 3491 &stcb->sctp_socket->so_rcv, 1, 3492 SCTP_READ_LOCK_NOT_HELD, 3493 so_locked); 3494 } 3495 3496 static void 3497 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3498 struct sctp_stream_queue_pending *sp, int so_locked) 3499 { 3500 struct mbuf *m_notify; 3501 struct sctp_send_failed *ssf; 3502 struct sctp_send_failed_event *ssfe; 3503 struct sctp_queued_to_read *control; 3504 int notifhdr_len; 3505 3506 if ((stcb == NULL) || 3507 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3508 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3509 /* event not enabled */ 3510 return; 3511 } 3512 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3513 notifhdr_len = sizeof(struct sctp_send_failed_event); 3514 } else { 3515 notifhdr_len = sizeof(struct sctp_send_failed); 3516 } 3517 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3518 if (m_notify == NULL) { 3519 /* no space left */ 3520 return; 3521 } 3522 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3523 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3524 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3525 memset(ssfe, 0, notifhdr_len); 3526 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3527 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3528 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3529 ssfe->ssfe_error = error; 3530 /* not exactly what the user sent in, but should be close :) */ 3531 ssfe->ssfe_info.snd_sid = sp->sid; 3532 if (sp->some_taken) { 3533 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3534 } else { 3535 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3536 } 3537 ssfe->ssfe_info.snd_ppid = sp->ppid; 3538 ssfe->ssfe_info.snd_context = sp->context; 3539 ssfe->ssfe_info.snd_assoc_id = 
sctp_get_associd(stcb); 3540 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3541 } else { 3542 ssf = mtod(m_notify, struct sctp_send_failed *); 3543 memset(ssf, 0, notifhdr_len); 3544 ssf->ssf_type = SCTP_SEND_FAILED; 3545 ssf->ssf_flags = SCTP_DATA_UNSENT; 3546 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3547 ssf->ssf_error = error; 3548 /* not exactly what the user sent in, but should be close :) */ 3549 ssf->ssf_info.sinfo_stream = sp->sid; 3550 ssf->ssf_info.sinfo_ssn = 0; 3551 if (sp->some_taken) { 3552 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3553 } else { 3554 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3555 } 3556 ssf->ssf_info.sinfo_ppid = sp->ppid; 3557 ssf->ssf_info.sinfo_context = sp->context; 3558 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3559 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3560 } 3561 SCTP_BUF_NEXT(m_notify) = sp->data; 3562 3563 /* Steal off the mbuf */ 3564 sp->data = NULL; 3565 /* 3566 * For this case, we check the actual socket buffer, since the assoc 3567 * is going away we don't want to overfill the socket buffer for a 3568 * non-reader 3569 */ 3570 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3571 sctp_m_freem(m_notify); 3572 return; 3573 } 3574 /* append to socket */ 3575 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3576 0, 0, stcb->asoc.context, 0, 0, 0, 3577 m_notify); 3578 if (control == NULL) { 3579 /* no memory */ 3580 sctp_m_freem(m_notify); 3581 return; 3582 } 3583 control->length = SCTP_BUF_LEN(m_notify); 3584 control->spec_flags = M_NOTIFICATION; 3585 /* not that we need this */ 3586 control->tail_mbuf = m_notify; 3587 sctp_add_to_readq(stcb->sctp_ep, stcb, 3588 control, 3589 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3590 } 3591 3592 static void 3593 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3594 { 3595 struct mbuf *m_notify; 3596 struct sctp_adaptation_event *sai; 3597 struct sctp_queued_to_read *control; 3598 3599 if ((stcb == NULL) || 3600 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3601 /* event not enabled */ 3602 return; 3603 } 3604 3605 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3606 if (m_notify == NULL) 3607 /* no space left */ 3608 return; 3609 SCTP_BUF_LEN(m_notify) = 0; 3610 sai = mtod(m_notify, struct sctp_adaptation_event *); 3611 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3612 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3613 sai->sai_flags = 0; 3614 sai->sai_length = sizeof(struct sctp_adaptation_event); 3615 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3616 sai->sai_assoc_id = sctp_get_associd(stcb); 3617 3618 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3619 SCTP_BUF_NEXT(m_notify) = NULL; 3620 3621 /* append to socket */ 3622 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3623 0, 0, stcb->asoc.context, 0, 0, 0, 3624 m_notify); 3625 if (control == NULL) { 3626 /* no memory */ 3627 sctp_m_freem(m_notify); 3628 return; 3629 } 3630 control->length = SCTP_BUF_LEN(m_notify); 3631 control->spec_flags = M_NOTIFICATION; 3632 /* not that we need this */ 3633 control->tail_mbuf = m_notify; 3634 sctp_add_to_readq(stcb->sctp_ep, stcb, 3635 control, 3636 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3637 } 3638 3639 /* This always must be called with the read-queue LOCKED in the INP */ 3640 static void 3641 
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3642 uint32_t val, int so_locked) 3643 { 3644 struct mbuf *m_notify; 3645 struct sctp_pdapi_event *pdapi; 3646 struct sctp_queued_to_read *control; 3647 struct sockbuf *sb; 3648 3649 if ((stcb == NULL) || 3650 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3651 /* event not enabled */ 3652 return; 3653 } 3654 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3655 return; 3656 } 3657 3658 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3659 if (m_notify == NULL) 3660 /* no space left */ 3661 return; 3662 SCTP_BUF_LEN(m_notify) = 0; 3663 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3664 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3665 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3666 pdapi->pdapi_flags = 0; 3667 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3668 pdapi->pdapi_indication = error; 3669 pdapi->pdapi_stream = (val >> 16); 3670 pdapi->pdapi_seq = (val & 0x0000ffff); 3671 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3672 3673 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3674 SCTP_BUF_NEXT(m_notify) = NULL; 3675 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3676 0, 0, stcb->asoc.context, 0, 0, 0, 3677 m_notify); 3678 if (control == NULL) { 3679 /* no memory */ 3680 sctp_m_freem(m_notify); 3681 return; 3682 } 3683 control->length = SCTP_BUF_LEN(m_notify); 3684 control->spec_flags = M_NOTIFICATION; 3685 /* not that we need this */ 3686 control->tail_mbuf = m_notify; 3687 sb = &stcb->sctp_socket->so_rcv; 3688 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3689 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3690 } 3691 sctp_sballoc(stcb, sb, m_notify); 3692 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3693 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3694 } 3695 control->end_added = 1; 3696 if (stcb->asoc.control_pdapi) 3697 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3698 else { 3699 /* we really should not see this case */ 3700 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3701 } 3702 if (stcb->sctp_ep && stcb->sctp_socket) { 3703 /* This should always be the case */ 3704 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3705 } 3706 } 3707 3708 static void 3709 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3710 { 3711 struct mbuf *m_notify; 3712 struct sctp_shutdown_event *sse; 3713 struct sctp_queued_to_read *control; 3714 3715 /* 3716 * For TCP model AND UDP connected sockets we will send an error up 3717 * when an SHUTDOWN completes 3718 */ 3719 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3720 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3721 /* mark socket closed for read/write and wakeup! 
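         * (only the send side is closed here: the peer's SHUTDOWN means
         * this end must not send any new user data on the association)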
*/ 3722 socantsendmore(stcb->sctp_socket); 3723 } 3724 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3725 /* event not enabled */ 3726 return; 3727 } 3728 3729 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3730 if (m_notify == NULL) 3731 /* no space left */ 3732 return; 3733 sse = mtod(m_notify, struct sctp_shutdown_event *); 3734 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3735 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3736 sse->sse_flags = 0; 3737 sse->sse_length = sizeof(struct sctp_shutdown_event); 3738 sse->sse_assoc_id = sctp_get_associd(stcb); 3739 3740 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3741 SCTP_BUF_NEXT(m_notify) = NULL; 3742 3743 /* append to socket */ 3744 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3745 0, 0, stcb->asoc.context, 0, 0, 0, 3746 m_notify); 3747 if (control == NULL) { 3748 /* no memory */ 3749 sctp_m_freem(m_notify); 3750 return; 3751 } 3752 control->length = SCTP_BUF_LEN(m_notify); 3753 control->spec_flags = M_NOTIFICATION; 3754 /* not that we need this */ 3755 control->tail_mbuf = m_notify; 3756 sctp_add_to_readq(stcb->sctp_ep, stcb, 3757 control, 3758 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3759 } 3760 3761 static void 3762 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3763 int so_locked) 3764 { 3765 struct mbuf *m_notify; 3766 struct sctp_sender_dry_event *event; 3767 struct sctp_queued_to_read *control; 3768 3769 if ((stcb == NULL) || 3770 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3771 /* event not enabled */ 3772 return; 3773 } 3774 3775 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3776 if (m_notify == NULL) { 3777 /* no space left */ 3778 return; 3779 } 3780 SCTP_BUF_LEN(m_notify) = 0; 3781 event = mtod(m_notify, struct sctp_sender_dry_event *); 3782 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3783 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3784 event->sender_dry_flags = 0; 3785 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3786 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3787 3788 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3789 SCTP_BUF_NEXT(m_notify) = NULL; 3790 3791 /* append to socket */ 3792 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3793 0, 0, stcb->asoc.context, 0, 0, 0, 3794 m_notify); 3795 if (control == NULL) { 3796 /* no memory */ 3797 sctp_m_freem(m_notify); 3798 return; 3799 } 3800 control->length = SCTP_BUF_LEN(m_notify); 3801 control->spec_flags = M_NOTIFICATION; 3802 /* not that we need this */ 3803 control->tail_mbuf = m_notify; 3804 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3805 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3806 } 3807 3808 void 3809 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3810 { 3811 struct mbuf *m_notify; 3812 struct sctp_queued_to_read *control; 3813 struct sctp_stream_change_event *stradd; 3814 3815 if ((stcb == NULL) || 3816 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3817 /* event not enabled */ 3818 return; 3819 } 3820 if ((stcb->asoc.peer_req_out) && flag) { 3821 /* Peer made the request, don't tell the local user */ 3822 stcb->asoc.peer_req_out = 0; 3823 return; 3824 } 3825 stcb->asoc.peer_req_out = 0; 3826 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3827 if (m_notify == NULL) 3828 /* no space left */ 3829 return; 3830 SCTP_BUF_LEN(m_notify) = 0; 3831 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3832 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3833 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3834 stradd->strchange_flags = flag; 3835 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3836 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3837 stradd->strchange_instrms = numberin; 3838 stradd->strchange_outstrms = numberout; 3839 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3840 SCTP_BUF_NEXT(m_notify) = NULL; 3841 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3842 /* no space */ 3843 sctp_m_freem(m_notify); 3844 return; 3845 } 3846 /* append to socket */ 3847 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3848 0, 0, stcb->asoc.context, 0, 0, 0, 3849 m_notify); 3850 if (control == NULL) { 3851 /* no memory */ 3852 sctp_m_freem(m_notify); 3853 return; 3854 } 3855 control->length = SCTP_BUF_LEN(m_notify); 3856 control->spec_flags = M_NOTIFICATION; 3857 /* not that we need this */ 3858 control->tail_mbuf = m_notify; 3859 sctp_add_to_readq(stcb->sctp_ep, stcb, 3860 control, 3861 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3862 } 3863 3864 void 3865 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3866 { 3867 struct mbuf *m_notify; 3868 struct sctp_queued_to_read *control; 3869 struct sctp_assoc_reset_event *strasoc; 3870 3871 if ((stcb == NULL) || 3872 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3873 /* event not enabled */ 3874 return; 3875 } 3876 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3877 if (m_notify == NULL) 3878 /* no space left */ 3879 return; 3880 SCTP_BUF_LEN(m_notify) = 0; 3881 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3882 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3883 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3884 strasoc->assocreset_flags = flag; 3885 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3886 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3887 strasoc->assocreset_local_tsn = sending_tsn; 3888 strasoc->assocreset_remote_tsn = recv_tsn; 3889 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3890 SCTP_BUF_NEXT(m_notify) = NULL; 3891 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3892 /* no space */ 3893 sctp_m_freem(m_notify); 3894 return; 3895 } 3896 /* append to socket */ 3897 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3898 0, 0, stcb->asoc.context, 0, 0, 0, 3899 m_notify); 3900 if (control == NULL) { 3901 /* no memory */ 3902 sctp_m_freem(m_notify); 3903 return; 3904 } 3905 control->length = SCTP_BUF_LEN(m_notify); 3906 control->spec_flags = M_NOTIFICATION; 3907 /* not that we need this */ 3908 control->tail_mbuf = m_notify; 3909 sctp_add_to_readq(stcb->sctp_ep, stcb, 3910 control, 3911 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3912 } 3913 3914 static void 3915 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3916 int number_entries, uint16_t *list, int flag) 3917 { 3918 struct mbuf *m_notify; 3919 struct sctp_queued_to_read 
*control; 3920 struct sctp_stream_reset_event *strreset; 3921 int len; 3922 3923 if ((stcb == NULL) || 3924 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3925 /* event not enabled */ 3926 return; 3927 } 3928 3929 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3930 if (m_notify == NULL) 3931 /* no space left */ 3932 return; 3933 SCTP_BUF_LEN(m_notify) = 0; 3934 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3935 if (len > M_TRAILINGSPACE(m_notify)) { 3936 /* never enough room */ 3937 sctp_m_freem(m_notify); 3938 return; 3939 } 3940 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3941 memset(strreset, 0, len); 3942 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3943 strreset->strreset_flags = flag; 3944 strreset->strreset_length = len; 3945 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3946 if (number_entries) { 3947 int i; 3948 3949 for (i = 0; i < number_entries; i++) { 3950 strreset->strreset_stream_list[i] = ntohs(list[i]); 3951 } 3952 } 3953 SCTP_BUF_LEN(m_notify) = len; 3954 SCTP_BUF_NEXT(m_notify) = NULL; 3955 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3956 /* no space */ 3957 sctp_m_freem(m_notify); 3958 return; 3959 } 3960 /* append to socket */ 3961 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3962 0, 0, stcb->asoc.context, 0, 0, 0, 3963 m_notify); 3964 if (control == NULL) { 3965 /* no memory */ 3966 sctp_m_freem(m_notify); 3967 return; 3968 } 3969 control->length = SCTP_BUF_LEN(m_notify); 3970 control->spec_flags = M_NOTIFICATION; 3971 /* not that we need this */ 3972 control->tail_mbuf = m_notify; 3973 sctp_add_to_readq(stcb->sctp_ep, stcb, 3974 control, 3975 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3976 } 3977 3978 static void 3979 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3980 { 3981 struct mbuf *m_notify; 3982 struct sctp_remote_error *sre; 3983 struct sctp_queued_to_read *control; 3984 unsigned int notif_len; 3985 uint16_t chunk_len; 3986 3987 if ((stcb == NULL) || 3988 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3989 return; 3990 } 3991 if (chunk != NULL) { 3992 chunk_len = ntohs(chunk->ch.chunk_length); 3993 /* 3994 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3995 * contiguous. 3996 */ 3997 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 3998 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 3999 } 4000 } else { 4001 chunk_len = 0; 4002 } 4003 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4004 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4005 if (m_notify == NULL) { 4006 /* Retry with smaller value. 
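         * Allocate just the sctp_remote_error header; the offending
         * chunk is then not appended to the notification.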
*/ 4007 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4008 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4009 if (m_notify == NULL) { 4010 return; 4011 } 4012 } 4013 SCTP_BUF_NEXT(m_notify) = NULL; 4014 sre = mtod(m_notify, struct sctp_remote_error *); 4015 memset(sre, 0, notif_len); 4016 sre->sre_type = SCTP_REMOTE_ERROR; 4017 sre->sre_flags = 0; 4018 sre->sre_length = sizeof(struct sctp_remote_error); 4019 sre->sre_error = error; 4020 sre->sre_assoc_id = sctp_get_associd(stcb); 4021 if (notif_len > sizeof(struct sctp_remote_error)) { 4022 memcpy(sre->sre_data, chunk, chunk_len); 4023 sre->sre_length += chunk_len; 4024 } 4025 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4026 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4027 0, 0, stcb->asoc.context, 0, 0, 0, 4028 m_notify); 4029 if (control != NULL) { 4030 control->length = SCTP_BUF_LEN(m_notify); 4031 control->spec_flags = M_NOTIFICATION; 4032 /* not that we need this */ 4033 control->tail_mbuf = m_notify; 4034 sctp_add_to_readq(stcb->sctp_ep, stcb, 4035 control, 4036 &stcb->sctp_socket->so_rcv, 1, 4037 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4038 } else { 4039 sctp_m_freem(m_notify); 4040 } 4041 } 4042 4043 void 4044 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4045 uint32_t error, void *data, int so_locked) 4046 { 4047 if ((stcb == NULL) || 4048 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4049 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4050 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4051 /* If the socket is gone we are out of here */ 4052 return; 4053 } 4054 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4055 return; 4056 } 4057 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4058 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4059 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4060 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4061 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4062 /* Don't report these in front states */ 4063 return; 4064 } 4065 } 4066 switch (notification) { 4067 case SCTP_NOTIFY_ASSOC_UP: 4068 if (stcb->asoc.assoc_up_sent == 0) { 4069 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 4070 stcb->asoc.assoc_up_sent = 1; 4071 } 4072 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4073 sctp_notify_adaptation_layer(stcb); 4074 } 4075 if (stcb->asoc.auth_supported == 0) { 4076 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4077 NULL, so_locked); 4078 } 4079 break; 4080 case SCTP_NOTIFY_ASSOC_DOWN: 4081 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 4082 break; 4083 case SCTP_NOTIFY_INTERFACE_DOWN: 4084 { 4085 struct sctp_nets *net; 4086 4087 net = (struct sctp_nets *)data; 4088 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4089 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4090 break; 4091 } 4092 case SCTP_NOTIFY_INTERFACE_UP: 4093 { 4094 struct sctp_nets *net; 4095 4096 net = (struct sctp_nets *)data; 4097 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4098 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4099 break; 4100 } 4101 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4102 { 4103 struct sctp_nets *net; 4104 4105 net = (struct sctp_nets *)data; 4106 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4107 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4108 break; 4109 } 4110 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4111 
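        /* the failed message never left its stream queue, so use the sp variant */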
sctp_notify_send_failed2(stcb, error, 4112 (struct sctp_stream_queue_pending *)data, so_locked); 4113 break; 4114 case SCTP_NOTIFY_SENT_DG_FAIL: 4115 sctp_notify_send_failed(stcb, 1, error, 4116 (struct sctp_tmit_chunk *)data, so_locked); 4117 break; 4118 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4119 sctp_notify_send_failed(stcb, 0, error, 4120 (struct sctp_tmit_chunk *)data, so_locked); 4121 break; 4122 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4123 { 4124 uint32_t val; 4125 4126 val = *((uint32_t *)data); 4127 4128 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4129 break; 4130 } 4131 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4132 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4133 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4134 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked); 4135 } else { 4136 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked); 4137 } 4138 break; 4139 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4140 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4141 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4142 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked); 4143 } else { 4144 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked); 4145 } 4146 break; 4147 case SCTP_NOTIFY_ASSOC_RESTART: 4148 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked); 4149 if (stcb->asoc.auth_supported == 0) { 4150 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4151 NULL, so_locked); 4152 } 4153 break; 4154 case SCTP_NOTIFY_STR_RESET_SEND: 4155 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4156 break; 4157 case SCTP_NOTIFY_STR_RESET_RECV: 4158 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4159 break; 4160 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4161 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4162 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4163 break; 4164 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4165 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4166 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4167 break; 4168 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4169 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4170 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4171 break; 4172 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4173 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4174 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4175 break; 4176 case SCTP_NOTIFY_ASCONF_ADD_IP: 4177 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4178 error, so_locked); 4179 break; 4180 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4181 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4182 error, so_locked); 4183 break; 4184 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4185 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4186 error, so_locked); 4187 break; 4188 case SCTP_NOTIFY_PEER_SHUTDOWN: 4189 sctp_notify_shutdown_event(stcb); 4190 break; 4191 case SCTP_NOTIFY_AUTH_NEW_KEY: 4192 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4193 (uint16_t)(uintptr_t)data, 4194 so_locked); 4195 break; 4196 case SCTP_NOTIFY_AUTH_FREE_KEY: 4197 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4198 (uint16_t)(uintptr_t)data, 4199 so_locked); 4200 break; 4201 case SCTP_NOTIFY_NO_PEER_AUTH: 4202 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4203 
(uint16_t)(uintptr_t)data, 4204 so_locked); 4205 break; 4206 case SCTP_NOTIFY_SENDER_DRY: 4207 sctp_notify_sender_dry_event(stcb, so_locked); 4208 break; 4209 case SCTP_NOTIFY_REMOTE_ERROR: 4210 sctp_notify_remote_error(stcb, error, data); 4211 break; 4212 default: 4213 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4214 __func__, notification, notification); 4215 break; 4216 } /* end switch */ 4217 } 4218 4219 void 4220 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4221 { 4222 struct sctp_association *asoc; 4223 struct sctp_stream_out *outs; 4224 struct sctp_tmit_chunk *chk, *nchk; 4225 struct sctp_stream_queue_pending *sp, *nsp; 4226 int i; 4227 4228 if (stcb == NULL) { 4229 return; 4230 } 4231 asoc = &stcb->asoc; 4232 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4233 /* already being freed */ 4234 return; 4235 } 4236 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4237 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4238 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4239 return; 4240 } 4241 /* now through all the gunk freeing chunks */ 4242 /* sent queue SHOULD be empty */ 4243 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4244 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4245 asoc->sent_queue_cnt--; 4246 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4247 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4248 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4249 #ifdef INVARIANTS 4250 } else { 4251 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4252 #endif 4253 } 4254 } 4255 if (chk->data != NULL) { 4256 sctp_free_bufspace(stcb, asoc, chk, 1); 4257 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4258 error, chk, so_locked); 4259 if (chk->data) { 4260 sctp_m_freem(chk->data); 4261 chk->data = NULL; 4262 } 4263 } 4264 sctp_free_a_chunk(stcb, chk, so_locked); 4265 /* sa_ignore FREED_MEMORY */ 4266 } 4267 /* pending send queue SHOULD be empty */ 4268 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4269 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4270 asoc->send_queue_cnt--; 4271 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4272 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4273 #ifdef INVARIANTS 4274 } else { 4275 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4276 #endif 4277 } 4278 if (chk->data != NULL) { 4279 sctp_free_bufspace(stcb, asoc, chk, 1); 4280 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4281 error, chk, so_locked); 4282 if (chk->data) { 4283 sctp_m_freem(chk->data); 4284 chk->data = NULL; 4285 } 4286 } 4287 sctp_free_a_chunk(stcb, chk, so_locked); 4288 /* sa_ignore FREED_MEMORY */ 4289 } 4290 for (i = 0; i < asoc->streamoutcnt; i++) { 4291 /* For each stream */ 4292 outs = &asoc->strmout[i]; 4293 /* clean up any sends there */ 4294 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4295 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4296 TAILQ_REMOVE(&outs->outqueue, sp, next); 4297 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1); 4298 sctp_free_spbufspace(stcb, asoc, sp); 4299 if (sp->data) { 4300 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4301 error, (void *)sp, so_locked); 4302 if (sp->data) { 4303 sctp_m_freem(sp->data); 4304 sp->data = NULL; 4305 sp->tail_mbuf = NULL; 4306 sp->length = 0; 4307 } 4308 } 4309 if (sp->net) { 4310 sctp_free_remote_addr(sp->net); 4311 sp->net = NULL; 4312 } 4313 /* Free the chunk */ 4314 
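            /* (sp is the stream queue entry being released) */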
sctp_free_a_strmoq(stcb, sp, so_locked); 4315 /* sa_ignore FREED_MEMORY */ 4316 } 4317 } 4318 } 4319 4320 void 4321 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error, 4322 struct sctp_abort_chunk *abort, int so_locked) 4323 { 4324 if (stcb == NULL) { 4325 return; 4326 } 4327 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4328 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4329 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4330 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 4331 } 4332 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4333 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4334 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4335 return; 4336 } 4337 SCTP_TCB_SEND_LOCK(stcb); 4338 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4339 /* Tell them we lost the asoc */ 4340 sctp_report_all_outbound(stcb, error, so_locked); 4341 SCTP_TCB_SEND_UNLOCK(stcb); 4342 if (from_peer) { 4343 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4344 } else { 4345 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4346 } 4347 } 4348 4349 void 4350 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4351 struct mbuf *m, int iphlen, 4352 struct sockaddr *src, struct sockaddr *dst, 4353 struct sctphdr *sh, struct mbuf *op_err, 4354 uint8_t mflowtype, uint32_t mflowid, 4355 uint32_t vrf_id, uint16_t port) 4356 { 4357 uint32_t vtag; 4358 4359 vtag = 0; 4360 if (stcb != NULL) { 4361 vtag = stcb->asoc.peer_vtag; 4362 vrf_id = stcb->asoc.vrf_id; 4363 } 4364 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4365 mflowtype, mflowid, inp->fibnum, 4366 vrf_id, port); 4367 if (stcb != NULL) { 4368 /* We have a TCB to abort, send notification too */ 4369 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 4370 /* Ok, now lets free it */ 4371 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4372 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4373 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4374 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4375 } 4376 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4377 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4378 } 4379 } 4380 #ifdef SCTP_ASOCLOG_OF_TSNS 4381 void 4382 sctp_print_out_track_log(struct sctp_tcb *stcb) 4383 { 4384 #ifdef NOSIY_PRINTS 4385 int i; 4386 4387 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4388 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4389 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4390 SCTP_PRINTF("None rcvd\n"); 4391 goto none_in; 4392 } 4393 if (stcb->asoc.tsn_in_wrapped) { 4394 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4395 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4396 stcb->asoc.in_tsnlog[i].tsn, 4397 stcb->asoc.in_tsnlog[i].strm, 4398 stcb->asoc.in_tsnlog[i].seq, 4399 stcb->asoc.in_tsnlog[i].flgs, 4400 stcb->asoc.in_tsnlog[i].sz); 4401 } 4402 } 4403 if (stcb->asoc.tsn_in_at) { 4404 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4405 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4406 stcb->asoc.in_tsnlog[i].tsn, 4407 stcb->asoc.in_tsnlog[i].strm, 4408 stcb->asoc.in_tsnlog[i].seq, 4409 stcb->asoc.in_tsnlog[i].flgs, 4410 stcb->asoc.in_tsnlog[i].sz); 4411 } 4412 } 4413 none_in: 4414 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4415 if ((stcb->asoc.tsn_out_at == 0) && 4416 (stcb->asoc.tsn_out_wrapped == 0)) { 4417 SCTP_PRINTF("None sent\n"); 4418 } 4419 if 
(stcb->asoc.tsn_out_wrapped) { 4420 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4421 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4422 stcb->asoc.out_tsnlog[i].tsn, 4423 stcb->asoc.out_tsnlog[i].strm, 4424 stcb->asoc.out_tsnlog[i].seq, 4425 stcb->asoc.out_tsnlog[i].flgs, 4426 stcb->asoc.out_tsnlog[i].sz); 4427 } 4428 } 4429 if (stcb->asoc.tsn_out_at) { 4430 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4431 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4432 stcb->asoc.out_tsnlog[i].tsn, 4433 stcb->asoc.out_tsnlog[i].strm, 4434 stcb->asoc.out_tsnlog[i].seq, 4435 stcb->asoc.out_tsnlog[i].flgs, 4436 stcb->asoc.out_tsnlog[i].sz); 4437 } 4438 } 4439 #endif 4440 } 4441 #endif 4442 4443 void 4444 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4445 struct mbuf *op_err, 4446 int so_locked) 4447 { 4448 4449 if (stcb == NULL) { 4450 /* Got to have a TCB */ 4451 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4452 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4453 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4454 SCTP_CALLED_DIRECTLY_NOCMPSET); 4455 } 4456 } 4457 return; 4458 } 4459 /* notify the peer */ 4460 sctp_send_abort_tcb(stcb, op_err, so_locked); 4461 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4462 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4463 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4464 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4465 } 4466 /* notify the ulp */ 4467 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4468 sctp_abort_notification(stcb, 0, 0, NULL, so_locked); 4469 } 4470 /* now free the asoc */ 4471 #ifdef SCTP_ASOCLOG_OF_TSNS 4472 sctp_print_out_track_log(stcb); 4473 #endif 4474 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4475 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4476 } 4477 4478 void 4479 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4480 struct sockaddr *src, struct sockaddr *dst, 4481 struct sctphdr *sh, struct sctp_inpcb *inp, 4482 struct mbuf *cause, 4483 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4484 uint32_t vrf_id, uint16_t port) 4485 { 4486 struct sctp_chunkhdr *ch, chunk_buf; 4487 unsigned int chk_length; 4488 int contains_init_chunk; 4489 4490 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4491 /* Generate a TO address for future reference */ 4492 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4493 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4494 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4495 SCTP_CALLED_DIRECTLY_NOCMPSET); 4496 } 4497 } 4498 contains_init_chunk = 0; 4499 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4500 sizeof(*ch), (uint8_t *)&chunk_buf); 4501 while (ch != NULL) { 4502 chk_length = ntohs(ch->chunk_length); 4503 if (chk_length < sizeof(*ch)) { 4504 /* break to abort land */ 4505 break; 4506 } 4507 switch (ch->chunk_type) { 4508 case SCTP_INIT: 4509 contains_init_chunk = 1; 4510 break; 4511 case SCTP_PACKET_DROPPED: 4512 /* we don't respond to pkt-dropped */ 4513 return; 4514 case SCTP_ABORT_ASSOCIATION: 4515 /* we don't respond with an ABORT to an ABORT */ 4516 return; 4517 case SCTP_SHUTDOWN_COMPLETE: 4518 /* 4519 * we ignore it since we are not waiting for it and 4520 * peer is gone 4521 */ 4522 return; 4523 case SCTP_SHUTDOWN_ACK: 4524 sctp_send_shutdown_complete2(src, dst, sh, 4525 mflowtype, mflowid, fibnum, 4526 vrf_id, port); 4527 return; 4528 default: 4529 break; 4530 } 4531 offset += SCTP_SIZE32(chk_length); 4532 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4533 sizeof(*ch), (uint8_t 
*)&chunk_buf); 4534 } 4535 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4536 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4537 (contains_init_chunk == 0))) { 4538 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4539 mflowtype, mflowid, fibnum, 4540 vrf_id, port); 4541 } 4542 } 4543 4544 /* 4545 * check the inbound datagram to make sure there is not an abort inside it, 4546 * if there is return 1, else return 0. 4547 */ 4548 int 4549 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill) 4550 { 4551 struct sctp_chunkhdr *ch; 4552 struct sctp_init_chunk *init_chk, chunk_buf; 4553 int offset; 4554 unsigned int chk_length; 4555 4556 offset = iphlen + sizeof(struct sctphdr); 4557 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4558 (uint8_t *)&chunk_buf); 4559 while (ch != NULL) { 4560 chk_length = ntohs(ch->chunk_length); 4561 if (chk_length < sizeof(*ch)) { 4562 /* packet is probably corrupt */ 4563 break; 4564 } 4565 /* we seem to be ok, is it an abort? */ 4566 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4567 /* yep, tell them */ 4568 return (1); 4569 } 4570 if (ch->chunk_type == SCTP_INITIATION) { 4571 /* need to update the Vtag */ 4572 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4573 offset, sizeof(*init_chk), (uint8_t *)&chunk_buf); 4574 if (init_chk != NULL) { 4575 *vtagfill = ntohl(init_chk->init.initiate_tag); 4576 } 4577 } 4578 /* Nope, move to the next chunk */ 4579 offset += SCTP_SIZE32(chk_length); 4580 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4581 sizeof(*ch), (uint8_t *)&chunk_buf); 4582 } 4583 return (0); 4584 } 4585 4586 /* 4587 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4588 * set (i.e. it's 0) so, create this function to compare link local scopes 4589 */ 4590 #ifdef INET6 4591 uint32_t 4592 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4593 { 4594 struct sockaddr_in6 a, b; 4595 4596 /* save copies */ 4597 a = *addr1; 4598 b = *addr2; 4599 4600 if (a.sin6_scope_id == 0) 4601 if (sa6_recoverscope(&a)) { 4602 /* can't get scope, so can't match */ 4603 return (0); 4604 } 4605 if (b.sin6_scope_id == 0) 4606 if (sa6_recoverscope(&b)) { 4607 /* can't get scope, so can't match */ 4608 return (0); 4609 } 4610 if (a.sin6_scope_id != b.sin6_scope_id) 4611 return (0); 4612 4613 return (1); 4614 } 4615 4616 /* 4617 * returns a sockaddr_in6 with embedded scope recovered and removed 4618 */ 4619 struct sockaddr_in6 * 4620 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4621 { 4622 /* check and strip embedded scope junk */ 4623 if (addr->sin6_family == AF_INET6) { 4624 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4625 if (addr->sin6_scope_id == 0) { 4626 *store = *addr; 4627 if (!sa6_recoverscope(store)) { 4628 /* use the recovered scope */ 4629 addr = store; 4630 } 4631 } else { 4632 /* else, return the original "to" addr */ 4633 in6_clearscope(&addr->sin6_addr); 4634 } 4635 } 4636 } 4637 return (addr); 4638 } 4639 #endif 4640 4641 /* 4642 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4643 * if same, 0 if not 4644 */ 4645 int 4646 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4647 { 4648 4649 /* must be valid */ 4650 if (sa1 == NULL || sa2 == NULL) 4651 return (0); 4652 4653 /* must be the same family */ 4654 if (sa1->sa_family != sa2->sa_family) 4655 return (0); 4656 4657 switch (sa1->sa_family) { 4658 #ifdef INET6 4659 case AF_INET6: 4660 { 4661 /* IPv6 addresses */ 4662 struct sockaddr_in6 *sin6_1, *sin6_2; 4663 4664 sin6_1 = (struct sockaddr_in6 *)sa1; 4665 sin6_2 = (struct sockaddr_in6 *)sa2; 4666 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4667 sin6_2)); 4668 } 4669 #endif 4670 #ifdef INET 4671 case AF_INET: 4672 { 4673 /* IPv4 addresses */ 4674 struct sockaddr_in *sin_1, *sin_2; 4675 4676 sin_1 = (struct sockaddr_in *)sa1; 4677 sin_2 = (struct sockaddr_in *)sa2; 4678 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4679 } 4680 #endif 4681 default: 4682 /* we don't do these... */ 4683 return (0); 4684 } 4685 } 4686 4687 void 4688 sctp_print_address(struct sockaddr *sa) 4689 { 4690 #ifdef INET6 4691 char ip6buf[INET6_ADDRSTRLEN]; 4692 #endif 4693 4694 switch (sa->sa_family) { 4695 #ifdef INET6 4696 case AF_INET6: 4697 { 4698 struct sockaddr_in6 *sin6; 4699 4700 sin6 = (struct sockaddr_in6 *)sa; 4701 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4702 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4703 ntohs(sin6->sin6_port), 4704 sin6->sin6_scope_id); 4705 break; 4706 } 4707 #endif 4708 #ifdef INET 4709 case AF_INET: 4710 { 4711 struct sockaddr_in *sin; 4712 unsigned char *p; 4713 4714 sin = (struct sockaddr_in *)sa; 4715 p = (unsigned char *)&sin->sin_addr; 4716 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4717 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4718 break; 4719 } 4720 #endif 4721 default: 4722 SCTP_PRINTF("?\n"); 4723 break; 4724 } 4725 } 4726 4727 void 4728 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4729 struct sctp_inpcb *new_inp, 4730 struct sctp_tcb *stcb, 4731 int waitflags) 4732 { 4733 /* 4734 * go through our old INP and pull off any control structures that 4735 * belong to stcb and move then to the new inp. 4736 */ 4737 struct socket *old_so, *new_so; 4738 struct sctp_queued_to_read *control, *nctl; 4739 struct sctp_readhead tmp_queue; 4740 struct mbuf *m; 4741 int error = 0; 4742 4743 old_so = old_inp->sctp_socket; 4744 new_so = new_inp->sctp_socket; 4745 TAILQ_INIT(&tmp_queue); 4746 error = sblock(&old_so->so_rcv, waitflags); 4747 if (error) { 4748 /* 4749 * Gak, can't get sblock, we have a problem. data will be 4750 * left stranded.. and we don't dare look at it since the 4751 * other thread may be reading something. Oh well, its a 4752 * screwed up app that does a peeloff OR a accept while 4753 * reading from the main socket... actually its only the 4754 * peeloff() case, since I think read will fail on a 4755 * listening socket.. 4756 */ 4757 return; 4758 } 4759 /* lock the socket buffers */ 4760 SCTP_INP_READ_LOCK(old_inp); 4761 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4762 /* Pull off all for out target stcb */ 4763 if (control->stcb == stcb) { 4764 /* remove it we want it */ 4765 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4766 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4767 m = control->data; 4768 while (m) { 4769 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4770 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4771 } 4772 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4773 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4774 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4775 } 4776 m = SCTP_BUF_NEXT(m); 4777 } 4778 } 4779 } 4780 SCTP_INP_READ_UNLOCK(old_inp); 4781 /* Remove the sb-lock on the old socket */ 4782 4783 sbunlock(&old_so->so_rcv); 4784 /* Now we move them over to the new socket buffer */ 4785 SCTP_INP_READ_LOCK(new_inp); 4786 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4787 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4788 m = control->data; 4789 while (m) { 4790 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4791 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4792 } 4793 sctp_sballoc(stcb, &new_so->so_rcv, m); 4794 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4795 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4796 } 4797 m = SCTP_BUF_NEXT(m); 4798 } 4799 } 4800 SCTP_INP_READ_UNLOCK(new_inp); 4801 } 4802 4803 void 4804 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4805 struct sctp_tcb *stcb, 4806 int so_locked 4807 SCTP_UNUSED 4808 ) 4809 { 4810 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4811 sctp_sorwakeup(inp, inp->sctp_socket); 4812 } 4813 } 4814 4815 void 4816 sctp_add_to_readq(struct sctp_inpcb *inp, 4817 struct sctp_tcb *stcb, 4818 struct sctp_queued_to_read *control, 4819 struct sockbuf *sb, 4820 int end, 4821 int inp_read_lock_held, 4822 int so_locked) 4823 { 4824 /* 4825 * Here we must place the control on the end of the socket read 4826 * queue AND increment sb_cc so that select will work properly on 4827 * read. 4828 */ 4829 struct mbuf *m, *prev = NULL; 4830 4831 if (inp == NULL) { 4832 /* Gak, TSNH!! */ 4833 #ifdef INVARIANTS 4834 panic("Gak, inp NULL on add_to_readq"); 4835 #endif 4836 return; 4837 } 4838 if (inp_read_lock_held == 0) 4839 SCTP_INP_READ_LOCK(inp); 4840 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4841 if (!control->on_strm_q) { 4842 sctp_free_remote_addr(control->whoFrom); 4843 if (control->data) { 4844 sctp_m_freem(control->data); 4845 control->data = NULL; 4846 } 4847 sctp_free_a_readq(stcb, control); 4848 } 4849 if (inp_read_lock_held == 0) 4850 SCTP_INP_READ_UNLOCK(inp); 4851 return; 4852 } 4853 if (!(control->spec_flags & M_NOTIFICATION)) { 4854 atomic_add_int(&inp->total_recvs, 1); 4855 if (!control->do_not_ref_stcb) { 4856 atomic_add_int(&stcb->total_recvs, 1); 4857 } 4858 } 4859 m = control->data; 4860 control->held_length = 0; 4861 control->length = 0; 4862 while (m) { 4863 if (SCTP_BUF_LEN(m) == 0) { 4864 /* Skip mbufs with NO length */ 4865 if (prev == NULL) { 4866 /* First one */ 4867 control->data = sctp_m_free(m); 4868 m = control->data; 4869 } else { 4870 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4871 m = SCTP_BUF_NEXT(prev); 4872 } 4873 if (m == NULL) { 4874 control->tail_mbuf = prev; 4875 } 4876 continue; 4877 } 4878 prev = m; 4879 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4880 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4881 } 4882 sctp_sballoc(stcb, sb, m); 4883 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4884 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 4885 } 4886 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4887 m = SCTP_BUF_NEXT(m); 4888 } 4889 if (prev != NULL) { 4890 control->tail_mbuf = prev; 4891 } else { 4892 /* Everything got collapsed out?? */ 4893 if (!control->on_strm_q) { 4894 sctp_free_remote_addr(control->whoFrom); 4895 sctp_free_a_readq(stcb, control); 4896 } 4897 if (inp_read_lock_held == 0) 4898 SCTP_INP_READ_UNLOCK(inp); 4899 return; 4900 } 4901 if (end) { 4902 control->end_added = 1; 4903 } 4904 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4905 control->on_read_q = 1; 4906 if (inp_read_lock_held == 0) 4907 SCTP_INP_READ_UNLOCK(inp); 4908 if (inp && inp->sctp_socket) { 4909 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4910 } 4911 } 4912 4913 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4914 *************ALTERNATE ROUTING CODE 4915 */ 4916 4917 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4918 *************ALTERNATE ROUTING CODE 4919 */ 4920 4921 struct mbuf * 4922 sctp_generate_cause(uint16_t code, char *info) 4923 { 4924 struct mbuf *m; 4925 struct sctp_gen_error_cause *cause; 4926 size_t info_len; 4927 uint16_t len; 4928 4929 if ((code == 0) || (info == NULL)) { 4930 return (NULL); 4931 } 4932 info_len = strlen(info); 4933 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4934 return (NULL); 4935 } 4936 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4937 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4938 if (m != NULL) { 4939 SCTP_BUF_LEN(m) = len; 4940 cause = mtod(m, struct sctp_gen_error_cause *); 4941 cause->code = htons(code); 4942 cause->length = htons(len); 4943 memcpy(cause->info, info, info_len); 4944 } 4945 return (m); 4946 } 4947 4948 struct mbuf * 4949 sctp_generate_no_user_data_cause(uint32_t tsn) 4950 { 4951 struct mbuf *m; 4952 struct sctp_error_no_user_data *no_user_data_cause; 4953 uint16_t len; 4954 4955 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 4956 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4957 if (m != NULL) { 4958 SCTP_BUF_LEN(m) = len; 4959 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 4960 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 4961 no_user_data_cause->cause.length = htons(len); 4962 no_user_data_cause->tsn = htonl(tsn); 4963 } 4964 return (m); 4965 } 4966 4967 #ifdef SCTP_MBCNT_LOGGING 4968 void 4969 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4970 struct sctp_tmit_chunk *tp1, int chk_cnt) 4971 { 4972 if (tp1->data == NULL) { 4973 return; 4974 } 4975 asoc->chunks_on_out_queue -= chk_cnt; 4976 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 4977 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4978 asoc->total_output_queue_size, 4979 tp1->book_size, 4980 0, 4981 tp1->mbcnt); 4982 } 4983 if (asoc->total_output_queue_size >= tp1->book_size) { 4984 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4985 } else { 4986 asoc->total_output_queue_size = 0; 4987 } 4988 4989 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4990 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4991 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 4992 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 4993 } else { 4994 stcb->sctp_socket->so_snd.sb_cc = 0; 4995 } 4996 } 4997 } 4998 4999 #endif 5000 5001 int 5002 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5003 uint8_t sent, int so_locked) 
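/*
 * Abandon a PR-SCTP message: every fragment belonging to the message that
 * tp1 is part of is marked SCTP_FORWARD_TSN_SKIP on the sent/send queues,
 * and any remainder still sitting on the stream output queue is discarded.
 * Returns the number of bytes released.
 */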
{
    struct sctp_stream_out *strq;
    struct sctp_tmit_chunk *chk = NULL, *tp2;
    struct sctp_stream_queue_pending *sp;
    uint32_t mid;
    uint16_t sid;
    uint8_t foundeom = 0;
    int ret_sz = 0;
    int notdone;
    int do_wakeup_routine = 0;

    sid = tp1->rec.data.sid;
    mid = tp1->rec.data.mid;
    if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
        stcb->asoc.abandoned_sent[0]++;
        stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
        stcb->asoc.strmout[sid].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
        stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
    } else {
        stcb->asoc.abandoned_unsent[0]++;
        stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
        stcb->asoc.strmout[sid].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
        stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
    }
    do {
        ret_sz += tp1->book_size;
        if (tp1->data != NULL) {
            if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                sctp_flight_size_decrease(tp1);
                sctp_total_flight_decrease(stcb, tp1);
            }
            sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
            stcb->asoc.peers_rwnd += tp1->send_size;
            stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
            if (sent) {
                sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
            } else {
                sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
            }
            if (tp1->data) {
                sctp_m_freem(tp1->data);
                tp1->data = NULL;
            }
            do_wakeup_routine = 1;
            if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
                stcb->asoc.sent_queue_cnt_removeable--;
            }
        }
        tp1->sent = SCTP_FORWARD_TSN_SKIP;
        if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
            SCTP_DATA_NOT_FRAG) {
            /* not fragmented, we are done */
            notdone = 0;
            foundeom = 1;
        } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
            /* end of frag, we are done */
            notdone = 0;
            foundeom = 1;
        } else {
            /*
             * It's a begin or middle piece, we must mark all of
             * it
             */
            notdone = 1;
            tp1 = TAILQ_NEXT(tp1, sctp_next);
        }
    } while (tp1 && notdone);
    if (foundeom == 0) {
        /*
         * The multi-part message was scattered across the send and
         * sent queue.
         */
        TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
            if ((tp1->rec.data.sid != sid) ||
                (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
                break;
            }
            /*
             * save to chk in case we have some on stream out
             * queue. If so and we have an un-transmitted one we
             * don't have to fudge the TSN.
             */
            chk = tp1;
            ret_sz += tp1->book_size;
            sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
            if (sent) {
                sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
            } else {
                sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
            }
            if (tp1->data) {
                sctp_m_freem(tp1->data);
                tp1->data = NULL;
            }
            /* No flight involved here; book the size to 0 */
            tp1->book_size = 0;
            if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
                foundeom = 1;
            }
            do_wakeup_routine = 1;
            tp1->sent = SCTP_FORWARD_TSN_SKIP;
            TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
            /*
             * on to the sent queue so we can wait for it to be
             * passed by.
5113 */ 5114 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5115 sctp_next); 5116 stcb->asoc.send_queue_cnt--; 5117 stcb->asoc.sent_queue_cnt++; 5118 } 5119 } 5120 if (foundeom == 0) { 5121 /* 5122 * Still no eom found. That means there is stuff left on the 5123 * stream out queue.. yuck. 5124 */ 5125 SCTP_TCB_SEND_LOCK(stcb); 5126 strq = &stcb->asoc.strmout[sid]; 5127 sp = TAILQ_FIRST(&strq->outqueue); 5128 if (sp != NULL) { 5129 sp->discard_rest = 1; 5130 /* 5131 * We may need to put a chunk on the queue that 5132 * holds the TSN that would have been sent with the 5133 * LAST bit. 5134 */ 5135 if (chk == NULL) { 5136 /* Yep, we have to */ 5137 sctp_alloc_a_chunk(stcb, chk); 5138 if (chk == NULL) { 5139 /* 5140 * we are hosed. All we can do is 5141 * nothing.. which will cause an 5142 * abort if the peer is paying 5143 * attention. 5144 */ 5145 goto oh_well; 5146 } 5147 memset(chk, 0, sizeof(*chk)); 5148 chk->rec.data.rcv_flags = 0; 5149 chk->sent = SCTP_FORWARD_TSN_SKIP; 5150 chk->asoc = &stcb->asoc; 5151 if (stcb->asoc.idata_supported == 0) { 5152 if (sp->sinfo_flags & SCTP_UNORDERED) { 5153 chk->rec.data.mid = 0; 5154 } else { 5155 chk->rec.data.mid = strq->next_mid_ordered; 5156 } 5157 } else { 5158 if (sp->sinfo_flags & SCTP_UNORDERED) { 5159 chk->rec.data.mid = strq->next_mid_unordered; 5160 } else { 5161 chk->rec.data.mid = strq->next_mid_ordered; 5162 } 5163 } 5164 chk->rec.data.sid = sp->sid; 5165 chk->rec.data.ppid = sp->ppid; 5166 chk->rec.data.context = sp->context; 5167 chk->flags = sp->act_flags; 5168 chk->whoTo = NULL; 5169 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5170 strq->chunks_on_queues++; 5171 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5172 stcb->asoc.sent_queue_cnt++; 5173 stcb->asoc.pr_sctp_cnt++; 5174 } 5175 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5176 if (sp->sinfo_flags & SCTP_UNORDERED) { 5177 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5178 } 5179 if (stcb->asoc.idata_supported == 0) { 5180 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5181 strq->next_mid_ordered++; 5182 } 5183 } else { 5184 if (sp->sinfo_flags & SCTP_UNORDERED) { 5185 strq->next_mid_unordered++; 5186 } else { 5187 strq->next_mid_ordered++; 5188 } 5189 } 5190 oh_well: 5191 if (sp->data) { 5192 /* 5193 * Pull any data to free up the SB and allow 5194 * sender to "add more" while we will throw 5195 * away :-) 5196 */ 5197 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5198 ret_sz += sp->length; 5199 do_wakeup_routine = 1; 5200 sp->some_taken = 1; 5201 sctp_m_freem(sp->data); 5202 sp->data = NULL; 5203 sp->tail_mbuf = NULL; 5204 sp->length = 0; 5205 } 5206 } 5207 SCTP_TCB_SEND_UNLOCK(stcb); 5208 } 5209 if (do_wakeup_routine) { 5210 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5211 } 5212 return (ret_sz); 5213 } 5214 5215 /* 5216 * checks to see if the given address, sa, is one that is currently known by 5217 * the kernel note: can't distinguish the same address on multiple interfaces 5218 * and doesn't handle multiple addresses with different zone/scope id's note: 5219 * ifa_ifwithaddr() compares the entire sockaddr struct 5220 */ 5221 struct sctp_ifa * 5222 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5223 int holds_lock) 5224 { 5225 struct sctp_laddr *laddr; 5226 5227 if (holds_lock == 0) { 5228 SCTP_INP_RLOCK(inp); 5229 } 5230 5231 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5232 if (laddr->ifa == NULL) 5233 continue; 5234 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5235 continue; 5236 
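        /* same family; now compare the actual address bits */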
#ifdef INET 5237 if (addr->sa_family == AF_INET) { 5238 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5239 laddr->ifa->address.sin.sin_addr.s_addr) { 5240 /* found him. */ 5241 break; 5242 } 5243 } 5244 #endif 5245 #ifdef INET6 5246 if (addr->sa_family == AF_INET6) { 5247 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5248 &laddr->ifa->address.sin6)) { 5249 /* found him. */ 5250 break; 5251 } 5252 } 5253 #endif 5254 } 5255 if (holds_lock == 0) { 5256 SCTP_INP_RUNLOCK(inp); 5257 } 5258 if (laddr != NULL) { 5259 return (laddr->ifa); 5260 } else { 5261 return (NULL); 5262 } 5263 } 5264 5265 uint32_t 5266 sctp_get_ifa_hash_val(struct sockaddr *addr) 5267 { 5268 switch (addr->sa_family) { 5269 #ifdef INET 5270 case AF_INET: 5271 { 5272 struct sockaddr_in *sin; 5273 5274 sin = (struct sockaddr_in *)addr; 5275 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5276 } 5277 #endif 5278 #ifdef INET6 5279 case AF_INET6: 5280 { 5281 struct sockaddr_in6 *sin6; 5282 uint32_t hash_of_addr; 5283 5284 sin6 = (struct sockaddr_in6 *)addr; 5285 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5286 sin6->sin6_addr.s6_addr32[1] + 5287 sin6->sin6_addr.s6_addr32[2] + 5288 sin6->sin6_addr.s6_addr32[3]); 5289 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5290 return (hash_of_addr); 5291 } 5292 #endif 5293 default: 5294 break; 5295 } 5296 return (0); 5297 } 5298 5299 struct sctp_ifa * 5300 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5301 { 5302 struct sctp_ifa *sctp_ifap; 5303 struct sctp_vrf *vrf; 5304 struct sctp_ifalist *hash_head; 5305 uint32_t hash_of_addr; 5306 5307 if (holds_lock == 0) { 5308 SCTP_IPI_ADDR_RLOCK(); 5309 } else { 5310 SCTP_IPI_ADDR_LOCK_ASSERT(); 5311 } 5312 5313 vrf = sctp_find_vrf(vrf_id); 5314 if (vrf == NULL) { 5315 if (holds_lock == 0) 5316 SCTP_IPI_ADDR_RUNLOCK(); 5317 return (NULL); 5318 } 5319 5320 hash_of_addr = sctp_get_ifa_hash_val(addr); 5321 5322 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5323 if (hash_head == NULL) { 5324 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5325 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5326 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5327 sctp_print_address(addr); 5328 SCTP_PRINTF("No such bucket for address\n"); 5329 if (holds_lock == 0) 5330 SCTP_IPI_ADDR_RUNLOCK(); 5331 5332 return (NULL); 5333 } 5334 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5335 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5336 continue; 5337 #ifdef INET 5338 if (addr->sa_family == AF_INET) { 5339 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5340 sctp_ifap->address.sin.sin_addr.s_addr) { 5341 /* found him. */ 5342 break; 5343 } 5344 } 5345 #endif 5346 #ifdef INET6 5347 if (addr->sa_family == AF_INET6) { 5348 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5349 &sctp_ifap->address.sin6)) { 5350 /* found him. */ 5351 break; 5352 } 5353 } 5354 #endif 5355 } 5356 if (holds_lock == 0) 5357 SCTP_IPI_ADDR_RUNLOCK(); 5358 return (sctp_ifap); 5359 } 5360 5361 static void 5362 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5363 uint32_t rwnd_req) 5364 { 5365 /* User pulled some data, do we need a rwnd update? 
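     * A window-update SACK is only worth sending once the computed rwnd
     * has grown by at least rwnd_req since the last report; otherwise we
     * just accumulate the freed byte count.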
     */
    struct epoch_tracker et;
    int r_unlocked = 0;
    uint32_t dif, rwnd;
    struct socket *so = NULL;

    if (stcb == NULL)
        return;

    atomic_add_int(&stcb->asoc.refcnt, 1);

    if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
        (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
        /* Pre-check: if the association is being freed, no update is needed */
        goto no_lock;
    }
    SCTP_INP_INCR_REF(stcb->sctp_ep);
    if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
        goto out;
    }
    so = stcb->sctp_socket;
    if (so == NULL) {
        goto out;
    }
    atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
    /* Have we freed enough to make a look worthwhile? */
    *freed_so_far = 0;
    /* Yep, it's worth a look and the lock overhead */

    /* Figure out what the rwnd would be */
    rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
    if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
        dif = rwnd - stcb->asoc.my_last_reported_rwnd;
    } else {
        dif = 0;
    }
    if (dif >= rwnd_req) {
        if (hold_rlock) {
            SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
            r_unlocked = 1;
        }
        if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
            /*
             * One last check before we send an update. There is
             * a race where the association may be about to be
             * freed; in that case send nothing.
             */
            goto out;
        }
        SCTP_TCB_LOCK(stcb);
        if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
            /* No reports here */
            SCTP_TCB_UNLOCK(stcb);
            goto out;
        }
        SCTP_STAT_INCR(sctps_wu_sacks_sent);
        NET_EPOCH_ENTER(et);
        sctp_send_sack(stcb, SCTP_SO_LOCKED);

        sctp_chunk_output(stcb->sctp_ep, stcb,
            SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
        /* make sure no timer is running */
        NET_EPOCH_EXIT(et);
        sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
        SCTP_TCB_UNLOCK(stcb);
    } else {
        /* Update how much we have pending */
        stcb->freed_by_sorcv_sincelast = dif;
    }
out:
    if (so && r_unlocked && hold_rlock) {
        SCTP_INP_READ_LOCK(stcb->sctp_ep);
    }

    SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
    atomic_add_int(&stcb->asoc.refcnt, -1);
    return;
}

int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
    /*
     * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
     * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
     * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5461 * On the way out we may send out any combination of: 5462 * MSG_NOTIFICATION MSG_EOR 5463 * 5464 */ 5465 struct sctp_inpcb *inp = NULL; 5466 ssize_t my_len = 0; 5467 ssize_t cp_len = 0; 5468 int error = 0; 5469 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5470 struct mbuf *m = NULL; 5471 struct sctp_tcb *stcb = NULL; 5472 int wakeup_read_socket = 0; 5473 int freecnt_applied = 0; 5474 int out_flags = 0, in_flags = 0; 5475 int block_allowed = 1; 5476 uint32_t freed_so_far = 0; 5477 ssize_t copied_so_far = 0; 5478 int in_eeor_mode = 0; 5479 int no_rcv_needed = 0; 5480 uint32_t rwnd_req = 0; 5481 int hold_sblock = 0; 5482 int hold_rlock = 0; 5483 ssize_t slen = 0; 5484 uint32_t held_length = 0; 5485 int sockbuf_lock = 0; 5486 5487 if (uio == NULL) { 5488 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5489 return (EINVAL); 5490 } 5491 5492 if (msg_flags) { 5493 in_flags = *msg_flags; 5494 if (in_flags & MSG_PEEK) 5495 SCTP_STAT_INCR(sctps_read_peeks); 5496 } else { 5497 in_flags = 0; 5498 } 5499 slen = uio->uio_resid; 5500 5501 /* Pull in and set up our int flags */ 5502 if (in_flags & MSG_OOB) { 5503 /* Out of band's NOT supported */ 5504 return (EOPNOTSUPP); 5505 } 5506 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5507 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5508 return (EINVAL); 5509 } 5510 if ((in_flags & (MSG_DONTWAIT 5511 | MSG_NBIO 5512 )) || 5513 SCTP_SO_IS_NBIO(so)) { 5514 block_allowed = 0; 5515 } 5516 /* setup the endpoint */ 5517 inp = (struct sctp_inpcb *)so->so_pcb; 5518 if (inp == NULL) { 5519 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5520 return (EFAULT); 5521 } 5522 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5523 /* Must be at least a MTU's worth */ 5524 if (rwnd_req < SCTP_MIN_RWND) 5525 rwnd_req = SCTP_MIN_RWND; 5526 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5527 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5528 sctp_misc_ints(SCTP_SORECV_ENTER, 5529 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5530 } 5531 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5532 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5533 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5534 } 5535 5536 error = sblock(&so->so_rcv, (block_allowed ? 
SBL_WAIT : 0)); 5537 if (error) { 5538 goto release_unlocked; 5539 } 5540 sockbuf_lock = 1; 5541 restart: 5542 5543 restart_nosblocks: 5544 if (hold_sblock == 0) { 5545 SOCKBUF_LOCK(&so->so_rcv); 5546 hold_sblock = 1; 5547 } 5548 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5549 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5550 goto out; 5551 } 5552 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5553 if (so->so_error) { 5554 error = so->so_error; 5555 if ((in_flags & MSG_PEEK) == 0) 5556 so->so_error = 0; 5557 goto out; 5558 } else { 5559 if (so->so_rcv.sb_cc == 0) { 5560 /* indicate EOF */ 5561 error = 0; 5562 goto out; 5563 } 5564 } 5565 } 5566 if (so->so_rcv.sb_cc <= held_length) { 5567 if (so->so_error) { 5568 error = so->so_error; 5569 if ((in_flags & MSG_PEEK) == 0) { 5570 so->so_error = 0; 5571 } 5572 goto out; 5573 } 5574 if ((so->so_rcv.sb_cc == 0) && 5575 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5576 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5577 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5578 /* 5579 * For active open side clear flags for 5580 * re-use passive open is blocked by 5581 * connect. 5582 */ 5583 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5584 /* 5585 * You were aborted, passive side 5586 * always hits here 5587 */ 5588 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5589 error = ECONNRESET; 5590 } 5591 so->so_state &= ~(SS_ISCONNECTING | 5592 SS_ISDISCONNECTING | 5593 SS_ISCONFIRMING | 5594 SS_ISCONNECTED); 5595 if (error == 0) { 5596 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5597 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5598 error = ENOTCONN; 5599 } 5600 } 5601 goto out; 5602 } 5603 } 5604 if (block_allowed) { 5605 error = sbwait(&so->so_rcv); 5606 if (error) { 5607 goto out; 5608 } 5609 held_length = 0; 5610 goto restart_nosblocks; 5611 } else { 5612 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5613 error = EWOULDBLOCK; 5614 goto out; 5615 } 5616 } 5617 if (hold_sblock == 1) { 5618 SOCKBUF_UNLOCK(&so->so_rcv); 5619 hold_sblock = 0; 5620 } 5621 /* we possibly have data we can read */ 5622 /* sa_ignore FREED_MEMORY */ 5623 control = TAILQ_FIRST(&inp->read_queue); 5624 if (control == NULL) { 5625 /* 5626 * This could be happening since the appender did the 5627 * increment but as not yet did the tailq insert onto the 5628 * read_queue 5629 */ 5630 if (hold_rlock == 0) { 5631 SCTP_INP_READ_LOCK(inp); 5632 } 5633 control = TAILQ_FIRST(&inp->read_queue); 5634 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5635 #ifdef INVARIANTS 5636 panic("Huh, its non zero and nothing on control?"); 5637 #endif 5638 so->so_rcv.sb_cc = 0; 5639 } 5640 SCTP_INP_READ_UNLOCK(inp); 5641 hold_rlock = 0; 5642 goto restart; 5643 } 5644 5645 if ((control->length == 0) && 5646 (control->do_not_ref_stcb)) { 5647 /* 5648 * Clean up code for freeing assoc that left behind a 5649 * pdapi.. maybe a peer in EEOR that just closed after 5650 * sending and never indicated a EOR. 5651 */ 5652 if (hold_rlock == 0) { 5653 hold_rlock = 1; 5654 SCTP_INP_READ_LOCK(inp); 5655 } 5656 control->held_length = 0; 5657 if (control->data) { 5658 /* Hmm there is data here .. 
fix */ 5659 struct mbuf *m_tmp; 5660 int cnt = 0; 5661 5662 m_tmp = control->data; 5663 while (m_tmp) { 5664 cnt += SCTP_BUF_LEN(m_tmp); 5665 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5666 control->tail_mbuf = m_tmp; 5667 control->end_added = 1; 5668 } 5669 m_tmp = SCTP_BUF_NEXT(m_tmp); 5670 } 5671 control->length = cnt; 5672 } else { 5673 /* remove it */ 5674 TAILQ_REMOVE(&inp->read_queue, control, next); 5675 /* Add back any hiddend data */ 5676 sctp_free_remote_addr(control->whoFrom); 5677 sctp_free_a_readq(stcb, control); 5678 } 5679 if (hold_rlock) { 5680 hold_rlock = 0; 5681 SCTP_INP_READ_UNLOCK(inp); 5682 } 5683 goto restart; 5684 } 5685 if ((control->length == 0) && 5686 (control->end_added == 1)) { 5687 /* 5688 * Do we also need to check for (control->pdapi_aborted == 5689 * 1)? 5690 */ 5691 if (hold_rlock == 0) { 5692 hold_rlock = 1; 5693 SCTP_INP_READ_LOCK(inp); 5694 } 5695 TAILQ_REMOVE(&inp->read_queue, control, next); 5696 if (control->data) { 5697 #ifdef INVARIANTS 5698 panic("control->data not null but control->length == 0"); 5699 #else 5700 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n"); 5701 sctp_m_freem(control->data); 5702 control->data = NULL; 5703 #endif 5704 } 5705 if (control->aux_data) { 5706 sctp_m_free(control->aux_data); 5707 control->aux_data = NULL; 5708 } 5709 #ifdef INVARIANTS 5710 if (control->on_strm_q) { 5711 panic("About to free ctl:%p so:%p and its in %d", 5712 control, so, control->on_strm_q); 5713 } 5714 #endif 5715 sctp_free_remote_addr(control->whoFrom); 5716 sctp_free_a_readq(stcb, control); 5717 if (hold_rlock) { 5718 hold_rlock = 0; 5719 SCTP_INP_READ_UNLOCK(inp); 5720 } 5721 goto restart; 5722 } 5723 if (control->length == 0) { 5724 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5725 (filling_sinfo)) { 5726 /* find a more suitable one then this */ 5727 ctl = TAILQ_NEXT(control, next); 5728 while (ctl) { 5729 if ((ctl->stcb != control->stcb) && (ctl->length) && 5730 (ctl->some_taken || 5731 (ctl->spec_flags & M_NOTIFICATION) || 5732 ((ctl->do_not_ref_stcb == 0) && 5733 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5734 ) { 5735 /*- 5736 * If we have a different TCB next, and there is data 5737 * present. If we have already taken some (pdapi), OR we can 5738 * ref the tcb and no delivery as started on this stream, we 5739 * take it. Note we allow a notification on a different 5740 * assoc to be delivered.. 5741 */ 5742 control = ctl; 5743 goto found_one; 5744 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5745 (ctl->length) && 5746 ((ctl->some_taken) || 5747 ((ctl->do_not_ref_stcb == 0) && 5748 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5749 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5750 /*- 5751 * If we have the same tcb, and there is data present, and we 5752 * have the strm interleave feature present. Then if we have 5753 * taken some (pdapi) or we can refer to tht tcb AND we have 5754 * not started a delivery for this stream, we can take it. 5755 * Note we do NOT allow a notificaiton on the same assoc to 5756 * be delivered. 5757 */ 5758 control = ctl; 5759 goto found_one; 5760 } 5761 ctl = TAILQ_NEXT(ctl, next); 5762 } 5763 } 5764 /* 5765 * if we reach here, not suitable replacement is available 5766 * <or> fragment interleave is NOT on. So stuff the sb_cc 5767 * into the our held count, and its time to sleep again. 
5768 */ 5769 held_length = so->so_rcv.sb_cc; 5770 control->held_length = so->so_rcv.sb_cc; 5771 goto restart; 5772 } 5773 /* Clear the held length since there is something to read */ 5774 control->held_length = 0; 5775 found_one: 5776 /* 5777 * If we reach here, control has a some data for us to read off. 5778 * Note that stcb COULD be NULL. 5779 */ 5780 if (hold_rlock == 0) { 5781 hold_rlock = 1; 5782 SCTP_INP_READ_LOCK(inp); 5783 } 5784 control->some_taken++; 5785 stcb = control->stcb; 5786 if (stcb) { 5787 if ((control->do_not_ref_stcb == 0) && 5788 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5789 if (freecnt_applied == 0) 5790 stcb = NULL; 5791 } else if (control->do_not_ref_stcb == 0) { 5792 /* you can't free it on me please */ 5793 /* 5794 * The lock on the socket buffer protects us so the 5795 * free code will stop. But since we used the 5796 * socketbuf lock and the sender uses the tcb_lock 5797 * to increment, we need to use the atomic add to 5798 * the refcnt 5799 */ 5800 if (freecnt_applied) { 5801 #ifdef INVARIANTS 5802 panic("refcnt already incremented"); 5803 #else 5804 SCTP_PRINTF("refcnt already incremented?\n"); 5805 #endif 5806 } else { 5807 atomic_add_int(&stcb->asoc.refcnt, 1); 5808 freecnt_applied = 1; 5809 } 5810 /* 5811 * Setup to remember how much we have not yet told 5812 * the peer our rwnd has opened up. Note we grab the 5813 * value from the tcb from last time. Note too that 5814 * sack sending clears this when a sack is sent, 5815 * which is fine. Once we hit the rwnd_req, we then 5816 * will go to the sctp_user_rcvd() that will not 5817 * lock until it KNOWs it MUST send a WUP-SACK. 5818 */ 5819 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5820 stcb->freed_by_sorcv_sincelast = 0; 5821 } 5822 } 5823 if (stcb && 5824 ((control->spec_flags & M_NOTIFICATION) == 0) && 5825 control->do_not_ref_stcb == 0) { 5826 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5827 } 5828 5829 /* First lets get off the sinfo and sockaddr info */ 5830 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5831 sinfo->sinfo_stream = control->sinfo_stream; 5832 sinfo->sinfo_ssn = (uint16_t)control->mid; 5833 sinfo->sinfo_flags = control->sinfo_flags; 5834 sinfo->sinfo_ppid = control->sinfo_ppid; 5835 sinfo->sinfo_context = control->sinfo_context; 5836 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5837 sinfo->sinfo_tsn = control->sinfo_tsn; 5838 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5839 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5840 nxt = TAILQ_NEXT(control, next); 5841 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5842 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5843 struct sctp_extrcvinfo *s_extra; 5844 5845 s_extra = (struct sctp_extrcvinfo *)sinfo; 5846 if ((nxt) && 5847 (nxt->length)) { 5848 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5849 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5850 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5851 } 5852 if (nxt->spec_flags & M_NOTIFICATION) { 5853 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5854 } 5855 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5856 s_extra->serinfo_next_length = nxt->length; 5857 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5858 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5859 if (nxt->tail_mbuf != NULL) { 5860 if (nxt->end_added) { 5861 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5862 } 5863 } 5864 } else { 5865 /* 5866 * we explicitly 0 this, since the memcpy 5867 * got some other things 
beyond the older 5868 * sinfo_ that is on the control's structure 5869 * :-D 5870 */ 5871 nxt = NULL; 5872 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5873 s_extra->serinfo_next_aid = 0; 5874 s_extra->serinfo_next_length = 0; 5875 s_extra->serinfo_next_ppid = 0; 5876 s_extra->serinfo_next_stream = 0; 5877 } 5878 } 5879 /* 5880 * update off the real current cum-ack, if we have an stcb. 5881 */ 5882 if ((control->do_not_ref_stcb == 0) && stcb) 5883 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5884 /* 5885 * mask off the high bits, we keep the actual chunk bits in 5886 * there. 5887 */ 5888 sinfo->sinfo_flags &= 0x00ff; 5889 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5890 sinfo->sinfo_flags |= SCTP_UNORDERED; 5891 } 5892 } 5893 #ifdef SCTP_ASOCLOG_OF_TSNS 5894 { 5895 int index, newindex; 5896 struct sctp_pcbtsn_rlog *entry; 5897 5898 do { 5899 index = inp->readlog_index; 5900 newindex = index + 1; 5901 if (newindex >= SCTP_READ_LOG_SIZE) { 5902 newindex = 0; 5903 } 5904 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5905 entry = &inp->readlog[index]; 5906 entry->vtag = control->sinfo_assoc_id; 5907 entry->strm = control->sinfo_stream; 5908 entry->seq = (uint16_t)control->mid; 5909 entry->sz = control->length; 5910 entry->flgs = control->sinfo_flags; 5911 } 5912 #endif 5913 if ((fromlen > 0) && (from != NULL)) { 5914 union sctp_sockstore store; 5915 size_t len; 5916 5917 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5918 #ifdef INET6 5919 case AF_INET6: 5920 len = sizeof(struct sockaddr_in6); 5921 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5922 store.sin6.sin6_port = control->port_from; 5923 break; 5924 #endif 5925 #ifdef INET 5926 case AF_INET: 5927 #ifdef INET6 5928 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5929 len = sizeof(struct sockaddr_in6); 5930 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5931 &store.sin6); 5932 store.sin6.sin6_port = control->port_from; 5933 } else { 5934 len = sizeof(struct sockaddr_in); 5935 store.sin = control->whoFrom->ro._l_addr.sin; 5936 store.sin.sin_port = control->port_from; 5937 } 5938 #else 5939 len = sizeof(struct sockaddr_in); 5940 store.sin = control->whoFrom->ro._l_addr.sin; 5941 store.sin.sin_port = control->port_from; 5942 #endif 5943 break; 5944 #endif 5945 default: 5946 len = 0; 5947 break; 5948 } 5949 memcpy(from, &store, min((size_t)fromlen, len)); 5950 #ifdef INET6 5951 { 5952 struct sockaddr_in6 lsa6, *from6; 5953 5954 from6 = (struct sockaddr_in6 *)from; 5955 sctp_recover_scope_mac(from6, (&lsa6)); 5956 } 5957 #endif 5958 } 5959 if (hold_rlock) { 5960 SCTP_INP_READ_UNLOCK(inp); 5961 hold_rlock = 0; 5962 } 5963 if (hold_sblock) { 5964 SOCKBUF_UNLOCK(&so->so_rcv); 5965 hold_sblock = 0; 5966 } 5967 /* now copy out what data we can */ 5968 if (mp == NULL) { 5969 /* copy out each mbuf in the chain up to length */ 5970 get_more_data: 5971 m = control->data; 5972 while (m) { 5973 /* Move out all we can */ 5974 cp_len = uio->uio_resid; 5975 my_len = SCTP_BUF_LEN(m); 5976 if (cp_len > my_len) { 5977 /* not enough in this buf */ 5978 cp_len = my_len; 5979 } 5980 if (hold_rlock) { 5981 SCTP_INP_READ_UNLOCK(inp); 5982 hold_rlock = 0; 5983 } 5984 if (cp_len > 0) 5985 error = uiomove(mtod(m, char *), (int)cp_len, uio); 5986 /* re-read */ 5987 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5988 goto release; 5989 } 5990 5991 if ((control->do_not_ref_stcb == 0) && stcb && 5992 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5993 no_rcv_needed = 1; 5994 } 5995 if 
(error) { 5996 /* error we are out of here */ 5997 goto release; 5998 } 5999 SCTP_INP_READ_LOCK(inp); 6000 hold_rlock = 1; 6001 if (cp_len == SCTP_BUF_LEN(m)) { 6002 if ((SCTP_BUF_NEXT(m) == NULL) && 6003 (control->end_added)) { 6004 out_flags |= MSG_EOR; 6005 if ((control->do_not_ref_stcb == 0) && 6006 (control->stcb != NULL) && 6007 ((control->spec_flags & M_NOTIFICATION) == 0)) 6008 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6009 } 6010 if (control->spec_flags & M_NOTIFICATION) { 6011 out_flags |= MSG_NOTIFICATION; 6012 } 6013 /* we ate up the mbuf */ 6014 if (in_flags & MSG_PEEK) { 6015 /* just looking */ 6016 m = SCTP_BUF_NEXT(m); 6017 copied_so_far += cp_len; 6018 } else { 6019 /* dispose of the mbuf */ 6020 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6021 sctp_sblog(&so->so_rcv, 6022 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6023 } 6024 sctp_sbfree(control, stcb, &so->so_rcv, m); 6025 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6026 sctp_sblog(&so->so_rcv, 6027 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6028 } 6029 copied_so_far += cp_len; 6030 freed_so_far += (uint32_t)cp_len; 6031 freed_so_far += MSIZE; 6032 atomic_subtract_int(&control->length, cp_len); 6033 control->data = sctp_m_free(m); 6034 m = control->data; 6035 /* 6036 * been through it all, must hold sb 6037 * lock ok to null tail 6038 */ 6039 if (control->data == NULL) { 6040 #ifdef INVARIANTS 6041 if ((control->end_added == 0) || 6042 (TAILQ_NEXT(control, next) == NULL)) { 6043 /* 6044 * If the end is not 6045 * added, OR the 6046 * next is NOT null 6047 * we MUST have the 6048 * lock. 6049 */ 6050 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6051 panic("Hmm we don't own the lock?"); 6052 } 6053 } 6054 #endif 6055 control->tail_mbuf = NULL; 6056 #ifdef INVARIANTS 6057 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6058 panic("end_added, nothing left and no MSG_EOR"); 6059 } 6060 #endif 6061 } 6062 } 6063 } else { 6064 /* Do we need to trim the mbuf? */ 6065 if (control->spec_flags & M_NOTIFICATION) { 6066 out_flags |= MSG_NOTIFICATION; 6067 } 6068 if ((in_flags & MSG_PEEK) == 0) { 6069 SCTP_BUF_RESV_UF(m, cp_len); 6070 SCTP_BUF_LEN(m) -= (int)cp_len; 6071 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6072 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6073 } 6074 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 6075 if ((control->do_not_ref_stcb == 0) && 6076 stcb) { 6077 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 6078 } 6079 copied_so_far += cp_len; 6080 freed_so_far += (uint32_t)cp_len; 6081 freed_so_far += MSIZE; 6082 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6083 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6084 SCTP_LOG_SBRESULT, 0); 6085 } 6086 atomic_subtract_int(&control->length, cp_len); 6087 } else { 6088 copied_so_far += cp_len; 6089 } 6090 } 6091 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6092 break; 6093 } 6094 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6095 (control->do_not_ref_stcb == 0) && 6096 (freed_so_far >= rwnd_req)) { 6097 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6098 } 6099 } /* end while(m) */ 6100 /* 6101 * At this point we have looked at it all and we either have 6102 * a MSG_EOR/or read all the user wants... <OR> 6103 * control->length == 0. 
6104 */ 6105 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6106 /* we are done with this control */ 6107 if (control->length == 0) { 6108 if (control->data) { 6109 #ifdef INVARIANTS 6110 panic("control->data not null at read eor?"); 6111 #else 6112 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n"); 6113 sctp_m_freem(control->data); 6114 control->data = NULL; 6115 #endif 6116 } 6117 done_with_control: 6118 if (hold_rlock == 0) { 6119 SCTP_INP_READ_LOCK(inp); 6120 hold_rlock = 1; 6121 } 6122 TAILQ_REMOVE(&inp->read_queue, control, next); 6123 /* Add back any hiddend data */ 6124 if (control->held_length) { 6125 held_length = 0; 6126 control->held_length = 0; 6127 wakeup_read_socket = 1; 6128 } 6129 if (control->aux_data) { 6130 sctp_m_free(control->aux_data); 6131 control->aux_data = NULL; 6132 } 6133 no_rcv_needed = control->do_not_ref_stcb; 6134 sctp_free_remote_addr(control->whoFrom); 6135 control->data = NULL; 6136 #ifdef INVARIANTS 6137 if (control->on_strm_q) { 6138 panic("About to free ctl:%p so:%p and its in %d", 6139 control, so, control->on_strm_q); 6140 } 6141 #endif 6142 sctp_free_a_readq(stcb, control); 6143 control = NULL; 6144 if ((freed_so_far >= rwnd_req) && 6145 (no_rcv_needed == 0)) 6146 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6147 6148 } else { 6149 /* 6150 * The user did not read all of this 6151 * message, turn off the returned MSG_EOR 6152 * since we are leaving more behind on the 6153 * control to read. 6154 */ 6155 #ifdef INVARIANTS 6156 if (control->end_added && 6157 (control->data == NULL) && 6158 (control->tail_mbuf == NULL)) { 6159 panic("Gak, control->length is corrupt?"); 6160 } 6161 #endif 6162 no_rcv_needed = control->do_not_ref_stcb; 6163 out_flags &= ~MSG_EOR; 6164 } 6165 } 6166 if (out_flags & MSG_EOR) { 6167 goto release; 6168 } 6169 if ((uio->uio_resid == 0) || 6170 ((in_eeor_mode) && 6171 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6172 goto release; 6173 } 6174 /* 6175 * If I hit here the receiver wants more and this message is 6176 * NOT done (pd-api). So two questions. Can we block? if not 6177 * we are done. Did the user NOT set MSG_WAITALL? 6178 */ 6179 if (block_allowed == 0) { 6180 goto release; 6181 } 6182 /* 6183 * We need to wait for more data a few things: - We don't 6184 * sbunlock() so we don't get someone else reading. - We 6185 * must be sure to account for the case where what is added 6186 * is NOT to our control when we wakeup. 6187 */ 6188 6189 /* 6190 * Do we need to tell the transport a rwnd update might be 6191 * needed before we go to sleep? 
6192 */ 6193 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6194 ((freed_so_far >= rwnd_req) && 6195 (control->do_not_ref_stcb == 0) && 6196 (no_rcv_needed == 0))) { 6197 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6198 } 6199 wait_some_more: 6200 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6201 goto release; 6202 } 6203 6204 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6205 goto release; 6206 6207 if (hold_rlock == 1) { 6208 SCTP_INP_READ_UNLOCK(inp); 6209 hold_rlock = 0; 6210 } 6211 if (hold_sblock == 0) { 6212 SOCKBUF_LOCK(&so->so_rcv); 6213 hold_sblock = 1; 6214 } 6215 if ((copied_so_far) && (control->length == 0) && 6216 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6217 goto release; 6218 } 6219 if (so->so_rcv.sb_cc <= control->held_length) { 6220 error = sbwait(&so->so_rcv); 6221 if (error) { 6222 goto release; 6223 } 6224 control->held_length = 0; 6225 } 6226 if (hold_sblock) { 6227 SOCKBUF_UNLOCK(&so->so_rcv); 6228 hold_sblock = 0; 6229 } 6230 if (control->length == 0) { 6231 /* still nothing here */ 6232 if (control->end_added == 1) { 6233 /* he aborted, or is done i.e.did a shutdown */ 6234 out_flags |= MSG_EOR; 6235 if (control->pdapi_aborted) { 6236 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6237 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6238 6239 out_flags |= MSG_TRUNC; 6240 } else { 6241 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6242 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6243 } 6244 goto done_with_control; 6245 } 6246 if (so->so_rcv.sb_cc > held_length) { 6247 control->held_length = so->so_rcv.sb_cc; 6248 held_length = 0; 6249 } 6250 goto wait_some_more; 6251 } else if (control->data == NULL) { 6252 /* 6253 * we must re-sync since data is probably being 6254 * added 6255 */ 6256 SCTP_INP_READ_LOCK(inp); 6257 if ((control->length > 0) && (control->data == NULL)) { 6258 /* 6259 * big trouble.. we have the lock and its 6260 * corrupt? 6261 */ 6262 #ifdef INVARIANTS 6263 panic("Impossible data==NULL length !=0"); 6264 #endif 6265 out_flags |= MSG_EOR; 6266 out_flags |= MSG_TRUNC; 6267 control->length = 0; 6268 SCTP_INP_READ_UNLOCK(inp); 6269 goto done_with_control; 6270 } 6271 SCTP_INP_READ_UNLOCK(inp); 6272 /* We will fall around to get more data */ 6273 } 6274 goto get_more_data; 6275 } else { 6276 /*- 6277 * Give caller back the mbuf chain, 6278 * store in uio_resid the length 6279 */ 6280 wakeup_read_socket = 0; 6281 if ((control->end_added == 0) || 6282 (TAILQ_NEXT(control, next) == NULL)) { 6283 /* Need to get rlock */ 6284 if (hold_rlock == 0) { 6285 SCTP_INP_READ_LOCK(inp); 6286 hold_rlock = 1; 6287 } 6288 } 6289 if (control->end_added) { 6290 out_flags |= MSG_EOR; 6291 if ((control->do_not_ref_stcb == 0) && 6292 (control->stcb != NULL) && 6293 ((control->spec_flags & M_NOTIFICATION) == 0)) 6294 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6295 } 6296 if (control->spec_flags & M_NOTIFICATION) { 6297 out_flags |= MSG_NOTIFICATION; 6298 } 6299 uio->uio_resid = control->length; 6300 *mp = control->data; 6301 m = control->data; 6302 while (m) { 6303 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6304 sctp_sblog(&so->so_rcv, 6305 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6306 } 6307 sctp_sbfree(control, stcb, &so->so_rcv, m); 6308 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6309 freed_so_far += MSIZE; 6310 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6311 sctp_sblog(&so->so_rcv, 6312 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6313 } 6314 m = SCTP_BUF_NEXT(m); 6315 } 6316 control->data = control->tail_mbuf = NULL; 6317 control->length = 0; 6318 if (out_flags & MSG_EOR) { 6319 /* Done with this control */ 6320 goto done_with_control; 6321 } 6322 } 6323 release: 6324 if (hold_rlock == 1) { 6325 SCTP_INP_READ_UNLOCK(inp); 6326 hold_rlock = 0; 6327 } 6328 if (hold_sblock == 1) { 6329 SOCKBUF_UNLOCK(&so->so_rcv); 6330 hold_sblock = 0; 6331 } 6332 6333 sbunlock(&so->so_rcv); 6334 sockbuf_lock = 0; 6335 6336 release_unlocked: 6337 if (hold_sblock) { 6338 SOCKBUF_UNLOCK(&so->so_rcv); 6339 hold_sblock = 0; 6340 } 6341 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6342 if ((freed_so_far >= rwnd_req) && 6343 (control && (control->do_not_ref_stcb == 0)) && 6344 (no_rcv_needed == 0)) 6345 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6346 } 6347 out: 6348 if (msg_flags) { 6349 *msg_flags = out_flags; 6350 } 6351 if (((out_flags & MSG_EOR) == 0) && 6352 ((in_flags & MSG_PEEK) == 0) && 6353 (sinfo) && 6354 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6355 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6356 struct sctp_extrcvinfo *s_extra; 6357 6358 s_extra = (struct sctp_extrcvinfo *)sinfo; 6359 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6360 } 6361 if (hold_rlock == 1) { 6362 SCTP_INP_READ_UNLOCK(inp); 6363 } 6364 if (hold_sblock) { 6365 SOCKBUF_UNLOCK(&so->so_rcv); 6366 } 6367 if (sockbuf_lock) { 6368 sbunlock(&so->so_rcv); 6369 } 6370 6371 if (freecnt_applied) { 6372 /* 6373 * The lock on the socket buffer protects us so the free 6374 * code will stop. But since we used the socketbuf lock and 6375 * the sender uses the tcb_lock to increment, we need to use 6376 * the atomic add to the refcnt. 6377 */ 6378 if (stcb == NULL) { 6379 #ifdef INVARIANTS 6380 panic("stcb for refcnt has gone NULL?"); 6381 goto stage_left; 6382 #else 6383 goto stage_left; 6384 #endif 6385 } 6386 /* Save the value back for next time */ 6387 stcb->freed_by_sorcv_sincelast = freed_so_far; 6388 atomic_add_int(&stcb->asoc.refcnt, -1); 6389 } 6390 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6391 if (stcb) { 6392 sctp_misc_ints(SCTP_SORECV_DONE, 6393 freed_so_far, 6394 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6395 stcb->asoc.my_rwnd, 6396 so->so_rcv.sb_cc); 6397 } else { 6398 sctp_misc_ints(SCTP_SORECV_DONE, 6399 freed_so_far, 6400 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6401 0, 6402 so->so_rcv.sb_cc); 6403 } 6404 } 6405 stage_left: 6406 if (wakeup_read_socket) { 6407 sctp_sorwakeup(inp, so); 6408 } 6409 return (error); 6410 } 6411 6412 #ifdef SCTP_MBUF_LOGGING 6413 struct mbuf * 6414 sctp_m_free(struct mbuf *m) 6415 { 6416 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6417 sctp_log_mb(m, SCTP_MBUF_IFREE); 6418 } 6419 return (m_free(m)); 6420 } 6421 6422 void 6423 sctp_m_freem(struct mbuf *mb) 6424 { 6425 while (mb != NULL) 6426 mb = sctp_m_free(mb); 6427 } 6428 6429 #endif 6430 6431 int 6432 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6433 { 6434 /* 6435 * Given a local address. For all associations that holds the 6436 * address, request a peer-set-primary. 
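* The request is queued as an sctp_laddr work item on the global address work queue and is picked up by the ADDR_WQ timer/iterator started below.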
6437 */ 6438 struct sctp_ifa *ifa; 6439 struct sctp_laddr *wi; 6440 6441 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6442 if (ifa == NULL) { 6443 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6444 return (EADDRNOTAVAIL); 6445 } 6446 /* 6447 * Now that we have the ifa we must awaken the iterator with this 6448 * message. 6449 */ 6450 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6451 if (wi == NULL) { 6452 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6453 return (ENOMEM); 6454 } 6455 /* Now incr the count and int wi structure */ 6456 SCTP_INCR_LADDR_COUNT(); 6457 memset(wi, 0, sizeof(*wi)); 6458 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6459 wi->ifa = ifa; 6460 wi->action = SCTP_SET_PRIM_ADDR; 6461 atomic_add_int(&ifa->refcount, 1); 6462 6463 /* Now add it to the work queue */ 6464 SCTP_WQ_ADDR_LOCK(); 6465 /* 6466 * Should this really be a tailq? As it is we will process the 6467 * newest first :-0 6468 */ 6469 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6470 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6471 (struct sctp_inpcb *)NULL, 6472 (struct sctp_tcb *)NULL, 6473 (struct sctp_nets *)NULL); 6474 SCTP_WQ_ADDR_UNLOCK(); 6475 return (0); 6476 } 6477 6478 int 6479 sctp_soreceive(struct socket *so, 6480 struct sockaddr **psa, 6481 struct uio *uio, 6482 struct mbuf **mp0, 6483 struct mbuf **controlp, 6484 int *flagsp) 6485 { 6486 int error, fromlen; 6487 uint8_t sockbuf[256]; 6488 struct sockaddr *from; 6489 struct sctp_extrcvinfo sinfo; 6490 int filling_sinfo = 1; 6491 int flags; 6492 struct sctp_inpcb *inp; 6493 6494 inp = (struct sctp_inpcb *)so->so_pcb; 6495 /* pickup the assoc we are reading from */ 6496 if (inp == NULL) { 6497 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6498 return (EINVAL); 6499 } 6500 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6501 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6502 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6503 (controlp == NULL)) { 6504 /* user does not want the sndrcv ctl */ 6505 filling_sinfo = 0; 6506 } 6507 if (psa) { 6508 from = (struct sockaddr *)sockbuf; 6509 fromlen = sizeof(sockbuf); 6510 from->sa_len = 0; 6511 } else { 6512 from = NULL; 6513 fromlen = 0; 6514 } 6515 6516 if (filling_sinfo) { 6517 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6518 } 6519 if (flagsp != NULL) { 6520 flags = *flagsp; 6521 } else { 6522 flags = 0; 6523 } 6524 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6525 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6526 if (flagsp != NULL) { 6527 *flagsp = flags; 6528 } 6529 if (controlp != NULL) { 6530 /* copy back the sinfo in a CMSG format */ 6531 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6532 *controlp = sctp_build_ctl_nchunk(inp, 6533 (struct sctp_sndrcvinfo *)&sinfo); 6534 } else { 6535 *controlp = NULL; 6536 } 6537 } 6538 if (psa) { 6539 /* copy back the address info */ 6540 if (from && from->sa_len) { 6541 *psa = sodupsockaddr(from, M_NOWAIT); 6542 } else { 6543 *psa = NULL; 6544 } 6545 } 6546 return (error); 6547 } 6548 6549 int 6550 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6551 int totaddr, int *error) 6552 { 6553 int added = 0; 6554 int i; 6555 struct sctp_inpcb *inp; 6556 struct sockaddr *sa; 6557 size_t incr = 0; 6558 #ifdef INET 6559 struct sockaddr_in *sin; 6560 #endif 6561 #ifdef INET6 6562 struct sockaddr_in6 *sin6; 6563 #endif 6564 6565 sa = addr; 6566 inp = 
stcb->sctp_ep; 6567 *error = 0; 6568 for (i = 0; i < totaddr; i++) { 6569 switch (sa->sa_family) { 6570 #ifdef INET 6571 case AF_INET: 6572 incr = sizeof(struct sockaddr_in); 6573 sin = (struct sockaddr_in *)sa; 6574 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6575 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6576 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6577 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6578 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6579 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6580 *error = EINVAL; 6581 goto out_now; 6582 } 6583 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6584 SCTP_DONOT_SETSCOPE, 6585 SCTP_ADDR_IS_CONFIRMED)) { 6586 /* assoc gone no un-lock */ 6587 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6588 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6589 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6590 *error = ENOBUFS; 6591 goto out_now; 6592 } 6593 added++; 6594 break; 6595 #endif 6596 #ifdef INET6 6597 case AF_INET6: 6598 incr = sizeof(struct sockaddr_in6); 6599 sin6 = (struct sockaddr_in6 *)sa; 6600 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6601 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6602 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6603 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6604 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6605 *error = EINVAL; 6606 goto out_now; 6607 } 6608 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6609 SCTP_DONOT_SETSCOPE, 6610 SCTP_ADDR_IS_CONFIRMED)) { 6611 /* assoc gone no un-lock */ 6612 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6613 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6614 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6615 *error = ENOBUFS; 6616 goto out_now; 6617 } 6618 added++; 6619 break; 6620 #endif 6621 default: 6622 break; 6623 } 6624 sa = (struct sockaddr *)((caddr_t)sa + incr); 6625 } 6626 out_now: 6627 return (added); 6628 } 6629 6630 int 6631 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6632 unsigned int totaddr, 6633 unsigned int *num_v4, unsigned int *num_v6, 6634 unsigned int limit) 6635 { 6636 struct sockaddr *sa; 6637 struct sctp_tcb *stcb; 6638 unsigned int incr, at, i; 6639 6640 at = 0; 6641 sa = addr; 6642 *num_v6 = *num_v4 = 0; 6643 /* account and validate addresses */ 6644 if (totaddr == 0) { 6645 return (EINVAL); 6646 } 6647 for (i = 0; i < totaddr; i++) { 6648 if (at + sizeof(struct sockaddr) > limit) { 6649 return (EINVAL); 6650 } 6651 switch (sa->sa_family) { 6652 #ifdef INET 6653 case AF_INET: 6654 incr = (unsigned int)sizeof(struct sockaddr_in); 6655 if (sa->sa_len != incr) { 6656 return (EINVAL); 6657 } 6658 (*num_v4) += 1; 6659 break; 6660 #endif 6661 #ifdef INET6 6662 case AF_INET6: 6663 { 6664 struct sockaddr_in6 *sin6; 6665 6666 sin6 = (struct sockaddr_in6 *)sa; 6667 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6668 /* Must be non-mapped for connectx */ 6669 return (EINVAL); 6670 } 6671 incr = (unsigned int)sizeof(struct sockaddr_in6); 6672 if (sa->sa_len != incr) { 6673 return (EINVAL); 6674 } 6675 (*num_v6) += 1; 6676 break; 6677 } 6678 #endif 6679 default: 6680 return (EINVAL); 6681 } 6682 if ((at + incr) > limit) { 6683 return (EINVAL); 6684 } 6685 SCTP_INP_INCR_REF(inp); 6686 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6687 if (stcb != NULL) { 6688 SCTP_TCB_UNLOCK(stcb); 6689 return (EALREADY); 6690 } else { 6691 SCTP_INP_DECR_REF(inp); 6692 } 6693 at += incr; 6694 sa = (struct sockaddr *)((caddr_t)sa + 
incr); 6695 } 6696 return (0); 6697 } 6698 6699 /* 6700 * sctp_bindx(ADD) for one address. 6701 * assumes all arguments are valid/checked by caller. 6702 */ 6703 void 6704 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6705 struct sockaddr *sa, uint32_t vrf_id, int *error, 6706 void *p) 6707 { 6708 #if defined(INET) && defined(INET6) 6709 struct sockaddr_in sin; 6710 #endif 6711 #ifdef INET6 6712 struct sockaddr_in6 *sin6; 6713 #endif 6714 #ifdef INET 6715 struct sockaddr_in *sinp; 6716 #endif 6717 struct sockaddr *addr_to_use; 6718 struct sctp_inpcb *lep; 6719 uint16_t port; 6720 6721 /* see if we're bound all already! */ 6722 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6723 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6724 *error = EINVAL; 6725 return; 6726 } 6727 switch (sa->sa_family) { 6728 #ifdef INET6 6729 case AF_INET6: 6730 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6731 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6732 *error = EINVAL; 6733 return; 6734 } 6735 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6736 /* can only bind v6 on PF_INET6 sockets */ 6737 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6738 *error = EINVAL; 6739 return; 6740 } 6741 sin6 = (struct sockaddr_in6 *)sa; 6742 port = sin6->sin6_port; 6743 #ifdef INET 6744 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6745 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6746 SCTP_IPV6_V6ONLY(inp)) { 6747 /* can't bind v4-mapped on PF_INET sockets */ 6748 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6749 *error = EINVAL; 6750 return; 6751 } 6752 in6_sin6_2_sin(&sin, sin6); 6753 addr_to_use = (struct sockaddr *)&sin; 6754 } else { 6755 addr_to_use = sa; 6756 } 6757 #else 6758 addr_to_use = sa; 6759 #endif 6760 break; 6761 #endif 6762 #ifdef INET 6763 case AF_INET: 6764 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6765 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6766 *error = EINVAL; 6767 return; 6768 } 6769 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6770 SCTP_IPV6_V6ONLY(inp)) { 6771 /* can't bind v4 on PF_INET sockets */ 6772 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6773 *error = EINVAL; 6774 return; 6775 } 6776 sinp = (struct sockaddr_in *)sa; 6777 port = sinp->sin_port; 6778 addr_to_use = sa; 6779 break; 6780 #endif 6781 default: 6782 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6783 *error = EINVAL; 6784 return; 6785 } 6786 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6787 if (p == NULL) { 6788 /* Can't get proc for Net/Open BSD */ 6789 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6790 *error = EINVAL; 6791 return; 6792 } 6793 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6794 return; 6795 } 6796 /* Validate the incoming port. */ 6797 if ((port != 0) && (port != inp->sctp_lport)) { 6798 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6799 *error = EINVAL; 6800 return; 6801 } 6802 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6803 if (lep == NULL) { 6804 /* add the address */ 6805 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6806 SCTP_ADD_IP_ADDRESS, vrf_id); 6807 } else { 6808 if (lep != inp) { 6809 *error = EADDRINUSE; 6810 } 6811 SCTP_INP_DECR_REF(lep); 6812 } 6813 } 6814 6815 /* 6816 * sctp_bindx(DELETE) for one address. 6817 * assumes all arguments are valid/checked by caller. 
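* Userland typically reaches this path through the sctp_bindx() library call with SCTP_BINDX_REM_ADDR; the ADD case handled above corresponds to SCTP_BINDX_ADD_ADDR.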
6818 */ 6819 void 6820 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6821 struct sockaddr *sa, uint32_t vrf_id, int *error) 6822 { 6823 struct sockaddr *addr_to_use; 6824 #if defined(INET) && defined(INET6) 6825 struct sockaddr_in6 *sin6; 6826 struct sockaddr_in sin; 6827 #endif 6828 6829 /* see if we're bound all already! */ 6830 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6831 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6832 *error = EINVAL; 6833 return; 6834 } 6835 switch (sa->sa_family) { 6836 #ifdef INET6 6837 case AF_INET6: 6838 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6839 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6840 *error = EINVAL; 6841 return; 6842 } 6843 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6844 /* can only bind v6 on PF_INET6 sockets */ 6845 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6846 *error = EINVAL; 6847 return; 6848 } 6849 #ifdef INET 6850 sin6 = (struct sockaddr_in6 *)sa; 6851 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6852 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6853 SCTP_IPV6_V6ONLY(inp)) { 6854 /* can't bind mapped-v4 on PF_INET sockets */ 6855 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6856 *error = EINVAL; 6857 return; 6858 } 6859 in6_sin6_2_sin(&sin, sin6); 6860 addr_to_use = (struct sockaddr *)&sin; 6861 } else { 6862 addr_to_use = sa; 6863 } 6864 #else 6865 addr_to_use = sa; 6866 #endif 6867 break; 6868 #endif 6869 #ifdef INET 6870 case AF_INET: 6871 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6872 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6873 *error = EINVAL; 6874 return; 6875 } 6876 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6877 SCTP_IPV6_V6ONLY(inp)) { 6878 /* can't bind v4 on PF_INET sockets */ 6879 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6880 *error = EINVAL; 6881 return; 6882 } 6883 addr_to_use = sa; 6884 break; 6885 #endif 6886 default: 6887 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6888 *error = EINVAL; 6889 return; 6890 } 6891 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6892 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6893 vrf_id); 6894 } 6895 6896 /* 6897 * returns the valid local address count for an assoc, taking into account 6898 * all scoping rules 6899 */ 6900 int 6901 sctp_local_addr_count(struct sctp_tcb *stcb) 6902 { 6903 int loopback_scope; 6904 #if defined(INET) 6905 int ipv4_local_scope, ipv4_addr_legal; 6906 #endif 6907 #if defined(INET6) 6908 int local_scope, site_scope, ipv6_addr_legal; 6909 #endif 6910 struct sctp_vrf *vrf; 6911 struct sctp_ifn *sctp_ifn; 6912 struct sctp_ifa *sctp_ifa; 6913 int count = 0; 6914 6915 /* Turn on all the appropriate scopes */ 6916 loopback_scope = stcb->asoc.scope.loopback_scope; 6917 #if defined(INET) 6918 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6919 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6920 #endif 6921 #if defined(INET6) 6922 local_scope = stcb->asoc.scope.local_scope; 6923 site_scope = stcb->asoc.scope.site_scope; 6924 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6925 #endif 6926 SCTP_IPI_ADDR_RLOCK(); 6927 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6928 if (vrf == NULL) { 6929 /* no vrf, no addresses */ 6930 SCTP_IPI_ADDR_RUNLOCK(); 6931 return (0); 6932 } 6933 6934 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6935 /* 6936 * bound all case: go through all ifns on the vrf 6937 */ 6938 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6939 if ((loopback_scope == 0) && 6940 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6941 continue; 6942 } 6943 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6944 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6945 continue; 6946 switch (sctp_ifa->address.sa.sa_family) { 6947 #ifdef INET 6948 case AF_INET: 6949 if (ipv4_addr_legal) { 6950 struct sockaddr_in *sin; 6951 6952 sin = &sctp_ifa->address.sin; 6953 if (sin->sin_addr.s_addr == 0) { 6954 /* 6955 * skip unspecified 6956 * addrs 6957 */ 6958 continue; 6959 } 6960 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 6961 &sin->sin_addr) != 0) { 6962 continue; 6963 } 6964 if ((ipv4_local_scope == 0) && 6965 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 6966 continue; 6967 } 6968 /* count this one */ 6969 count++; 6970 } else { 6971 continue; 6972 } 6973 break; 6974 #endif 6975 #ifdef INET6 6976 case AF_INET6: 6977 if (ipv6_addr_legal) { 6978 struct sockaddr_in6 *sin6; 6979 6980 sin6 = &sctp_ifa->address.sin6; 6981 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 6982 continue; 6983 } 6984 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 6985 &sin6->sin6_addr) != 0) { 6986 continue; 6987 } 6988 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 6989 if (local_scope == 0) 6990 continue; 6991 if (sin6->sin6_scope_id == 0) { 6992 if (sa6_recoverscope(sin6) != 0) 6993 /* 6994 * 6995 * bad 6996 * link 6997 * 6998 * local 6999 * 7000 * address 7001 */ 7002 continue; 7003 } 7004 } 7005 if ((site_scope == 0) && 7006 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7007 continue; 7008 } 7009 /* count this one */ 7010 count++; 7011 } 7012 break; 7013 #endif 7014 default: 7015 /* TSNH */ 7016 break; 7017 } 7018 } 7019 } 7020 } else { 7021 /* 7022 * subset bound case 7023 */ 7024 struct sctp_laddr *laddr; 7025 7026 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7027 sctp_nxt_addr) { 7028 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7029 continue; 7030 } 7031 /* count this one */ 7032 count++; 7033 } 7034 } 7035 SCTP_IPI_ADDR_RUNLOCK(); 7036 return (count); 7037 } 7038 7039 #if defined(SCTP_LOCAL_TRACE_BUF) 7040 7041 void 7042 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7043 { 7044 uint32_t saveindex, newindex; 7045 7046 do { 7047 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7048 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7049 newindex = 1; 7050 } else { 7051 newindex = saveindex + 1; 7052 } 7053 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7054 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7055 saveindex = 0; 7056 } 7057 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7058 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7059 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7060 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7061 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7062 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7063 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7064 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7065 } 7066 7067 #endif 7068 static void 7069 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7070 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7071 { 7072 struct ip *iph; 7073 #ifdef INET6 7074 struct ip6_hdr *ip6; 7075 #endif 7076 struct mbuf *sp, *last; 7077 struct udphdr *uhdr; 7078 uint16_t port; 7079 7080 if ((m->m_flags & M_PKTHDR) == 0) { 7081 /* Can't handle one that is not a pkt hdr */ 7082 goto out; 7083 } 7084 /* Pull the src port */ 7085 iph = mtod(m, struct ip *); 7086 uhdr = (struct udphdr *)((caddr_t)iph + off); 7087 port = uhdr->uh_sport; 7088 /* 7089 * Split out the mbuf chain. Leave the IP header in m, place the 7090 * rest in the sp. 7091 */ 7092 sp = m_split(m, off, M_NOWAIT); 7093 if (sp == NULL) { 7094 /* Gak, drop packet, we can't do a split */ 7095 goto out; 7096 } 7097 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7098 /* Gak, packet can't have an SCTP header in it - too small */ 7099 m_freem(sp); 7100 goto out; 7101 } 7102 /* Now pull up the UDP header and SCTP header together */ 7103 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7104 if (sp == NULL) { 7105 /* Gak pullup failed */ 7106 goto out; 7107 } 7108 /* Trim out the UDP header */ 7109 m_adj(sp, sizeof(struct udphdr)); 7110 7111 /* Now reconstruct the mbuf chain */ 7112 for (last = m; last->m_next; last = last->m_next); 7113 last->m_next = sp; 7114 m->m_pkthdr.len += sp->m_pkthdr.len; 7115 /* 7116 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7117 * checksum and it was valid. Since CSUM_DATA_VALID == 7118 * CSUM_SCTP_VALID this would imply that the HW also verified the 7119 * SCTP checksum. Therefore, clear the bit. 
7120 */ 7121 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7122 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7123 m->m_pkthdr.len, 7124 if_name(m->m_pkthdr.rcvif), 7125 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7126 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7127 iph = mtod(m, struct ip *); 7128 switch (iph->ip_v) { 7129 #ifdef INET 7130 case IPVERSION: 7131 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7132 sctp_input_with_port(m, off, port); 7133 break; 7134 #endif 7135 #ifdef INET6 7136 case IPV6_VERSION >> 4: 7137 ip6 = mtod(m, struct ip6_hdr *); 7138 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7139 sctp6_input_with_port(&m, &off, port); 7140 break; 7141 #endif 7142 default: 7143 goto out; 7144 break; 7145 } 7146 return; 7147 out: 7148 m_freem(m); 7149 } 7150 7151 #ifdef INET 7152 static void 7153 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7154 { 7155 struct ip *outer_ip, *inner_ip; 7156 struct sctphdr *sh; 7157 struct icmp *icmp; 7158 struct udphdr *udp; 7159 struct sctp_inpcb *inp; 7160 struct sctp_tcb *stcb; 7161 struct sctp_nets *net; 7162 struct sctp_init_chunk *ch; 7163 struct sockaddr_in src, dst; 7164 uint8_t type, code; 7165 7166 inner_ip = (struct ip *)vip; 7167 icmp = (struct icmp *)((caddr_t)inner_ip - 7168 (sizeof(struct icmp) - sizeof(struct ip))); 7169 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7170 if (ntohs(outer_ip->ip_len) < 7171 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7172 return; 7173 } 7174 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7175 sh = (struct sctphdr *)(udp + 1); 7176 memset(&src, 0, sizeof(struct sockaddr_in)); 7177 src.sin_family = AF_INET; 7178 src.sin_len = sizeof(struct sockaddr_in); 7179 src.sin_port = sh->src_port; 7180 src.sin_addr = inner_ip->ip_src; 7181 memset(&dst, 0, sizeof(struct sockaddr_in)); 7182 dst.sin_family = AF_INET; 7183 dst.sin_len = sizeof(struct sockaddr_in); 7184 dst.sin_port = sh->dest_port; 7185 dst.sin_addr = inner_ip->ip_dst; 7186 /* 7187 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7188 * holds our local endpoint address. Thus we reverse the dst and the 7189 * src in the lookup. 7190 */ 7191 inp = NULL; 7192 net = NULL; 7193 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7194 (struct sockaddr *)&src, 7195 &inp, &net, 1, 7196 SCTP_DEFAULT_VRFID); 7197 if ((stcb != NULL) && 7198 (net != NULL) && 7199 (inp != NULL)) { 7200 /* Check the UDP port numbers */ 7201 if ((udp->uh_dport != net->port) || 7202 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7203 SCTP_TCB_UNLOCK(stcb); 7204 return; 7205 } 7206 /* Check the verification tag */ 7207 if (ntohl(sh->v_tag) != 0) { 7208 /* 7209 * This must be the verification tag used for 7210 * sending out packets. We don't consider packets 7211 * reflecting the verification tag. 7212 */ 7213 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7214 SCTP_TCB_UNLOCK(stcb); 7215 return; 7216 } 7217 } else { 7218 if (ntohs(outer_ip->ip_len) >= 7219 sizeof(struct ip) + 7220 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7221 /* 7222 * In this case we can check if we got an 7223 * INIT chunk and if the initiate tag 7224 * matches. 
7225 */ 7226 ch = (struct sctp_init_chunk *)(sh + 1); 7227 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7228 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7229 SCTP_TCB_UNLOCK(stcb); 7230 return; 7231 } 7232 } else { 7233 SCTP_TCB_UNLOCK(stcb); 7234 return; 7235 } 7236 } 7237 type = icmp->icmp_type; 7238 code = icmp->icmp_code; 7239 if ((type == ICMP_UNREACH) && 7240 (code == ICMP_UNREACH_PORT)) { 7241 code = ICMP_UNREACH_PROTOCOL; 7242 } 7243 sctp_notify(inp, stcb, net, type, code, 7244 ntohs(inner_ip->ip_len), 7245 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7246 } else { 7247 if ((stcb == NULL) && (inp != NULL)) { 7248 /* reduce ref-count */ 7249 SCTP_INP_WLOCK(inp); 7250 SCTP_INP_DECR_REF(inp); 7251 SCTP_INP_WUNLOCK(inp); 7252 } 7253 if (stcb) { 7254 SCTP_TCB_UNLOCK(stcb); 7255 } 7256 } 7257 return; 7258 } 7259 #endif 7260 7261 #ifdef INET6 7262 static void 7263 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7264 { 7265 struct ip6ctlparam *ip6cp; 7266 struct sctp_inpcb *inp; 7267 struct sctp_tcb *stcb; 7268 struct sctp_nets *net; 7269 struct sctphdr sh; 7270 struct udphdr udp; 7271 struct sockaddr_in6 src, dst; 7272 uint8_t type, code; 7273 7274 ip6cp = (struct ip6ctlparam *)d; 7275 /* 7276 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7277 */ 7278 if (ip6cp->ip6c_m == NULL) { 7279 return; 7280 } 7281 /* 7282 * Check if we can safely examine the ports and the verification tag 7283 * of the SCTP common header. 7284 */ 7285 if (ip6cp->ip6c_m->m_pkthdr.len < 7286 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7287 return; 7288 } 7289 /* Copy out the UDP header. */ 7290 memset(&udp, 0, sizeof(struct udphdr)); 7291 m_copydata(ip6cp->ip6c_m, 7292 ip6cp->ip6c_off, 7293 sizeof(struct udphdr), 7294 (caddr_t)&udp); 7295 /* Copy out the port numbers and the verification tag. */ 7296 memset(&sh, 0, sizeof(struct sctphdr)); 7297 m_copydata(ip6cp->ip6c_m, 7298 ip6cp->ip6c_off + sizeof(struct udphdr), 7299 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7300 (caddr_t)&sh); 7301 memset(&src, 0, sizeof(struct sockaddr_in6)); 7302 src.sin6_family = AF_INET6; 7303 src.sin6_len = sizeof(struct sockaddr_in6); 7304 src.sin6_port = sh.src_port; 7305 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7306 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7307 return; 7308 } 7309 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7310 dst.sin6_family = AF_INET6; 7311 dst.sin6_len = sizeof(struct sockaddr_in6); 7312 dst.sin6_port = sh.dest_port; 7313 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7314 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7315 return; 7316 } 7317 inp = NULL; 7318 net = NULL; 7319 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7320 (struct sockaddr *)&src, 7321 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7322 if ((stcb != NULL) && 7323 (net != NULL) && 7324 (inp != NULL)) { 7325 /* Check the UDP port numbers */ 7326 if ((udp.uh_dport != net->port) || 7327 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7328 SCTP_TCB_UNLOCK(stcb); 7329 return; 7330 } 7331 /* Check the verification tag */ 7332 if (ntohl(sh.v_tag) != 0) { 7333 /* 7334 * This must be the verification tag used for 7335 * sending out packets. We don't consider packets 7336 * reflecting the verification tag. 
7337 */ 7338 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7339 SCTP_TCB_UNLOCK(stcb); 7340 return; 7341 } 7342 } else { 7343 if (ip6cp->ip6c_m->m_pkthdr.len >= 7344 ip6cp->ip6c_off + sizeof(struct udphdr) + 7345 sizeof(struct sctphdr) + 7346 sizeof(struct sctp_chunkhdr) + 7347 offsetof(struct sctp_init, a_rwnd)) { 7348 /* 7349 * In this case we can check if we got an 7350 * INIT chunk and if the initiate tag 7351 * matches. 7352 */ 7353 uint32_t initiate_tag; 7354 uint8_t chunk_type; 7355 7356 m_copydata(ip6cp->ip6c_m, 7357 ip6cp->ip6c_off + 7358 sizeof(struct udphdr) + 7359 sizeof(struct sctphdr), 7360 sizeof(uint8_t), 7361 (caddr_t)&chunk_type); 7362 m_copydata(ip6cp->ip6c_m, 7363 ip6cp->ip6c_off + 7364 sizeof(struct udphdr) + 7365 sizeof(struct sctphdr) + 7366 sizeof(struct sctp_chunkhdr), 7367 sizeof(uint32_t), 7368 (caddr_t)&initiate_tag); 7369 if ((chunk_type != SCTP_INITIATION) || 7370 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7371 SCTP_TCB_UNLOCK(stcb); 7372 return; 7373 } 7374 } else { 7375 SCTP_TCB_UNLOCK(stcb); 7376 return; 7377 } 7378 } 7379 type = ip6cp->ip6c_icmp6->icmp6_type; 7380 code = ip6cp->ip6c_icmp6->icmp6_code; 7381 if ((type == ICMP6_DST_UNREACH) && 7382 (code == ICMP6_DST_UNREACH_NOPORT)) { 7383 type = ICMP6_PARAM_PROB; 7384 code = ICMP6_PARAMPROB_NEXTHEADER; 7385 } 7386 sctp6_notify(inp, stcb, net, type, code, 7387 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7388 } else { 7389 if ((stcb == NULL) && (inp != NULL)) { 7390 /* reduce inp's ref-count */ 7391 SCTP_INP_WLOCK(inp); 7392 SCTP_INP_DECR_REF(inp); 7393 SCTP_INP_WUNLOCK(inp); 7394 } 7395 if (stcb) { 7396 SCTP_TCB_UNLOCK(stcb); 7397 } 7398 } 7399 } 7400 #endif 7401 7402 void 7403 sctp_over_udp_stop(void) 7404 { 7405 /* 7406 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7407 * for writting! 7408 */ 7409 #ifdef INET 7410 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7411 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7412 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7413 } 7414 #endif 7415 #ifdef INET6 7416 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7417 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7418 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7419 } 7420 #endif 7421 } 7422 7423 int 7424 sctp_over_udp_start(void) 7425 { 7426 uint16_t port; 7427 int ret; 7428 #ifdef INET 7429 struct sockaddr_in sin; 7430 #endif 7431 #ifdef INET6 7432 struct sockaddr_in6 sin6; 7433 #endif 7434 /* 7435 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7436 * for writting! 7437 */ 7438 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7439 if (ntohs(port) == 0) { 7440 /* Must have a port set */ 7441 return (EINVAL); 7442 } 7443 #ifdef INET 7444 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7445 /* Already running -- must stop first */ 7446 return (EALREADY); 7447 } 7448 #endif 7449 #ifdef INET6 7450 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7451 /* Already running -- must stop first */ 7452 return (EALREADY); 7453 } 7454 #endif 7455 #ifdef INET 7456 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7457 SOCK_DGRAM, IPPROTO_UDP, 7458 curthread->td_ucred, curthread))) { 7459 sctp_over_udp_stop(); 7460 return (ret); 7461 } 7462 /* Call the special UDP hook. */ 7463 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7464 sctp_recv_udp_tunneled_packet, 7465 sctp_recv_icmp_tunneled_packet, 7466 NULL))) { 7467 sctp_over_udp_stop(); 7468 return (ret); 7469 } 7470 /* Ok, we have a socket, bind it to the port. 

void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}

int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}

void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}
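
/*
 * Usage sketch only (kept under "#if 0", not compiled): combining the
 * host cache lookup with sctp_min_mtu() to clamp a path MTU.  The
 * function and parameter names are hypothetical.
 */
#if 0
static uint32_t
sctp_example_effective_mtu(union sctp_sockstore *addr, uint16_t fibnum,
    uint32_t if_mtu, uint32_t assoc_mtu)
{
	uint32_t hc_mtu;

	/* 0 on a cache miss; sctp_min_mtu() ignores zero arguments. */
	hc_mtu = sctp_hc_get_mtu(addr, fibnum);
	return (sctp_min_mtu(hc_mtu, if_mtu, assoc_mtu));
}
#endif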

void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	    (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
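
/*
 * Usage sketch only (kept under "#if 0", not compiled): the association
 * state word holds a base state within SCTP_STATE_MASK plus substate
 * flag bits above it.  sctp_set_state() replaces only the masked part
 * and sctp_add_substate() ORs in flag bits; the hypothetical helpers
 * below show the intended division of labor.
 */
#if 0
static void
sctp_example_request_graceful_shutdown(struct sctp_tcb *stcb)
{
	/* Keep the base state, mark that a SHUTDOWN should follow. */
	sctp_add_substate(stcb, SCTP_STATE_SHUTDOWN_PENDING);
}

static void
sctp_example_shutdown_sent(struct sctp_tcb *stcb)
{
	/*
	 * Entering SHUTDOWN-SENT clears SCTP_STATE_SHUTDOWN_PENDING
	 * inside sctp_set_state() itself.
	 */
	sctp_set_state(stcb, SCTP_STATE_SHUTDOWN_SENT);
}
#endif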