1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * a) Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 14 * b) Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the distribution. 17 * 18 * c) Neither the name of Cisco Systems, Inc. nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif


#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

/*
 * Trace a socket-buffer event to the KTR trace buffer.  The per-event
 * fields are written through one arm of the sctp_cwnd_log union and then
 * read back as four raw words via the overlapping "misc" view.
 */
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * NOTE(review): sctp_clog is not zeroed here, so any misc.log*
	 * words not covered by the fields set below are read
	 * uninitialized -- confirm this is intended (trace-only data).
	 */
	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace an endpoint/association close event, recording the inp flags,
 * association state (if any) and the caller's location code.
 */
void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace the current smoothed RTT (in milliseconds) for a destination.
 */
void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	/* net->rtt is kept in microseconds; log milliseconds. */
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace a stream event given explicit TSN/SSN/stream values (the "alt"
 * variant used when no sctp_queued_to_read structure is at hand).
 */
void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace a Nagle-algorithm decision together with the queue/flight
 * counters that drove it.
 */
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace SACK processing: old and new cumulative ack, highest TSN and
 * the number of gap-ack and duplicate-TSN blocks reported.
 */
void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace the state of the receive mapping array (base, cumulative TSN
 * and highest TSN seen).
 */
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace a fast-retransmit decision point (largest TSN, largest newly
 * acked TSN, and the TSN under consideration).
 */
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
/*
 * Trace a single mbuf: flags, length, data pointer and, for clusters,
 * the external buffer base and reference count.
 */
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace every mbuf in a chain by walking the m_next linkage.
 */
void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

/*
 * Trace a stream-queue event for a queued-to-read control, optionally
 * including the entry ("poschk") it is being compared against.
 */
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace a congestion-window change.  Queue counters are clamped to 255
 * to fit the 8-bit log fields; for the PRESEND event the peer's rwnd is
 * logged in place of the pseudo-cumack field.
 */
void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		/* Reuse the field to record the peer's receive window. */
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace the ownership state of the various locks protecting an
 * endpoint/association; SCTP_LOCK_UNKNOWN is logged for locks that do
 * not exist in the given context.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx (the socket lock is the receive-buffer
		 * mutex) -- confirm this aliasing is intended.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace a max-burst limit event, reusing the cwnd log layout (error in
 * cwnd_new_value, burst count in cwnd_augment).
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace a peer receive-window check (no new rwnd value computed).
 */
void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace an update of the peer receive window to a new a_rwnd value.
 */
void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
/*
 * Trace output-queue byte/mbuf accounting changes.
 */
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
#endif

/*
 * Trace four arbitrary 32-bit values with a caller-supplied tag.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}

/*
 * Trace a socket wakeup, including queue counters (clamped to 8 bits),
 * the deferred-wakeup PCB flags and the send-buffer flags.
 */
void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		/* No socket attached: mark the flags as unavailable. */
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace a sender-side blocking event with the association's queue and
 * flight statistics at the time of the block.
 */
void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	/* Flight is logged in KiB to fit a 16-bit field. */
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Stub: statistics are retrieved via ktrdump rather than copied out
 * through this sysctl path.  Always reports success.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
/* Circular audit record buffer and its current write index. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

/*
 * Dump the audit ring buffer (oldest entries first) to the console,
 * breaking the line on sentinel records and every 14 entries.
 */
static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	/* First the records from the current index to the end... */
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	/* ...then wrap around from the start to the current index. */
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}

/*
 * Audit the association's retransmit and flight-size accounting
 * against the actual contents of the sent queue, recording any
 * discrepancies (0xAF records), correcting the counters in place, and
 * printing a full report if anything was off.  The "net" argument is
 * currently unused.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount retransmits and in-flight bytes from the sent queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check the per-destination flight sizes. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

/*
 * Append one (event, detail) record to the audit ring buffer.
 */
void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code the time to be positive and
 * know that this corresponds to a positive number of ticks.
779 */ 780 781 uint32_t 782 sctp_msecs_to_ticks(uint32_t msecs) 783 { 784 uint64_t temp; 785 uint32_t ticks; 786 787 if (hz == 1000) { 788 ticks = msecs; 789 } else { 790 temp = (((uint64_t)msecs * hz) + 999) / 1000; 791 if (temp > UINT32_MAX) { 792 ticks = UINT32_MAX; 793 } else { 794 ticks = (uint32_t)temp; 795 } 796 } 797 return (ticks); 798 } 799 800 uint32_t 801 sctp_ticks_to_msecs(uint32_t ticks) 802 { 803 uint64_t temp; 804 uint32_t msecs; 805 806 if (hz == 1000) { 807 msecs = ticks; 808 } else { 809 temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz; 810 if (temp > UINT32_MAX) { 811 msecs = UINT32_MAX; 812 } else { 813 msecs = (uint32_t)temp; 814 } 815 } 816 return (msecs); 817 } 818 819 uint32_t 820 sctp_secs_to_ticks(uint32_t secs) 821 { 822 uint64_t temp; 823 uint32_t ticks; 824 825 temp = (uint64_t)secs * hz; 826 if (temp > UINT32_MAX) { 827 ticks = UINT32_MAX; 828 } else { 829 ticks = (uint32_t)temp; 830 } 831 return (ticks); 832 } 833 834 uint32_t 835 sctp_ticks_to_secs(uint32_t ticks) 836 { 837 uint64_t temp; 838 uint32_t secs; 839 840 temp = ((uint64_t)ticks + (hz - 1)) / hz; 841 if (temp > UINT32_MAX) { 842 secs = UINT32_MAX; 843 } else { 844 secs = (uint32_t)temp; 845 } 846 return (secs); 847 } 848 849 /* 850 * sctp_stop_timers_for_shutdown() should be called 851 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT 852 * state to make sure that all timers are stopped. 
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	/* Association-wide timers first... */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	/* ...then the per-destination timers. */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}

/*
 * Stop every timer associated with an association (association-wide
 * and per-destination).  The ASOCKILL timer is only stopped when the
 * caller asks for it, since it may be the timer driving the teardown.
 */
void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};

/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	/* Round val down to a multiple of 4. */
	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4 smaller
 * or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	val &= 0xfffffffc;
	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}

/*
 * Refill the endpoint's pool of pseudo-random bytes by hashing the
 * stored random numbers together with a monotonically increasing
 * counter, and reset the read position to the start of the pool.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}

/*
 * Pull the next 32-bit value from the endpoint's random store,
 * advancing the read position atomically (lock-free, retried on
 * concurrent update) and refilling the store when it wraps.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		/* Debug mode: hand out predictable, sequential values. */
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

/*
 * Select a verification tag for a new association.  Zero is never
 * used; when "check" is set, the tag is also validated against the
 * time-wait vtag list for the given port pair.
 */
uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}

/*
 * Map an internal kernel association state to the user-visible
 * SCTP_* state constants reported via the socket API.
 */
int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}

/*
 * Initialize a newly allocated association from the endpoint's
 * defaults.  (Definition continues beyond this view.)
 */
int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	/* NOTE(review): duplicate of the idata_supported assignment above. */
	asoc->idata_supported = inp->idata_supported;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			/* Auto flow label: random 20-bit label, marked set. */
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
1171 #endif 1172 asoc->sb_send_resv = 0; 1173 if (override_tag) { 1174 asoc->my_vtag = override_tag; 1175 } else { 1176 asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1); 1177 } 1178 /* Get the nonce tags */ 1179 asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 1180 asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 1181 asoc->vrf_id = vrf_id; 1182 1183 #ifdef SCTP_ASOCLOG_OF_TSNS 1184 asoc->tsn_in_at = 0; 1185 asoc->tsn_out_at = 0; 1186 asoc->tsn_in_wrapped = 0; 1187 asoc->tsn_out_wrapped = 0; 1188 asoc->cumack_log_at = 0; 1189 asoc->cumack_log_atsnt = 0; 1190 #endif 1191 #ifdef SCTP_FS_SPEC_LOG 1192 asoc->fs_index = 0; 1193 #endif 1194 asoc->refcnt = 0; 1195 asoc->assoc_up_sent = 0; 1196 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq = 1197 sctp_select_initial_TSN(&inp->sctp_ep); 1198 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1; 1199 /* we are optimisitic here */ 1200 asoc->peer_supports_nat = 0; 1201 asoc->sent_queue_retran_cnt = 0; 1202 1203 /* for CMT */ 1204 asoc->last_net_cmt_send_started = NULL; 1205 1206 /* This will need to be adjusted */ 1207 asoc->last_acked_seq = asoc->init_seq_number - 1; 1208 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1209 asoc->asconf_seq_in = asoc->last_acked_seq; 1210 1211 /* here we are different, we hold the next one we expect */ 1212 asoc->str_reset_seq_in = asoc->last_acked_seq + 1; 1213 1214 asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max; 1215 asoc->initial_rto = inp->sctp_ep.initial_rto; 1216 1217 asoc->default_mtu = inp->sctp_ep.default_mtu; 1218 asoc->max_init_times = inp->sctp_ep.max_init_times; 1219 asoc->max_send_times = inp->sctp_ep.max_send_times; 1220 asoc->def_net_failure = inp->sctp_ep.def_net_failure; 1221 asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold; 1222 asoc->free_chunk_cnt = 0; 1223 1224 
asoc->iam_blocking = 0; 1225 asoc->context = inp->sctp_context; 1226 asoc->local_strreset_support = inp->local_strreset_support; 1227 asoc->def_send = inp->def_send; 1228 asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 1229 asoc->sack_freq = inp->sctp_ep.sctp_sack_freq; 1230 asoc->pr_sctp_cnt = 0; 1231 asoc->total_output_queue_size = 0; 1232 1233 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1234 asoc->scope.ipv6_addr_legal = 1; 1235 if (SCTP_IPV6_V6ONLY(inp) == 0) { 1236 asoc->scope.ipv4_addr_legal = 1; 1237 } else { 1238 asoc->scope.ipv4_addr_legal = 0; 1239 } 1240 } else { 1241 asoc->scope.ipv6_addr_legal = 0; 1242 asoc->scope.ipv4_addr_legal = 1; 1243 } 1244 1245 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND); 1246 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket); 1247 1248 asoc->smallest_mtu = inp->sctp_frag_point; 1249 asoc->minrto = inp->sctp_ep.sctp_minrto; 1250 asoc->maxrto = inp->sctp_ep.sctp_maxrto; 1251 1252 asoc->stream_locked_on = 0; 1253 asoc->ecn_echo_cnt_onq = 0; 1254 asoc->stream_locked = 0; 1255 1256 asoc->send_sack = 1; 1257 1258 LIST_INIT(&asoc->sctp_restricted_addrs); 1259 1260 TAILQ_INIT(&asoc->nets); 1261 TAILQ_INIT(&asoc->pending_reply_queue); 1262 TAILQ_INIT(&asoc->asconf_ack_sent); 1263 /* Setup to fill the hb random cache at first HB */ 1264 asoc->hb_random_idx = 4; 1265 1266 asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time; 1267 1268 stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module; 1269 stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module]; 1270 1271 stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module; 1272 stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module]; 1273 1274 /* 1275 * Now the stream parameters, here we allocate space for all streams 1276 * that we request by default. 
1277 */ 1278 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams = 1279 o_strms; 1280 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *, 1281 asoc->streamoutcnt * sizeof(struct sctp_stream_out), 1282 SCTP_M_STRMO); 1283 if (asoc->strmout == NULL) { 1284 /* big trouble no memory */ 1285 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1286 return (ENOMEM); 1287 } 1288 for (i = 0; i < asoc->streamoutcnt; i++) { 1289 /* 1290 * inbound side must be set to 0xffff, also NOTE when we get 1291 * the INIT-ACK back (for INIT sender) we MUST reduce the 1292 * count (streamoutcnt) but first check if we sent to any of 1293 * the upper streams that were dropped (if some were). Those 1294 * that were dropped must be notified to the upper layer as 1295 * failed to send. 1296 */ 1297 asoc->strmout[i].next_mid_ordered = 0; 1298 asoc->strmout[i].next_mid_unordered = 0; 1299 TAILQ_INIT(&asoc->strmout[i].outqueue); 1300 asoc->strmout[i].chunks_on_queues = 0; 1301 #if defined(SCTP_DETAILED_STR_STATS) 1302 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) { 1303 asoc->strmout[i].abandoned_sent[j] = 0; 1304 asoc->strmout[i].abandoned_unsent[j] = 0; 1305 } 1306 #else 1307 asoc->strmout[i].abandoned_sent[0] = 0; 1308 asoc->strmout[i].abandoned_unsent[0] = 0; 1309 #endif 1310 asoc->strmout[i].sid = i; 1311 asoc->strmout[i].last_msg_incomplete = 0; 1312 asoc->strmout[i].state = SCTP_STREAM_OPENING; 1313 asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL); 1314 } 1315 asoc->ss_functions.sctp_ss_init(stcb, asoc, 0); 1316 1317 /* Now the mapping array */ 1318 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY; 1319 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size, 1320 SCTP_M_MAP); 1321 if (asoc->mapping_array == NULL) { 1322 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1323 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1324 return (ENOMEM); 1325 } 1326 memset(asoc->mapping_array, 0, asoc->mapping_array_size); 
1327 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size, 1328 SCTP_M_MAP); 1329 if (asoc->nr_mapping_array == NULL) { 1330 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1331 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1332 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1333 return (ENOMEM); 1334 } 1335 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size); 1336 1337 /* Now the init of the other outqueues */ 1338 TAILQ_INIT(&asoc->free_chunks); 1339 TAILQ_INIT(&asoc->control_send_queue); 1340 TAILQ_INIT(&asoc->asconf_send_queue); 1341 TAILQ_INIT(&asoc->send_queue); 1342 TAILQ_INIT(&asoc->sent_queue); 1343 TAILQ_INIT(&asoc->resetHead); 1344 asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome; 1345 TAILQ_INIT(&asoc->asconf_queue); 1346 /* authentication fields */ 1347 asoc->authinfo.random = NULL; 1348 asoc->authinfo.active_keyid = 0; 1349 asoc->authinfo.assoc_key = NULL; 1350 asoc->authinfo.assoc_keyid = 0; 1351 asoc->authinfo.recv_key = NULL; 1352 asoc->authinfo.recv_keyid = 0; 1353 LIST_INIT(&asoc->shared_keys); 1354 asoc->marked_retrans = 0; 1355 asoc->port = inp->sctp_ep.port; 1356 asoc->timoinit = 0; 1357 asoc->timodata = 0; 1358 asoc->timosack = 0; 1359 asoc->timoshutdown = 0; 1360 asoc->timoheartbeat = 0; 1361 asoc->timocookie = 0; 1362 asoc->timoshutdownack = 0; 1363 (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time); 1364 asoc->discontinuity_time = asoc->start_time; 1365 for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) { 1366 asoc->abandoned_unsent[i] = 0; 1367 asoc->abandoned_sent[i] = 0; 1368 } 1369 /* 1370 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and 1371 * freed later when the association is freed. 
1372 */ 1373 return (0); 1374 } 1375 1376 void 1377 sctp_print_mapping_array(struct sctp_association *asoc) 1378 { 1379 unsigned int i, limit; 1380 1381 SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n", 1382 asoc->mapping_array_size, 1383 asoc->mapping_array_base_tsn, 1384 asoc->cumulative_tsn, 1385 asoc->highest_tsn_inside_map, 1386 asoc->highest_tsn_inside_nr_map); 1387 for (limit = asoc->mapping_array_size; limit > 1; limit--) { 1388 if (asoc->mapping_array[limit - 1] != 0) { 1389 break; 1390 } 1391 } 1392 SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit); 1393 for (i = 0; i < limit; i++) { 1394 SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n'); 1395 } 1396 if (limit % 16) 1397 SCTP_PRINTF("\n"); 1398 for (limit = asoc->mapping_array_size; limit > 1; limit--) { 1399 if (asoc->nr_mapping_array[limit - 1]) { 1400 break; 1401 } 1402 } 1403 SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit); 1404 for (i = 0; i < limit; i++) { 1405 SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? 
' ' : '\n'); 1406 } 1407 if (limit % 16) 1408 SCTP_PRINTF("\n"); 1409 } 1410 1411 int 1412 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed) 1413 { 1414 /* mapping array needs to grow */ 1415 uint8_t *new_array1, *new_array2; 1416 uint32_t new_size; 1417 1418 new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR); 1419 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP); 1420 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP); 1421 if ((new_array1 == NULL) || (new_array2 == NULL)) { 1422 /* can't get more, forget it */ 1423 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size); 1424 if (new_array1) { 1425 SCTP_FREE(new_array1, SCTP_M_MAP); 1426 } 1427 if (new_array2) { 1428 SCTP_FREE(new_array2, SCTP_M_MAP); 1429 } 1430 return (-1); 1431 } 1432 memset(new_array1, 0, new_size); 1433 memset(new_array2, 0, new_size); 1434 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size); 1435 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size); 1436 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1437 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP); 1438 asoc->mapping_array = new_array1; 1439 asoc->nr_mapping_array = new_array2; 1440 asoc->mapping_array_size = new_size; 1441 return (0); 1442 } 1443 1444 1445 static void 1446 sctp_iterator_work(struct sctp_iterator *it) 1447 { 1448 struct epoch_tracker et; 1449 struct sctp_inpcb *tinp; 1450 int iteration_count = 0; 1451 int inp_skip = 0; 1452 int first_in = 1; 1453 1454 NET_EPOCH_ENTER(et); 1455 SCTP_INP_INFO_RLOCK(); 1456 SCTP_ITERATOR_LOCK(); 1457 sctp_it_ctl.cur_it = it; 1458 if (it->inp) { 1459 SCTP_INP_RLOCK(it->inp); 1460 SCTP_INP_DECR_REF(it->inp); 1461 } 1462 if (it->inp == NULL) { 1463 /* iterator is complete */ 1464 done_with_iterator: 1465 sctp_it_ctl.cur_it = NULL; 1466 SCTP_ITERATOR_UNLOCK(); 1467 SCTP_INP_INFO_RUNLOCK(); 1468 if (it->function_atend != NULL) { 1469 (*it->function_atend) (it->pointer, it->val); 
1470 } 1471 SCTP_FREE(it, SCTP_M_ITER); 1472 NET_EPOCH_EXIT(et); 1473 return; 1474 } 1475 select_a_new_ep: 1476 if (first_in) { 1477 first_in = 0; 1478 } else { 1479 SCTP_INP_RLOCK(it->inp); 1480 } 1481 while (((it->pcb_flags) && 1482 ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) || 1483 ((it->pcb_features) && 1484 ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) { 1485 /* endpoint flags or features don't match, so keep looking */ 1486 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1487 SCTP_INP_RUNLOCK(it->inp); 1488 goto done_with_iterator; 1489 } 1490 tinp = it->inp; 1491 it->inp = LIST_NEXT(it->inp, sctp_list); 1492 SCTP_INP_RUNLOCK(tinp); 1493 if (it->inp == NULL) { 1494 goto done_with_iterator; 1495 } 1496 SCTP_INP_RLOCK(it->inp); 1497 } 1498 /* now go through each assoc which is in the desired state */ 1499 if (it->done_current_ep == 0) { 1500 if (it->function_inp != NULL) 1501 inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val); 1502 it->done_current_ep = 1; 1503 } 1504 if (it->stcb == NULL) { 1505 /* run the per instance function */ 1506 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list); 1507 } 1508 if ((inp_skip) || it->stcb == NULL) { 1509 if (it->function_inp_end != NULL) { 1510 inp_skip = (*it->function_inp_end) (it->inp, 1511 it->pointer, 1512 it->val); 1513 } 1514 SCTP_INP_RUNLOCK(it->inp); 1515 goto no_stcb; 1516 } 1517 while (it->stcb) { 1518 SCTP_TCB_LOCK(it->stcb); 1519 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) { 1520 /* not in the right state... 
keep looking */ 1521 SCTP_TCB_UNLOCK(it->stcb); 1522 goto next_assoc; 1523 } 1524 /* see if we have limited out the iterator loop */ 1525 iteration_count++; 1526 if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) { 1527 /* Pause to let others grab the lock */ 1528 atomic_add_int(&it->stcb->asoc.refcnt, 1); 1529 SCTP_TCB_UNLOCK(it->stcb); 1530 SCTP_INP_INCR_REF(it->inp); 1531 SCTP_INP_RUNLOCK(it->inp); 1532 SCTP_ITERATOR_UNLOCK(); 1533 SCTP_INP_INFO_RUNLOCK(); 1534 SCTP_INP_INFO_RLOCK(); 1535 SCTP_ITERATOR_LOCK(); 1536 if (sctp_it_ctl.iterator_flags) { 1537 /* We won't be staying here */ 1538 SCTP_INP_DECR_REF(it->inp); 1539 atomic_add_int(&it->stcb->asoc.refcnt, -1); 1540 if (sctp_it_ctl.iterator_flags & 1541 SCTP_ITERATOR_STOP_CUR_IT) { 1542 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT; 1543 goto done_with_iterator; 1544 } 1545 if (sctp_it_ctl.iterator_flags & 1546 SCTP_ITERATOR_STOP_CUR_INP) { 1547 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP; 1548 goto no_stcb; 1549 } 1550 /* If we reach here huh? 
*/ 1551 SCTP_PRINTF("Unknown it ctl flag %x\n", 1552 sctp_it_ctl.iterator_flags); 1553 sctp_it_ctl.iterator_flags = 0; 1554 } 1555 SCTP_INP_RLOCK(it->inp); 1556 SCTP_INP_DECR_REF(it->inp); 1557 SCTP_TCB_LOCK(it->stcb); 1558 atomic_add_int(&it->stcb->asoc.refcnt, -1); 1559 iteration_count = 0; 1560 } 1561 1562 /* run function on this one */ 1563 (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val); 1564 1565 /* 1566 * we lie here, it really needs to have its own type but 1567 * first I must verify that this won't effect things :-0 1568 */ 1569 if (it->no_chunk_output == 0) 1570 sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1571 1572 SCTP_TCB_UNLOCK(it->stcb); 1573 next_assoc: 1574 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist); 1575 if (it->stcb == NULL) { 1576 /* Run last function */ 1577 if (it->function_inp_end != NULL) { 1578 inp_skip = (*it->function_inp_end) (it->inp, 1579 it->pointer, 1580 it->val); 1581 } 1582 } 1583 } 1584 SCTP_INP_RUNLOCK(it->inp); 1585 no_stcb: 1586 /* done with all assocs on this endpoint, move on to next endpoint */ 1587 it->done_current_ep = 0; 1588 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1589 it->inp = NULL; 1590 } else { 1591 it->inp = LIST_NEXT(it->inp, sctp_list); 1592 } 1593 if (it->inp == NULL) { 1594 goto done_with_iterator; 1595 } 1596 goto select_a_new_ep; 1597 } 1598 1599 void 1600 sctp_iterator_worker(void) 1601 { 1602 struct sctp_iterator *it; 1603 1604 /* This function is called with the WQ lock in place */ 1605 sctp_it_ctl.iterator_running = 1; 1606 while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) { 1607 /* now lets work on this one */ 1608 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr); 1609 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1610 CURVNET_SET(it->vn); 1611 sctp_iterator_work(it); 1612 CURVNET_RESTORE(); 1613 SCTP_IPI_ITERATOR_WQ_LOCK(); 1614 /* sa_ignore FREED_MEMORY */ 1615 } 1616 sctp_it_ctl.iterator_running = 0; 1617 return; 1618 } 1619 1620 
1621 static void 1622 sctp_handle_addr_wq(void) 1623 { 1624 /* deal with the ADDR wq from the rtsock calls */ 1625 struct sctp_laddr *wi, *nwi; 1626 struct sctp_asconf_iterator *asc; 1627 1628 SCTP_MALLOC(asc, struct sctp_asconf_iterator *, 1629 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT); 1630 if (asc == NULL) { 1631 /* Try later, no memory */ 1632 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 1633 (struct sctp_inpcb *)NULL, 1634 (struct sctp_tcb *)NULL, 1635 (struct sctp_nets *)NULL); 1636 return; 1637 } 1638 LIST_INIT(&asc->list_of_work); 1639 asc->cnt = 0; 1640 1641 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) { 1642 LIST_REMOVE(wi, sctp_nxt_addr); 1643 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr); 1644 asc->cnt++; 1645 } 1646 1647 if (asc->cnt == 0) { 1648 SCTP_FREE(asc, SCTP_M_ASC_IT); 1649 } else { 1650 int ret; 1651 1652 ret = sctp_initiate_iterator(sctp_asconf_iterator_ep, 1653 sctp_asconf_iterator_stcb, 1654 NULL, /* No ep end for boundall */ 1655 SCTP_PCB_FLAGS_BOUNDALL, 1656 SCTP_PCB_ANY_FEATURES, 1657 SCTP_ASOC_ANY_STATE, 1658 (void *)asc, 0, 1659 sctp_asconf_iterator_end, NULL, 0); 1660 if (ret) { 1661 SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n"); 1662 /* 1663 * Freeing if we are stopping or put back on the 1664 * addr_wq. 1665 */ 1666 if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) { 1667 sctp_asconf_iterator_end(asc, 0); 1668 } else { 1669 LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) { 1670 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 1671 } 1672 SCTP_FREE(asc, SCTP_M_ASC_IT); 1673 } 1674 } 1675 } 1676 } 1677 1678 /*- 1679 * The following table shows which pointers for the inp, stcb, or net are 1680 * stored for each timer after it was started. 
1681 * 1682 *|Name |Timer |inp |stcb|net | 1683 *|-----------------------------|-----------------------------|----|----|----| 1684 *|SCTP_TIMER_TYPE_SEND |net->rxt_timer |Yes |Yes |Yes | 1685 *|SCTP_TIMER_TYPE_INIT |net->rxt_timer |Yes |Yes |Yes | 1686 *|SCTP_TIMER_TYPE_RECV |stcb->asoc.dack_timer |Yes |Yes |No | 1687 *|SCTP_TIMER_TYPE_SHUTDOWN |net->rxt_timer |Yes |Yes |Yes | 1688 *|SCTP_TIMER_TYPE_HEARTBEAT |net->hb_timer |Yes |Yes |Yes | 1689 *|SCTP_TIMER_TYPE_COOKIE |net->rxt_timer |Yes |Yes |Yes | 1690 *|SCTP_TIMER_TYPE_NEWCOOKIE |inp->sctp_ep.signature_change|Yes |No |No | 1691 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer |Yes |Yes |Yes | 1692 *|SCTP_TIMER_TYPE_SHUTDOWNACK |net->rxt_timer |Yes |Yes |Yes | 1693 *|SCTP_TIMER_TYPE_ASCONF |stcb->asoc.asconf_timer |Yes |Yes |Yes | 1694 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer |Yes |Yes |No | 1695 *|SCTP_TIMER_TYPE_AUTOCLOSE |stcb->asoc.autoclose_timer |Yes |Yes |No | 1696 *|SCTP_TIMER_TYPE_STRRESET |stcb->asoc.strreset_timer |Yes |Yes |No | 1697 *|SCTP_TIMER_TYPE_INPKILL |inp->sctp_ep.signature_change|Yes |No |No | 1698 *|SCTP_TIMER_TYPE_ASOCKILL |stcb->asoc.strreset_timer |Yes |Yes |No | 1699 *|SCTP_TIMER_TYPE_ADDR_WQ |SCTP_BASE_INFO(addr_wq_timer)|No |No |No | 1700 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No | 1701 */ 1702 1703 void 1704 sctp_timeout_handler(void *t) 1705 { 1706 struct epoch_tracker et; 1707 struct timeval tv; 1708 struct sctp_inpcb *inp; 1709 struct sctp_tcb *stcb; 1710 struct sctp_nets *net; 1711 struct sctp_timer *tmr; 1712 struct mbuf *op_err; 1713 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1714 struct socket *so; 1715 #endif 1716 int did_output; 1717 int type; 1718 int i, secret; 1719 1720 tmr = (struct sctp_timer *)t; 1721 inp = (struct sctp_inpcb *)tmr->ep; 1722 stcb = (struct sctp_tcb *)tmr->tcb; 1723 net = (struct sctp_nets *)tmr->net; 1724 CURVNET_SET((struct vnet *)tmr->vnet); 1725 did_output = 1; 1726 1727 #ifdef 
SCTP_AUDITING_ENABLED 1728 sctp_audit_log(0xF0, (uint8_t)tmr->type); 1729 sctp_auditing(3, inp, stcb, net); 1730 #endif 1731 1732 /* sanity checks... */ 1733 KASSERT(tmr->self == tmr, 1734 ("sctp_timeout_handler: tmr->self corrupted")); 1735 KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type), 1736 ("sctp_timeout_handler: invalid timer type %d", tmr->type)); 1737 type = tmr->type; 1738 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 1739 ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p", 1740 type, stcb, stcb->sctp_ep)); 1741 if (inp) { 1742 SCTP_INP_INCR_REF(inp); 1743 } 1744 tmr->stopped_from = 0xa001; 1745 if (stcb) { 1746 atomic_add_int(&stcb->asoc.refcnt, 1); 1747 if (stcb->asoc.state == 0) { 1748 atomic_add_int(&stcb->asoc.refcnt, -1); 1749 if (inp) { 1750 SCTP_INP_DECR_REF(inp); 1751 } 1752 SCTPDBG(SCTP_DEBUG_TIMER2, 1753 "Timer type %d handler exiting due to CLOSED association.\n", 1754 type); 1755 CURVNET_RESTORE(); 1756 return; 1757 } 1758 } 1759 tmr->stopped_from = 0xa002; 1760 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type); 1761 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1762 if (inp) { 1763 SCTP_INP_DECR_REF(inp); 1764 } 1765 if (stcb) { 1766 atomic_add_int(&stcb->asoc.refcnt, -1); 1767 } 1768 SCTPDBG(SCTP_DEBUG_TIMER2, 1769 "Timer type %d handler exiting due to not being active.\n", 1770 type); 1771 CURVNET_RESTORE(); 1772 return; 1773 } 1774 1775 tmr->stopped_from = 0xa003; 1776 if (stcb) { 1777 SCTP_TCB_LOCK(stcb); 1778 atomic_add_int(&stcb->asoc.refcnt, -1); 1779 if ((type != SCTP_TIMER_TYPE_ASOCKILL) && 1780 ((stcb->asoc.state == 0) || 1781 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) { 1782 SCTP_TCB_UNLOCK(stcb); 1783 if (inp) { 1784 SCTP_INP_DECR_REF(inp); 1785 } 1786 SCTPDBG(SCTP_DEBUG_TIMER2, 1787 "Timer type %d handler exiting due to CLOSED association.\n", 1788 type); 1789 CURVNET_RESTORE(); 1790 return; 1791 } 1792 } else if (inp != NULL) { 1793 SCTP_INP_WLOCK(inp); 1794 } else { 1795 SCTP_WQ_ADDR_LOCK(); 1796 } 1797 
1798 /* Record in stopped_from which timeout occurred. */ 1799 tmr->stopped_from = type; 1800 NET_EPOCH_ENTER(et); 1801 /* mark as being serviced now */ 1802 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 1803 /* 1804 * Callout has been rescheduled. 1805 */ 1806 goto get_out; 1807 } 1808 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1809 /* 1810 * Not active, so no action. 1811 */ 1812 goto get_out; 1813 } 1814 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer); 1815 1816 /* call the handler for the appropriate timer type */ 1817 switch (type) { 1818 case SCTP_TIMER_TYPE_SEND: 1819 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1820 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1821 type, inp, stcb, net)); 1822 SCTP_STAT_INCR(sctps_timodata); 1823 stcb->asoc.timodata++; 1824 stcb->asoc.num_send_timers_up--; 1825 if (stcb->asoc.num_send_timers_up < 0) { 1826 stcb->asoc.num_send_timers_up = 0; 1827 } 1828 SCTP_TCB_LOCK_ASSERT(stcb); 1829 if (sctp_t3rxt_timer(inp, stcb, net)) { 1830 /* no need to unlock on tcb its gone */ 1831 1832 goto out_decr; 1833 } 1834 SCTP_TCB_LOCK_ASSERT(stcb); 1835 #ifdef SCTP_AUDITING_ENABLED 1836 sctp_auditing(4, inp, stcb, net); 1837 #endif 1838 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1839 if ((stcb->asoc.num_send_timers_up == 0) && 1840 (stcb->asoc.sent_queue_cnt > 0)) { 1841 struct sctp_tmit_chunk *chk; 1842 1843 /* 1844 * safeguard. If there on some on the sent queue 1845 * somewhere but no timers running something is 1846 * wrong... so we start a timer on the first chunk 1847 * on the send queue on whatever net it is sent to. 
1848 */ 1849 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 1850 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, 1851 chk->whoTo); 1852 } 1853 break; 1854 case SCTP_TIMER_TYPE_INIT: 1855 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1856 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1857 type, inp, stcb, net)); 1858 SCTP_STAT_INCR(sctps_timoinit); 1859 stcb->asoc.timoinit++; 1860 if (sctp_t1init_timer(inp, stcb, net)) { 1861 /* no need to unlock on tcb its gone */ 1862 goto out_decr; 1863 } 1864 /* We do output but not here */ 1865 did_output = 0; 1866 break; 1867 case SCTP_TIMER_TYPE_RECV: 1868 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1869 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1870 type, inp, stcb, net)); 1871 SCTP_STAT_INCR(sctps_timosack); 1872 stcb->asoc.timosack++; 1873 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1874 #ifdef SCTP_AUDITING_ENABLED 1875 sctp_auditing(4, inp, stcb, NULL); 1876 #endif 1877 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1878 break; 1879 case SCTP_TIMER_TYPE_SHUTDOWN: 1880 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1881 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1882 type, inp, stcb, net)); 1883 SCTP_STAT_INCR(sctps_timoshutdown); 1884 stcb->asoc.timoshutdown++; 1885 if (sctp_shutdown_timer(inp, stcb, net)) { 1886 /* no need to unlock on tcb its gone */ 1887 goto out_decr; 1888 } 1889 #ifdef SCTP_AUDITING_ENABLED 1890 sctp_auditing(4, inp, stcb, net); 1891 #endif 1892 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1893 break; 1894 case SCTP_TIMER_TYPE_HEARTBEAT: 1895 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1896 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1897 type, inp, stcb, net)); 1898 SCTP_STAT_INCR(sctps_timoheartbeat); 1899 stcb->asoc.timoheartbeat++; 1900 if (sctp_heartbeat_timer(inp, stcb, net)) { 1901 /* no need to unlock on tcb its gone */ 1902 goto out_decr; 1903 } 1904 #ifdef 
SCTP_AUDITING_ENABLED 1905 sctp_auditing(4, inp, stcb, net); 1906 #endif 1907 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1908 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1909 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1910 } 1911 break; 1912 case SCTP_TIMER_TYPE_COOKIE: 1913 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1914 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1915 type, inp, stcb, net)); 1916 SCTP_STAT_INCR(sctps_timocookie); 1917 stcb->asoc.timocookie++; 1918 if (sctp_cookie_timer(inp, stcb, net)) { 1919 /* no need to unlock on tcb its gone */ 1920 goto out_decr; 1921 } 1922 #ifdef SCTP_AUDITING_ENABLED 1923 sctp_auditing(4, inp, stcb, net); 1924 #endif 1925 /* 1926 * We consider T3 and Cookie timer pretty much the same with 1927 * respect to where from in chunk_output. 1928 */ 1929 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1930 break; 1931 case SCTP_TIMER_TYPE_NEWCOOKIE: 1932 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1933 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1934 type, inp, stcb, net)); 1935 SCTP_STAT_INCR(sctps_timosecret); 1936 (void)SCTP_GETTIME_TIMEVAL(&tv); 1937 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1938 inp->sctp_ep.last_secret_number = 1939 inp->sctp_ep.current_secret_number; 1940 inp->sctp_ep.current_secret_number++; 1941 if (inp->sctp_ep.current_secret_number >= 1942 SCTP_HOW_MANY_SECRETS) { 1943 inp->sctp_ep.current_secret_number = 0; 1944 } 1945 secret = (int)inp->sctp_ep.current_secret_number; 1946 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1947 inp->sctp_ep.secret_key[secret][i] = 1948 sctp_select_initial_TSN(&inp->sctp_ep); 1949 } 1950 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1951 did_output = 0; 1952 break; 1953 case SCTP_TIMER_TYPE_PATHMTURAISE: 1954 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1955 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1956 type, inp, stcb, 
net)); 1957 SCTP_STAT_INCR(sctps_timopathmtu); 1958 sctp_pathmtu_timer(inp, stcb, net); 1959 did_output = 0; 1960 break; 1961 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1962 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1963 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1964 type, inp, stcb, net)); 1965 if (sctp_shutdownack_timer(inp, stcb, net)) { 1966 /* no need to unlock on tcb its gone */ 1967 goto out_decr; 1968 } 1969 SCTP_STAT_INCR(sctps_timoshutdownack); 1970 stcb->asoc.timoshutdownack++; 1971 #ifdef SCTP_AUDITING_ENABLED 1972 sctp_auditing(4, inp, stcb, net); 1973 #endif 1974 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1975 break; 1976 case SCTP_TIMER_TYPE_ASCONF: 1977 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1978 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1979 type, inp, stcb, net)); 1980 SCTP_STAT_INCR(sctps_timoasconf); 1981 if (sctp_asconf_timer(inp, stcb, net)) { 1982 /* no need to unlock on tcb its gone */ 1983 goto out_decr; 1984 } 1985 #ifdef SCTP_AUDITING_ENABLED 1986 sctp_auditing(4, inp, stcb, net); 1987 #endif 1988 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 1989 break; 1990 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1991 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1992 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1993 type, inp, stcb, net)); 1994 SCTP_STAT_INCR(sctps_timoshutdownguard); 1995 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 1996 "Shutdown guard timer expired"); 1997 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 1998 /* no need to unlock on tcb its gone */ 1999 goto out_decr; 2000 case SCTP_TIMER_TYPE_AUTOCLOSE: 2001 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2002 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2003 type, inp, stcb, net)); 2004 SCTP_STAT_INCR(sctps_timoautoclose); 2005 sctp_autoclose_timer(inp, stcb); 2006 sctp_chunk_output(inp, stcb, 
SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2007 did_output = 0; 2008 break; 2009 case SCTP_TIMER_TYPE_STRRESET: 2010 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2011 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2012 type, inp, stcb, net)); 2013 SCTP_STAT_INCR(sctps_timostrmrst); 2014 if (sctp_strreset_timer(inp, stcb)) { 2015 /* no need to unlock on tcb its gone */ 2016 goto out_decr; 2017 } 2018 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2019 break; 2020 case SCTP_TIMER_TYPE_INPKILL: 2021 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2022 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2023 type, inp, stcb, net)); 2024 SCTP_STAT_INCR(sctps_timoinpkill); 2025 /* 2026 * special case, take away our increment since WE are the 2027 * killer 2028 */ 2029 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2030 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2031 SCTP_INP_DECR_REF(inp); 2032 SCTP_INP_WUNLOCK(inp); 2033 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2034 SCTP_CALLED_FROM_INPKILL_TIMER); 2035 inp = NULL; 2036 goto out_no_decr; 2037 case SCTP_TIMER_TYPE_ASOCKILL: 2038 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2039 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2040 type, inp, stcb, net)); 2041 SCTP_STAT_INCR(sctps_timoassockill); 2042 /* Can we free it yet? 
*/ 2043 SCTP_INP_DECR_REF(inp); 2044 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2045 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2046 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2047 so = SCTP_INP_SO(inp); 2048 atomic_add_int(&stcb->asoc.refcnt, 1); 2049 SCTP_TCB_UNLOCK(stcb); 2050 SCTP_SOCKET_LOCK(so, 1); 2051 SCTP_TCB_LOCK(stcb); 2052 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2053 #endif 2054 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2055 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2056 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2057 SCTP_SOCKET_UNLOCK(so, 1); 2058 #endif 2059 /* 2060 * free asoc, always unlocks (or destroy's) so prevent 2061 * duplicate unlock or unlock of a free mtx :-0 2062 */ 2063 stcb = NULL; 2064 goto out_no_decr; 2065 case SCTP_TIMER_TYPE_ADDR_WQ: 2066 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2067 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2068 type, inp, stcb, net)); 2069 sctp_handle_addr_wq(); 2070 break; 2071 case SCTP_TIMER_TYPE_PRIM_DELETED: 2072 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2073 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2074 type, inp, stcb, net)); 2075 SCTP_STAT_INCR(sctps_timodelprim); 2076 sctp_delete_prim_timer(inp, stcb); 2077 break; 2078 default: 2079 #ifdef INVARIANTS 2080 panic("Unknown timer type %d", type); 2081 #else 2082 goto get_out; 2083 #endif 2084 } 2085 #ifdef SCTP_AUDITING_ENABLED 2086 sctp_audit_log(0xF1, (uint8_t)type); 2087 if (inp) 2088 sctp_auditing(5, inp, stcb, net); 2089 #endif 2090 if ((did_output) && stcb) { 2091 /* 2092 * Now we need to clean up the control chunk chain if an 2093 * ECNE is on it. It must be marked as UNSENT again so next 2094 * call will continue to send it until such time that we get 2095 * a CWR, to remove it. It is, however, less likely that we 2096 * will find a ecn echo on the chain though. 
 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* Drop whichever lock this timer type required us to hold. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WUNLOCK(inp);
	} else {
		SCTP_WQ_ADDR_UNLOCK();
	}

out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}

out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type);
	CURVNET_RESTORE();
	NET_EPOCH_EXIT(et);
}

/*-
 * The following table shows which parameters must be provided
 * when calling sctp_timer_start(). For parameters not being
 * provided, NULL must be used.
 *
 * |Name                         |inp |stcb|net |
 * |-----------------------------|----|----|----|
 * |SCTP_TIMER_TYPE_SEND         |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_INIT         |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_RECV         |Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_SHUTDOWN     |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_HEARTBEAT    |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_COOKIE       |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_NEWCOOKIE    |Yes |No  |No  |
 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_SHUTDOWNACK  |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_ASCONF       |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_AUTOCLOSE    |Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_STRRESET     |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_INPKILL      |Yes |No  |No  |
 * |SCTP_TIMER_TYPE_ASOCKILL     |Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_ADDR_WQ      |No  |No  |No  |
 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No  |
 *
 */

/*
 * Start the timer of type t_type, selecting the timer slot and timeout
 * from the arguments per the table above.  The caller must hold the
 * lock matching the arguments: the TCB lock when stcb is provided,
 * else the INP write lock when inp is provided, else the address
 * workqueue lock (enforced by the lock asserts below).  A timer that
 * is already pending is left running unchanged.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_timer *tmr;
	uint32_t to_ticks;
	uint32_t rndval, jitter;

	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p",
	    t_type, stcb, stcb->sctp_ep));
	tmr = NULL;
	to_ticks = 0;
	if (stcb != NULL) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WLOCK_ASSERT(inp);
	} else {
		SCTP_WQ_ADDR_LOCK_ASSERT();
	}
	if (stcb != NULL) {
		/*
		 * Don't restart timer on association that's about to be
		 * killed.
		 */
		if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
		    (t_type != SCTP_TIMER_TYPE_ASOCKILL)) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n",
			    t_type, inp, stcb, net);
			return;
		}
		/* Don't restart timer on net that's been removed. */
		if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n",
			    t_type, inp, stcb, net);
			return;
		}
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer. */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * second.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * ususually about 200ms.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * The net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		if ((net->dest_state & SCTP_ADDR_NOHB) &&
		    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
			    t_type, inp, stcb, net);
			return;
		}
		tmr = &net->hb_timer;
		/* Note: to_ticks is in msecs here until converted below. */
		if (net->RTO == 0) {
			to_ticks = stcb->asoc.initial_rto;
		} else {
			to_ticks = net->RTO;
		}
		rndval = sctp_select_initial_TSN(&inp->sctp_ep);
		jitter = rndval % to_ticks;
		/* Spread the jitter to +/- half of the base interval. */
		if (jitter >= (to_ticks >> 1)) {
			to_ticks = to_ticks + (jitter - (to_ticks >> 1));
		} else {
			to_ticks = to_ticks - jitter;
		}
		if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
		    !(net->dest_state & SCTP_ADDR_PF)) {
			to_ticks += net->heart_beat_delay;
		}
		/*
		 * Now we must convert the to_ticks that are now in ms to
		 * ticks.
		 */
		to_ticks = sctp_msecs_to_ticks(to_ticks);
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was complete. If a retransmission happened then we
		 * will be using the RTO initial value.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * Nothing needed but the endpoint here ususually about 60
		 * minutes.
		 */
		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTUD,
		 * ususually about 10 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
			    t_type, inp, stcb, net);
			return;
		}
		tmr = &net->pmtu_timer;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination. */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.asconf_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.shut_guard_timer;
		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
			/* Default to 5 * max RTO, guarding the multiply. */
			if (stcb->asoc.maxrto < UINT32_MAX / 5) {
				to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto);
			} else {
				to_ticks = sctp_msecs_to_ticks(UINT32_MAX);
			}
		} else {
			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		}
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.autoclose_timer;
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.strreset_timer;
		if (net->RTO == 0) {
			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		} else {
			to_ticks = sctp_msecs_to_ticks(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.delete_prim_timer;
		to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
		break;
	default:
#ifdef INVARIANTS
		panic("Unknown timer type %d", t_type);
#else
		return;
#endif
	}
	KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
	KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type));
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * We do NOT allow you to have it already running. If it is,
		 * we leave the current one up unchanged.
		 */
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n",
		    t_type, inp, stcb, net);
		return;
	}
	/* At this point we can proceed. */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	if (t_type == SCTP_TIMER_TYPE_STRRESET) {
		tmr->net = NULL;
	} else {
		tmr->net = (void *)net;
	}
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
		    t_type, to_ticks, inp, stcb, net);
	} else {
		/*
		 * This should not happen, since we checked for pending
		 * above.
		 */
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
		    t_type, to_ticks, inp, stcb, net);
	}
	return;
}

/*-
 * The following table shows which parameters must be provided
 * when calling sctp_timer_stop(). For parameters not being
 * provided, NULL must be used.
 *
 * |Name                         |inp |stcb|net |
 * |-----------------------------|----|----|----|
 * |SCTP_TIMER_TYPE_SEND         |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_INIT         |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_RECV         |Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_SHUTDOWN     |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_HEARTBEAT    |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_COOKIE       |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_NEWCOOKIE    |Yes |No  |No  |
 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_SHUTDOWNACK  |Yes |Yes |Yes |
 * |SCTP_TIMER_TYPE_ASCONF       |Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_AUTOCLOSE    |Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_STRRESET     |Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_INPKILL      |Yes |No  |No  |
 * |SCTP_TIMER_TYPE_ASOCKILL     |Yes |Yes |No  |
 * |SCTP_TIMER_TYPE_ADDR_WQ      |No  |No  |No  |
 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No  |
 *
 */

/*
 * Stop the timer of type t_type, selecting the timer slot per the
 * table above; 'from' records the caller location in the timer for
 * debugging.  Locking requirements mirror sctp_timer_start() and are
 * enforced by the lock asserts below.  If the slot is shared and a
 * different timer type is currently active in it, the timer is left
 * running.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p",
	    t_type, stcb, stcb->sctp_ep));
	if (stcb != NULL) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WLOCK_ASSERT(inp);
	} else {
		SCTP_WQ_ADDR_LOCK_ASSERT();
	}
	tmr = NULL;
	switch (t_type) {
	case SCTP_TIMER_TYPE_SEND:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	default:
#ifdef INVARIANTS
		panic("Unknown timer type %d", t_type);
#else
		return;
#endif
	}
	KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
	if ((tmr->type != SCTP_TIMER_TYPE_NONE) &&
	    (tmr->type != t_type)) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped. So just
		 * return.
		 */
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n",
		    t_type, inp, stcb, net);
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) {
		/* Timer was actually stopped; sanity check its bindings. */
		KASSERT(tmr->ep == inp,
		    ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p",
		    t_type, inp, tmr->ep));
		KASSERT(tmr->tcb == stcb,
		    ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p",
		    t_type, stcb, tmr->tcb));
		KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) ||
		    ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)),
		    ("sctp_timer_stop of type %d: net = %p, tmr->net = %p",
		    t_type, net, tmr->net));
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n",
		    t_type, inp, stcb, net);
		tmr->ep = NULL;
		tmr->tcb = NULL;
		tmr->net = NULL;
	} else {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n",
		    t_type, inp, stcb, net);
	}
	return;
}

/*
 * Return the total number of data bytes in the mbuf chain m by
 * summing the length of each mbuf in the chain.
 */
uint32_t
sctp_calculate_len(struct mbuf *m)
{
	uint32_t tlen = 0;
	struct mbuf *at;

	at = m;
	while (at) {
		tlen += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}
	return (tlen);
}

void
sctp_mtu_size_reset(struct sctp_inpcb *inp,
    struct sctp_association *asoc, uint32_t mtu)
{
	/*
	 * Reset the P-MTU size on this association, this involves changing
	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
	 * allow the DF flag to be cleared.
2872 */ 2873 struct sctp_tmit_chunk *chk; 2874 unsigned int eff_mtu, ovh; 2875 2876 asoc->smallest_mtu = mtu; 2877 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2878 ovh = SCTP_MIN_OVERHEAD; 2879 } else { 2880 ovh = SCTP_MIN_V4_OVERHEAD; 2881 } 2882 eff_mtu = mtu - ovh; 2883 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2884 if (chk->send_size > eff_mtu) { 2885 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2886 } 2887 } 2888 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2889 if (chk->send_size > eff_mtu) { 2890 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2891 } 2892 } 2893 } 2894 2895 2896 /* 2897 * Given an association and starting time of the current RTT period, update 2898 * RTO in number of msecs. net should point to the current network. 2899 * Return 1, if an RTO update was performed, return 0 if no update was 2900 * performed due to invalid starting point. 2901 */ 2902 2903 int 2904 sctp_calculate_rto(struct sctp_tcb *stcb, 2905 struct sctp_association *asoc, 2906 struct sctp_nets *net, 2907 struct timeval *old, 2908 int rtt_from_sack) 2909 { 2910 struct timeval now; 2911 uint64_t rtt_us; /* RTT in us */ 2912 int32_t rtt; /* RTT in ms */ 2913 uint32_t new_rto; 2914 int first_measure = 0; 2915 2916 /************************/ 2917 /* 1. calculate new RTT */ 2918 /************************/ 2919 /* get the current time */ 2920 if (stcb->asoc.use_precise_time) { 2921 (void)SCTP_GETPTIME_TIMEVAL(&now); 2922 } else { 2923 (void)SCTP_GETTIME_TIMEVAL(&now); 2924 } 2925 if ((old->tv_sec > now.tv_sec) || 2926 ((old->tv_sec == now.tv_sec) && (old->tv_sec > now.tv_sec))) { 2927 /* The starting point is in the future. */ 2928 return (0); 2929 } 2930 timevalsub(&now, old); 2931 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2932 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2933 /* The RTT is larger than a sane value. 
*/ 2934 return (0); 2935 } 2936 /* store the current RTT in us */ 2937 net->rtt = rtt_us; 2938 /* compute rtt in ms */ 2939 rtt = (int32_t)(net->rtt / 1000); 2940 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2941 /* 2942 * Tell the CC module that a new update has just occurred 2943 * from a sack 2944 */ 2945 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2946 } 2947 /* 2948 * Do we need to determine the lan? We do this only on sacks i.e. 2949 * RTT being determined from data not non-data (HB/INIT->INITACK). 2950 */ 2951 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2952 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2953 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2954 net->lan_type = SCTP_LAN_INTERNET; 2955 } else { 2956 net->lan_type = SCTP_LAN_LOCAL; 2957 } 2958 } 2959 2960 /***************************/ 2961 /* 2. update RTTVAR & SRTT */ 2962 /***************************/ 2963 /*- 2964 * Compute the scaled average lastsa and the 2965 * scaled variance lastsv as described in van Jacobson 2966 * Paper "Congestion Avoidance and Control", Annex A. 
2967 * 2968 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2969 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2970 */ 2971 if (net->RTO_measured) { 2972 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2973 net->lastsa += rtt; 2974 if (rtt < 0) { 2975 rtt = -rtt; 2976 } 2977 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 2978 net->lastsv += rtt; 2979 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2980 rto_logging(net, SCTP_LOG_RTTVAR); 2981 } 2982 } else { 2983 /* First RTO measurment */ 2984 net->RTO_measured = 1; 2985 first_measure = 1; 2986 net->lastsa = rtt << SCTP_RTT_SHIFT; 2987 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 2988 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2989 rto_logging(net, SCTP_LOG_INITIAL_RTT); 2990 } 2991 } 2992 if (net->lastsv == 0) { 2993 net->lastsv = SCTP_CLOCK_GRANULARITY; 2994 } 2995 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 2996 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 2997 (stcb->asoc.sat_network_lockout == 0)) { 2998 stcb->asoc.sat_network = 1; 2999 } else if ((!first_measure) && stcb->asoc.sat_network) { 3000 stcb->asoc.sat_network = 0; 3001 stcb->asoc.sat_network_lockout = 1; 3002 } 3003 /* bound it, per C6/C7 in Section 5.3.1 */ 3004 if (new_rto < stcb->asoc.minrto) { 3005 new_rto = stcb->asoc.minrto; 3006 } 3007 if (new_rto > stcb->asoc.maxrto) { 3008 new_rto = stcb->asoc.maxrto; 3009 } 3010 net->RTO = new_rto; 3011 return (1); 3012 } 3013 3014 /* 3015 * return a pointer to a contiguous piece of data from the given mbuf chain 3016 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3017 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 3018 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 
 */
caddr_t
sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
{
	uint32_t count;
	uint8_t *ptr;

	ptr = in_ptr;
	/* Reject invalid offsets/lengths up front. */
	if ((off < 0) || (len <= 0))
		return (NULL);

	/* find the desired start location */
	while ((m != NULL) && (off > 0)) {
		if (off < SCTP_BUF_LEN(m))
			break;
		off -= SCTP_BUF_LEN(m);
		m = SCTP_BUF_NEXT(m);
	}
	if (m == NULL)
		return (NULL);

	/* is the current mbuf large enough (eg. contiguous)? */
	if ((SCTP_BUF_LEN(m) - off) >= len) {
		/* No copy needed: data is contiguous in this mbuf. */
		return (mtod(m, caddr_t)+off);
	} else {
		/* else, it spans more than one mbuf, so save a temp copy... */
		while ((m != NULL) && (len > 0)) {
			count = min(SCTP_BUF_LEN(m) - off, len);
			memcpy(ptr, mtod(m, caddr_t)+off, count);
			len -= count;
			ptr += count;
			off = 0;
			m = SCTP_BUF_NEXT(m);
		}
		/* Chain ended before 'len' bytes were gathered. */
		if ((m == NULL) && (len > 0))
			return (NULL);
		else
			return ((caddr_t)in_ptr);
	}
}



/*
 * Typed wrapper around sctp_m_getptr() for pulling an SCTP parameter
 * header (up to pull_limit bytes) out of an mbuf chain.
 */
struct sctp_paramhdr *
sctp_get_next_param(struct mbuf *m,
    int offset,
    struct sctp_paramhdr *pull,
    int pull_limit)
{
	/* This just provides a typed signature to Peter's Pull routine */
	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
	    (uint8_t *)pull));
}


/*
 * Append padlen (0-3) zero bytes to mbuf m, growing the chain with a
 * fresh mbuf if m lacks trailing space.  Returns the mbuf holding the
 * pad, or NULL on failure (padlen > 3 or allocation failure).
 */
struct mbuf *
sctp_add_pad_tombuf(struct mbuf *m, int padlen)
{
	struct mbuf *m_last;
	caddr_t dp;

	if (padlen > 3) {
		return (NULL);
	}
	if (padlen <= M_TRAILINGSPACE(m)) {
		/*
		 * The easy way.
 * We hope the majority of the time we hit
		 * here :)
		 */
		m_last = m;
	} else {
		/* Hard way we must grow the mbuf chain */
		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
		if (m_last == NULL) {
			return (NULL);
		}
		SCTP_BUF_LEN(m_last) = 0;
		SCTP_BUF_NEXT(m_last) = NULL;
		SCTP_BUF_NEXT(m) = m_last;
	}
	/* Zero the pad bytes just past the current data. */
	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
	SCTP_BUF_LEN(m_last) += padlen;
	memset(dp, 0, padlen);
	return (m_last);
}

/*
 * Pad the last mbuf of chain m with padval zero bytes.  If the caller
 * already knows the last mbuf it may pass it as last_mbuf to skip the
 * chain walk.  Returns the mbuf holding the pad, or NULL on failure.
 */
struct mbuf *
sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
{
	/* find the last mbuf in chain and pad it */
	struct mbuf *m_at;

	if (last_mbuf != NULL) {
		return (sctp_add_pad_tombuf(last_mbuf, padval));
	} else {
		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
			if (SCTP_BUF_NEXT(m_at) == NULL) {
				return (sctp_add_pad_tombuf(m_at, padval));
			}
		}
	}
	return (NULL);
}

/*
 * Deliver an SCTP_ASSOC_CHANGE notification to the socket for stcb and,
 * for 1-to-1 style sockets, set so_error on COMM_LOST/CANT_STR_ASSOC.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
			/*
			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
			 * contiguous.
3151 */ 3152 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3153 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3154 } 3155 } else { 3156 abort_len = 0; 3157 } 3158 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3159 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3160 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3161 notif_len += abort_len; 3162 } 3163 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3164 if (m_notify == NULL) { 3165 /* Retry with smaller value. */ 3166 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3167 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3168 if (m_notify == NULL) { 3169 goto set_error; 3170 } 3171 } 3172 SCTP_BUF_NEXT(m_notify) = NULL; 3173 sac = mtod(m_notify, struct sctp_assoc_change *); 3174 memset(sac, 0, notif_len); 3175 sac->sac_type = SCTP_ASSOC_CHANGE; 3176 sac->sac_flags = 0; 3177 sac->sac_length = sizeof(struct sctp_assoc_change); 3178 sac->sac_state = state; 3179 sac->sac_error = error; 3180 /* XXX verify these stream counts */ 3181 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3182 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3183 sac->sac_assoc_id = sctp_get_associd(stcb); 3184 if (notif_len > sizeof(struct sctp_assoc_change)) { 3185 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3186 i = 0; 3187 if (stcb->asoc.prsctp_supported == 1) { 3188 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3189 } 3190 if (stcb->asoc.auth_supported == 1) { 3191 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3192 } 3193 if (stcb->asoc.asconf_supported == 1) { 3194 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3195 } 3196 if (stcb->asoc.idata_supported == 1) { 3197 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3198 } 3199 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3200 if (stcb->asoc.reconfig_supported == 1) { 3201 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3202 } 3203 sac->sac_length += i; 3204 } else if ((state == 
SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3205 memcpy(sac->sac_info, abort, abort_len); 3206 sac->sac_length += abort_len; 3207 } 3208 } 3209 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3210 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3211 0, 0, stcb->asoc.context, 0, 0, 0, 3212 m_notify); 3213 if (control != NULL) { 3214 control->length = SCTP_BUF_LEN(m_notify); 3215 control->spec_flags = M_NOTIFICATION; 3216 /* not that we need this */ 3217 control->tail_mbuf = m_notify; 3218 sctp_add_to_readq(stcb->sctp_ep, stcb, 3219 control, 3220 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3221 so_locked); 3222 } else { 3223 sctp_m_freem(m_notify); 3224 } 3225 } 3226 /* 3227 * For 1-to-1 style sockets, we send up and error when an ABORT 3228 * comes in. 3229 */ 3230 set_error: 3231 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3232 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3233 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3234 SOCK_LOCK(stcb->sctp_socket); 3235 if (from_peer) { 3236 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3237 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3238 stcb->sctp_socket->so_error = ECONNREFUSED; 3239 } else { 3240 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3241 stcb->sctp_socket->so_error = ECONNRESET; 3242 } 3243 } else { 3244 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 3245 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 3246 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3247 stcb->sctp_socket->so_error = ETIMEDOUT; 3248 } else { 3249 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3250 stcb->sctp_socket->so_error = ECONNABORTED; 3251 } 3252 } 3253 SOCK_UNLOCK(stcb->sctp_socket); 3254 } 3255 /* Wake ANY sleepers */ 3256 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3257 so = SCTP_INP_SO(stcb->sctp_ep); 3258 
if (!so_locked) { 3259 atomic_add_int(&stcb->asoc.refcnt, 1); 3260 SCTP_TCB_UNLOCK(stcb); 3261 SCTP_SOCKET_LOCK(so, 1); 3262 SCTP_TCB_LOCK(stcb); 3263 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3264 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3265 SCTP_SOCKET_UNLOCK(so, 1); 3266 return; 3267 } 3268 } 3269 #endif 3270 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3271 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3272 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3273 socantrcvmore(stcb->sctp_socket); 3274 } 3275 sorwakeup(stcb->sctp_socket); 3276 sowwakeup(stcb->sctp_socket); 3277 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3278 if (!so_locked) { 3279 SCTP_SOCKET_UNLOCK(so, 1); 3280 } 3281 #endif 3282 } 3283 3284 static void 3285 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3286 struct sockaddr *sa, uint32_t error, int so_locked 3287 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3288 SCTP_UNUSED 3289 #endif 3290 ) 3291 { 3292 struct mbuf *m_notify; 3293 struct sctp_paddr_change *spc; 3294 struct sctp_queued_to_read *control; 3295 3296 if ((stcb == NULL) || 3297 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3298 /* event not enabled */ 3299 return; 3300 } 3301 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3302 if (m_notify == NULL) 3303 return; 3304 SCTP_BUF_LEN(m_notify) = 0; 3305 spc = mtod(m_notify, struct sctp_paddr_change *); 3306 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3307 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3308 spc->spc_flags = 0; 3309 spc->spc_length = sizeof(struct sctp_paddr_change); 3310 switch (sa->sa_family) { 3311 #ifdef INET 3312 case AF_INET: 3313 #ifdef INET6 3314 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3315 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3316 (struct sockaddr_in6 *)&spc->spc_aaddr); 3317 } else { 3318 
/*
 * Deliver an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification
 * for a chunk that was (sent != 0) or was never (sent == 0) put on
 * the wire.  The chunk's data mbufs are stolen (chk->data is NULLed)
 * and chained onto the notification, after the chunk header and any
 * trailing padding are trimmed off.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
    struct mbuf *m_notify;
    struct sctp_send_failed *ssf;
    struct sctp_send_failed_event *ssfe;
    struct sctp_queued_to_read *control;
    struct sctp_chunkhdr *chkhdr;
    int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

    if ((stcb == NULL) ||
        (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
        sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
        /* event not enabled */
        return;
    }

    /* Pick the new-style or legacy notification header. */
    if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
        notifhdr_len = sizeof(struct sctp_send_failed_event);
    } else {
        notifhdr_len = sizeof(struct sctp_send_failed);
    }
    m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
    if (m_notify == NULL)
        /* no space left */
        return;
    SCTP_BUF_LEN(m_notify) = notifhdr_len;
    if (stcb->asoc.idata_supported) {
        chkhdr_len = sizeof(struct sctp_idata_chunk);
    } else {
        chkhdr_len = sizeof(struct sctp_data_chunk);
    }
    /* Use some defaults in case we can't access the chunk header */
    if (chk->send_size >= chkhdr_len) {
        payload_len = chk->send_size - chkhdr_len;
    } else {
        payload_len = 0;
    }
    padding_len = 0;
    if (chk->data != NULL) {
        /* Refine payload/padding from the on-the-wire chunk length. */
        chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
        if (chkhdr != NULL) {
            chk_len = ntohs(chkhdr->chunk_length);
            if ((chk_len >= chkhdr_len) &&
                (chk->send_size >= chk_len) &&
                (chk->send_size - chk_len < 4)) {
                padding_len = chk->send_size - chk_len;
                payload_len = chk->send_size - chkhdr_len - padding_len;
            }
        }
    }
    if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
        /* New-style sctp_send_failed_event. */
        ssfe = mtod(m_notify, struct sctp_send_failed_event *);
        memset(ssfe, 0, notifhdr_len);
        ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
        if (sent) {
            ssfe->ssfe_flags = SCTP_DATA_SENT;
        } else {
            ssfe->ssfe_flags = SCTP_DATA_UNSENT;
        }
        ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
        ssfe->ssfe_error = error;
        /* not exactly what the user sent in, but should be close :) */
        ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
        ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
        ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
        ssfe->ssfe_info.snd_context = chk->rec.data.context;
        ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
        ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
    } else {
        /* Legacy sctp_send_failed. */
        ssf = mtod(m_notify, struct sctp_send_failed *);
        memset(ssf, 0, notifhdr_len);
        ssf->ssf_type = SCTP_SEND_FAILED;
        if (sent) {
            ssf->ssf_flags = SCTP_DATA_SENT;
        } else {
            ssf->ssf_flags = SCTP_DATA_UNSENT;
        }
        ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
        ssf->ssf_error = error;
        /* not exactly what the user sent in, but should be close :) */
        ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
        ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
        ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
        ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
        ssf->ssf_info.sinfo_context = chk->rec.data.context;
        ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
        ssf->ssf_assoc_id = sctp_get_associd(stcb);
    }
    if (chk->data != NULL) {
        /* Trim off the sctp chunk header (it should be there) */
        if (chk->send_size == chkhdr_len + payload_len + padding_len) {
            m_adj(chk->data, chkhdr_len);
            m_adj(chk->data, -padding_len);
            sctp_mbuf_crush(chk->data);
            chk->send_size -= (chkhdr_len + padding_len);
        }
    }
    SCTP_BUF_NEXT(m_notify) = chk->data;
    /* Steal off the mbuf */
    chk->data = NULL;
    /*
     * For this case, we check the actual socket buffer, since the assoc
     * is going away we don't want to overfill the socket buffer for a
     * non-reader
     */
    if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
        sctp_m_freem(m_notify);
        return;
    }
    /* append to socket */
    control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
        0, 0, stcb->asoc.context, 0, 0, 0,
        m_notify);
    if (control == NULL) {
        /* no memory */
        sctp_m_freem(m_notify);
        return;
    }
    control->length = SCTP_BUF_LEN(m_notify);
    control->spec_flags = M_NOTIFICATION;
    /* not that we need this */
    control->tail_mbuf = m_notify;
    sctp_add_to_readq(stcb->sctp_ep, stcb,
        control,
        &stcb->sctp_socket->so_rcv, 1,
        SCTP_READ_LOCK_NOT_HELD,
        so_locked);
}
0, stcb->asoc.context, 0, 0, 0, 3495 m_notify); 3496 if (control == NULL) { 3497 /* no memory */ 3498 sctp_m_freem(m_notify); 3499 return; 3500 } 3501 control->length = SCTP_BUF_LEN(m_notify); 3502 control->spec_flags = M_NOTIFICATION; 3503 /* not that we need this */ 3504 control->tail_mbuf = m_notify; 3505 sctp_add_to_readq(stcb->sctp_ep, stcb, 3506 control, 3507 &stcb->sctp_socket->so_rcv, 1, 3508 SCTP_READ_LOCK_NOT_HELD, 3509 so_locked); 3510 } 3511 3512 3513 static void 3514 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3515 struct sctp_stream_queue_pending *sp, int so_locked 3516 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3517 SCTP_UNUSED 3518 #endif 3519 ) 3520 { 3521 struct mbuf *m_notify; 3522 struct sctp_send_failed *ssf; 3523 struct sctp_send_failed_event *ssfe; 3524 struct sctp_queued_to_read *control; 3525 int notifhdr_len; 3526 3527 if ((stcb == NULL) || 3528 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3529 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3530 /* event not enabled */ 3531 return; 3532 } 3533 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3534 notifhdr_len = sizeof(struct sctp_send_failed_event); 3535 } else { 3536 notifhdr_len = sizeof(struct sctp_send_failed); 3537 } 3538 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3539 if (m_notify == NULL) { 3540 /* no space left */ 3541 return; 3542 } 3543 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3544 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3545 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3546 memset(ssfe, 0, notifhdr_len); 3547 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3548 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3549 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3550 ssfe->ssfe_error = error; 3551 /* not exactly what the user sent in, but should be close :) 
*/ 3552 ssfe->ssfe_info.snd_sid = sp->sid; 3553 if (sp->some_taken) { 3554 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3555 } else { 3556 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3557 } 3558 ssfe->ssfe_info.snd_ppid = sp->ppid; 3559 ssfe->ssfe_info.snd_context = sp->context; 3560 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3561 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3562 } else { 3563 ssf = mtod(m_notify, struct sctp_send_failed *); 3564 memset(ssf, 0, notifhdr_len); 3565 ssf->ssf_type = SCTP_SEND_FAILED; 3566 ssf->ssf_flags = SCTP_DATA_UNSENT; 3567 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3568 ssf->ssf_error = error; 3569 /* not exactly what the user sent in, but should be close :) */ 3570 ssf->ssf_info.sinfo_stream = sp->sid; 3571 ssf->ssf_info.sinfo_ssn = 0; 3572 if (sp->some_taken) { 3573 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3574 } else { 3575 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3576 } 3577 ssf->ssf_info.sinfo_ppid = sp->ppid; 3578 ssf->ssf_info.sinfo_context = sp->context; 3579 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3580 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3581 } 3582 SCTP_BUF_NEXT(m_notify) = sp->data; 3583 3584 /* Steal off the mbuf */ 3585 sp->data = NULL; 3586 /* 3587 * For this case, we check the actual socket buffer, since the assoc 3588 * is going away we don't want to overfill the socket buffer for a 3589 * non-reader 3590 */ 3591 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3592 sctp_m_freem(m_notify); 3593 return; 3594 } 3595 /* append to socket */ 3596 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3597 0, 0, stcb->asoc.context, 0, 0, 0, 3598 m_notify); 3599 if (control == NULL) { 3600 /* no memory */ 3601 sctp_m_freem(m_notify); 3602 return; 3603 } 3604 control->length = SCTP_BUF_LEN(m_notify); 3605 control->spec_flags = M_NOTIFICATION; 3606 /* not that we need this */ 3607 control->tail_mbuf = 
m_notify; 3608 sctp_add_to_readq(stcb->sctp_ep, stcb, 3609 control, 3610 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3611 } 3612 3613 3614 3615 static void 3616 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3617 { 3618 struct mbuf *m_notify; 3619 struct sctp_adaptation_event *sai; 3620 struct sctp_queued_to_read *control; 3621 3622 if ((stcb == NULL) || 3623 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3624 /* event not enabled */ 3625 return; 3626 } 3627 3628 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3629 if (m_notify == NULL) 3630 /* no space left */ 3631 return; 3632 SCTP_BUF_LEN(m_notify) = 0; 3633 sai = mtod(m_notify, struct sctp_adaptation_event *); 3634 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3635 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3636 sai->sai_flags = 0; 3637 sai->sai_length = sizeof(struct sctp_adaptation_event); 3638 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3639 sai->sai_assoc_id = sctp_get_associd(stcb); 3640 3641 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3642 SCTP_BUF_NEXT(m_notify) = NULL; 3643 3644 /* append to socket */ 3645 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3646 0, 0, stcb->asoc.context, 0, 0, 0, 3647 m_notify); 3648 if (control == NULL) { 3649 /* no memory */ 3650 sctp_m_freem(m_notify); 3651 return; 3652 } 3653 control->length = SCTP_BUF_LEN(m_notify); 3654 control->spec_flags = M_NOTIFICATION; 3655 /* not that we need this */ 3656 control->tail_mbuf = m_notify; 3657 sctp_add_to_readq(stcb->sctp_ep, stcb, 3658 control, 3659 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3660 } 3661 3662 /* This always must be called with the read-queue LOCKED in the INP */ 3663 static void 3664 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3665 uint32_t val, int so_locked 3666 #if 
!defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3667 SCTP_UNUSED 3668 #endif 3669 ) 3670 { 3671 struct mbuf *m_notify; 3672 struct sctp_pdapi_event *pdapi; 3673 struct sctp_queued_to_read *control; 3674 struct sockbuf *sb; 3675 3676 if ((stcb == NULL) || 3677 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3678 /* event not enabled */ 3679 return; 3680 } 3681 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3682 return; 3683 } 3684 3685 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3686 if (m_notify == NULL) 3687 /* no space left */ 3688 return; 3689 SCTP_BUF_LEN(m_notify) = 0; 3690 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3691 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3692 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3693 pdapi->pdapi_flags = 0; 3694 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3695 pdapi->pdapi_indication = error; 3696 pdapi->pdapi_stream = (val >> 16); 3697 pdapi->pdapi_seq = (val & 0x0000ffff); 3698 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3699 3700 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3701 SCTP_BUF_NEXT(m_notify) = NULL; 3702 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3703 0, 0, stcb->asoc.context, 0, 0, 0, 3704 m_notify); 3705 if (control == NULL) { 3706 /* no memory */ 3707 sctp_m_freem(m_notify); 3708 return; 3709 } 3710 control->length = SCTP_BUF_LEN(m_notify); 3711 control->spec_flags = M_NOTIFICATION; 3712 /* not that we need this */ 3713 control->tail_mbuf = m_notify; 3714 sb = &stcb->sctp_socket->so_rcv; 3715 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3716 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3717 } 3718 sctp_sballoc(stcb, sb, m_notify); 3719 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3720 sctp_sblog(sb, control->do_not_ref_stcb ? 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification.  For 1-to-1 style
 * (and TCP-pool) sockets the socket is first marked unable to send,
 * since the SHUTDOWN has completed.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
    struct mbuf *m_notify;
    struct sctp_shutdown_event *sse;
    struct sctp_queued_to_read *control;

    /*
     * For TCP model AND UDP connected sockets we will send an error up
     * when an SHUTDOWN completes
     */
    if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
        /* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        struct socket *so;

        so = SCTP_INP_SO(stcb->sctp_ep);
        /* Hold a ref across the unlock/lock of the tcb. */
        atomic_add_int(&stcb->asoc.refcnt, 1);
        SCTP_TCB_UNLOCK(stcb);
        SCTP_SOCKET_LOCK(so, 1);
        SCTP_TCB_LOCK(stcb);
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
        if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
            SCTP_SOCKET_UNLOCK(so, 1);
            return;
        }
#endif
        socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        SCTP_SOCKET_UNLOCK(so, 1);
#endif
    }
    if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
        /* event not enabled */
        return;
    }

    m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
    if (m_notify == NULL)
        /* no space left */
        return;
    sse = mtod(m_notify, struct sctp_shutdown_event *);
    memset(sse, 0, sizeof(struct sctp_shutdown_event));
    sse->sse_type = SCTP_SHUTDOWN_EVENT;
    sse->sse_flags = 0;
    sse->sse_length = sizeof(struct sctp_shutdown_event);
    sse->sse_assoc_id = sctp_get_associd(stcb);

    SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
    SCTP_BUF_NEXT(m_notify) = NULL;

    /* append to socket */
    control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
        0, 0, stcb->asoc.context, 0, 0, 0,
        m_notify);
    if (control == NULL) {
        /* no memory */
        sctp_m_freem(m_notify);
        return;
    }
    control->length = SCTP_BUF_LEN(m_notify);
    control->spec_flags = M_NOTIFICATION;
    /* not that we need this */
    control->tail_mbuf = m_notify;
    sctp_add_to_readq(stcb->sctp_ep, stcb,
        control,
        &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
*/ 3770 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3771 struct socket *so; 3772 3773 so = SCTP_INP_SO(stcb->sctp_ep); 3774 atomic_add_int(&stcb->asoc.refcnt, 1); 3775 SCTP_TCB_UNLOCK(stcb); 3776 SCTP_SOCKET_LOCK(so, 1); 3777 SCTP_TCB_LOCK(stcb); 3778 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3779 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3780 SCTP_SOCKET_UNLOCK(so, 1); 3781 return; 3782 } 3783 #endif 3784 socantsendmore(stcb->sctp_socket); 3785 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3786 SCTP_SOCKET_UNLOCK(so, 1); 3787 #endif 3788 } 3789 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3790 /* event not enabled */ 3791 return; 3792 } 3793 3794 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3795 if (m_notify == NULL) 3796 /* no space left */ 3797 return; 3798 sse = mtod(m_notify, struct sctp_shutdown_event *); 3799 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3800 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3801 sse->sse_flags = 0; 3802 sse->sse_length = sizeof(struct sctp_shutdown_event); 3803 sse->sse_assoc_id = sctp_get_associd(stcb); 3804 3805 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3806 SCTP_BUF_NEXT(m_notify) = NULL; 3807 3808 /* append to socket */ 3809 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3810 0, 0, stcb->asoc.context, 0, 0, 0, 3811 m_notify); 3812 if (control == NULL) { 3813 /* no memory */ 3814 sctp_m_freem(m_notify); 3815 return; 3816 } 3817 control->length = SCTP_BUF_LEN(m_notify); 3818 control->spec_flags = M_NOTIFICATION; 3819 /* not that we need this */ 3820 control->tail_mbuf = m_notify; 3821 sctp_add_to_readq(stcb->sctp_ep, stcb, 3822 control, 3823 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3824 } 3825 3826 static void 3827 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3828 int so_locked 3829 #if !defined(__APPLE__) && 
!defined(SCTP_SO_LOCK_TESTING) 3830 SCTP_UNUSED 3831 #endif 3832 ) 3833 { 3834 struct mbuf *m_notify; 3835 struct sctp_sender_dry_event *event; 3836 struct sctp_queued_to_read *control; 3837 3838 if ((stcb == NULL) || 3839 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3840 /* event not enabled */ 3841 return; 3842 } 3843 3844 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3845 if (m_notify == NULL) { 3846 /* no space left */ 3847 return; 3848 } 3849 SCTP_BUF_LEN(m_notify) = 0; 3850 event = mtod(m_notify, struct sctp_sender_dry_event *); 3851 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3852 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3853 event->sender_dry_flags = 0; 3854 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3855 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3856 3857 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3858 SCTP_BUF_NEXT(m_notify) = NULL; 3859 3860 /* append to socket */ 3861 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3862 0, 0, stcb->asoc.context, 0, 0, 0, 3863 m_notify); 3864 if (control == NULL) { 3865 /* no memory */ 3866 sctp_m_freem(m_notify); 3867 return; 3868 } 3869 control->length = SCTP_BUF_LEN(m_notify); 3870 control->spec_flags = M_NOTIFICATION; 3871 /* not that we need this */ 3872 control->tail_mbuf = m_notify; 3873 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3874 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3875 } 3876 3877 3878 void 3879 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3880 { 3881 struct mbuf *m_notify; 3882 struct sctp_queued_to_read *control; 3883 struct sctp_stream_change_event *stradd; 3884 3885 if ((stcb == NULL) || 3886 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3887 /* event not enabled */ 3888 return; 3889 } 3890 if 
((stcb->asoc.peer_req_out) && flag) { 3891 /* Peer made the request, don't tell the local user */ 3892 stcb->asoc.peer_req_out = 0; 3893 return; 3894 } 3895 stcb->asoc.peer_req_out = 0; 3896 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3897 if (m_notify == NULL) 3898 /* no space left */ 3899 return; 3900 SCTP_BUF_LEN(m_notify) = 0; 3901 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3902 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3903 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3904 stradd->strchange_flags = flag; 3905 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3906 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3907 stradd->strchange_instrms = numberin; 3908 stradd->strchange_outstrms = numberout; 3909 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3910 SCTP_BUF_NEXT(m_notify) = NULL; 3911 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3912 /* no space */ 3913 sctp_m_freem(m_notify); 3914 return; 3915 } 3916 /* append to socket */ 3917 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3918 0, 0, stcb->asoc.context, 0, 0, 0, 3919 m_notify); 3920 if (control == NULL) { 3921 /* no memory */ 3922 sctp_m_freem(m_notify); 3923 return; 3924 } 3925 control->length = SCTP_BUF_LEN(m_notify); 3926 control->spec_flags = M_NOTIFICATION; 3927 /* not that we need this */ 3928 control->tail_mbuf = m_notify; 3929 sctp_add_to_readq(stcb->sctp_ep, stcb, 3930 control, 3931 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3932 } 3933 3934 void 3935 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3936 { 3937 struct mbuf *m_notify; 3938 struct sctp_queued_to_read *control; 3939 struct sctp_assoc_reset_event *strasoc; 3940 3941 if ((stcb == NULL) || 3942 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, 
SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3943 /* event not enabled */ 3944 return; 3945 } 3946 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3947 if (m_notify == NULL) 3948 /* no space left */ 3949 return; 3950 SCTP_BUF_LEN(m_notify) = 0; 3951 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3952 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3953 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3954 strasoc->assocreset_flags = flag; 3955 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3956 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3957 strasoc->assocreset_local_tsn = sending_tsn; 3958 strasoc->assocreset_remote_tsn = recv_tsn; 3959 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3960 SCTP_BUF_NEXT(m_notify) = NULL; 3961 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3962 /* no space */ 3963 sctp_m_freem(m_notify); 3964 return; 3965 } 3966 /* append to socket */ 3967 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3968 0, 0, stcb->asoc.context, 0, 0, 0, 3969 m_notify); 3970 if (control == NULL) { 3971 /* no memory */ 3972 sctp_m_freem(m_notify); 3973 return; 3974 } 3975 control->length = SCTP_BUF_LEN(m_notify); 3976 control->spec_flags = M_NOTIFICATION; 3977 /* not that we need this */ 3978 control->tail_mbuf = m_notify; 3979 sctp_add_to_readq(stcb->sctp_ep, stcb, 3980 control, 3981 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3982 } 3983 3984 3985 3986 static void 3987 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3988 int number_entries, uint16_t *list, int flag) 3989 { 3990 struct mbuf *m_notify; 3991 struct sctp_queued_to_read *control; 3992 struct sctp_stream_reset_event *strreset; 3993 int len; 3994 3995 if ((stcb == NULL) || 3996 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3997 /* event not enabled */ 3998 
/*
 * Deliver an SCTP_REMOTE_ERROR notification carrying the (clamped)
 * ERROR chunk received from the peer.  If the full-size mbuf cannot
 * be obtained, a header-only notification is sent instead.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
    struct mbuf *m_notify;
    struct sctp_remote_error *sre;
    struct sctp_queued_to_read *control;
    unsigned int notif_len;
    uint16_t chunk_len;

    if ((stcb == NULL) ||
        sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
        return;
    }
    if (chunk != NULL) {
        chunk_len = ntohs(chunk->ch.chunk_length);
        /*
         * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
         * contiguous.
         */
        if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
            chunk_len = SCTP_CHUNK_BUFFER_SIZE;
        }
    } else {
        chunk_len = 0;
    }
    notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
    m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
    if (m_notify == NULL) {
        /* Retry with smaller value (drops the chunk payload). */
        notif_len = (unsigned int)sizeof(struct sctp_remote_error);
        m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
        if (m_notify == NULL) {
            return;
        }
    }
    SCTP_BUF_NEXT(m_notify) = NULL;
    sre = mtod(m_notify, struct sctp_remote_error *);
    memset(sre, 0, notif_len);
    sre->sre_type = SCTP_REMOTE_ERROR;
    sre->sre_flags = 0;
    sre->sre_length = sizeof(struct sctp_remote_error);
    sre->sre_error = error;
    sre->sre_assoc_id = sctp_get_associd(stcb);
    /* Only true when the full-size allocation above succeeded. */
    if (notif_len > sizeof(struct sctp_remote_error)) {
        memcpy(sre->sre_data, chunk, chunk_len);
        sre->sre_length += chunk_len;
    }
    SCTP_BUF_LEN(m_notify) = sre->sre_length;
    control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
        0, 0, stcb->asoc.context, 0, 0, 0,
        m_notify);
    if (control != NULL) {
        control->length = SCTP_BUF_LEN(m_notify);
        control->spec_flags = M_NOTIFICATION;
        /* not that we need this */
        control->tail_mbuf = m_notify;
        sctp_add_to_readq(stcb->sctp_ep, stcb,
            control,
            &stcb->sctp_socket->so_rcv, 1,
            SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
    } else {
        sctp_m_freem(m_notify);
    }
}
!defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 4121 SCTP_UNUSED 4122 #endif 4123 ) 4124 { 4125 if ((stcb == NULL) || 4126 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4127 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4128 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4129 /* If the socket is gone we are out of here */ 4130 return; 4131 } 4132 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4133 return; 4134 } 4135 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4136 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4137 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4138 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4139 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4140 /* Don't report these in front states */ 4141 return; 4142 } 4143 } 4144 switch (notification) { 4145 case SCTP_NOTIFY_ASSOC_UP: 4146 if (stcb->asoc.assoc_up_sent == 0) { 4147 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 4148 stcb->asoc.assoc_up_sent = 1; 4149 } 4150 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4151 sctp_notify_adaptation_layer(stcb); 4152 } 4153 if (stcb->asoc.auth_supported == 0) { 4154 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4155 NULL, so_locked); 4156 } 4157 break; 4158 case SCTP_NOTIFY_ASSOC_DOWN: 4159 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 4160 break; 4161 case SCTP_NOTIFY_INTERFACE_DOWN: 4162 { 4163 struct sctp_nets *net; 4164 4165 net = (struct sctp_nets *)data; 4166 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4167 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4168 break; 4169 } 4170 case SCTP_NOTIFY_INTERFACE_UP: 4171 { 4172 struct sctp_nets *net; 4173 4174 net = (struct sctp_nets *)data; 4175 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4176 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4177 break; 4178 } 4179 case 
	SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		/* second argument 1 == datagram was already sent */
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		/* second argument 0 == datagram was never sent */
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		/* sic: "DELVIERY" is the historical constant name */
		{
			uint32_t val;

			val = *((uint32_t *)data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* Locally initiated abort; 4th-from-last arg 0 == not from peer */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		/* Abort came from the peer; flag 1 == from peer */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* data carries the key id, smuggled through the pointer */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}			/* end switch */
}

/*
 * Fail every chunk still queued for sending on this association (sent
 * queue, send queue, and each stream's output queue), notifying the ULP
 * with SENT/UNSENT/SP-fail events and releasing the chunk resources.
 * holds_lock != 0 means the caller already owns the TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notify callout may have consumed it */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore
FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}

/*
 * Tell the ULP the association was aborted: fail all outbound data, mark
 * one-to-one style sockets WAS_ABORTED, then raise the REM/LOC aborted
 * notification depending on whether the peer (from_peer != 0) aborted.
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 0, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}

/*
 * Send an ABORT for the (possibly tag-less) packet described by sh/src/dst
 * and, when a TCB exists, notify the ULP, bump the abort statistics and
 * free the association.  Called with the TCB lock held when stcb != NULL.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: drop the TCB lock while taking the
		 * socket lock, holding a refcount so the TCB survives.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug-only dump of the per-association in/out TSN ring buffers.
 * NOTE(review): guarded by NOSIY_PRINTS (sic, never defined), so the body
 * normally compiles away; typo kept to preserve behavior.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* ring wrapped: print the older half first */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
#endif

/*
 * Abort an existing (or absent) association: send ABORT to the peer,
 * update statistics, notify the ULP (unless the socket is gone) and free
 * the association.  With stcb == NULL only a dangling GONE endpoint is
 * cleaned up.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		/* take the socket lock with the TCB lock dropped */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}

/*
 * Handle an out-of-the-blue packet (no matching association): walk its
 * chunks to decide whether to stay silent, send SHUTDOWN-COMPLETE, or
 * send an ABORT, honoring the sctp_blackhole sysctl for INIT chunks.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *)&chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *)&chunk_buf);
	}
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    mflowtype, mflowid, fibnum,
		    vrf_id, port);
	}
}

/*
 * check the inbound datagram to make sure there is not an abort inside it,
 * if there is return 1, else return 0.
 */
int
sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
{
	struct sctp_chunkhdr *ch;
	struct sctp_init_chunk *init_chk, chunk_buf;
	int offset;
	unsigned int chk_length;

	offset = iphlen + sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
	    (uint8_t *)&chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* packet is probably corrupt */
			break;
		}
		/* we seem to be ok, is it an abort? */
		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
			/* yep, tell them */
			return (1);
		}
		if (ch->chunk_type == SCTP_INITIATION) {
			/* need to update the Vtag */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
			if (init_chk != NULL) {
				*vtagfill = ntohl(init_chk->init.initiate_tag);
			}
		}
		/* Nope, move to the next chunk */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *)&chunk_buf);
	}
	return (0);
}

/*
 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
 * set (i.e. it's 0) so, create this function to compare link local scopes
 */
#ifdef INET6
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	struct sockaddr_in6 a, b;

	/* save copies; sa6_recoverscope() mutates its argument */
	a = *addr1;
	b = *addr2;

	if (a.sin6_scope_id == 0)
		if (sa6_recoverscope(&a)) {
			/* can't get scope, so can't match */
			return (0);
		}
	if (b.sin6_scope_id == 0)
		if (sa6_recoverscope(&b)) {
			/* can't get scope, so can't match */
			return (0);
		}
	if (a.sin6_scope_id != b.sin6_scope_id)
		return (0);

	return (1);
}

/*
 * returns a sockaddr_in6 with embedded scope recovered and removed
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
#endif

/*
 * are the two addresses the same?  currently a "scopeless" check returns: 1
 * if same, 0 if not
 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{

	/* must be valid */
	if (sa1 == NULL || sa2 == NULL)
		return (0);

	/* must be the same family */
	if (sa1->sa_family != sa2->sa_family)
		return (0);

	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			/* IPv6 addresses */
			struct sockaddr_in6 *sin6_1, *sin6_2;

			sin6_1 = (struct sockaddr_in6 *)sa1;
			sin6_2 = (struct sockaddr_in6 *)sa2;
			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
			    sin6_2));
		}
#endif
#ifdef INET
	case AF_INET:
		{
			/* IPv4 addresses */
			struct sockaddr_in *sin_1, *sin_2;

			sin_1 = (struct sockaddr_in *)sa1;
			sin_2 = (struct sockaddr_in *)sa2;
			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
		}
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}

/*
 * Debug helper: pretty-print an IPv4/IPv6 sockaddr (address, port, and
 * scope for IPv6) to the kernel console.
 */
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
			break;
		}
#endif
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;
			unsigned char *p;

			sin = (struct sockaddr_in *)sa;
			p = (unsigned char *)&sin->sin_addr;
			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
			break;
		}
#endif
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}

void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb
*stcb, 4862 int waitflags) 4863 { 4864 /* 4865 * go through our old INP and pull off any control structures that 4866 * belong to stcb and move then to the new inp. 4867 */ 4868 struct socket *old_so, *new_so; 4869 struct sctp_queued_to_read *control, *nctl; 4870 struct sctp_readhead tmp_queue; 4871 struct mbuf *m; 4872 int error = 0; 4873 4874 old_so = old_inp->sctp_socket; 4875 new_so = new_inp->sctp_socket; 4876 TAILQ_INIT(&tmp_queue); 4877 error = sblock(&old_so->so_rcv, waitflags); 4878 if (error) { 4879 /* 4880 * Gak, can't get sblock, we have a problem. data will be 4881 * left stranded.. and we don't dare look at it since the 4882 * other thread may be reading something. Oh well, its a 4883 * screwed up app that does a peeloff OR a accept while 4884 * reading from the main socket... actually its only the 4885 * peeloff() case, since I think read will fail on a 4886 * listening socket.. 4887 */ 4888 return; 4889 } 4890 /* lock the socket buffers */ 4891 SCTP_INP_READ_LOCK(old_inp); 4892 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4893 /* Pull off all for out target stcb */ 4894 if (control->stcb == stcb) { 4895 /* remove it we want it */ 4896 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4897 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4898 m = control->data; 4899 while (m) { 4900 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4901 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4902 } 4903 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4904 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4905 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? 
				    NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* Charge each mbuf to the new socket's receive-buffer accounting. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}

/*
 * Wake up any reader blocked on the endpoint's socket receive buffer.
 * On platforms that need the socket lock (__APPLE__ / SCTP_SO_LOCK_TESTING)
 * this temporarily drops the TCB lock (holding a refcount) to honor the
 * socket-before-TCB lock order, and bails out if the socket went away
 * while unlocked.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			if (stcb) {
				/* Hold a ref so the assoc can't be freed while unlocked. */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* Socket vanished while we were re-locking. */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}

/*
 * Append a fully built read-queue entry (control) to the endpoint's
 * read_queue and charge its mbuf chain to the socket buffer 'sb' so that
 * select()/poll() see the data.  Zero-length mbufs are pruned from the
 * chain as we walk it.  'end' marks the message as complete (end_added).
 * The INP read lock is taken unless the caller says it already holds it.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader is gone; discard unless the control still sits on a stream queue. */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			if (control->data) {
				sctp_m_freem(control->data);
				control->data = NULL;
			}
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* Only user data (not notifications) counts toward recv stats. */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ?
			    NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}

/*************HOLD THIS COMMENT FOR PATCH FILE OF
 *************ALTERNATE ROUTING CODE
 */

/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
 *************ALTERNATE ROUTING CODE
 */

/*
 * Build an SCTP error-cause parameter (cause code + textual info) in a
 * freshly allocated mbuf.  Returns NULL if code is 0, info is NULL, the
 * info string is too long to fit in SCTP_MAX_CAUSE_LENGTH, or mbuf
 * allocation fails.  Caller owns (and must free) the returned mbuf.
 */
struct mbuf *
sctp_generate_cause(uint16_t code, char *info)
{
	struct mbuf *m;
	struct sctp_gen_error_cause *cause;
	size_t info_len;
	uint16_t len;

	if ((code == 0) || (info == NULL)) {
		return (NULL);
	}
	info_len = strlen(info);
	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
		return (NULL);
	}
	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (m != NULL) {
		SCTP_BUF_LEN(m) = len;
		cause = mtod(m, struct sctp_gen_error_cause *);
		/* Wire format is network byte order. */
		cause->code = htons(code);
		cause->length = htons(len);
		memcpy(cause->info, info, info_len);
	}
	return (m);
}

struct mbuf *
sctp_generate_no_user_data_cause(uint32_t tsn)
{
	/*
	 * Build a "No User Data" error cause (SCTP_CAUSE_NO_USER_DATA)
	 * carrying the offending TSN, in a freshly allocated mbuf.
	 * Returns NULL on allocation failure; caller owns the mbuf.
	 */
	struct mbuf *m;
	struct sctp_error_no_user_data *no_user_data_cause;
	uint16_t len;

	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (m != NULL) {
		SCTP_BUF_LEN(m) = len;
		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
		no_user_data_cause->cause.length = htons(len);
		no_user_data_cause->tsn = htonl(tsn);
	}
	return (m);
}

#ifdef SCTP_MBCNT_LOGGING
/*
 * Release the output-queue space accounted to chunk tp1: decrement the
 * association's chunk count and total_output_queue_size (clamped at zero)
 * and, for TCP-model sockets, the socket send buffer byte count as well.
 * This variant also emits MBCNT trace records when enabled.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* Clamp at zero rather than underflowing the unsigned counter. */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}

#endif

/*
 * Abandon a PR-SCTP message starting at chunk tp1 (on the sent queue if
 * 'sent', else unsent): free its data, update abandoned statistics and
 * peer rwnd, notify the ULP, and mark every fragment SCTP_FORWARD_TSN_SKIP
 * so a FORWARD-TSN can cover it.  If the message's last fragment has not
 * been queued yet, the remainder on the stream-out queue is discarded and
 * a placeholder chunk may be created to carry the LAST-fragment TSN.
 * Returns the number of bytes released (book_size accounting).
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct
	    sctp_stream_queue_pending *sp;
	uint32_t mid;
	uint16_t sid;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	sid = tp1->rec.data.sid;
	mid = tp1->rec.data.mid;
	/* A first fragment that was never sent counts as "unsent". */
	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
		stcb->asoc.abandoned_sent[0]++;
		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	} else {
		stcb->asoc.abandoned_unsent[0]++;
		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	}
	/* Walk forward over every fragment of this message on the queue. */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.sid != sid) ||
			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
				/* Different message; fragments of ours are contiguous. */
				break;
			}
			/*
			 * save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one we
			 * don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/*
			 * on to the sent queue so we can wait for it to be
			 * passed by.
			 */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there is stuff left on the
		 * stream out queue.. yuck.
		 */
		SCTP_TCB_SEND_LOCK(stcb);
		strq = &stcb->asoc.strmout[sid];
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp != NULL) {
			sp->discard_rest = 1;
			/*
			 * We may need to put a chunk on the queue that
			 * holds the TSN that would have been sent with the
			 * LAST bit.
			 */
			if (chk == NULL) {
				/* Yep, we have to */
				sctp_alloc_a_chunk(stcb, chk);
				if (chk == NULL) {
					/*
					 * we are hosed. All we can do is
					 * nothing.. which will cause an
					 * abort if the peer is paying
					 * attention.
					 */
					goto oh_well;
				}
				memset(chk, 0, sizeof(*chk));
				chk->rec.data.rcv_flags = 0;
				chk->sent = SCTP_FORWARD_TSN_SKIP;
				chk->asoc = &stcb->asoc;
				if (stcb->asoc.idata_supported == 0) {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = 0;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				} else {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = strq->next_mid_unordered;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				}
				chk->rec.data.sid = sp->sid;
				chk->rec.data.ppid = sp->ppid;
				chk->rec.data.context = sp->context;
				chk->flags = sp->act_flags;
				chk->whoTo = NULL;
				/* Consume a real TSN for the placeholder. */
				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
				strq->chunks_on_queues++;
				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
				stcb->asoc.sent_queue_cnt++;
				stcb->asoc.pr_sctp_cnt++;
			}
			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
			if (sp->sinfo_flags & SCTP_UNORDERED) {
				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
			}
			if (stcb->asoc.idata_supported == 0) {
				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
					strq->next_mid_ordered++;
				}
			} else {
				if (sp->sinfo_flags & SCTP_UNORDERED) {
					strq->next_mid_unordered++;
				} else {
					strq->next_mid_ordered++;
				}
			}
	oh_well:
			if (sp->data) {
				/*
				 * Pull any data to free up the SB and allow
				 * sender to "add more" while we will throw
				 * away :-)
				 */
				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
				ret_sz += sp->length;
				do_wakeup_routine = 1;
				sp->some_taken = 1;
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
				sp->length = 0;
			}
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}

/*
 * checks to see if the given address, sa, is one that is currently known by
 * the kernel note: can't distinguish the same address on multiple interfaces
 * and doesn't handle multiple addresses with different zone/scope id's note:
 * ifa_ifwithaddr() compares the entire sockaddr struct
 */
struct sctp_ifa *
sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
    int holds_lock)
{
	struct sctp_laddr *laddr;

	if (holds_lock == 0) {
		SCTP_INP_RLOCK(inp);
	}

	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == NULL)
			continue;
		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    laddr->ifa->address.sin.sin_addr.s_addr) {
				/* found him. */
				if (holds_lock == 0) {
					SCTP_INP_RUNLOCK(inp);
				}
				return (laddr->ifa);
				/* NOTE(review): unreachable break after return. */
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &laddr->ifa->address.sin6)) {
				/* found him. */
				if (holds_lock == 0) {
					SCTP_INP_RUNLOCK(inp);
				}
				return (laddr->ifa);
				/* NOTE(review): unreachable break after return. */
				break;
			}
		}
#endif
	}
	if (holds_lock == 0) {
		SCTP_INP_RUNLOCK(inp);
	}
	return (NULL);
}

/*
 * Fold a sockaddr into a 32-bit hash for the VRF address hash table.
 * IPv4: address XOR its high half; IPv6: sum of the four 32-bit words,
 * folded the same way.  Unknown families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t hash_of_addr;

			sin6 = (struct sockaddr_in6 *)addr;
			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3]);
			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
			return (hash_of_addr);
		}
#endif
	default:
		break;
	}
	return (0);
}

/*
 * Look up the sctp_ifa for 'addr' in the given VRF's address hash table.
 * Takes the global address read lock unless the caller already holds it.
 * Returns NULL if the VRF or the address is unknown.
 */
struct sctp_ifa *
sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
{
	struct sctp_ifa *sctp_ifap;
	struct sctp_vrf *vrf;
	struct sctp_ifalist *hash_head;
	uint32_t hash_of_addr;

	if (holds_lock == 0)
		SCTP_IPI_ADDR_RLOCK();

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();
		return (NULL);
	}

	hash_of_addr = sctp_get_ifa_hash_val(addr);

	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
	if (hash_head == NULL) {
		/* Should not happen; log the broken bucket computation. */
		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
		sctp_print_address(addr);
		SCTP_PRINTF("No such bucket for address\n");
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();

		return (NULL);
	}
	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    sctp_ifap->address.sin.sin_addr.s_addr) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				/* NOTE(review): unreachable break after return. */
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &sctp_ifap->address.sin6)) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				/* NOTE(review): unreachable break after return. */
				break;
			}
		}
#endif
	}
	if (holds_lock == 0)
		SCTP_IPI_ADDR_RUNLOCK();
	return (NULL);
}

/*
 * Called after the user has consumed *freed_so_far bytes from the receive
 * path.  If the receive window has opened by at least rwnd_req since the
 * last report, send a window-update SACK (and kick chunk output); otherwise
 * just remember the pending amount in freed_by_sorcv_sincelast.  Holds an
 * assoc refcount for the duration; temporarily drops the INP read lock
 * (hold_rlock) around the TCB-locked SACK send and reacquires it on exit.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/*
	 * User pulled some data, do we need a rwnd update?
	 */
	struct epoch_tracker et;
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	atomic_add_int(&stcb->asoc.refcnt, 1);

	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		if (hold_rlock) {
			/* Drop the read lock before taking the TCB lock (lock order). */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		NET_EPOCH_ENTER(et);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		NET_EPOCH_EXIT(et);
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	if (so && r_unlocked && hold_rlock) {
		/* Restore the read lock the caller expects to still hold. */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}

	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}

int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5657 * On the way out we may send out any combination of: 5658 * MSG_NOTIFICATION MSG_EOR 5659 * 5660 */ 5661 struct sctp_inpcb *inp = NULL; 5662 ssize_t my_len = 0; 5663 ssize_t cp_len = 0; 5664 int error = 0; 5665 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5666 struct mbuf *m = NULL; 5667 struct sctp_tcb *stcb = NULL; 5668 int wakeup_read_socket = 0; 5669 int freecnt_applied = 0; 5670 int out_flags = 0, in_flags = 0; 5671 int block_allowed = 1; 5672 uint32_t freed_so_far = 0; 5673 ssize_t copied_so_far = 0; 5674 int in_eeor_mode = 0; 5675 int no_rcv_needed = 0; 5676 uint32_t rwnd_req = 0; 5677 int hold_sblock = 0; 5678 int hold_rlock = 0; 5679 ssize_t slen = 0; 5680 uint32_t held_length = 0; 5681 int sockbuf_lock = 0; 5682 5683 if (uio == NULL) { 5684 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5685 return (EINVAL); 5686 } 5687 5688 if (msg_flags) { 5689 in_flags = *msg_flags; 5690 if (in_flags & MSG_PEEK) 5691 SCTP_STAT_INCR(sctps_read_peeks); 5692 } else { 5693 in_flags = 0; 5694 } 5695 slen = uio->uio_resid; 5696 5697 /* Pull in and set up our int flags */ 5698 if (in_flags & MSG_OOB) { 5699 /* Out of band's NOT supported */ 5700 return (EOPNOTSUPP); 5701 } 5702 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5703 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5704 return (EINVAL); 5705 } 5706 if ((in_flags & (MSG_DONTWAIT 5707 | MSG_NBIO 5708 )) || 5709 SCTP_SO_IS_NBIO(so)) { 5710 block_allowed = 0; 5711 } 5712 /* setup the endpoint */ 5713 inp = (struct sctp_inpcb *)so->so_pcb; 5714 if (inp == NULL) { 5715 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5716 return (EFAULT); 5717 } 5718 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5719 /* Must be at least a MTU's worth */ 5720 if (rwnd_req < SCTP_MIN_RWND) 5721 rwnd_req = SCTP_MIN_RWND; 5722 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5723 if (SCTP_BASE_SYSCTL(sctp_logging_level) & 
SCTP_RECV_RWND_LOGGING_ENABLE) { 5724 sctp_misc_ints(SCTP_SORECV_ENTER, 5725 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5726 } 5727 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5728 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5729 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5730 } 5731 5732 5733 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0)); 5734 if (error) { 5735 goto release_unlocked; 5736 } 5737 sockbuf_lock = 1; 5738 restart: 5739 5740 5741 restart_nosblocks: 5742 if (hold_sblock == 0) { 5743 SOCKBUF_LOCK(&so->so_rcv); 5744 hold_sblock = 1; 5745 } 5746 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5747 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5748 goto out; 5749 } 5750 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5751 if (so->so_error) { 5752 error = so->so_error; 5753 if ((in_flags & MSG_PEEK) == 0) 5754 so->so_error = 0; 5755 goto out; 5756 } else { 5757 if (so->so_rcv.sb_cc == 0) { 5758 /* indicate EOF */ 5759 error = 0; 5760 goto out; 5761 } 5762 } 5763 } 5764 if (so->so_rcv.sb_cc <= held_length) { 5765 if (so->so_error) { 5766 error = so->so_error; 5767 if ((in_flags & MSG_PEEK) == 0) { 5768 so->so_error = 0; 5769 } 5770 goto out; 5771 } 5772 if ((so->so_rcv.sb_cc == 0) && 5773 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5774 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5775 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5776 /* 5777 * For active open side clear flags for 5778 * re-use passive open is blocked by 5779 * connect. 
5780 */ 5781 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5782 /* 5783 * You were aborted, passive side 5784 * always hits here 5785 */ 5786 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5787 error = ECONNRESET; 5788 } 5789 so->so_state &= ~(SS_ISCONNECTING | 5790 SS_ISDISCONNECTING | 5791 SS_ISCONFIRMING | 5792 SS_ISCONNECTED); 5793 if (error == 0) { 5794 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5795 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5796 error = ENOTCONN; 5797 } 5798 } 5799 goto out; 5800 } 5801 } 5802 if (block_allowed) { 5803 error = sbwait(&so->so_rcv); 5804 if (error) { 5805 goto out; 5806 } 5807 held_length = 0; 5808 goto restart_nosblocks; 5809 } else { 5810 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5811 error = EWOULDBLOCK; 5812 goto out; 5813 } 5814 } 5815 if (hold_sblock == 1) { 5816 SOCKBUF_UNLOCK(&so->so_rcv); 5817 hold_sblock = 0; 5818 } 5819 /* we possibly have data we can read */ 5820 /* sa_ignore FREED_MEMORY */ 5821 control = TAILQ_FIRST(&inp->read_queue); 5822 if (control == NULL) { 5823 /* 5824 * This could be happening since the appender did the 5825 * increment but as not yet did the tailq insert onto the 5826 * read_queue 5827 */ 5828 if (hold_rlock == 0) { 5829 SCTP_INP_READ_LOCK(inp); 5830 } 5831 control = TAILQ_FIRST(&inp->read_queue); 5832 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5833 #ifdef INVARIANTS 5834 panic("Huh, its non zero and nothing on control?"); 5835 #endif 5836 so->so_rcv.sb_cc = 0; 5837 } 5838 SCTP_INP_READ_UNLOCK(inp); 5839 hold_rlock = 0; 5840 goto restart; 5841 } 5842 5843 if ((control->length == 0) && 5844 (control->do_not_ref_stcb)) { 5845 /* 5846 * Clean up code for freeing assoc that left behind a 5847 * pdapi.. maybe a peer in EEOR that just closed after 5848 * sending and never indicated a EOR. 
5849 */ 5850 if (hold_rlock == 0) { 5851 hold_rlock = 1; 5852 SCTP_INP_READ_LOCK(inp); 5853 } 5854 control->held_length = 0; 5855 if (control->data) { 5856 /* Hmm there is data here .. fix */ 5857 struct mbuf *m_tmp; 5858 int cnt = 0; 5859 5860 m_tmp = control->data; 5861 while (m_tmp) { 5862 cnt += SCTP_BUF_LEN(m_tmp); 5863 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5864 control->tail_mbuf = m_tmp; 5865 control->end_added = 1; 5866 } 5867 m_tmp = SCTP_BUF_NEXT(m_tmp); 5868 } 5869 control->length = cnt; 5870 } else { 5871 /* remove it */ 5872 TAILQ_REMOVE(&inp->read_queue, control, next); 5873 /* Add back any hiddend data */ 5874 sctp_free_remote_addr(control->whoFrom); 5875 sctp_free_a_readq(stcb, control); 5876 } 5877 if (hold_rlock) { 5878 hold_rlock = 0; 5879 SCTP_INP_READ_UNLOCK(inp); 5880 } 5881 goto restart; 5882 } 5883 if ((control->length == 0) && 5884 (control->end_added == 1)) { 5885 /* 5886 * Do we also need to check for (control->pdapi_aborted == 5887 * 1)? 5888 */ 5889 if (hold_rlock == 0) { 5890 hold_rlock = 1; 5891 SCTP_INP_READ_LOCK(inp); 5892 } 5893 TAILQ_REMOVE(&inp->read_queue, control, next); 5894 if (control->data) { 5895 #ifdef INVARIANTS 5896 panic("control->data not null but control->length == 0"); 5897 #else 5898 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5899 sctp_m_freem(control->data); 5900 control->data = NULL; 5901 #endif 5902 } 5903 if (control->aux_data) { 5904 sctp_m_free(control->aux_data); 5905 control->aux_data = NULL; 5906 } 5907 #ifdef INVARIANTS 5908 if (control->on_strm_q) { 5909 panic("About to free ctl:%p so:%p and its in %d", 5910 control, so, control->on_strm_q); 5911 } 5912 #endif 5913 sctp_free_remote_addr(control->whoFrom); 5914 sctp_free_a_readq(stcb, control); 5915 if (hold_rlock) { 5916 hold_rlock = 0; 5917 SCTP_INP_READ_UNLOCK(inp); 5918 } 5919 goto restart; 5920 } 5921 if (control->length == 0) { 5922 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5923 (filling_sinfo)) { 5924 /* find a more suitable one then this */ 5925 ctl = TAILQ_NEXT(control, next); 5926 while (ctl) { 5927 if ((ctl->stcb != control->stcb) && (ctl->length) && 5928 (ctl->some_taken || 5929 (ctl->spec_flags & M_NOTIFICATION) || 5930 ((ctl->do_not_ref_stcb == 0) && 5931 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5932 ) { 5933 /*- 5934 * If we have a different TCB next, and there is data 5935 * present. If we have already taken some (pdapi), OR we can 5936 * ref the tcb and no delivery as started on this stream, we 5937 * take it. Note we allow a notification on a different 5938 * assoc to be delivered.. 5939 */ 5940 control = ctl; 5941 goto found_one; 5942 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5943 (ctl->length) && 5944 ((ctl->some_taken) || 5945 ((ctl->do_not_ref_stcb == 0) && 5946 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5947 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5948 /*- 5949 * If we have the same tcb, and there is data present, and we 5950 * have the strm interleave feature present. Then if we have 5951 * taken some (pdapi) or we can refer to tht tcb AND we have 5952 * not started a delivery for this stream, we can take it. 
5953 * Note we do NOT allow a notificaiton on the same assoc to 5954 * be delivered. 5955 */ 5956 control = ctl; 5957 goto found_one; 5958 } 5959 ctl = TAILQ_NEXT(ctl, next); 5960 } 5961 } 5962 /* 5963 * if we reach here, not suitable replacement is available 5964 * <or> fragment interleave is NOT on. So stuff the sb_cc 5965 * into the our held count, and its time to sleep again. 5966 */ 5967 held_length = so->so_rcv.sb_cc; 5968 control->held_length = so->so_rcv.sb_cc; 5969 goto restart; 5970 } 5971 /* Clear the held length since there is something to read */ 5972 control->held_length = 0; 5973 found_one: 5974 /* 5975 * If we reach here, control has a some data for us to read off. 5976 * Note that stcb COULD be NULL. 5977 */ 5978 if (hold_rlock == 0) { 5979 hold_rlock = 1; 5980 SCTP_INP_READ_LOCK(inp); 5981 } 5982 control->some_taken++; 5983 stcb = control->stcb; 5984 if (stcb) { 5985 if ((control->do_not_ref_stcb == 0) && 5986 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5987 if (freecnt_applied == 0) 5988 stcb = NULL; 5989 } else if (control->do_not_ref_stcb == 0) { 5990 /* you can't free it on me please */ 5991 /* 5992 * The lock on the socket buffer protects us so the 5993 * free code will stop. But since we used the 5994 * socketbuf lock and the sender uses the tcb_lock 5995 * to increment, we need to use the atomic add to 5996 * the refcnt 5997 */ 5998 if (freecnt_applied) { 5999 #ifdef INVARIANTS 6000 panic("refcnt already incremented"); 6001 #else 6002 SCTP_PRINTF("refcnt already incremented?\n"); 6003 #endif 6004 } else { 6005 atomic_add_int(&stcb->asoc.refcnt, 1); 6006 freecnt_applied = 1; 6007 } 6008 /* 6009 * Setup to remember how much we have not yet told 6010 * the peer our rwnd has opened up. Note we grab the 6011 * value from the tcb from last time. Note too that 6012 * sack sending clears this when a sack is sent, 6013 * which is fine. 
Once we hit the rwnd_req, we then 6014 * will go to the sctp_user_rcvd() that will not 6015 * lock until it KNOWs it MUST send a WUP-SACK. 6016 */ 6017 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 6018 stcb->freed_by_sorcv_sincelast = 0; 6019 } 6020 } 6021 if (stcb && 6022 ((control->spec_flags & M_NOTIFICATION) == 0) && 6023 control->do_not_ref_stcb == 0) { 6024 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 6025 } 6026 6027 /* First lets get off the sinfo and sockaddr info */ 6028 if ((sinfo != NULL) && (filling_sinfo != 0)) { 6029 sinfo->sinfo_stream = control->sinfo_stream; 6030 sinfo->sinfo_ssn = (uint16_t)control->mid; 6031 sinfo->sinfo_flags = control->sinfo_flags; 6032 sinfo->sinfo_ppid = control->sinfo_ppid; 6033 sinfo->sinfo_context = control->sinfo_context; 6034 sinfo->sinfo_timetolive = control->sinfo_timetolive; 6035 sinfo->sinfo_tsn = control->sinfo_tsn; 6036 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 6037 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 6038 nxt = TAILQ_NEXT(control, next); 6039 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6040 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 6041 struct sctp_extrcvinfo *s_extra; 6042 6043 s_extra = (struct sctp_extrcvinfo *)sinfo; 6044 if ((nxt) && 6045 (nxt->length)) { 6046 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 6047 if (nxt->sinfo_flags & SCTP_UNORDERED) { 6048 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 6049 } 6050 if (nxt->spec_flags & M_NOTIFICATION) { 6051 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 6052 } 6053 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 6054 s_extra->serinfo_next_length = nxt->length; 6055 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 6056 s_extra->serinfo_next_stream = nxt->sinfo_stream; 6057 if (nxt->tail_mbuf != NULL) { 6058 if (nxt->end_added) { 6059 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 6060 } 6061 } 6062 } else { 6063 /* 6064 * we explicitly 0 this, 
since the memcpy 6065 * got some other things beyond the older 6066 * sinfo_ that is on the control's structure 6067 * :-D 6068 */ 6069 nxt = NULL; 6070 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6071 s_extra->serinfo_next_aid = 0; 6072 s_extra->serinfo_next_length = 0; 6073 s_extra->serinfo_next_ppid = 0; 6074 s_extra->serinfo_next_stream = 0; 6075 } 6076 } 6077 /* 6078 * update off the real current cum-ack, if we have an stcb. 6079 */ 6080 if ((control->do_not_ref_stcb == 0) && stcb) 6081 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 6082 /* 6083 * mask off the high bits, we keep the actual chunk bits in 6084 * there. 6085 */ 6086 sinfo->sinfo_flags &= 0x00ff; 6087 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 6088 sinfo->sinfo_flags |= SCTP_UNORDERED; 6089 } 6090 } 6091 #ifdef SCTP_ASOCLOG_OF_TSNS 6092 { 6093 int index, newindex; 6094 struct sctp_pcbtsn_rlog *entry; 6095 6096 do { 6097 index = inp->readlog_index; 6098 newindex = index + 1; 6099 if (newindex >= SCTP_READ_LOG_SIZE) { 6100 newindex = 0; 6101 } 6102 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 6103 entry = &inp->readlog[index]; 6104 entry->vtag = control->sinfo_assoc_id; 6105 entry->strm = control->sinfo_stream; 6106 entry->seq = (uint16_t)control->mid; 6107 entry->sz = control->length; 6108 entry->flgs = control->sinfo_flags; 6109 } 6110 #endif 6111 if ((fromlen > 0) && (from != NULL)) { 6112 union sctp_sockstore store; 6113 size_t len; 6114 6115 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 6116 #ifdef INET6 6117 case AF_INET6: 6118 len = sizeof(struct sockaddr_in6); 6119 store.sin6 = control->whoFrom->ro._l_addr.sin6; 6120 store.sin6.sin6_port = control->port_from; 6121 break; 6122 #endif 6123 #ifdef INET 6124 case AF_INET: 6125 #ifdef INET6 6126 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 6127 len = sizeof(struct sockaddr_in6); 6128 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 6129 &store.sin6); 6130 
store.sin6.sin6_port = control->port_from; 6131 } else { 6132 len = sizeof(struct sockaddr_in); 6133 store.sin = control->whoFrom->ro._l_addr.sin; 6134 store.sin.sin_port = control->port_from; 6135 } 6136 #else 6137 len = sizeof(struct sockaddr_in); 6138 store.sin = control->whoFrom->ro._l_addr.sin; 6139 store.sin.sin_port = control->port_from; 6140 #endif 6141 break; 6142 #endif 6143 default: 6144 len = 0; 6145 break; 6146 } 6147 memcpy(from, &store, min((size_t)fromlen, len)); 6148 #ifdef INET6 6149 { 6150 struct sockaddr_in6 lsa6, *from6; 6151 6152 from6 = (struct sockaddr_in6 *)from; 6153 sctp_recover_scope_mac(from6, (&lsa6)); 6154 } 6155 #endif 6156 } 6157 if (hold_rlock) { 6158 SCTP_INP_READ_UNLOCK(inp); 6159 hold_rlock = 0; 6160 } 6161 if (hold_sblock) { 6162 SOCKBUF_UNLOCK(&so->so_rcv); 6163 hold_sblock = 0; 6164 } 6165 /* now copy out what data we can */ 6166 if (mp == NULL) { 6167 /* copy out each mbuf in the chain up to length */ 6168 get_more_data: 6169 m = control->data; 6170 while (m) { 6171 /* Move out all we can */ 6172 cp_len = uio->uio_resid; 6173 my_len = SCTP_BUF_LEN(m); 6174 if (cp_len > my_len) { 6175 /* not enough in this buf */ 6176 cp_len = my_len; 6177 } 6178 if (hold_rlock) { 6179 SCTP_INP_READ_UNLOCK(inp); 6180 hold_rlock = 0; 6181 } 6182 if (cp_len > 0) 6183 error = uiomove(mtod(m, char *), (int)cp_len, uio); 6184 /* re-read */ 6185 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 6186 goto release; 6187 } 6188 6189 if ((control->do_not_ref_stcb == 0) && stcb && 6190 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 6191 no_rcv_needed = 1; 6192 } 6193 if (error) { 6194 /* error we are out of here */ 6195 goto release; 6196 } 6197 SCTP_INP_READ_LOCK(inp); 6198 hold_rlock = 1; 6199 if (cp_len == SCTP_BUF_LEN(m)) { 6200 if ((SCTP_BUF_NEXT(m) == NULL) && 6201 (control->end_added)) { 6202 out_flags |= MSG_EOR; 6203 if ((control->do_not_ref_stcb == 0) && 6204 (control->stcb != NULL) && 6205 ((control->spec_flags & M_NOTIFICATION) == 0)) 
6206 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6207 } 6208 if (control->spec_flags & M_NOTIFICATION) { 6209 out_flags |= MSG_NOTIFICATION; 6210 } 6211 /* we ate up the mbuf */ 6212 if (in_flags & MSG_PEEK) { 6213 /* just looking */ 6214 m = SCTP_BUF_NEXT(m); 6215 copied_so_far += cp_len; 6216 } else { 6217 /* dispose of the mbuf */ 6218 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6219 sctp_sblog(&so->so_rcv, 6220 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6221 } 6222 sctp_sbfree(control, stcb, &so->so_rcv, m); 6223 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6224 sctp_sblog(&so->so_rcv, 6225 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6226 } 6227 copied_so_far += cp_len; 6228 freed_so_far += (uint32_t)cp_len; 6229 freed_so_far += MSIZE; 6230 atomic_subtract_int(&control->length, cp_len); 6231 control->data = sctp_m_free(m); 6232 m = control->data; 6233 /* 6234 * been through it all, must hold sb 6235 * lock ok to null tail 6236 */ 6237 if (control->data == NULL) { 6238 #ifdef INVARIANTS 6239 if ((control->end_added == 0) || 6240 (TAILQ_NEXT(control, next) == NULL)) { 6241 /* 6242 * If the end is not 6243 * added, OR the 6244 * next is NOT null 6245 * we MUST have the 6246 * lock. 6247 */ 6248 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6249 panic("Hmm we don't own the lock?"); 6250 } 6251 } 6252 #endif 6253 control->tail_mbuf = NULL; 6254 #ifdef INVARIANTS 6255 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6256 panic("end_added, nothing left and no MSG_EOR"); 6257 } 6258 #endif 6259 } 6260 } 6261 } else { 6262 /* Do we need to trim the mbuf? 
*/ 6263 if (control->spec_flags & M_NOTIFICATION) { 6264 out_flags |= MSG_NOTIFICATION; 6265 } 6266 if ((in_flags & MSG_PEEK) == 0) { 6267 SCTP_BUF_RESV_UF(m, cp_len); 6268 SCTP_BUF_LEN(m) -= (int)cp_len; 6269 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6270 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6271 } 6272 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 6273 if ((control->do_not_ref_stcb == 0) && 6274 stcb) { 6275 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 6276 } 6277 copied_so_far += cp_len; 6278 freed_so_far += (uint32_t)cp_len; 6279 freed_so_far += MSIZE; 6280 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6281 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6282 SCTP_LOG_SBRESULT, 0); 6283 } 6284 atomic_subtract_int(&control->length, cp_len); 6285 } else { 6286 copied_so_far += cp_len; 6287 } 6288 } 6289 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6290 break; 6291 } 6292 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6293 (control->do_not_ref_stcb == 0) && 6294 (freed_so_far >= rwnd_req)) { 6295 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6296 } 6297 } /* end while(m) */ 6298 /* 6299 * At this point we have looked at it all and we either have 6300 * a MSG_EOR/or read all the user wants... <OR> 6301 * control->length == 0. 6302 */ 6303 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6304 /* we are done with this control */ 6305 if (control->length == 0) { 6306 if (control->data) { 6307 #ifdef INVARIANTS 6308 panic("control->data not null at read eor?"); 6309 #else 6310 SCTP_PRINTF("Strange, data left in the control buffer .. 
invarients would panic?\n"); 6311 sctp_m_freem(control->data); 6312 control->data = NULL; 6313 #endif 6314 } 6315 done_with_control: 6316 if (hold_rlock == 0) { 6317 SCTP_INP_READ_LOCK(inp); 6318 hold_rlock = 1; 6319 } 6320 TAILQ_REMOVE(&inp->read_queue, control, next); 6321 /* Add back any hiddend data */ 6322 if (control->held_length) { 6323 held_length = 0; 6324 control->held_length = 0; 6325 wakeup_read_socket = 1; 6326 } 6327 if (control->aux_data) { 6328 sctp_m_free(control->aux_data); 6329 control->aux_data = NULL; 6330 } 6331 no_rcv_needed = control->do_not_ref_stcb; 6332 sctp_free_remote_addr(control->whoFrom); 6333 control->data = NULL; 6334 #ifdef INVARIANTS 6335 if (control->on_strm_q) { 6336 panic("About to free ctl:%p so:%p and its in %d", 6337 control, so, control->on_strm_q); 6338 } 6339 #endif 6340 sctp_free_a_readq(stcb, control); 6341 control = NULL; 6342 if ((freed_so_far >= rwnd_req) && 6343 (no_rcv_needed == 0)) 6344 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6345 6346 } else { 6347 /* 6348 * The user did not read all of this 6349 * message, turn off the returned MSG_EOR 6350 * since we are leaving more behind on the 6351 * control to read. 6352 */ 6353 #ifdef INVARIANTS 6354 if (control->end_added && 6355 (control->data == NULL) && 6356 (control->tail_mbuf == NULL)) { 6357 panic("Gak, control->length is corrupt?"); 6358 } 6359 #endif 6360 no_rcv_needed = control->do_not_ref_stcb; 6361 out_flags &= ~MSG_EOR; 6362 } 6363 } 6364 if (out_flags & MSG_EOR) { 6365 goto release; 6366 } 6367 if ((uio->uio_resid == 0) || 6368 ((in_eeor_mode) && 6369 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6370 goto release; 6371 } 6372 /* 6373 * If I hit here the receiver wants more and this message is 6374 * NOT done (pd-api). So two questions. Can we block? if not 6375 * we are done. Did the user NOT set MSG_WAITALL? 
6376 */ 6377 if (block_allowed == 0) { 6378 goto release; 6379 } 6380 /* 6381 * We need to wait for more data a few things: - We don't 6382 * sbunlock() so we don't get someone else reading. - We 6383 * must be sure to account for the case where what is added 6384 * is NOT to our control when we wakeup. 6385 */ 6386 6387 /* 6388 * Do we need to tell the transport a rwnd update might be 6389 * needed before we go to sleep? 6390 */ 6391 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6392 ((freed_so_far >= rwnd_req) && 6393 (control->do_not_ref_stcb == 0) && 6394 (no_rcv_needed == 0))) { 6395 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6396 } 6397 wait_some_more: 6398 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6399 goto release; 6400 } 6401 6402 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6403 goto release; 6404 6405 if (hold_rlock == 1) { 6406 SCTP_INP_READ_UNLOCK(inp); 6407 hold_rlock = 0; 6408 } 6409 if (hold_sblock == 0) { 6410 SOCKBUF_LOCK(&so->so_rcv); 6411 hold_sblock = 1; 6412 } 6413 if ((copied_so_far) && (control->length == 0) && 6414 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6415 goto release; 6416 } 6417 if (so->so_rcv.sb_cc <= control->held_length) { 6418 error = sbwait(&so->so_rcv); 6419 if (error) { 6420 goto release; 6421 } 6422 control->held_length = 0; 6423 } 6424 if (hold_sblock) { 6425 SOCKBUF_UNLOCK(&so->so_rcv); 6426 hold_sblock = 0; 6427 } 6428 if (control->length == 0) { 6429 /* still nothing here */ 6430 if (control->end_added == 1) { 6431 /* he aborted, or is done i.e.did a shutdown */ 6432 out_flags |= MSG_EOR; 6433 if (control->pdapi_aborted) { 6434 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6435 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6436 6437 out_flags |= MSG_TRUNC; 6438 } else { 6439 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6440 
control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6441 } 6442 goto done_with_control; 6443 } 6444 if (so->so_rcv.sb_cc > held_length) { 6445 control->held_length = so->so_rcv.sb_cc; 6446 held_length = 0; 6447 } 6448 goto wait_some_more; 6449 } else if (control->data == NULL) { 6450 /* 6451 * we must re-sync since data is probably being 6452 * added 6453 */ 6454 SCTP_INP_READ_LOCK(inp); 6455 if ((control->length > 0) && (control->data == NULL)) { 6456 /* 6457 * big trouble.. we have the lock and its 6458 * corrupt? 6459 */ 6460 #ifdef INVARIANTS 6461 panic("Impossible data==NULL length !=0"); 6462 #endif 6463 out_flags |= MSG_EOR; 6464 out_flags |= MSG_TRUNC; 6465 control->length = 0; 6466 SCTP_INP_READ_UNLOCK(inp); 6467 goto done_with_control; 6468 } 6469 SCTP_INP_READ_UNLOCK(inp); 6470 /* We will fall around to get more data */ 6471 } 6472 goto get_more_data; 6473 } else { 6474 /*- 6475 * Give caller back the mbuf chain, 6476 * store in uio_resid the length 6477 */ 6478 wakeup_read_socket = 0; 6479 if ((control->end_added == 0) || 6480 (TAILQ_NEXT(control, next) == NULL)) { 6481 /* Need to get rlock */ 6482 if (hold_rlock == 0) { 6483 SCTP_INP_READ_LOCK(inp); 6484 hold_rlock = 1; 6485 } 6486 } 6487 if (control->end_added) { 6488 out_flags |= MSG_EOR; 6489 if ((control->do_not_ref_stcb == 0) && 6490 (control->stcb != NULL) && 6491 ((control->spec_flags & M_NOTIFICATION) == 0)) 6492 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6493 } 6494 if (control->spec_flags & M_NOTIFICATION) { 6495 out_flags |= MSG_NOTIFICATION; 6496 } 6497 uio->uio_resid = control->length; 6498 *mp = control->data; 6499 m = control->data; 6500 while (m) { 6501 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6502 sctp_sblog(&so->so_rcv, 6503 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6504 } 6505 sctp_sbfree(control, stcb, &so->so_rcv, m); 6506 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6507 freed_so_far += MSIZE; 6508 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6509 sctp_sblog(&so->so_rcv, 6510 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6511 } 6512 m = SCTP_BUF_NEXT(m); 6513 } 6514 control->data = control->tail_mbuf = NULL; 6515 control->length = 0; 6516 if (out_flags & MSG_EOR) { 6517 /* Done with this control */ 6518 goto done_with_control; 6519 } 6520 } 6521 release: 6522 if (hold_rlock == 1) { 6523 SCTP_INP_READ_UNLOCK(inp); 6524 hold_rlock = 0; 6525 } 6526 if (hold_sblock == 1) { 6527 SOCKBUF_UNLOCK(&so->so_rcv); 6528 hold_sblock = 0; 6529 } 6530 6531 sbunlock(&so->so_rcv); 6532 sockbuf_lock = 0; 6533 6534 release_unlocked: 6535 if (hold_sblock) { 6536 SOCKBUF_UNLOCK(&so->so_rcv); 6537 hold_sblock = 0; 6538 } 6539 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6540 if ((freed_so_far >= rwnd_req) && 6541 (control && (control->do_not_ref_stcb == 0)) && 6542 (no_rcv_needed == 0)) 6543 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6544 } 6545 out: 6546 if (msg_flags) { 6547 *msg_flags = out_flags; 6548 } 6549 if (((out_flags & MSG_EOR) == 0) && 6550 ((in_flags & MSG_PEEK) == 0) && 6551 (sinfo) && 6552 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6553 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6554 struct sctp_extrcvinfo *s_extra; 6555 6556 s_extra = (struct sctp_extrcvinfo *)sinfo; 6557 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6558 } 6559 if (hold_rlock == 1) { 6560 SCTP_INP_READ_UNLOCK(inp); 6561 } 6562 if (hold_sblock) { 6563 SOCKBUF_UNLOCK(&so->so_rcv); 6564 } 6565 if (sockbuf_lock) { 6566 sbunlock(&so->so_rcv); 6567 } 6568 6569 if (freecnt_applied) { 6570 /* 6571 * The lock on the socket buffer protects us so the free 6572 * code will stop. 
	 * But since we used the socketbuf lock and
	 * the sender uses the tcb_lock to increment, we need to use
	 * the atomic add to the refcnt.
	 */
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("stcb for refcnt has gone NULL?");
		goto stage_left;
#else
		goto stage_left;
#endif
	}
	/* Save the value back for next time */
	stcb->freed_by_sorcv_sincelast = freed_so_far;
	/* Drop the reference taken earlier when freecnt_applied was set. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			/* No association: log a zero in place of my_rwnd. */
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
stage_left:
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}


#ifdef SCTP_MBUF_LOGGING
/*
 * Free a single mbuf via m_free(), first recording it in the SCTP mbuf
 * trace when SCTP_MBUF_LOGGING_ENABLE is set.  Returns the next mbuf in
 * the chain (the m_free() return value).
 */
struct mbuf *
sctp_m_free(struct mbuf *m)
{
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mb(m, SCTP_MBUF_IFREE);
	}
	return (m_free(m));
}

/* Free an entire mbuf chain, one mbuf at a time through sctp_m_free(). */
void
sctp_m_freem(struct mbuf *mb)
{
	while (mb != NULL)
		mb = sctp_m_free(mb);
}

#endif

/*
 * Request a peer-set-primary for all associations holding the given
 * local address: resolve the sctp_ifa and hand work to the iterator.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now increment the laddr count and initialize the wi structure */
	SCTP_INCR_LADDR_COUNT();
	memset(wi, 0, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* The work-queue entry holds a reference on the ifa. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	SCTP_WQ_ADDR_UNLOCK();
	return (0);
}


/*
 * Protocol-level soreceive() hook for SCTP sockets.  Gathers the peer
 * address and (optionally) receive info, delegates the actual receive
 * work to sctp_sorecvmsg(), then converts the results into the forms
 * the socket layer expects (cmsg control data, duplicated sockaddr).
 */
int
sctp_soreceive(struct socket *so,
    struct sockaddr **psa,
    struct uio *uio,
    struct mbuf **mp0,
    struct mbuf **controlp,
    int *flagsp)
{
	int error, fromlen;
	uint8_t sockbuf[256];	/* scratch space for the peer address */
	struct sockaddr *from;
	struct sctp_extrcvinfo sinfo;
	int filling_sinfo = 1;
	int flags;
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pickup the assoc we are reading from */
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
	    (controlp == NULL)) {
		/* user does not want the sndrcv ctl */
		filling_sinfo = 0;
	}
	if (psa) {
		from = (struct sockaddr *)sockbuf;
		fromlen = sizeof(sockbuf);
		/* sa_len == 0 marks "no address filled in yet" */
		from->sa_len = 0;
	} else {
		from = NULL;
		fromlen = 0;
	}

	if (filling_sinfo) {
		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
	}
	if (flagsp != NULL) {
		flags = *flagsp;
	} else {
		flags = 0;
	}
	/* All the real receive work happens in sctp_sorecvmsg(). */
	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
	if (flagsp != NULL) {
		*flagsp = flags;
	}
	if (controlp != NULL) {
		/* copy back the sinfo in a CMSG format */
		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
			*controlp = sctp_build_ctl_nchunk(inp,
			    (struct sctp_sndrcvinfo *)&sinfo);
		} else {
			*controlp = NULL;
		}
	}
	if (psa) {
		/* copy back the address info */
		if (from && from->sa_len) {
			/*
			 * NOTE(review): sodupsockaddr(..., M_NOWAIT) can
			 * fail; callers must tolerate *psa == NULL.
			 */
			*psa = sodupsockaddr(from, M_NOWAIT);
		} else {
			*psa = NULL;
		}
	}
	return (error);
}


/*
 * Add 'totaddr' packed addresses starting at 'addr' to the association
 * as confirmed remote addresses.  On an invalid address or an internal
 * failure the association is freed, *error is set (EINVAL/ENOBUFS) and
 * the count of addresses added so far is returned.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL,
				    SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast v6 addresses */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/* unknown families are skipped, not counted */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}

/*
 * Validate a packed address array for connectx: count v4 vs v6 entries
 * and make sure none of the addresses already belongs to an existing
 * association on this endpoint.  Returns 0, EINVAL (malformed input or
 * buffer overrun against 'limit') or EALREADY.
 */
int
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    unsigned int totaddr,
    unsigned int *num_v4, unsigned int *num_v6,
    unsigned int limit)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb;
	unsigned int incr, at, i;

	at = 0;
	sa = addr;
	*num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	if (totaddr == 0) {
		return (EINVAL);
	}
	for (i = 0; i < totaddr; i++) {
		/* at least a generic sockaddr must fit before 'limit' */
		if (at + sizeof(struct sockaddr) > limit) {
			return (EINVAL);
		}
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = (unsigned int)sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				return (EINVAL);
			}
			(*num_v4) += 1;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			{
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)sa;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					/* Must be non-mapped for connectx */
					return (EINVAL);
				}
				incr = (unsigned int)sizeof(struct sockaddr_in6);
				if (sa->sa_len != incr) {
					return (EINVAL);
				}
				(*num_v6) += 1;
				break;
			}
#endif
		default:
			return (EINVAL);
		}
		if ((at + incr) > limit) {
			return (EINVAL);
		}
		/* hold the inp while we look up any existing assoc */
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* address already in use by an association */
			SCTP_TCB_UNLOCK(stcb);
			return (EALREADY);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		at += incr;
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return (0);
}

/*
 * sctp_bindx(ADD) for one address.
 * assumes all arguments are valid/checked by caller.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;	/* storage for a converted v4-mapped addr */
#endif

	/* see if we're bound all already!
	 */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* bindx(ADD) makes no sense on a bound-all endpoint */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the v4-mapped address to plain v4 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* first address on an unbound endpoint: a regular bind */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks
	 * required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		/*
		 * NOTE(review): addr_touse may still be AF_INET6 here; the
		 * sockaddr_in cast relies on sin_port and sin6_port sharing
		 * the same offset — confirm.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* nobody else has it: add the address to this ep */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}

/*
 * sctp_bindx(DELETE) for one address.
 * assumes all arguments are valid/checked by caller.
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;	/* storage for a converted v4-mapped addr */
#endif

	/* see if we're bound all already!
	 */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* bindx(DELETE) makes no sense on a bound-all endpoint */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;
#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the v4-mapped address to plain v4 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}

/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;
#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined(INET6)
	int local_scope, site_scope, ipv6_addr_legal;
#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}

	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* jail visibility check */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* jail visibility check */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}

#if defined(SCTP_LOCAL_TRACE_BUF)

/*
 * Append one entry to the global SCTP trace ring.  A lock-free CAS
 * loop on the ring index claims a slot; the entry itself is then
 * written without a lock.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
newindex = 1; 7274 } else { 7275 newindex = saveindex + 1; 7276 } 7277 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7278 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7279 saveindex = 0; 7280 } 7281 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7282 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7283 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7284 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7285 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7286 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7287 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7288 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7289 } 7290 7291 #endif 7292 static void 7293 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7294 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7295 { 7296 struct ip *iph; 7297 #ifdef INET6 7298 struct ip6_hdr *ip6; 7299 #endif 7300 struct mbuf *sp, *last; 7301 struct udphdr *uhdr; 7302 uint16_t port; 7303 7304 if ((m->m_flags & M_PKTHDR) == 0) { 7305 /* Can't handle one that is not a pkt hdr */ 7306 goto out; 7307 } 7308 /* Pull the src port */ 7309 iph = mtod(m, struct ip *); 7310 uhdr = (struct udphdr *)((caddr_t)iph + off); 7311 port = uhdr->uh_sport; 7312 /* 7313 * Split out the mbuf chain. Leave the IP header in m, place the 7314 * rest in the sp. 
7315 */ 7316 sp = m_split(m, off, M_NOWAIT); 7317 if (sp == NULL) { 7318 /* Gak, drop packet, we can't do a split */ 7319 goto out; 7320 } 7321 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7322 /* Gak, packet can't have an SCTP header in it - too small */ 7323 m_freem(sp); 7324 goto out; 7325 } 7326 /* Now pull up the UDP header and SCTP header together */ 7327 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7328 if (sp == NULL) { 7329 /* Gak pullup failed */ 7330 goto out; 7331 } 7332 /* Trim out the UDP header */ 7333 m_adj(sp, sizeof(struct udphdr)); 7334 7335 /* Now reconstruct the mbuf chain */ 7336 for (last = m; last->m_next; last = last->m_next); 7337 last->m_next = sp; 7338 m->m_pkthdr.len += sp->m_pkthdr.len; 7339 /* 7340 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7341 * checksum and it was valid. Since CSUM_DATA_VALID == 7342 * CSUM_SCTP_VALID this would imply that the HW also verified the 7343 * SCTP checksum. Therefore, clear the bit. 
7344 */ 7345 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7346 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7347 m->m_pkthdr.len, 7348 if_name(m->m_pkthdr.rcvif), 7349 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7350 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7351 iph = mtod(m, struct ip *); 7352 switch (iph->ip_v) { 7353 #ifdef INET 7354 case IPVERSION: 7355 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7356 sctp_input_with_port(m, off, port); 7357 break; 7358 #endif 7359 #ifdef INET6 7360 case IPV6_VERSION >> 4: 7361 ip6 = mtod(m, struct ip6_hdr *); 7362 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7363 sctp6_input_with_port(&m, &off, port); 7364 break; 7365 #endif 7366 default: 7367 goto out; 7368 break; 7369 } 7370 return; 7371 out: 7372 m_freem(m); 7373 } 7374 7375 #ifdef INET 7376 static void 7377 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7378 { 7379 struct ip *outer_ip, *inner_ip; 7380 struct sctphdr *sh; 7381 struct icmp *icmp; 7382 struct udphdr *udp; 7383 struct sctp_inpcb *inp; 7384 struct sctp_tcb *stcb; 7385 struct sctp_nets *net; 7386 struct sctp_init_chunk *ch; 7387 struct sockaddr_in src, dst; 7388 uint8_t type, code; 7389 7390 inner_ip = (struct ip *)vip; 7391 icmp = (struct icmp *)((caddr_t)inner_ip - 7392 (sizeof(struct icmp) - sizeof(struct ip))); 7393 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7394 if (ntohs(outer_ip->ip_len) < 7395 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7396 return; 7397 } 7398 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7399 sh = (struct sctphdr *)(udp + 1); 7400 memset(&src, 0, sizeof(struct sockaddr_in)); 7401 src.sin_family = AF_INET; 7402 src.sin_len = sizeof(struct sockaddr_in); 7403 src.sin_port = sh->src_port; 7404 src.sin_addr = inner_ip->ip_src; 7405 memset(&dst, 0, sizeof(struct sockaddr_in)); 7406 
dst.sin_family = AF_INET; 7407 dst.sin_len = sizeof(struct sockaddr_in); 7408 dst.sin_port = sh->dest_port; 7409 dst.sin_addr = inner_ip->ip_dst; 7410 /* 7411 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7412 * holds our local endpoint address. Thus we reverse the dst and the 7413 * src in the lookup. 7414 */ 7415 inp = NULL; 7416 net = NULL; 7417 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7418 (struct sockaddr *)&src, 7419 &inp, &net, 1, 7420 SCTP_DEFAULT_VRFID); 7421 if ((stcb != NULL) && 7422 (net != NULL) && 7423 (inp != NULL)) { 7424 /* Check the UDP port numbers */ 7425 if ((udp->uh_dport != net->port) || 7426 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7427 SCTP_TCB_UNLOCK(stcb); 7428 return; 7429 } 7430 /* Check the verification tag */ 7431 if (ntohl(sh->v_tag) != 0) { 7432 /* 7433 * This must be the verification tag used for 7434 * sending out packets. We don't consider packets 7435 * reflecting the verification tag. 7436 */ 7437 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7438 SCTP_TCB_UNLOCK(stcb); 7439 return; 7440 } 7441 } else { 7442 if (ntohs(outer_ip->ip_len) >= 7443 sizeof(struct ip) + 7444 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7445 /* 7446 * In this case we can check if we got an 7447 * INIT chunk and if the initiate tag 7448 * matches. 
7449 */ 7450 ch = (struct sctp_init_chunk *)(sh + 1); 7451 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7452 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7453 SCTP_TCB_UNLOCK(stcb); 7454 return; 7455 } 7456 } else { 7457 SCTP_TCB_UNLOCK(stcb); 7458 return; 7459 } 7460 } 7461 type = icmp->icmp_type; 7462 code = icmp->icmp_code; 7463 if ((type == ICMP_UNREACH) && 7464 (code == ICMP_UNREACH_PORT)) { 7465 code = ICMP_UNREACH_PROTOCOL; 7466 } 7467 sctp_notify(inp, stcb, net, type, code, 7468 ntohs(inner_ip->ip_len), 7469 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7470 } else { 7471 if ((stcb == NULL) && (inp != NULL)) { 7472 /* reduce ref-count */ 7473 SCTP_INP_WLOCK(inp); 7474 SCTP_INP_DECR_REF(inp); 7475 SCTP_INP_WUNLOCK(inp); 7476 } 7477 if (stcb) { 7478 SCTP_TCB_UNLOCK(stcb); 7479 } 7480 } 7481 return; 7482 } 7483 #endif 7484 7485 #ifdef INET6 7486 static void 7487 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7488 { 7489 struct ip6ctlparam *ip6cp; 7490 struct sctp_inpcb *inp; 7491 struct sctp_tcb *stcb; 7492 struct sctp_nets *net; 7493 struct sctphdr sh; 7494 struct udphdr udp; 7495 struct sockaddr_in6 src, dst; 7496 uint8_t type, code; 7497 7498 ip6cp = (struct ip6ctlparam *)d; 7499 /* 7500 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7501 */ 7502 if (ip6cp->ip6c_m == NULL) { 7503 return; 7504 } 7505 /* 7506 * Check if we can safely examine the ports and the verification tag 7507 * of the SCTP common header. 7508 */ 7509 if (ip6cp->ip6c_m->m_pkthdr.len < 7510 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7511 return; 7512 } 7513 /* Copy out the UDP header. */ 7514 memset(&udp, 0, sizeof(struct udphdr)); 7515 m_copydata(ip6cp->ip6c_m, 7516 ip6cp->ip6c_off, 7517 sizeof(struct udphdr), 7518 (caddr_t)&udp); 7519 /* Copy out the port numbers and the verification tag. 
*/ 7520 memset(&sh, 0, sizeof(struct sctphdr)); 7521 m_copydata(ip6cp->ip6c_m, 7522 ip6cp->ip6c_off + sizeof(struct udphdr), 7523 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7524 (caddr_t)&sh); 7525 memset(&src, 0, sizeof(struct sockaddr_in6)); 7526 src.sin6_family = AF_INET6; 7527 src.sin6_len = sizeof(struct sockaddr_in6); 7528 src.sin6_port = sh.src_port; 7529 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7530 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7531 return; 7532 } 7533 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7534 dst.sin6_family = AF_INET6; 7535 dst.sin6_len = sizeof(struct sockaddr_in6); 7536 dst.sin6_port = sh.dest_port; 7537 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7538 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7539 return; 7540 } 7541 inp = NULL; 7542 net = NULL; 7543 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7544 (struct sockaddr *)&src, 7545 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7546 if ((stcb != NULL) && 7547 (net != NULL) && 7548 (inp != NULL)) { 7549 /* Check the UDP port numbers */ 7550 if ((udp.uh_dport != net->port) || 7551 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7552 SCTP_TCB_UNLOCK(stcb); 7553 return; 7554 } 7555 /* Check the verification tag */ 7556 if (ntohl(sh.v_tag) != 0) { 7557 /* 7558 * This must be the verification tag used for 7559 * sending out packets. We don't consider packets 7560 * reflecting the verification tag. 7561 */ 7562 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7563 SCTP_TCB_UNLOCK(stcb); 7564 return; 7565 } 7566 } else { 7567 if (ip6cp->ip6c_m->m_pkthdr.len >= 7568 ip6cp->ip6c_off + sizeof(struct udphdr) + 7569 sizeof(struct sctphdr) + 7570 sizeof(struct sctp_chunkhdr) + 7571 offsetof(struct sctp_init, a_rwnd)) { 7572 /* 7573 * In this case we can check if we got an 7574 * INIT chunk and if the initiate tag 7575 * matches. 
7576 */ 7577 uint32_t initiate_tag; 7578 uint8_t chunk_type; 7579 7580 m_copydata(ip6cp->ip6c_m, 7581 ip6cp->ip6c_off + 7582 sizeof(struct udphdr) + 7583 sizeof(struct sctphdr), 7584 sizeof(uint8_t), 7585 (caddr_t)&chunk_type); 7586 m_copydata(ip6cp->ip6c_m, 7587 ip6cp->ip6c_off + 7588 sizeof(struct udphdr) + 7589 sizeof(struct sctphdr) + 7590 sizeof(struct sctp_chunkhdr), 7591 sizeof(uint32_t), 7592 (caddr_t)&initiate_tag); 7593 if ((chunk_type != SCTP_INITIATION) || 7594 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7595 SCTP_TCB_UNLOCK(stcb); 7596 return; 7597 } 7598 } else { 7599 SCTP_TCB_UNLOCK(stcb); 7600 return; 7601 } 7602 } 7603 type = ip6cp->ip6c_icmp6->icmp6_type; 7604 code = ip6cp->ip6c_icmp6->icmp6_code; 7605 if ((type == ICMP6_DST_UNREACH) && 7606 (code == ICMP6_DST_UNREACH_NOPORT)) { 7607 type = ICMP6_PARAM_PROB; 7608 code = ICMP6_PARAMPROB_NEXTHEADER; 7609 } 7610 sctp6_notify(inp, stcb, net, type, code, 7611 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7612 } else { 7613 if ((stcb == NULL) && (inp != NULL)) { 7614 /* reduce inp's ref-count */ 7615 SCTP_INP_WLOCK(inp); 7616 SCTP_INP_DECR_REF(inp); 7617 SCTP_INP_WUNLOCK(inp); 7618 } 7619 if (stcb) { 7620 SCTP_TCB_UNLOCK(stcb); 7621 } 7622 } 7623 } 7624 #endif 7625 7626 void 7627 sctp_over_udp_stop(void) 7628 { 7629 /* 7630 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7631 * for writting! 
7632 */ 7633 #ifdef INET 7634 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7635 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7636 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7637 } 7638 #endif 7639 #ifdef INET6 7640 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7641 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7642 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7643 } 7644 #endif 7645 } 7646 7647 int 7648 sctp_over_udp_start(void) 7649 { 7650 uint16_t port; 7651 int ret; 7652 #ifdef INET 7653 struct sockaddr_in sin; 7654 #endif 7655 #ifdef INET6 7656 struct sockaddr_in6 sin6; 7657 #endif 7658 /* 7659 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7660 * for writting! 7661 */ 7662 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7663 if (ntohs(port) == 0) { 7664 /* Must have a port set */ 7665 return (EINVAL); 7666 } 7667 #ifdef INET 7668 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7669 /* Already running -- must stop first */ 7670 return (EALREADY); 7671 } 7672 #endif 7673 #ifdef INET6 7674 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7675 /* Already running -- must stop first */ 7676 return (EALREADY); 7677 } 7678 #endif 7679 #ifdef INET 7680 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7681 SOCK_DGRAM, IPPROTO_UDP, 7682 curthread->td_ucred, curthread))) { 7683 sctp_over_udp_stop(); 7684 return (ret); 7685 } 7686 /* Call the special UDP hook. */ 7687 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7688 sctp_recv_udp_tunneled_packet, 7689 sctp_recv_icmp_tunneled_packet, 7690 NULL))) { 7691 sctp_over_udp_stop(); 7692 return (ret); 7693 } 7694 /* Ok, we have a socket, bind it to the port. 
*/ 7695 memset(&sin, 0, sizeof(struct sockaddr_in)); 7696 sin.sin_len = sizeof(struct sockaddr_in); 7697 sin.sin_family = AF_INET; 7698 sin.sin_port = htons(port); 7699 if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket), 7700 (struct sockaddr *)&sin, curthread))) { 7701 sctp_over_udp_stop(); 7702 return (ret); 7703 } 7704 #endif 7705 #ifdef INET6 7706 if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket), 7707 SOCK_DGRAM, IPPROTO_UDP, 7708 curthread->td_ucred, curthread))) { 7709 sctp_over_udp_stop(); 7710 return (ret); 7711 } 7712 /* Call the special UDP hook. */ 7713 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket), 7714 sctp_recv_udp_tunneled_packet, 7715 sctp_recv_icmp6_tunneled_packet, 7716 NULL))) { 7717 sctp_over_udp_stop(); 7718 return (ret); 7719 } 7720 /* Ok, we have a socket, bind it to the port. */ 7721 memset(&sin6, 0, sizeof(struct sockaddr_in6)); 7722 sin6.sin6_len = sizeof(struct sockaddr_in6); 7723 sin6.sin6_family = AF_INET6; 7724 sin6.sin6_port = htons(port); 7725 if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket), 7726 (struct sockaddr *)&sin6, curthread))) { 7727 sctp_over_udp_stop(); 7728 return (ret); 7729 } 7730 #endif 7731 return (0); 7732 } 7733 7734 /* 7735 * sctp_min_mtu ()returns the minimum of all non-zero arguments. 7736 * If all arguments are zero, zero is returned. 
7737 */ 7738 uint32_t 7739 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3) 7740 { 7741 if (mtu1 > 0) { 7742 if (mtu2 > 0) { 7743 if (mtu3 > 0) { 7744 return (min(mtu1, min(mtu2, mtu3))); 7745 } else { 7746 return (min(mtu1, mtu2)); 7747 } 7748 } else { 7749 if (mtu3 > 0) { 7750 return (min(mtu1, mtu3)); 7751 } else { 7752 return (mtu1); 7753 } 7754 } 7755 } else { 7756 if (mtu2 > 0) { 7757 if (mtu3 > 0) { 7758 return (min(mtu2, mtu3)); 7759 } else { 7760 return (mtu2); 7761 } 7762 } else { 7763 return (mtu3); 7764 } 7765 } 7766 } 7767 7768 void 7769 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu) 7770 { 7771 struct in_conninfo inc; 7772 7773 memset(&inc, 0, sizeof(struct in_conninfo)); 7774 inc.inc_fibnum = fibnum; 7775 switch (addr->sa.sa_family) { 7776 #ifdef INET 7777 case AF_INET: 7778 inc.inc_faddr = addr->sin.sin_addr; 7779 break; 7780 #endif 7781 #ifdef INET6 7782 case AF_INET6: 7783 inc.inc_flags |= INC_ISIPV6; 7784 inc.inc6_faddr = addr->sin6.sin6_addr; 7785 break; 7786 #endif 7787 default: 7788 return; 7789 } 7790 tcp_hc_updatemtu(&inc, (u_long)mtu); 7791 } 7792 7793 uint32_t 7794 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum) 7795 { 7796 struct in_conninfo inc; 7797 7798 memset(&inc, 0, sizeof(struct in_conninfo)); 7799 inc.inc_fibnum = fibnum; 7800 switch (addr->sa.sa_family) { 7801 #ifdef INET 7802 case AF_INET: 7803 inc.inc_faddr = addr->sin.sin_addr; 7804 break; 7805 #endif 7806 #ifdef INET6 7807 case AF_INET6: 7808 inc.inc_flags |= INC_ISIPV6; 7809 inc.inc6_faddr = addr->sin6.sin6_addr; 7810 break; 7811 #endif 7812 default: 7813 return (0); 7814 } 7815 return ((uint32_t)tcp_hc_getmtu(&inc)); 7816 } 7817 7818 void 7819 sctp_set_state(struct sctp_tcb *stcb, int new_state) 7820 { 7821 #if defined(KDTRACE_HOOKS) 7822 int old_state = stcb->asoc.state; 7823 #endif 7824 7825 KASSERT((new_state & ~SCTP_STATE_MASK) == 0, 7826 ("sctp_set_state: Can't set substate (new_state = %x)", 7827 new_state)); 7828 
stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state; 7829 if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) || 7830 (new_state == SCTP_STATE_SHUTDOWN_SENT) || 7831 (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) { 7832 SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING); 7833 } 7834 #if defined(KDTRACE_HOOKS) 7835 if (((old_state & SCTP_STATE_MASK) != new_state) && 7836 !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) && 7837 (new_state == SCTP_STATE_INUSE))) { 7838 SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state); 7839 } 7840 #endif 7841 } 7842 7843 void 7844 sctp_add_substate(struct sctp_tcb *stcb, int substate) 7845 { 7846 #if defined(KDTRACE_HOOKS) 7847 int old_state = stcb->asoc.state; 7848 #endif 7849 7850 KASSERT((substate & SCTP_STATE_MASK) == 0, 7851 ("sctp_add_substate: Can't set state (substate = %x)", 7852 substate)); 7853 stcb->asoc.state |= substate; 7854 #if defined(KDTRACE_HOOKS) 7855 if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) && 7856 ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) || 7857 ((substate & SCTP_STATE_SHUTDOWN_PENDING) && 7858 ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) { 7859 SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state); 7860 } 7861 #endif 7862 } 7863