1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * a) Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 14 * b) Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the distribution. 17 * 18 * c) Neither the name of Cisco Systems, Inc. nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif


#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

/* Congestion-control and stream-scheduling function tables, defined elsewhere. */
extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

/*
 * Record a socket-buffer accounting event in the kernel trace buffer.
 * The event-specific fields are written into sctp_clog.x.sb and then read
 * back through sctp_clog.x.misc.log1..log4 (apparently overlapping members
 * of a union inside struct sctp_cwnd_log — TODO confirm) so SCTP_CTR6 can
 * emit them as four packed words.  The local is marked __unused because,
 * presumably, SCTP_CTR6 compiles to nothing when KTR tracing is disabled —
 * verify against the SCTP_CTR6 definition.
 */
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
	struct sctp_cwnd_log sctp_clog __unused;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace an endpoint/association close event.  "loc" identifies the call
 * site; note the stcb-specific fields are zeroed when no association is
 * supplied.
 */
void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
	struct sctp_cwnd_log sctp_clog __unused;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace an RTO/RTT measurement for a destination.  net->rtt is divided by
 * 1000 before logging (presumably converting microseconds to milliseconds —
 * confirm against the units of net->rtt).
 */
void
rto_logging(struct sctp_nets *net, int from)
{
	struct sctp_cwnd_log sctp_clog __unused;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace a stream-delivery event given explicit TSN/sequence/stream values
 * (the "_alt" variant takes raw numbers instead of queued-chunk pointers;
 * the e_tsn/e_sseq "end" fields are always zero here).
 */
void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
	struct sctp_cwnd_log sctp_clog __unused;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace a Nagle-algorithm decision, capturing the association's current
 * flight and output-queue counters.
 */
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
	struct sctp_cwnd_log sctp_clog __unused;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace SACK processing: old and new cumulative acks, the highest TSN seen,
 * and the gap/duplicate report counts.
 */
void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups,
    int from)
{
	struct sctp_cwnd_log sctp_clog __unused;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace the state of the TSN mapping array: base, cumulative-ack point and
 * highest TSN.
 */
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
	struct sctp_cwnd_log sctp_clog __unused;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace a fast-retransmit decision: the biggest TSN seen, the biggest newly
 * acked TSN, and the TSN under consideration.
 */
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
	struct sctp_cwnd_log sctp_clog __unused;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

#ifdef SCTP_MBUF_LOGGING
/*
 * Trace one mbuf: flags, length, data pointer, and — for clusters — the
 * external buffer base and its reference count.
 */
void
sctp_log_mb(struct mbuf *m, int from)
{
	struct sctp_cwnd_log sctp_clog __unused;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace every mbuf in a chain by walking SCTP_BUF_NEXT and logging each one.
 */
void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

/*
 * Trace a stream-delivery event for a queued read entry; "poschk" optionally
 * supplies a second entry whose TSN/mid fill the e_tsn/e_sseq fields.
 * Bails out (with a console message) if control is NULL.
 */
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
	struct sctp_cwnd_log sctp_clog __unused;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace a congestion-window event.  Queue counters are clamped to 255 to
 * fit the (apparently 8-bit) log fields; net may be NULL, in which case the
 * per-destination fields are left as-is.
 */
void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog __unused;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	/*
	 * For pre-send events the meets_pseudo_cumack field is reused to
	 * carry the peer's receive window instead.
	 */
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace the ownership state of the major SCTP locks (tcb, inp, create,
 * info, socket and socket-buffer mutexes).  Both sock_lock and
 * sockrcvbuf_lock read so_rcv.sb_mtx — presumably because the socket lock
 * aliases the receive-buffer mutex on this platform; confirm against the
 * socket-locking macros.  Fields for absent objects are set to
 * SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog __unused;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace a max-burst limit event; reuses the cwnd log layout with "error"
 * in cwnd_new_value and "burst" in cwnd_augment.  Queue counters are
 * clamped to 255 as in sctp_log_cwnd.
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog __unused;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace peer receive-window accounting (no new-rwnd value; see
 * sctp_log_rwnd_set for the variant that records one).
 */
void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
	struct sctp_cwnd_log sctp_clog __unused;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace a peer receive-window update, including the advertised rwnd value
 * that was applied.
 */
void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
	struct sctp_cwnd_log sctp_clog __unused;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

#ifdef SCTP_MBCNT_LOGGING
/*
 * Trace mbuf-count accounting changes for the output queue.
 */
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
	struct sctp_cwnd_log sctp_clog __unused;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
#endif

/*
 * Trace four arbitrary 32-bit values as a miscellaneous event — the only
 * logger here that takes the words directly instead of packing a struct.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}

/*
 * Trace a socket-wakeup event: flight/queue counters (8-bit fields clamped
 * at 0xff), the endpoint's deferred-wakeup flags packed into bits 0-2, and
 * the low byte of the send buffer's sb_flags (0xff when no socket).
 */
void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
	struct sctp_cwnd_log sctp_clog __unused;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the defered mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/*
	 * what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace a sender-block event: queue occupancy, peer rwnd, flight size in
 * KiB, and the length of the send that blocked.
 */
void
sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
{
	struct sctp_cwnd_log sctp_clog __unused;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Stub for the getsockopt stat-log path; always succeeds with no data.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
/* Circular two-byte-record audit trail and its write index. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

/*
 * Dump the audit ring to the console in chronological order: first from
 * the current index to the end, then from the start up to the index.
 * Records 0xe0/01, 0xf0/xx and 0xc0/01 act as line-break markers; output
 * wraps every 14 records otherwise.
 */
static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	/* Second half of the ring: oldest entries before the write index. */
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}

/*
 * Cross-check the association's retransmit/flight accounting against the
 * sent queue and the per-destination flight sizes, recording audit entries
 * as it goes.  On any mismatch it prints a diagnostic, CORRECTS the stored
 * counters to the recomputed values, and finally dumps the audit report.
 * Record tags: 0xAA = entry, 0xAF = error marker, 0xA1/0xA2 = retran
 * counts before/after correction.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	/* Recount retransmit-marked chunks and bytes/chunks in flight. */
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Now check the per-destination flight sizes against the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

/*
 * Append one two-byte record to the circular audit trail.
 */
void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	/* Association-wide timers... */
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	/* ...and the per-destination PMTU and heartbeat timers. */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
	}
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
763 */ 764 static uint32_t sctp_mtu_sizes[] = { 765 68, 766 296, 767 508, 768 512, 769 544, 770 576, 771 1004, 772 1492, 773 1500, 774 1536, 775 2000, 776 2048, 777 4352, 778 4464, 779 8166, 780 17912, 781 32000, 782 65532 783 }; 784 785 /* 786 * Return the largest MTU in sctp_mtu_sizes smaller than val. 787 * If val is smaller than the minimum, just return the largest 788 * multiple of 4 smaller or equal to val. 789 * Ensure that the result is a multiple of 4. 790 */ 791 uint32_t 792 sctp_get_prev_mtu(uint32_t val) 793 { 794 uint32_t i; 795 796 val &= 0x00000003; 797 if (val <= sctp_mtu_sizes[0]) { 798 return (val); 799 } 800 for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) { 801 if (val <= sctp_mtu_sizes[i]) { 802 break; 803 } 804 } 805 KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0, 806 ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1)); 807 return (sctp_mtu_sizes[i - 1]); 808 } 809 810 /* 811 * Return the smallest MTU in sctp_mtu_sizes larger than val. 812 * If val is larger than the maximum, just return the largest multiple of 4 smaller 813 * or equal to val. 814 * Ensure that the result is a multiple of 4. 815 */ 816 uint32_t 817 sctp_get_next_mtu(uint32_t val) 818 { 819 /* select another MTU that is just bigger than this one */ 820 uint32_t i; 821 822 val &= 0x00000003; 823 for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) { 824 if (val < sctp_mtu_sizes[i]) { 825 KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0, 826 ("sctp_mtu_sizes[%u] not a multiple of 4", i)); 827 return (sctp_mtu_sizes[i]); 828 } 829 } 830 return (val); 831 } 832 833 void 834 sctp_fill_random_store(struct sctp_pcb *m) 835 { 836 /* 837 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and 838 * our counter. The result becomes our good random numbers and we 839 * then setup to give these out. Note that we do no locking to 840 * protect this. 
	 * This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}

/*
 * Draw a 32-bit value from the endpoint's random store.  The store offset
 * is advanced with a lock-free compare-and-swap retry loop; when the offset
 * wraps, the store is refilled.  If initial_sequence_debug is set, a simple
 * incrementing counter is returned instead (deterministic debug mode).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* Claim [store_at, store_at+4) atomically; retry if we lost the race. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

/*
 * Pick a verification tag.  Zero is never returned; when "check" is set,
 * loop until sctp_is_vtag_good() accepts the tag for this port pair.
 */
uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}

/*
 * Map an internal kernel association state to the user-visible SCTP_*
 * state constant.  ABORTED and SHUTDOWN_PENDING flag bits take precedence
 * over the masked base state.
 */
int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}

/*
 * Initialize a freshly allocated association from the endpoint defaults.
 * Returns 0 on success or ENOMEM if a required allocation fails.
 * (Definition continues beyond this view.)
 */
int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value.
*/ 979 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE); 980 asoc->max_burst = inp->sctp_ep.max_burst; 981 asoc->fr_max_burst = inp->sctp_ep.fr_max_burst; 982 asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 983 asoc->cookie_life = inp->sctp_ep.def_cookie_life; 984 asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off; 985 asoc->ecn_supported = inp->ecn_supported; 986 asoc->prsctp_supported = inp->prsctp_supported; 987 asoc->idata_supported = inp->idata_supported; 988 asoc->auth_supported = inp->auth_supported; 989 asoc->asconf_supported = inp->asconf_supported; 990 asoc->reconfig_supported = inp->reconfig_supported; 991 asoc->nrsack_supported = inp->nrsack_supported; 992 asoc->pktdrop_supported = inp->pktdrop_supported; 993 asoc->idata_supported = inp->idata_supported; 994 asoc->sctp_cmt_pf = (uint8_t)0; 995 asoc->sctp_frag_point = inp->sctp_frag_point; 996 asoc->sctp_features = inp->sctp_features; 997 asoc->default_dscp = inp->sctp_ep.default_dscp; 998 asoc->max_cwnd = inp->max_cwnd; 999 #ifdef INET6 1000 if (inp->sctp_ep.default_flowlabel) { 1001 asoc->default_flowlabel = inp->sctp_ep.default_flowlabel; 1002 } else { 1003 if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) { 1004 asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep); 1005 asoc->default_flowlabel &= 0x000fffff; 1006 asoc->default_flowlabel |= 0x80000000; 1007 } else { 1008 asoc->default_flowlabel = 0; 1009 } 1010 } 1011 #endif 1012 asoc->sb_send_resv = 0; 1013 if (override_tag) { 1014 asoc->my_vtag = override_tag; 1015 } else { 1016 asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1); 1017 } 1018 /* Get the nonce tags */ 1019 asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 1020 asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 1021 asoc->vrf_id = vrf_id; 1022 1023 #ifdef SCTP_ASOCLOG_OF_TSNS 1024 asoc->tsn_in_at = 0; 1025 asoc->tsn_out_at = 0; 
1026 asoc->tsn_in_wrapped = 0; 1027 asoc->tsn_out_wrapped = 0; 1028 asoc->cumack_log_at = 0; 1029 asoc->cumack_log_atsnt = 0; 1030 #endif 1031 #ifdef SCTP_FS_SPEC_LOG 1032 asoc->fs_index = 0; 1033 #endif 1034 asoc->refcnt = 0; 1035 asoc->assoc_up_sent = 0; 1036 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq = 1037 sctp_select_initial_TSN(&inp->sctp_ep); 1038 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1; 1039 /* we are optimisitic here */ 1040 asoc->peer_supports_nat = 0; 1041 asoc->sent_queue_retran_cnt = 0; 1042 1043 /* for CMT */ 1044 asoc->last_net_cmt_send_started = NULL; 1045 1046 /* This will need to be adjusted */ 1047 asoc->last_acked_seq = asoc->init_seq_number - 1; 1048 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1049 asoc->asconf_seq_in = asoc->last_acked_seq; 1050 1051 /* here we are different, we hold the next one we expect */ 1052 asoc->str_reset_seq_in = asoc->last_acked_seq + 1; 1053 1054 asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max; 1055 asoc->initial_rto = inp->sctp_ep.initial_rto; 1056 1057 asoc->default_mtu = inp->sctp_ep.default_mtu; 1058 asoc->max_init_times = inp->sctp_ep.max_init_times; 1059 asoc->max_send_times = inp->sctp_ep.max_send_times; 1060 asoc->def_net_failure = inp->sctp_ep.def_net_failure; 1061 asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold; 1062 asoc->free_chunk_cnt = 0; 1063 1064 asoc->iam_blocking = 0; 1065 asoc->context = inp->sctp_context; 1066 asoc->local_strreset_support = inp->local_strreset_support; 1067 asoc->def_send = inp->def_send; 1068 asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 1069 asoc->sack_freq = inp->sctp_ep.sctp_sack_freq; 1070 asoc->pr_sctp_cnt = 0; 1071 asoc->total_output_queue_size = 0; 1072 1073 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1074 asoc->scope.ipv6_addr_legal = 1; 1075 if (SCTP_IPV6_V6ONLY(inp) == 0) { 1076 asoc->scope.ipv4_addr_legal = 1; 1077 } else { 
1078 asoc->scope.ipv4_addr_legal = 0; 1079 } 1080 } else { 1081 asoc->scope.ipv6_addr_legal = 0; 1082 asoc->scope.ipv4_addr_legal = 1; 1083 } 1084 1085 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND); 1086 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket); 1087 1088 asoc->smallest_mtu = inp->sctp_frag_point; 1089 asoc->minrto = inp->sctp_ep.sctp_minrto; 1090 asoc->maxrto = inp->sctp_ep.sctp_maxrto; 1091 1092 asoc->stream_locked_on = 0; 1093 asoc->ecn_echo_cnt_onq = 0; 1094 asoc->stream_locked = 0; 1095 1096 asoc->send_sack = 1; 1097 1098 LIST_INIT(&asoc->sctp_restricted_addrs); 1099 1100 TAILQ_INIT(&asoc->nets); 1101 TAILQ_INIT(&asoc->pending_reply_queue); 1102 TAILQ_INIT(&asoc->asconf_ack_sent); 1103 /* Setup to fill the hb random cache at first HB */ 1104 asoc->hb_random_idx = 4; 1105 1106 asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time; 1107 1108 stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module; 1109 stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module]; 1110 1111 stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module; 1112 stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module]; 1113 1114 /* 1115 * Now the stream parameters, here we allocate space for all streams 1116 * that we request by default. 
1117 */ 1118 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams = 1119 o_strms; 1120 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *, 1121 asoc->streamoutcnt * sizeof(struct sctp_stream_out), 1122 SCTP_M_STRMO); 1123 if (asoc->strmout == NULL) { 1124 /* big trouble no memory */ 1125 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1126 return (ENOMEM); 1127 } 1128 for (i = 0; i < asoc->streamoutcnt; i++) { 1129 /* 1130 * inbound side must be set to 0xffff, also NOTE when we get 1131 * the INIT-ACK back (for INIT sender) we MUST reduce the 1132 * count (streamoutcnt) but first check if we sent to any of 1133 * the upper streams that were dropped (if some were). Those 1134 * that were dropped must be notified to the upper layer as 1135 * failed to send. 1136 */ 1137 asoc->strmout[i].next_mid_ordered = 0; 1138 asoc->strmout[i].next_mid_unordered = 0; 1139 TAILQ_INIT(&asoc->strmout[i].outqueue); 1140 asoc->strmout[i].chunks_on_queues = 0; 1141 #if defined(SCTP_DETAILED_STR_STATS) 1142 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) { 1143 asoc->strmout[i].abandoned_sent[j] = 0; 1144 asoc->strmout[i].abandoned_unsent[j] = 0; 1145 } 1146 #else 1147 asoc->strmout[i].abandoned_sent[0] = 0; 1148 asoc->strmout[i].abandoned_unsent[0] = 0; 1149 #endif 1150 asoc->strmout[i].sid = i; 1151 asoc->strmout[i].last_msg_incomplete = 0; 1152 asoc->strmout[i].state = SCTP_STREAM_OPENING; 1153 asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL); 1154 } 1155 asoc->ss_functions.sctp_ss_init(stcb, asoc, 0); 1156 1157 /* Now the mapping array */ 1158 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY; 1159 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size, 1160 SCTP_M_MAP); 1161 if (asoc->mapping_array == NULL) { 1162 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1163 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1164 return (ENOMEM); 1165 } 1166 memset(asoc->mapping_array, 0, asoc->mapping_array_size); 
1167 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size, 1168 SCTP_M_MAP); 1169 if (asoc->nr_mapping_array == NULL) { 1170 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1171 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1172 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1173 return (ENOMEM); 1174 } 1175 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size); 1176 1177 /* Now the init of the other outqueues */ 1178 TAILQ_INIT(&asoc->free_chunks); 1179 TAILQ_INIT(&asoc->control_send_queue); 1180 TAILQ_INIT(&asoc->asconf_send_queue); 1181 TAILQ_INIT(&asoc->send_queue); 1182 TAILQ_INIT(&asoc->sent_queue); 1183 TAILQ_INIT(&asoc->resetHead); 1184 asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome; 1185 TAILQ_INIT(&asoc->asconf_queue); 1186 /* authentication fields */ 1187 asoc->authinfo.random = NULL; 1188 asoc->authinfo.active_keyid = 0; 1189 asoc->authinfo.assoc_key = NULL; 1190 asoc->authinfo.assoc_keyid = 0; 1191 asoc->authinfo.recv_key = NULL; 1192 asoc->authinfo.recv_keyid = 0; 1193 LIST_INIT(&asoc->shared_keys); 1194 asoc->marked_retrans = 0; 1195 asoc->port = inp->sctp_ep.port; 1196 asoc->timoinit = 0; 1197 asoc->timodata = 0; 1198 asoc->timosack = 0; 1199 asoc->timoshutdown = 0; 1200 asoc->timoheartbeat = 0; 1201 asoc->timocookie = 0; 1202 asoc->timoshutdownack = 0; 1203 (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time); 1204 asoc->discontinuity_time = asoc->start_time; 1205 for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) { 1206 asoc->abandoned_unsent[i] = 0; 1207 asoc->abandoned_sent[i] = 0; 1208 } 1209 /* 1210 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and 1211 * freed later when the association is freed. 
1212 */ 1213 return (0); 1214 } 1215 1216 void 1217 sctp_print_mapping_array(struct sctp_association *asoc) 1218 { 1219 unsigned int i, limit; 1220 1221 SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n", 1222 asoc->mapping_array_size, 1223 asoc->mapping_array_base_tsn, 1224 asoc->cumulative_tsn, 1225 asoc->highest_tsn_inside_map, 1226 asoc->highest_tsn_inside_nr_map); 1227 for (limit = asoc->mapping_array_size; limit > 1; limit--) { 1228 if (asoc->mapping_array[limit - 1] != 0) { 1229 break; 1230 } 1231 } 1232 SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit); 1233 for (i = 0; i < limit; i++) { 1234 SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n'); 1235 } 1236 if (limit % 16) 1237 SCTP_PRINTF("\n"); 1238 for (limit = asoc->mapping_array_size; limit > 1; limit--) { 1239 if (asoc->nr_mapping_array[limit - 1]) { 1240 break; 1241 } 1242 } 1243 SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit); 1244 for (i = 0; i < limit; i++) { 1245 SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? 
' ' : '\n'); 1246 } 1247 if (limit % 16) 1248 SCTP_PRINTF("\n"); 1249 } 1250 1251 int 1252 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed) 1253 { 1254 /* mapping array needs to grow */ 1255 uint8_t *new_array1, *new_array2; 1256 uint32_t new_size; 1257 1258 new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR); 1259 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP); 1260 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP); 1261 if ((new_array1 == NULL) || (new_array2 == NULL)) { 1262 /* can't get more, forget it */ 1263 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size); 1264 if (new_array1) { 1265 SCTP_FREE(new_array1, SCTP_M_MAP); 1266 } 1267 if (new_array2) { 1268 SCTP_FREE(new_array2, SCTP_M_MAP); 1269 } 1270 return (-1); 1271 } 1272 memset(new_array1, 0, new_size); 1273 memset(new_array2, 0, new_size); 1274 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size); 1275 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size); 1276 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1277 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP); 1278 asoc->mapping_array = new_array1; 1279 asoc->nr_mapping_array = new_array2; 1280 asoc->mapping_array_size = new_size; 1281 return (0); 1282 } 1283 1284 1285 static void 1286 sctp_iterator_work(struct sctp_iterator *it) 1287 { 1288 int iteration_count = 0; 1289 int inp_skip = 0; 1290 int first_in = 1; 1291 struct sctp_inpcb *tinp; 1292 1293 SCTP_INP_INFO_RLOCK(); 1294 SCTP_ITERATOR_LOCK(); 1295 sctp_it_ctl.cur_it = it; 1296 if (it->inp) { 1297 SCTP_INP_RLOCK(it->inp); 1298 SCTP_INP_DECR_REF(it->inp); 1299 } 1300 if (it->inp == NULL) { 1301 /* iterator is complete */ 1302 done_with_iterator: 1303 sctp_it_ctl.cur_it = NULL; 1304 SCTP_ITERATOR_UNLOCK(); 1305 SCTP_INP_INFO_RUNLOCK(); 1306 if (it->function_atend != NULL) { 1307 (*it->function_atend) (it->pointer, it->val); 1308 } 1309 SCTP_FREE(it, SCTP_M_ITER); 1310 return; 1311 
} 1312 select_a_new_ep: 1313 if (first_in) { 1314 first_in = 0; 1315 } else { 1316 SCTP_INP_RLOCK(it->inp); 1317 } 1318 while (((it->pcb_flags) && 1319 ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) || 1320 ((it->pcb_features) && 1321 ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) { 1322 /* endpoint flags or features don't match, so keep looking */ 1323 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1324 SCTP_INP_RUNLOCK(it->inp); 1325 goto done_with_iterator; 1326 } 1327 tinp = it->inp; 1328 it->inp = LIST_NEXT(it->inp, sctp_list); 1329 SCTP_INP_RUNLOCK(tinp); 1330 if (it->inp == NULL) { 1331 goto done_with_iterator; 1332 } 1333 SCTP_INP_RLOCK(it->inp); 1334 } 1335 /* now go through each assoc which is in the desired state */ 1336 if (it->done_current_ep == 0) { 1337 if (it->function_inp != NULL) 1338 inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val); 1339 it->done_current_ep = 1; 1340 } 1341 if (it->stcb == NULL) { 1342 /* run the per instance function */ 1343 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list); 1344 } 1345 if ((inp_skip) || it->stcb == NULL) { 1346 if (it->function_inp_end != NULL) { 1347 inp_skip = (*it->function_inp_end) (it->inp, 1348 it->pointer, 1349 it->val); 1350 } 1351 SCTP_INP_RUNLOCK(it->inp); 1352 goto no_stcb; 1353 } 1354 while (it->stcb) { 1355 SCTP_TCB_LOCK(it->stcb); 1356 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) { 1357 /* not in the right state... 
keep looking */ 1358 SCTP_TCB_UNLOCK(it->stcb); 1359 goto next_assoc; 1360 } 1361 /* see if we have limited out the iterator loop */ 1362 iteration_count++; 1363 if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) { 1364 /* Pause to let others grab the lock */ 1365 atomic_add_int(&it->stcb->asoc.refcnt, 1); 1366 SCTP_TCB_UNLOCK(it->stcb); 1367 SCTP_INP_INCR_REF(it->inp); 1368 SCTP_INP_RUNLOCK(it->inp); 1369 SCTP_ITERATOR_UNLOCK(); 1370 SCTP_INP_INFO_RUNLOCK(); 1371 SCTP_INP_INFO_RLOCK(); 1372 SCTP_ITERATOR_LOCK(); 1373 if (sctp_it_ctl.iterator_flags) { 1374 /* We won't be staying here */ 1375 SCTP_INP_DECR_REF(it->inp); 1376 atomic_add_int(&it->stcb->asoc.refcnt, -1); 1377 if (sctp_it_ctl.iterator_flags & 1378 SCTP_ITERATOR_STOP_CUR_IT) { 1379 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT; 1380 goto done_with_iterator; 1381 } 1382 if (sctp_it_ctl.iterator_flags & 1383 SCTP_ITERATOR_STOP_CUR_INP) { 1384 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP; 1385 goto no_stcb; 1386 } 1387 /* If we reach here huh? 
*/ 1388 SCTP_PRINTF("Unknown it ctl flag %x\n", 1389 sctp_it_ctl.iterator_flags); 1390 sctp_it_ctl.iterator_flags = 0; 1391 } 1392 SCTP_INP_RLOCK(it->inp); 1393 SCTP_INP_DECR_REF(it->inp); 1394 SCTP_TCB_LOCK(it->stcb); 1395 atomic_add_int(&it->stcb->asoc.refcnt, -1); 1396 iteration_count = 0; 1397 } 1398 /* run function on this one */ 1399 (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val); 1400 1401 /* 1402 * we lie here, it really needs to have its own type but 1403 * first I must verify that this won't effect things :-0 1404 */ 1405 if (it->no_chunk_output == 0) 1406 sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1407 1408 SCTP_TCB_UNLOCK(it->stcb); 1409 next_assoc: 1410 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist); 1411 if (it->stcb == NULL) { 1412 /* Run last function */ 1413 if (it->function_inp_end != NULL) { 1414 inp_skip = (*it->function_inp_end) (it->inp, 1415 it->pointer, 1416 it->val); 1417 } 1418 } 1419 } 1420 SCTP_INP_RUNLOCK(it->inp); 1421 no_stcb: 1422 /* done with all assocs on this endpoint, move on to next endpoint */ 1423 it->done_current_ep = 0; 1424 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1425 it->inp = NULL; 1426 } else { 1427 it->inp = LIST_NEXT(it->inp, sctp_list); 1428 } 1429 if (it->inp == NULL) { 1430 goto done_with_iterator; 1431 } 1432 goto select_a_new_ep; 1433 } 1434 1435 void 1436 sctp_iterator_worker(void) 1437 { 1438 struct sctp_iterator *it, *nit; 1439 1440 /* This function is called with the WQ lock in place */ 1441 1442 sctp_it_ctl.iterator_running = 1; 1443 TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) { 1444 /* now lets work on this one */ 1445 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr); 1446 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1447 CURVNET_SET(it->vn); 1448 sctp_iterator_work(it); 1449 CURVNET_RESTORE(); 1450 SCTP_IPI_ITERATOR_WQ_LOCK(); 1451 /* sa_ignore FREED_MEMORY */ 1452 } 1453 sctp_it_ctl.iterator_running = 0; 1454 return; 
}

/*
 * Service the global address work queue that is filled from routing
 * socket (address change) events: move all pending entries onto a
 * freshly allocated asconf iterator and start that iterator over all
 * bound-all endpoints.  If no memory is available the ADDR_WQ timer is
 * restarted to retry later; if the iterator cannot be started the
 * entries are either finished immediately (when the stack is shutting
 * down) or put back on the work queue.
 */
static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	/* Move every queued address event onto the iterator's own list. */
	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL,	/* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Freeing if we are stopping or put back on the
			 * addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				sctp_asconf_iterator_end(asc, 0);
			} else {
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}

/*
 * Callout entry point for every SCTP timer.  't' is the sctp_timer that
 * fired; its ep/tcb/net fields identify the endpoint, association and
 * destination it belongs to.  The function first validates the timer
 * (stale self pointer, invalid type, vanished inp/stcb), pins inp with
 * a reference and stcb with a refcount, takes the appropriate lock
 * (TCB, INP write, or the address WQ lock), re-checks that the callout
 * was neither rescheduled nor stopped, and only then dispatches to the
 * per-type handler.  Handlers that destroy the stcb/inp jump to
 * out_decr/out_no_decr so no unlock of freed state happens.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif
	int did_output;
	int type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
			/* Socket is gone and this timer type is moot. */
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	type = tmr->type;
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	} else if (inp != NULL) {
		if (type != SCTP_TIMER_TYPE_INPKILL) {
			SCTP_INP_WLOCK(inp);
		}
	} else {
		SCTP_WQ_ADDR_LOCK();
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			/* Re-arm only while HB is enabled on this path. */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			/* Rotate the cookie secret and re-key it. */
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t)type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WUNLOCK(inp);
	} else {
		SCTP_WQ_ADDR_UNLOCK();
	}

out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
	CURVNET_RESTORE();
}

/*
 * Arm the timer of the given type for the endpoint/association/
 * destination.  Each case selects the sctp_timer to use and computes
 * the timeout in ticks (typically from the destination's RTO, falling
 * back to the association's initial RTO when no RTT measurement
 * exists).  If the timer is already pending it is left untouched.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t to_ticks;
	struct sctp_timer *tmr;

	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp
		 * usually about 200ms.
2007 */ 2008 if (stcb == NULL) { 2009 return; 2010 } 2011 tmr = &stcb->asoc.dack_timer; 2012 to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack); 2013 break; 2014 case SCTP_TIMER_TYPE_SHUTDOWN: 2015 /* Here we use the RTO of the destination. */ 2016 if ((stcb == NULL) || (net == NULL)) { 2017 return; 2018 } 2019 if (net->RTO == 0) { 2020 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2021 } else { 2022 to_ticks = MSEC_TO_TICKS(net->RTO); 2023 } 2024 tmr = &net->rxt_timer; 2025 break; 2026 case SCTP_TIMER_TYPE_HEARTBEAT: 2027 /* 2028 * the net is used here so that we can add in the RTO. Even 2029 * though we use a different timer. We also add the HB timer 2030 * PLUS a random jitter. 2031 */ 2032 if ((stcb == NULL) || (net == NULL)) { 2033 return; 2034 } else { 2035 uint32_t rndval; 2036 uint32_t jitter; 2037 2038 if ((net->dest_state & SCTP_ADDR_NOHB) && 2039 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 2040 return; 2041 } 2042 if (net->RTO == 0) { 2043 to_ticks = stcb->asoc.initial_rto; 2044 } else { 2045 to_ticks = net->RTO; 2046 } 2047 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2048 jitter = rndval % to_ticks; 2049 if (jitter >= (to_ticks >> 1)) { 2050 to_ticks = to_ticks + (jitter - (to_ticks >> 1)); 2051 } else { 2052 to_ticks = to_ticks - jitter; 2053 } 2054 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2055 !(net->dest_state & SCTP_ADDR_PF)) { 2056 to_ticks += net->heart_beat_delay; 2057 } 2058 /* 2059 * Now we must convert the to_ticks that are now in 2060 * ms to ticks. 2061 */ 2062 to_ticks = MSEC_TO_TICKS(to_ticks); 2063 tmr = &net->hb_timer; 2064 } 2065 break; 2066 case SCTP_TIMER_TYPE_COOKIE: 2067 /* 2068 * Here we can use the RTO timer from the network since one 2069 * RTT was compelete. If a retran happened then we will be 2070 * using the RTO initial value. 
2071 */ 2072 if ((stcb == NULL) || (net == NULL)) { 2073 return; 2074 } 2075 if (net->RTO == 0) { 2076 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2077 } else { 2078 to_ticks = MSEC_TO_TICKS(net->RTO); 2079 } 2080 tmr = &net->rxt_timer; 2081 break; 2082 case SCTP_TIMER_TYPE_NEWCOOKIE: 2083 /* 2084 * nothing needed but the endpoint here ususually about 60 2085 * minutes. 2086 */ 2087 tmr = &inp->sctp_ep.signature_change; 2088 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2089 break; 2090 case SCTP_TIMER_TYPE_ASOCKILL: 2091 if (stcb == NULL) { 2092 return; 2093 } 2094 tmr = &stcb->asoc.strreset_timer; 2095 to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT); 2096 break; 2097 case SCTP_TIMER_TYPE_INPKILL: 2098 /* 2099 * The inp is setup to die. We re-use the signature_chage 2100 * timer since that has stopped and we are in the GONE 2101 * state. 2102 */ 2103 tmr = &inp->sctp_ep.signature_change; 2104 to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT); 2105 break; 2106 case SCTP_TIMER_TYPE_PATHMTURAISE: 2107 /* 2108 * Here we use the value found in the EP for PMTU ususually 2109 * about 10 minutes. 2110 */ 2111 if ((stcb == NULL) || (net == NULL)) { 2112 return; 2113 } 2114 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2115 return; 2116 } 2117 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2118 tmr = &net->pmtu_timer; 2119 break; 2120 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2121 /* Here we use the RTO of the destination */ 2122 if ((stcb == NULL) || (net == NULL)) { 2123 return; 2124 } 2125 if (net->RTO == 0) { 2126 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2127 } else { 2128 to_ticks = MSEC_TO_TICKS(net->RTO); 2129 } 2130 tmr = &net->rxt_timer; 2131 break; 2132 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2133 /* 2134 * Here we use the endpoints shutdown guard timer usually 2135 * about 3 minutes. 
2136 */ 2137 if (stcb == NULL) { 2138 return; 2139 } 2140 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2141 to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto); 2142 } else { 2143 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2144 } 2145 tmr = &stcb->asoc.shut_guard_timer; 2146 break; 2147 case SCTP_TIMER_TYPE_STRRESET: 2148 /* 2149 * Here the timer comes from the stcb but its value is from 2150 * the net's RTO. 2151 */ 2152 if ((stcb == NULL) || (net == NULL)) { 2153 return; 2154 } 2155 if (net->RTO == 0) { 2156 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2157 } else { 2158 to_ticks = MSEC_TO_TICKS(net->RTO); 2159 } 2160 tmr = &stcb->asoc.strreset_timer; 2161 break; 2162 case SCTP_TIMER_TYPE_ASCONF: 2163 /* 2164 * Here the timer comes from the stcb but its value is from 2165 * the net's RTO. 2166 */ 2167 if ((stcb == NULL) || (net == NULL)) { 2168 return; 2169 } 2170 if (net->RTO == 0) { 2171 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2172 } else { 2173 to_ticks = MSEC_TO_TICKS(net->RTO); 2174 } 2175 tmr = &stcb->asoc.asconf_timer; 2176 break; 2177 case SCTP_TIMER_TYPE_PRIM_DELETED: 2178 if ((stcb == NULL) || (net != NULL)) { 2179 return; 2180 } 2181 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2182 tmr = &stcb->asoc.delete_prim_timer; 2183 break; 2184 case SCTP_TIMER_TYPE_AUTOCLOSE: 2185 if (stcb == NULL) { 2186 return; 2187 } 2188 if (stcb->asoc.sctp_autoclose_ticks == 0) { 2189 /* 2190 * Really an error since stcb is NOT set to 2191 * autoclose 2192 */ 2193 return; 2194 } 2195 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2196 tmr = &stcb->asoc.autoclose_timer; 2197 break; 2198 default: 2199 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n", 2200 __func__, t_type); 2201 return; 2202 break; 2203 } 2204 if ((to_ticks <= 0) || (tmr == NULL)) { 2205 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n", 2206 __func__, t_type, to_ticks, (void *)tmr); 2207 return; 2208 } 2209 
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		/* Keep count of active retransmission timers on the assoc. */
		stcb->asoc.num_send_timers_up++;
	}
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}

/*
 * Stop the timer of type t_type belonging to the endpoint (inp),
 * association (stcb) and/or destination (net); which of the three owns the
 * timer depends on t_type, and callers pass NULL for the ones that do not
 * apply.  'from' records who requested the stop (for debugging via
 * tmr->stopped_from).  A timer slot that is shared between types is left
 * running if it is currently armed for a different type.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the timer structure that backs it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}

/*
 * Return the total number of data bytes in the mbuf chain 'm'.
 */
uint32_t
sctp_calculate_len(struct mbuf *m)
{
	uint32_t tlen = 0;
	struct mbuf *at;

	at = m;
	while (at) {
		tlen += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}
	return (tlen);
}

void
sctp_mtu_size_reset(struct sctp_inpcb *inp,
    struct sctp_association *asoc, uint32_t mtu)
{
	/*
	 * Reset the P-MTU size on this association, this involves changing
	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
	 * allow the DF flag to be cleared.
	 */
	struct sctp_tmit_chunk *chk;
	unsigned int eff_mtu, ovh;

	asoc->smallest_mtu = mtu;
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ovh = SCTP_MIN_OVERHEAD;
	} else {
		ovh = SCTP_MIN_V4_OVERHEAD;
	}
	/* Any queued chunk larger than the payload room may be fragmented. */
	eff_mtu = mtu - ovh;
	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
		if (chk->send_size > eff_mtu) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->send_size > eff_mtu) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
}


/*
 * given an association and starting time of the current RTT period return
 * RTO in number of msecs net should point to the current network
 */

uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *old,
    int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in
	 * number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now;

	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now holds the elapsed time since 'old' */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t)1000000 *(uint64_t)now.tv_sec +
	    (uint64_t)now.tv_usec;
	/* compute rtt in ms */
	rtt = (int32_t)(net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* Standard SRTT/RTTVAR update in scaled fixed point. */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}

/*
 * return a pointer to a contiguous piece of data from the given mbuf chain
 * starting at 'off' for 'len' bytes. If the desired piece spans more than
 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
 * is >= 'len' returns NULL if there isn't 'len' bytes in the chain.
 */
caddr_t
sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
{
	uint32_t count;
	uint8_t *ptr;

	ptr = in_ptr;
	if ((off < 0) || (len <= 0))
		return (NULL);

	/* find the desired start location */
	while ((m != NULL) && (off > 0)) {
		if (off < SCTP_BUF_LEN(m))
			break;
		off -= SCTP_BUF_LEN(m);
		m = SCTP_BUF_NEXT(m);
	}
	if (m == NULL)
		return (NULL);

	/* is the current mbuf large enough (eg. contiguous)? */
	if ((SCTP_BUF_LEN(m) - off) >= len) {
		return (mtod(m, caddr_t)+off);
	} else {
		/* else, it spans more than one mbuf, so save a temp copy... */
		while ((m != NULL) && (len > 0)) {
			count = min(SCTP_BUF_LEN(m) - off, len);
			memcpy(ptr, mtod(m, caddr_t)+off, count);
			len -= count;
			ptr += count;
			off = 0;
			m = SCTP_BUF_NEXT(m);
		}
		if ((m == NULL) && (len > 0))
			return (NULL);
		else
			return ((caddr_t)in_ptr);
	}
}



/*
 * Typed wrapper around sctp_m_getptr() for pulling an SCTP parameter
 * header out of an mbuf chain.
 */
struct sctp_paramhdr *
sctp_get_next_param(struct mbuf *m,
    int offset,
    struct sctp_paramhdr *pull,
    int pull_limit)
{
	/* This just provides a typed signature to Peter's Pull routine */
	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
	    (uint8_t *)pull));
}


/*
 * Append 'padlen' (at most 3) zero bytes of padding to mbuf 'm', growing
 * the chain with a fresh mbuf if 'm' has no trailing space.  Returns the
 * mbuf that holds the padding, or NULL on failure.
 */
struct mbuf *
sctp_add_pad_tombuf(struct mbuf *m, int padlen)
{
	struct mbuf *m_last;
	caddr_t dp;

	if (padlen > 3) {
		return (NULL);
	}
	if (padlen <= M_TRAILINGSPACE(m)) {
		/*
		 * The easy way.  We hope the majority of the time we hit
		 * here :)
		 */
		m_last = m;
	} else {
		/* Hard way we must grow the mbuf chain */
		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
		if (m_last == NULL) {
			return (NULL);
		}
		SCTP_BUF_LEN(m_last) = 0;
		SCTP_BUF_NEXT(m_last) = NULL;
		SCTP_BUF_NEXT(m) = m_last;
	}
	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
	SCTP_BUF_LEN(m_last) += padlen;
	memset(dp, 0, padlen);
	return (m_last);
}

struct mbuf *
sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
{
	/* find the last mbuf in chain and pad it */
	struct mbuf *m_at;

	if (last_mbuf != NULL) {
		return (sctp_add_pad_tombuf(last_mbuf, padval));
	} else {
		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
			if (SCTP_BUF_NEXT(m_at) == NULL) {
				return (sctp_add_pad_tombuf(m_at, padval));
			}
		}
	}
	return (NULL);
}

/*
 * Queue an SCTP_ASSOC_CHANGE notification to the socket, if the user
 * enabled the event.  For COMM_LOST/CANT_STR_ASSOC on 1-to-1 style sockets
 * an errno is also set on the socket so blocked callers wake up.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
			/*
			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
			 * contiguous.
			 */
			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
				abort_len = SCTP_CHUNK_BUFFER_SIZE;
			}
		} else {
			abort_len = 0;
		}
		/* COMM_UP/RESTART carry feature flags; LOST carries the ABORT. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
		SOCK_UNLOCK(stcb->sctp_socket);
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Hold a ref across the unlock/lock dance for lock order. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}

/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for address 'sa' with the
 * given state and error, if the user enabled the event.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}


/*
 * Queue an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for the
 * chunk 'chk', if the user enabled either event.  'sent' selects
 * SCTP_DATA_SENT vs SCTP_DATA_UNSENT; the chunk's data is handed to the
 * notification (chk->data is stolen).
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* New-style event takes precedence if both are enabled. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			/* Sanity-check before trusting the on-wire length. */
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}


/*
 * Queue a send-failed notification for a stream-queue-pending message that
 * never made it onto the wire (always reported as SCTP_DATA_UNSENT).  The
 * message data is handed to the notification (sp->data is stolen).
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int notifhdr_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->sid;
		if (sp->some_taken) {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->sid;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that
we need this */ 3131 control->tail_mbuf = m_notify; 3132 sctp_add_to_readq(stcb->sctp_ep, stcb, 3133 control, 3134 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3135 } 3136 3137 3138 3139 static void 3140 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3141 { 3142 struct mbuf *m_notify; 3143 struct sctp_adaptation_event *sai; 3144 struct sctp_queued_to_read *control; 3145 3146 if ((stcb == NULL) || 3147 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3148 /* event not enabled */ 3149 return; 3150 } 3151 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3152 if (m_notify == NULL) 3153 /* no space left */ 3154 return; 3155 SCTP_BUF_LEN(m_notify) = 0; 3156 sai = mtod(m_notify, struct sctp_adaptation_event *); 3157 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3158 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3159 sai->sai_flags = 0; 3160 sai->sai_length = sizeof(struct sctp_adaptation_event); 3161 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3162 sai->sai_assoc_id = sctp_get_associd(stcb); 3163 3164 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3165 SCTP_BUF_NEXT(m_notify) = NULL; 3166 3167 /* append to socket */ 3168 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3169 0, 0, stcb->asoc.context, 0, 0, 0, 3170 m_notify); 3171 if (control == NULL) { 3172 /* no memory */ 3173 sctp_m_freem(m_notify); 3174 return; 3175 } 3176 control->length = SCTP_BUF_LEN(m_notify); 3177 control->spec_flags = M_NOTIFICATION; 3178 /* not that we need this */ 3179 control->tail_mbuf = m_notify; 3180 sctp_add_to_readq(stcb->sctp_ep, stcb, 3181 control, 3182 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3183 } 3184 3185 /* This always must be called with the read-queue LOCKED in the INP */ 3186 static void 3187 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3188 uint32_t 
val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* No point queuing a notification the user can never read. */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* val packs stream id in the high 16 bits and sequence in the low 16. */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ?
NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	/*
	 * Insert directly into the inp read queue (caller holds the
	 * read-queue lock) right after the partial-delivery control block,
	 * so the PDAPI event is seen in order with the partial data.
	 */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock-order dance: hold a ref, drop the TCB lock,
			 * take the socket lock, retake the TCB lock, then
			 * re-validate that the socket did not go away while
			 * the TCB lock was dropped.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}

/*
 * Queue an SCTP_SHUTDOWN_EVENT notification.  For one-to-one style (and
 * TCP-pool) sockets the socket is first marked unable to send.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup!
 */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/* ref/unlock/lock dance to take the socket lock safely */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}

/*
 * Queue an SCTP_SENDER_DRY_EVENT notification (all outstanding user data
 * has been acknowledged).  No-op when the event is not enabled.
 */
static void
sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) &&
!defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_sender_dry_event *event;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	event = mtod(m_notify, struct sctp_sender_dry_event *);
	memset(event, 0, sizeof(struct sctp_sender_dry_event));
	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
	event->sender_dry_flags = 0;
	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
	event->sender_dry_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}


/*
 * Queue an SCTP_STREAM_CHANGE_EVENT notification reporting the new
 * inbound/outbound stream counts after a stream add/change (RFC 6525).
 * Suppressed when the change was requested by the peer (peer_req_out).
 */
void
sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_change_event *stradd;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
		/* event not enabled */
		return;
	}
	if
((stcb->asoc.peer_req_out) && flag) {
		/* Peer made the request, don't tell the local user */
		stcb->asoc.peer_req_out = 0;
		return;
	}
	stcb->asoc.peer_req_out = 0;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	stradd = mtod(m_notify, struct sctp_stream_change_event *);
	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
	stradd->strchange_flags = flag;
	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
	stradd->strchange_assoc_id = sctp_get_associd(stcb);
	stradd->strchange_instrms = numberin;
	stradd->strchange_outstrms = numberout;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* Drop the event rather than overflow the receive buffer. */
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}

/*
 * Queue an SCTP_ASSOC_RESET_EVENT notification carrying the local and
 * remote TSNs after an association (TSN) reset.
 */
void
sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_assoc_reset_event *strasoc;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb,
SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
	strasoc->assocreset_flags = flag;
	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
	strasoc->assocreset_local_tsn = sending_tsn;
	strasoc->assocreset_remote_tsn = recv_tsn;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* Drop the event rather than overflow the receive buffer. */
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}



/*
 * Queue an SCTP_STREAM_RESET_EVENT notification listing the affected
 * stream ids (list entries arrive in network byte order).
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t *list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	/* Event header plus one 16-bit stream id per entry. */
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	memset(strreset, 0, len);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	strreset->strreset_flags = flag;
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		/* Convert each stream id to host byte order for the user. */
		for (i = 0; i < number_entries; i++) {
			strreset->strreset_stream_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}


/*
 * Queue an SCTP_REMOTE_ERROR notification, optionally carrying (a bounded
 * copy of) the offending ERROR chunk after the fixed-size header.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
		/*
		 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
		 * contiguous.
		 */
		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
		}
	} else {
		chunk_len = 0;
	}
	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/* Only present on the non-retry path (notif_len still includes chunk). */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		control->spec_flags = M_NOTIFICATION;
		/* not that we need this */
		control->tail_mbuf = m_notify;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		sctp_m_freem(m_notify);
	}
}


/*
 * Central notification dispatcher: map a SCTP_NOTIFY_* code (plus its
 * opaque data argument) to the matching sctp_notify_* helper.  Drops
 * everything once the socket is gone/closed, and suppresses interface
 * events while still in the COOKIE_WAIT/COOKIE_ECHOED front states.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if
!defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* Receive side shut down: nobody will read the event. */
		return;
	}
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is delivered at most once per association. */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			/* recurse to report that the peer does not support AUTH */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	/* NOTE(review): "DELVIERY" typo is in the constant's definition. */
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			val = *((uint32_t *)data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		/* error carries the number of list entries for these cases */
		sctp_notify_stream_reset(stcb, error, ((uint16_t
*)data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* data is the key id smuggled through the pointer */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb,
so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}			/* end switch */
}

/*
 * Fail every chunk still queued for sending on this association: drain the
 * sent queue, the send queue, and every per-stream output queue, delivering
 * a send-failed notification for each chunk that still has data.  Takes the
 * TCB send lock unless the caller already holds it (holds_lock != 0).
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* keep the per-stream queued-chunk accounting in sync */
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* the notify may have consumed chk->data; re-check */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore
FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* the notify may have consumed chk->data; re-check */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* the notify may have stolen sp->data; re-check */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}

/*
 * Deliver the "association aborted" notifications: fail all outbound data,
 * then report either a remote (peer ABORT) or local abort to the user.
 * For one-to-one style connected sockets the WAS_ABORTED flag is latched
 * on the endpoint first.
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 1, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}

/*
 * Send an ABORT for the given packet/association, notify the user if a TCB
 * exists, and free the association.  vtag/vrf_id are taken from the TCB
 * when one is present, otherwise vtag 0 is used for the OOTB-style abort.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB, use its tag and VRF for the ABORT we send. */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		/* ref/unlock/lock dance to take the socket lock safely */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* an established association just went away */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the in/out TSN ring logs for an association.
 * NOTE(review): the body is guarded by "NOSIY_PRINTS", which looks like a
 * typo of NOISY_PRINTS -- as written, the function is empty unless that
 * exact (misspelled) macro is defined.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* when wrapped, print the older tail of the ring first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
#endif

/*
 * Abort an existing association: send an ABORT chunk to the peer (with the
 * supplied operational error), notify the user unless the socket is gone,
 * and free the association.  With no TCB, just reap a dead endpoint.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* an established association just went away */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		/* ref/unlock/lock dance to take the socket lock safely */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}

/*
 * Handle an out-of-the-blue packet (no matching association): walk its
 * chunks to decide whether to stay silent, answer a SHUTDOWN-ACK with a
 * SHUTDOWN-COMPLETE, or send an ABORT (subject to the blackhole sysctl).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *)&chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			return;
		default:
			break;
		}
4181 offset += SCTP_SIZE32(chk_length); 4182 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4183 sizeof(*ch), (uint8_t *)&chunk_buf); 4184 } 4185 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4186 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4187 (contains_init_chunk == 0))) { 4188 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4189 mflowtype, mflowid, fibnum, 4190 vrf_id, port); 4191 } 4192 } 4193 4194 /* 4195 * check the inbound datagram to make sure there is not an abort inside it, 4196 * if there is return 1, else return 0. 4197 */ 4198 int 4199 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill) 4200 { 4201 struct sctp_chunkhdr *ch; 4202 struct sctp_init_chunk *init_chk, chunk_buf; 4203 int offset; 4204 unsigned int chk_length; 4205 4206 offset = iphlen + sizeof(struct sctphdr); 4207 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4208 (uint8_t *)&chunk_buf); 4209 while (ch != NULL) { 4210 chk_length = ntohs(ch->chunk_length); 4211 if (chk_length < sizeof(*ch)) { 4212 /* packet is probably corrupt */ 4213 break; 4214 } 4215 /* we seem to be ok, is it an abort? */ 4216 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4217 /* yep, tell them */ 4218 return (1); 4219 } 4220 if (ch->chunk_type == SCTP_INITIATION) { 4221 /* need to update the Vtag */ 4222 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4223 offset, sizeof(*init_chk), (uint8_t *)&chunk_buf); 4224 if (init_chk != NULL) { 4225 *vtagfill = ntohl(init_chk->init.initiate_tag); 4226 } 4227 } 4228 /* Nope, move to the next chunk */ 4229 offset += SCTP_SIZE32(chk_length); 4230 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4231 sizeof(*ch), (uint8_t *)&chunk_buf); 4232 } 4233 return (0); 4234 } 4235 4236 /* 4237 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4238 * set (i.e. 
 * it's 0) so, create this function to compare link local scopes
 */
#ifdef INET6
/*
 * Return 1 if addr1 and addr2 are in the same (link-local) scope,
 * 0 otherwise.  Works on local copies so the callers' sockaddrs are
 * never modified; a scope that cannot be recovered counts as "not same".
 */
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	struct sockaddr_in6 a, b;

	/* save copies */
	a = *addr1;
	b = *addr2;

	if (a.sin6_scope_id == 0)
		if (sa6_recoverscope(&a)) {
			/* can't get scope, so can't match */
			return (0);
		}
	if (b.sin6_scope_id == 0)
		if (sa6_recoverscope(&b)) {
			/* can't get scope, so can't match */
			return (0);
		}
	if (a.sin6_scope_id != b.sin6_scope_id)
		return (0);

	return (1);
}

/*
 * returns a sockaddr_in6 with embedded scope recovered and removed
 * (uses *store as scratch space; the input sockaddr is only modified
 * via in6_clearscope() when it already carried a scope id)
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
#endif

/*
 * are the two addresses the same?
 * currently a "scopeless" check returns: 1
 * if same, 0 if not
 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{

	/* must be valid */
	if (sa1 == NULL || sa2 == NULL)
		return (0);

	/* must be the same family */
	if (sa1->sa_family != sa2->sa_family)
		return (0);

	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			/* IPv6 addresses */
			struct sockaddr_in6 *sin6_1, *sin6_2;

			sin6_1 = (struct sockaddr_in6 *)sa1;
			sin6_2 = (struct sockaddr_in6 *)sa2;
			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
			    sin6_2));
		}
#endif
#ifdef INET
	case AF_INET:
		{
			/* IPv4 addresses */
			struct sockaddr_in *sin_1, *sin_2;

			sin_1 = (struct sockaddr_in *)sa1;
			sin_2 = (struct sockaddr_in *)sa2;
			/* ports are deliberately NOT compared */
			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
		}
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}

/*
 * Debug helper: print an IPv4/IPv6 address (with port, and scope for v6)
 * to the console; unknown families print "?".
 */
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
			break;
		}
#endif
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;
			unsigned char *p;

			sin = (struct sockaddr_in *)sa;
			p = (unsigned char *)&sin->sin_addr;
			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
			break;
		}
#endif
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}

void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb
    *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 * Used by peeloff/accept: socket-buffer accounting is moved from
	 * old_so->so_rcv to new_so->so_rcv along with the queue entries.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit the old socket buffer for every mbuf moved */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit the new socket buffer with the same mbufs */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}

/*
 * Wake up a reader sleeping on inp's socket, taking the Apple/lock-testing
 * socket lock first when the caller does not already hold it (so_locked).
 * Drops and re-takes the TCB lock around the socket lock to keep lock
 * ordering, holding a refcount on the assoc meanwhile.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while we juggled locks */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}

void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 * Zero-length mbufs are pruned from control->data first; if the
	 * whole chain collapses away the control is freed instead of
	 * queued.  "end" marks the message as complete (end_added).
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader is gone; discard the data instead of queueing */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}

/*************HOLD THIS COMMENT FOR PATCH FILE OF
 *************ALTERNATE ROUTING CODE
 */

/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
 *************ALTERNATE ROUTING CODE
 */

/*
 * Build an SCTP error-cause mbuf (paramhdr + the NUL-less text of "info").
 * Returns NULL if code is 0, info is NULL, info is too long for
 * SCTP_MAX_CAUSE_LENGTH, or the mbuf allocation fails.
 */
struct mbuf *
sctp_generate_cause(uint16_t code, char *info)
{
	struct mbuf *m;
	struct sctp_gen_error_cause *cause;
	size_t info_len;
	uint16_t len;

	if ((code == 0) || (info == NULL)) {
		return (NULL);
	}
	info_len = strlen(info);
	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
		return (NULL);
	}
	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (m != NULL) {
		SCTP_BUF_LEN(m) = len;
		cause = mtod(m, struct sctp_gen_error_cause *);
		cause->code = htons(code);
		cause->length = htons(len);
		memcpy(cause->info, info, info_len);
	}
	return (m);
}

/*
 * Build a "No User Data" error cause carrying the offending TSN.
 * Returns NULL on mbuf allocation failure.
 */
struct mbuf *
sctp_generate_no_user_data_cause(uint32_t tsn)
{
	struct mbuf *m;
	struct sctp_error_no_user_data *no_user_data_cause;
	uint16_t len;

	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
	m = sctp_get_mbuf_for_msg(len, 0,
	    M_NOWAIT, 1, MT_DATA);
	if (m != NULL) {
		SCTP_BUF_LEN(m) = len;
		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
		no_user_data_cause->cause.length = htons(len);
		no_user_data_cause->tsn = htonl(tsn);
	}
	return (m);
}

#ifdef SCTP_MBCNT_LOGGING
/*
 * Release output-queue accounting for chk_cnt chunks described by tp1:
 * decrements chunks_on_out_queue and total_output_queue_size (clamped at
 * zero), and for TCP-model sockets also debits the send buffer byte count.
 * No-op if tp1 carries no data.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp rather than underflow the queue-size counter */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}

#endif

/*
 * Abandon a (partially) sent PR-SCTP message starting at tp1: marks every
 * fragment of the message FORWARD_TSN_SKIP across the sent and send
 * queues, updates abandoned-message statistics, notifies the ULP, and
 * returns the number of book-kept bytes released.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint32_t mid;
	uint16_t sid;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	sid = tp1->rec.data.sid;
	mid = tp1->rec.data.mid;
	if (sent || !(tp1->rec.data.rcv_flags &
	    SCTP_DATA_FIRST_FRAG)) {
		stcb->asoc.abandoned_sent[0]++;
		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	} else {
		stcb->asoc.abandoned_unsent[0]++;
		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	}
	/* walk forward from tp1 until the end-of-message fragment */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we ae done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.sid != sid) ||
			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
				break;
			}
			/*
			 * save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one we
			 * don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/*
			 * on to the sent queue so we can wait for it to be
			 * passed by.
			 */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there is stuff left on the
		 * stream out queue.. yuck.
		 */
		SCTP_TCB_SEND_LOCK(stcb);
		strq = &stcb->asoc.strmout[sid];
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp != NULL) {
			sp->discard_rest = 1;
			/*
			 * We may need to put a chunk on the queue that
			 * holds the TSN that would have been sent with the
			 * LAST bit.
			 */
			if (chk == NULL) {
				/* Yep, we have to */
				sctp_alloc_a_chunk(stcb, chk);
				if (chk == NULL) {
					/*
					 * we are hosed. All we can do is
					 * nothing.. which will cause an
					 * abort if the peer is paying
					 * attention.
					 */
					goto oh_well;
				}
				memset(chk, 0, sizeof(*chk));
				chk->rec.data.rcv_flags = 0;
				chk->sent = SCTP_FORWARD_TSN_SKIP;
				chk->asoc = &stcb->asoc;
				if (stcb->asoc.idata_supported == 0) {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = 0;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				} else {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = strq->next_mid_unordered;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				}
				chk->rec.data.sid = sp->sid;
				chk->rec.data.ppid = sp->ppid;
				chk->rec.data.context = sp->context;
				chk->flags = sp->act_flags;
				chk->whoTo = NULL;
				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
				strq->chunks_on_queues++;
				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
				stcb->asoc.sent_queue_cnt++;
				stcb->asoc.pr_sctp_cnt++;
			}
			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
			if (sp->sinfo_flags & SCTP_UNORDERED) {
				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
			}
			if (stcb->asoc.idata_supported == 0) {
				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
					strq->next_mid_ordered++;
				}
			} else {
				if (sp->sinfo_flags & SCTP_UNORDERED) {
					strq->next_mid_unordered++;
				} else {
					strq->next_mid_ordered++;
				}
			}
	oh_well:
			if (sp->data) {
				/*
				 * Pull any data to free up the SB and allow
				 * sender to "add more" while we will throw
				 * away :-)
				 */
				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
				ret_sz += sp->length;
				do_wakeup_routine = 1;
				sp->some_taken = 1;
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
				sp->length = 0;
			}
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}

/*
 * checks to see if the given address, sa, is one that is currently known by
 * the kernel note: can't distinguish the same address on multiple interfaces
 * and doesn't handle multiple addresses with different zone/scope id's note:
 * ifa_ifwithaddr() compares the entire sockaddr struct
 */
struct sctp_ifa *
sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
    int holds_lock)
{
	struct sctp_laddr *laddr;

	if (holds_lock == 0) {
		SCTP_INP_RLOCK(inp);
	}
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == NULL)
			continue;
		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    laddr->ifa->address.sin.sin_addr.s_addr) {
				/* found him. */
				if (holds_lock == 0) {
					SCTP_INP_RUNLOCK(inp);
				}
				return (laddr->ifa);
				/* NOTE(review): unreachable after return */
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &laddr->ifa->address.sin6)) {
				/* found him.
				 */
				if (holds_lock == 0) {
					SCTP_INP_RUNLOCK(inp);
				}
				return (laddr->ifa);
				/* NOTE(review): unreachable after return */
				break;
			}
		}
#endif
	}
	if (holds_lock == 0) {
		SCTP_INP_RUNLOCK(inp);
	}
	return (NULL);
}

/*
 * Cheap hash over the raw address bytes (v4: fold of s_addr; v6: sum of
 * the four 32-bit words, folded).  Used to pick a vrf_addr_hash bucket.
 * Returns 0 for unsupported families.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t hash_of_addr;

			sin6 = (struct sockaddr_in6 *)addr;
			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3]);
			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
			return (hash_of_addr);
		}
#endif
	default:
		break;
	}
	return (0);
}

/*
 * Look up addr in the given VRF's address hash table; returns the matching
 * sctp_ifa or NULL.  Takes the global address rlock unless holds_lock.
 */
struct sctp_ifa *
sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
{
	struct sctp_ifa *sctp_ifap;
	struct sctp_vrf *vrf;
	struct sctp_ifalist *hash_head;
	uint32_t hash_of_addr;

	if (holds_lock == 0)
		SCTP_IPI_ADDR_RLOCK();

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();
		return (NULL);
	}
	hash_of_addr = sctp_get_ifa_hash_val(addr);

	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
	if (hash_head == NULL) {
		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
		sctp_print_address(addr);
		SCTP_PRINTF("No such bucket for address\n");
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();

		return (NULL);
	}
	LIST_FOREACH(sctp_ifap, hash_head,
	    next_bucket) {
		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    sctp_ifap->address.sin.sin_addr.s_addr) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				/* NOTE(review): unreachable after return */
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &sctp_ifap->address.sin6)) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				/* NOTE(review): unreachable after return */
				break;
			}
		}
#endif
	}
	if (holds_lock == 0)
		SCTP_IPI_ADDR_RUNLOCK();
	return (NULL);
}

/*
 * Called as the user reads data off the socket: if enough receive window
 * has been freed (>= rwnd_req), send a window-update SACK and kick
 * sctp_chunk_output(); otherwise just accumulate the freed byte count in
 * freed_by_sorcv_sincelast.  Manages the INP read lock (hold_rlock) and a
 * refcount on the assoc across the possible lock release.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		if (hold_rlock) {
			/* drop read lock before taking the TCB lock */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	if (so && r_unlocked && hold_rlock) {
		/* restore the read lock for the caller */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}

int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5167 * On the way out we may send out any combination of: 5168 * MSG_NOTIFICATION MSG_EOR 5169 * 5170 */ 5171 struct sctp_inpcb *inp = NULL; 5172 int my_len = 0; 5173 int cp_len = 0, error = 0; 5174 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5175 struct mbuf *m = NULL; 5176 struct sctp_tcb *stcb = NULL; 5177 int wakeup_read_socket = 0; 5178 int freecnt_applied = 0; 5179 int out_flags = 0, in_flags = 0; 5180 int block_allowed = 1; 5181 uint32_t freed_so_far = 0; 5182 uint32_t copied_so_far = 0; 5183 int in_eeor_mode = 0; 5184 int no_rcv_needed = 0; 5185 uint32_t rwnd_req = 0; 5186 int hold_sblock = 0; 5187 int hold_rlock = 0; 5188 ssize_t slen = 0; 5189 uint32_t held_length = 0; 5190 int sockbuf_lock = 0; 5191 5192 if (uio == NULL) { 5193 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5194 return (EINVAL); 5195 } 5196 if (msg_flags) { 5197 in_flags = *msg_flags; 5198 if (in_flags & MSG_PEEK) 5199 SCTP_STAT_INCR(sctps_read_peeks); 5200 } else { 5201 in_flags = 0; 5202 } 5203 slen = uio->uio_resid; 5204 5205 /* Pull in and set up our int flags */ 5206 if (in_flags & MSG_OOB) { 5207 /* Out of band's NOT supported */ 5208 return (EOPNOTSUPP); 5209 } 5210 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5211 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5212 return (EINVAL); 5213 } 5214 if ((in_flags & (MSG_DONTWAIT 5215 | MSG_NBIO 5216 )) || 5217 SCTP_SO_IS_NBIO(so)) { 5218 block_allowed = 0; 5219 } 5220 /* setup the endpoint */ 5221 inp = (struct sctp_inpcb *)so->so_pcb; 5222 if (inp == NULL) { 5223 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5224 return (EFAULT); 5225 } 5226 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5227 /* Must be at least a MTU's worth */ 5228 if (rwnd_req < SCTP_MIN_RWND) 5229 rwnd_req = SCTP_MIN_RWND; 5230 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5231 if (SCTP_BASE_SYSCTL(sctp_logging_level) & 
SCTP_RECV_RWND_LOGGING_ENABLE) { 5232 sctp_misc_ints(SCTP_SORECV_ENTER, 5233 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5234 } 5235 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5236 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5237 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5238 } 5239 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0)); 5240 if (error) { 5241 goto release_unlocked; 5242 } 5243 sockbuf_lock = 1; 5244 restart: 5245 5246 5247 restart_nosblocks: 5248 if (hold_sblock == 0) { 5249 SOCKBUF_LOCK(&so->so_rcv); 5250 hold_sblock = 1; 5251 } 5252 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5253 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5254 goto out; 5255 } 5256 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5257 if (so->so_error) { 5258 error = so->so_error; 5259 if ((in_flags & MSG_PEEK) == 0) 5260 so->so_error = 0; 5261 goto out; 5262 } else { 5263 if (so->so_rcv.sb_cc == 0) { 5264 /* indicate EOF */ 5265 error = 0; 5266 goto out; 5267 } 5268 } 5269 } 5270 if (so->so_rcv.sb_cc <= held_length) { 5271 if (so->so_error) { 5272 error = so->so_error; 5273 if ((in_flags & MSG_PEEK) == 0) { 5274 so->so_error = 0; 5275 } 5276 goto out; 5277 } 5278 if ((so->so_rcv.sb_cc == 0) && 5279 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5280 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5281 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5282 /* 5283 * For active open side clear flags for 5284 * re-use passive open is blocked by 5285 * connect. 
5286 */ 5287 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5288 /* 5289 * You were aborted, passive side 5290 * always hits here 5291 */ 5292 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5293 error = ECONNRESET; 5294 } 5295 so->so_state &= ~(SS_ISCONNECTING | 5296 SS_ISDISCONNECTING | 5297 SS_ISCONFIRMING | 5298 SS_ISCONNECTED); 5299 if (error == 0) { 5300 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5301 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5302 error = ENOTCONN; 5303 } 5304 } 5305 goto out; 5306 } 5307 } 5308 if (block_allowed) { 5309 error = sbwait(&so->so_rcv); 5310 if (error) { 5311 goto out; 5312 } 5313 held_length = 0; 5314 goto restart_nosblocks; 5315 } else { 5316 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5317 error = EWOULDBLOCK; 5318 goto out; 5319 } 5320 } 5321 if (hold_sblock == 1) { 5322 SOCKBUF_UNLOCK(&so->so_rcv); 5323 hold_sblock = 0; 5324 } 5325 /* we possibly have data we can read */ 5326 /* sa_ignore FREED_MEMORY */ 5327 control = TAILQ_FIRST(&inp->read_queue); 5328 if (control == NULL) { 5329 /* 5330 * This could be happening since the appender did the 5331 * increment but as not yet did the tailq insert onto the 5332 * read_queue 5333 */ 5334 if (hold_rlock == 0) { 5335 SCTP_INP_READ_LOCK(inp); 5336 } 5337 control = TAILQ_FIRST(&inp->read_queue); 5338 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5339 #ifdef INVARIANTS 5340 panic("Huh, its non zero and nothing on control?"); 5341 #endif 5342 so->so_rcv.sb_cc = 0; 5343 } 5344 SCTP_INP_READ_UNLOCK(inp); 5345 hold_rlock = 0; 5346 goto restart; 5347 } 5348 if ((control->length == 0) && 5349 (control->do_not_ref_stcb)) { 5350 /* 5351 * Clean up code for freeing assoc that left behind a 5352 * pdapi.. maybe a peer in EEOR that just closed after 5353 * sending and never indicated a EOR. 
5354 */ 5355 if (hold_rlock == 0) { 5356 hold_rlock = 1; 5357 SCTP_INP_READ_LOCK(inp); 5358 } 5359 control->held_length = 0; 5360 if (control->data) { 5361 /* Hmm there is data here .. fix */ 5362 struct mbuf *m_tmp; 5363 int cnt = 0; 5364 5365 m_tmp = control->data; 5366 while (m_tmp) { 5367 cnt += SCTP_BUF_LEN(m_tmp); 5368 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5369 control->tail_mbuf = m_tmp; 5370 control->end_added = 1; 5371 } 5372 m_tmp = SCTP_BUF_NEXT(m_tmp); 5373 } 5374 control->length = cnt; 5375 } else { 5376 /* remove it */ 5377 TAILQ_REMOVE(&inp->read_queue, control, next); 5378 /* Add back any hiddend data */ 5379 sctp_free_remote_addr(control->whoFrom); 5380 sctp_free_a_readq(stcb, control); 5381 } 5382 if (hold_rlock) { 5383 hold_rlock = 0; 5384 SCTP_INP_READ_UNLOCK(inp); 5385 } 5386 goto restart; 5387 } 5388 if ((control->length == 0) && 5389 (control->end_added == 1)) { 5390 /* 5391 * Do we also need to check for (control->pdapi_aborted == 5392 * 1)? 5393 */ 5394 if (hold_rlock == 0) { 5395 hold_rlock = 1; 5396 SCTP_INP_READ_LOCK(inp); 5397 } 5398 TAILQ_REMOVE(&inp->read_queue, control, next); 5399 if (control->data) { 5400 #ifdef INVARIANTS 5401 panic("control->data not null but control->length == 0"); 5402 #else 5403 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5404 sctp_m_freem(control->data); 5405 control->data = NULL; 5406 #endif 5407 } 5408 if (control->aux_data) { 5409 sctp_m_free(control->aux_data); 5410 control->aux_data = NULL; 5411 } 5412 #ifdef INVARIANTS 5413 if (control->on_strm_q) { 5414 panic("About to free ctl:%p so:%p and its in %d", 5415 control, so, control->on_strm_q); 5416 } 5417 #endif 5418 sctp_free_remote_addr(control->whoFrom); 5419 sctp_free_a_readq(stcb, control); 5420 if (hold_rlock) { 5421 hold_rlock = 0; 5422 SCTP_INP_READ_UNLOCK(inp); 5423 } 5424 goto restart; 5425 } 5426 if (control->length == 0) { 5427 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5428 (filling_sinfo)) { 5429 /* find a more suitable one then this */ 5430 ctl = TAILQ_NEXT(control, next); 5431 while (ctl) { 5432 if ((ctl->stcb != control->stcb) && (ctl->length) && 5433 (ctl->some_taken || 5434 (ctl->spec_flags & M_NOTIFICATION) || 5435 ((ctl->do_not_ref_stcb == 0) && 5436 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5437 ) { 5438 /*- 5439 * If we have a different TCB next, and there is data 5440 * present. If we have already taken some (pdapi), OR we can 5441 * ref the tcb and no delivery as started on this stream, we 5442 * take it. Note we allow a notification on a different 5443 * assoc to be delivered.. 5444 */ 5445 control = ctl; 5446 goto found_one; 5447 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5448 (ctl->length) && 5449 ((ctl->some_taken) || 5450 ((ctl->do_not_ref_stcb == 0) && 5451 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5452 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5453 /*- 5454 * If we have the same tcb, and there is data present, and we 5455 * have the strm interleave feature present. Then if we have 5456 * taken some (pdapi) or we can refer to tht tcb AND we have 5457 * not started a delivery for this stream, we can take it. 
5458 * Note we do NOT allow a notificaiton on the same assoc to 5459 * be delivered. 5460 */ 5461 control = ctl; 5462 goto found_one; 5463 } 5464 ctl = TAILQ_NEXT(ctl, next); 5465 } 5466 } 5467 /* 5468 * if we reach here, not suitable replacement is available 5469 * <or> fragment interleave is NOT on. So stuff the sb_cc 5470 * into the our held count, and its time to sleep again. 5471 */ 5472 held_length = so->so_rcv.sb_cc; 5473 control->held_length = so->so_rcv.sb_cc; 5474 goto restart; 5475 } 5476 /* Clear the held length since there is something to read */ 5477 control->held_length = 0; 5478 found_one: 5479 /* 5480 * If we reach here, control has a some data for us to read off. 5481 * Note that stcb COULD be NULL. 5482 */ 5483 if (hold_rlock == 0) { 5484 hold_rlock = 1; 5485 SCTP_INP_READ_LOCK(inp); 5486 } 5487 control->some_taken++; 5488 stcb = control->stcb; 5489 if (stcb) { 5490 if ((control->do_not_ref_stcb == 0) && 5491 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5492 if (freecnt_applied == 0) 5493 stcb = NULL; 5494 } else if (control->do_not_ref_stcb == 0) { 5495 /* you can't free it on me please */ 5496 /* 5497 * The lock on the socket buffer protects us so the 5498 * free code will stop. But since we used the 5499 * socketbuf lock and the sender uses the tcb_lock 5500 * to increment, we need to use the atomic add to 5501 * the refcnt 5502 */ 5503 if (freecnt_applied) { 5504 #ifdef INVARIANTS 5505 panic("refcnt already incremented"); 5506 #else 5507 SCTP_PRINTF("refcnt already incremented?\n"); 5508 #endif 5509 } else { 5510 atomic_add_int(&stcb->asoc.refcnt, 1); 5511 freecnt_applied = 1; 5512 } 5513 /* 5514 * Setup to remember how much we have not yet told 5515 * the peer our rwnd has opened up. Note we grab the 5516 * value from the tcb from last time. Note too that 5517 * sack sending clears this when a sack is sent, 5518 * which is fine. 
Once we hit the rwnd_req, we then 5519 * will go to the sctp_user_rcvd() that will not 5520 * lock until it KNOWs it MUST send a WUP-SACK. 5521 */ 5522 freed_so_far = stcb->freed_by_sorcv_sincelast; 5523 stcb->freed_by_sorcv_sincelast = 0; 5524 } 5525 } 5526 if (stcb && 5527 ((control->spec_flags & M_NOTIFICATION) == 0) && 5528 control->do_not_ref_stcb == 0) { 5529 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5530 } 5531 /* First lets get off the sinfo and sockaddr info */ 5532 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5533 sinfo->sinfo_stream = control->sinfo_stream; 5534 sinfo->sinfo_ssn = (uint16_t)control->mid; 5535 sinfo->sinfo_flags = control->sinfo_flags; 5536 sinfo->sinfo_ppid = control->sinfo_ppid; 5537 sinfo->sinfo_context = control->sinfo_context; 5538 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5539 sinfo->sinfo_tsn = control->sinfo_tsn; 5540 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5541 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5542 nxt = TAILQ_NEXT(control, next); 5543 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5544 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5545 struct sctp_extrcvinfo *s_extra; 5546 5547 s_extra = (struct sctp_extrcvinfo *)sinfo; 5548 if ((nxt) && 5549 (nxt->length)) { 5550 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5551 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5552 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5553 } 5554 if (nxt->spec_flags & M_NOTIFICATION) { 5555 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5556 } 5557 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5558 s_extra->serinfo_next_length = nxt->length; 5559 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5560 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5561 if (nxt->tail_mbuf != NULL) { 5562 if (nxt->end_added) { 5563 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5564 } 5565 } 5566 } else { 5567 /* 5568 * we explicitly 0 this, since the memcpy 
5569 * got some other things beyond the older 5570 * sinfo_ that is on the control's structure 5571 * :-D 5572 */ 5573 nxt = NULL; 5574 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5575 s_extra->serinfo_next_aid = 0; 5576 s_extra->serinfo_next_length = 0; 5577 s_extra->serinfo_next_ppid = 0; 5578 s_extra->serinfo_next_stream = 0; 5579 } 5580 } 5581 /* 5582 * update off the real current cum-ack, if we have an stcb. 5583 */ 5584 if ((control->do_not_ref_stcb == 0) && stcb) 5585 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5586 /* 5587 * mask off the high bits, we keep the actual chunk bits in 5588 * there. 5589 */ 5590 sinfo->sinfo_flags &= 0x00ff; 5591 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5592 sinfo->sinfo_flags |= SCTP_UNORDERED; 5593 } 5594 } 5595 #ifdef SCTP_ASOCLOG_OF_TSNS 5596 { 5597 int index, newindex; 5598 struct sctp_pcbtsn_rlog *entry; 5599 5600 do { 5601 index = inp->readlog_index; 5602 newindex = index + 1; 5603 if (newindex >= SCTP_READ_LOG_SIZE) { 5604 newindex = 0; 5605 } 5606 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5607 entry = &inp->readlog[index]; 5608 entry->vtag = control->sinfo_assoc_id; 5609 entry->strm = control->sinfo_stream; 5610 entry->seq = (uint16_t)control->mid; 5611 entry->sz = control->length; 5612 entry->flgs = control->sinfo_flags; 5613 } 5614 #endif 5615 if ((fromlen > 0) && (from != NULL)) { 5616 union sctp_sockstore store; 5617 size_t len; 5618 5619 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5620 #ifdef INET6 5621 case AF_INET6: 5622 len = sizeof(struct sockaddr_in6); 5623 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5624 store.sin6.sin6_port = control->port_from; 5625 break; 5626 #endif 5627 #ifdef INET 5628 case AF_INET: 5629 #ifdef INET6 5630 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5631 len = sizeof(struct sockaddr_in6); 5632 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5633 &store.sin6); 5634 store.sin6.sin6_port = 
control->port_from; 5635 } else { 5636 len = sizeof(struct sockaddr_in); 5637 store.sin = control->whoFrom->ro._l_addr.sin; 5638 store.sin.sin_port = control->port_from; 5639 } 5640 #else 5641 len = sizeof(struct sockaddr_in); 5642 store.sin = control->whoFrom->ro._l_addr.sin; 5643 store.sin.sin_port = control->port_from; 5644 #endif 5645 break; 5646 #endif 5647 default: 5648 len = 0; 5649 break; 5650 } 5651 memcpy(from, &store, min((size_t)fromlen, len)); 5652 #ifdef INET6 5653 { 5654 struct sockaddr_in6 lsa6, *from6; 5655 5656 from6 = (struct sockaddr_in6 *)from; 5657 sctp_recover_scope_mac(from6, (&lsa6)); 5658 } 5659 #endif 5660 } 5661 if (hold_rlock) { 5662 SCTP_INP_READ_UNLOCK(inp); 5663 hold_rlock = 0; 5664 } 5665 if (hold_sblock) { 5666 SOCKBUF_UNLOCK(&so->so_rcv); 5667 hold_sblock = 0; 5668 } 5669 /* now copy out what data we can */ 5670 if (mp == NULL) { 5671 /* copy out each mbuf in the chain up to length */ 5672 get_more_data: 5673 m = control->data; 5674 while (m) { 5675 /* Move out all we can */ 5676 cp_len = (int)uio->uio_resid; 5677 my_len = (int)SCTP_BUF_LEN(m); 5678 if (cp_len > my_len) { 5679 /* not enough in this buf */ 5680 cp_len = my_len; 5681 } 5682 if (hold_rlock) { 5683 SCTP_INP_READ_UNLOCK(inp); 5684 hold_rlock = 0; 5685 } 5686 if (cp_len > 0) 5687 error = uiomove(mtod(m, char *), cp_len, uio); 5688 /* re-read */ 5689 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5690 goto release; 5691 } 5692 if ((control->do_not_ref_stcb == 0) && stcb && 5693 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5694 no_rcv_needed = 1; 5695 } 5696 if (error) { 5697 /* error we are out of here */ 5698 goto release; 5699 } 5700 SCTP_INP_READ_LOCK(inp); 5701 hold_rlock = 1; 5702 if (cp_len == SCTP_BUF_LEN(m)) { 5703 if ((SCTP_BUF_NEXT(m) == NULL) && 5704 (control->end_added)) { 5705 out_flags |= MSG_EOR; 5706 if ((control->do_not_ref_stcb == 0) && 5707 (control->stcb != NULL) && 5708 ((control->spec_flags & M_NOTIFICATION) == 0)) 5709 
control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5710 } 5711 if (control->spec_flags & M_NOTIFICATION) { 5712 out_flags |= MSG_NOTIFICATION; 5713 } 5714 /* we ate up the mbuf */ 5715 if (in_flags & MSG_PEEK) { 5716 /* just looking */ 5717 m = SCTP_BUF_NEXT(m); 5718 copied_so_far += cp_len; 5719 } else { 5720 /* dispose of the mbuf */ 5721 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5722 sctp_sblog(&so->so_rcv, 5723 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5724 } 5725 sctp_sbfree(control, stcb, &so->so_rcv, m); 5726 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5727 sctp_sblog(&so->so_rcv, 5728 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5729 } 5730 copied_so_far += cp_len; 5731 freed_so_far += cp_len; 5732 freed_so_far += MSIZE; 5733 atomic_subtract_int(&control->length, cp_len); 5734 control->data = sctp_m_free(m); 5735 m = control->data; 5736 /* 5737 * been through it all, must hold sb 5738 * lock ok to null tail 5739 */ 5740 if (control->data == NULL) { 5741 #ifdef INVARIANTS 5742 if ((control->end_added == 0) || 5743 (TAILQ_NEXT(control, next) == NULL)) { 5744 /* 5745 * If the end is not 5746 * added, OR the 5747 * next is NOT null 5748 * we MUST have the 5749 * lock. 5750 */ 5751 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 5752 panic("Hmm we don't own the lock?"); 5753 } 5754 } 5755 #endif 5756 control->tail_mbuf = NULL; 5757 #ifdef INVARIANTS 5758 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 5759 panic("end_added, nothing left and no MSG_EOR"); 5760 } 5761 #endif 5762 } 5763 } 5764 } else { 5765 /* Do we need to trim the mbuf? 
*/ 5766 if (control->spec_flags & M_NOTIFICATION) { 5767 out_flags |= MSG_NOTIFICATION; 5768 } 5769 if ((in_flags & MSG_PEEK) == 0) { 5770 SCTP_BUF_RESV_UF(m, cp_len); 5771 SCTP_BUF_LEN(m) -= cp_len; 5772 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5773 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5774 } 5775 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5776 if ((control->do_not_ref_stcb == 0) && 5777 stcb) { 5778 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5779 } 5780 copied_so_far += cp_len; 5781 freed_so_far += cp_len; 5782 freed_so_far += MSIZE; 5783 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5784 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5785 SCTP_LOG_SBRESULT, 0); 5786 } 5787 atomic_subtract_int(&control->length, cp_len); 5788 } else { 5789 copied_so_far += cp_len; 5790 } 5791 } 5792 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 5793 break; 5794 } 5795 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5796 (control->do_not_ref_stcb == 0) && 5797 (freed_so_far >= rwnd_req)) { 5798 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5799 } 5800 } /* end while(m) */ 5801 /* 5802 * At this point we have looked at it all and we either have 5803 * a MSG_EOR/or read all the user wants... <OR> 5804 * control->length == 0. 5805 */ 5806 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 5807 /* we are done with this control */ 5808 if (control->length == 0) { 5809 if (control->data) { 5810 #ifdef INVARIANTS 5811 panic("control->data not null at read eor?"); 5812 #else 5813 SCTP_PRINTF("Strange, data left in the control buffer .. 
invarients would panic?\n"); 5814 sctp_m_freem(control->data); 5815 control->data = NULL; 5816 #endif 5817 } 5818 done_with_control: 5819 if (hold_rlock == 0) { 5820 SCTP_INP_READ_LOCK(inp); 5821 hold_rlock = 1; 5822 } 5823 TAILQ_REMOVE(&inp->read_queue, control, next); 5824 /* Add back any hiddend data */ 5825 if (control->held_length) { 5826 held_length = 0; 5827 control->held_length = 0; 5828 wakeup_read_socket = 1; 5829 } 5830 if (control->aux_data) { 5831 sctp_m_free(control->aux_data); 5832 control->aux_data = NULL; 5833 } 5834 no_rcv_needed = control->do_not_ref_stcb; 5835 sctp_free_remote_addr(control->whoFrom); 5836 control->data = NULL; 5837 #ifdef INVARIANTS 5838 if (control->on_strm_q) { 5839 panic("About to free ctl:%p so:%p and its in %d", 5840 control, so, control->on_strm_q); 5841 } 5842 #endif 5843 sctp_free_a_readq(stcb, control); 5844 control = NULL; 5845 if ((freed_so_far >= rwnd_req) && 5846 (no_rcv_needed == 0)) 5847 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5848 5849 } else { 5850 /* 5851 * The user did not read all of this 5852 * message, turn off the returned MSG_EOR 5853 * since we are leaving more behind on the 5854 * control to read. 5855 */ 5856 #ifdef INVARIANTS 5857 if (control->end_added && 5858 (control->data == NULL) && 5859 (control->tail_mbuf == NULL)) { 5860 panic("Gak, control->length is corrupt?"); 5861 } 5862 #endif 5863 no_rcv_needed = control->do_not_ref_stcb; 5864 out_flags &= ~MSG_EOR; 5865 } 5866 } 5867 if (out_flags & MSG_EOR) { 5868 goto release; 5869 } 5870 if ((uio->uio_resid == 0) || 5871 ((in_eeor_mode) && 5872 (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) { 5873 goto release; 5874 } 5875 /* 5876 * If I hit here the receiver wants more and this message is 5877 * NOT done (pd-api). So two questions. Can we block? if not 5878 * we are done. Did the user NOT set MSG_WAITALL? 
5879 */ 5880 if (block_allowed == 0) { 5881 goto release; 5882 } 5883 /* 5884 * We need to wait for more data a few things: - We don't 5885 * sbunlock() so we don't get someone else reading. - We 5886 * must be sure to account for the case where what is added 5887 * is NOT to our control when we wakeup. 5888 */ 5889 5890 /* 5891 * Do we need to tell the transport a rwnd update might be 5892 * needed before we go to sleep? 5893 */ 5894 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5895 ((freed_so_far >= rwnd_req) && 5896 (control->do_not_ref_stcb == 0) && 5897 (no_rcv_needed == 0))) { 5898 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5899 } 5900 wait_some_more: 5901 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5902 goto release; 5903 } 5904 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5905 goto release; 5906 5907 if (hold_rlock == 1) { 5908 SCTP_INP_READ_UNLOCK(inp); 5909 hold_rlock = 0; 5910 } 5911 if (hold_sblock == 0) { 5912 SOCKBUF_LOCK(&so->so_rcv); 5913 hold_sblock = 1; 5914 } 5915 if ((copied_so_far) && (control->length == 0) && 5916 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 5917 goto release; 5918 } 5919 if (so->so_rcv.sb_cc <= control->held_length) { 5920 error = sbwait(&so->so_rcv); 5921 if (error) { 5922 goto release; 5923 } 5924 control->held_length = 0; 5925 } 5926 if (hold_sblock) { 5927 SOCKBUF_UNLOCK(&so->so_rcv); 5928 hold_sblock = 0; 5929 } 5930 if (control->length == 0) { 5931 /* still nothing here */ 5932 if (control->end_added == 1) { 5933 /* he aborted, or is done i.e.did a shutdown */ 5934 out_flags |= MSG_EOR; 5935 if (control->pdapi_aborted) { 5936 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5937 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5938 5939 out_flags |= MSG_TRUNC; 5940 } else { 5941 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5942 
control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5943 } 5944 goto done_with_control; 5945 } 5946 if (so->so_rcv.sb_cc > held_length) { 5947 control->held_length = so->so_rcv.sb_cc; 5948 held_length = 0; 5949 } 5950 goto wait_some_more; 5951 } else if (control->data == NULL) { 5952 /* 5953 * we must re-sync since data is probably being 5954 * added 5955 */ 5956 SCTP_INP_READ_LOCK(inp); 5957 if ((control->length > 0) && (control->data == NULL)) { 5958 /* 5959 * big trouble.. we have the lock and its 5960 * corrupt? 5961 */ 5962 #ifdef INVARIANTS 5963 panic("Impossible data==NULL length !=0"); 5964 #endif 5965 out_flags |= MSG_EOR; 5966 out_flags |= MSG_TRUNC; 5967 control->length = 0; 5968 SCTP_INP_READ_UNLOCK(inp); 5969 goto done_with_control; 5970 } 5971 SCTP_INP_READ_UNLOCK(inp); 5972 /* We will fall around to get more data */ 5973 } 5974 goto get_more_data; 5975 } else { 5976 /*- 5977 * Give caller back the mbuf chain, 5978 * store in uio_resid the length 5979 */ 5980 wakeup_read_socket = 0; 5981 if ((control->end_added == 0) || 5982 (TAILQ_NEXT(control, next) == NULL)) { 5983 /* Need to get rlock */ 5984 if (hold_rlock == 0) { 5985 SCTP_INP_READ_LOCK(inp); 5986 hold_rlock = 1; 5987 } 5988 } 5989 if (control->end_added) { 5990 out_flags |= MSG_EOR; 5991 if ((control->do_not_ref_stcb == 0) && 5992 (control->stcb != NULL) && 5993 ((control->spec_flags & M_NOTIFICATION) == 0)) 5994 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5995 } 5996 if (control->spec_flags & M_NOTIFICATION) { 5997 out_flags |= MSG_NOTIFICATION; 5998 } 5999 uio->uio_resid = control->length; 6000 *mp = control->data; 6001 m = control->data; 6002 while (m) { 6003 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6004 sctp_sblog(&so->so_rcv, 6005 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6006 } 6007 sctp_sbfree(control, stcb, &so->so_rcv, m); 6008 freed_so_far += SCTP_BUF_LEN(m); 6009 freed_so_far += MSIZE; 6010 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6011 sctp_sblog(&so->so_rcv, 6012 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6013 } 6014 m = SCTP_BUF_NEXT(m); 6015 } 6016 control->data = control->tail_mbuf = NULL; 6017 control->length = 0; 6018 if (out_flags & MSG_EOR) { 6019 /* Done with this control */ 6020 goto done_with_control; 6021 } 6022 } 6023 release: 6024 if (hold_rlock == 1) { 6025 SCTP_INP_READ_UNLOCK(inp); 6026 hold_rlock = 0; 6027 } 6028 if (hold_sblock == 1) { 6029 SOCKBUF_UNLOCK(&so->so_rcv); 6030 hold_sblock = 0; 6031 } 6032 sbunlock(&so->so_rcv); 6033 sockbuf_lock = 0; 6034 6035 release_unlocked: 6036 if (hold_sblock) { 6037 SOCKBUF_UNLOCK(&so->so_rcv); 6038 hold_sblock = 0; 6039 } 6040 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6041 if ((freed_so_far >= rwnd_req) && 6042 (control && (control->do_not_ref_stcb == 0)) && 6043 (no_rcv_needed == 0)) 6044 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6045 } 6046 out: 6047 if (msg_flags) { 6048 *msg_flags = out_flags; 6049 } 6050 if (((out_flags & MSG_EOR) == 0) && 6051 ((in_flags & MSG_PEEK) == 0) && 6052 (sinfo) && 6053 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6054 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6055 struct sctp_extrcvinfo *s_extra; 6056 6057 s_extra = (struct sctp_extrcvinfo *)sinfo; 6058 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6059 } 6060 if (hold_rlock == 1) { 6061 SCTP_INP_READ_UNLOCK(inp); 6062 } 6063 if (hold_sblock) { 6064 SOCKBUF_UNLOCK(&so->so_rcv); 6065 } 6066 if (sockbuf_lock) { 6067 sbunlock(&so->so_rcv); 6068 } 6069 if (freecnt_applied) { 6070 /* 6071 * The lock on the socket buffer protects us so the free 6072 * code will stop. 
But since we used the socketbuf lock and 6073 * the sender uses the tcb_lock to increment, we need to use 6074 * the atomic add to the refcnt. 6075 */ 6076 if (stcb == NULL) { 6077 #ifdef INVARIANTS 6078 panic("stcb for refcnt has gone NULL?"); 6079 goto stage_left; 6080 #else 6081 goto stage_left; 6082 #endif 6083 } 6084 /* Save the value back for next time */ 6085 stcb->freed_by_sorcv_sincelast = freed_so_far; 6086 atomic_add_int(&stcb->asoc.refcnt, -1); 6087 } 6088 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6089 if (stcb) { 6090 sctp_misc_ints(SCTP_SORECV_DONE, 6091 freed_so_far, 6092 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6093 stcb->asoc.my_rwnd, 6094 so->so_rcv.sb_cc); 6095 } else { 6096 sctp_misc_ints(SCTP_SORECV_DONE, 6097 freed_so_far, 6098 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6099 0, 6100 so->so_rcv.sb_cc); 6101 } 6102 } 6103 stage_left: 6104 if (wakeup_read_socket) { 6105 sctp_sorwakeup(inp, so); 6106 } 6107 return (error); 6108 } 6109 6110 6111 #ifdef SCTP_MBUF_LOGGING 6112 struct mbuf * 6113 sctp_m_free(struct mbuf *m) 6114 { 6115 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6116 sctp_log_mb(m, SCTP_MBUF_IFREE); 6117 } 6118 return (m_free(m)); 6119 } 6120 6121 void 6122 sctp_m_freem(struct mbuf *mb) 6123 { 6124 while (mb != NULL) 6125 mb = sctp_m_free(mb); 6126 } 6127 6128 #endif 6129 6130 int 6131 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6132 { 6133 /* 6134 * Given a local address. For all associations that holds the 6135 * address, request a peer-set-primary. 6136 */ 6137 struct sctp_ifa *ifa; 6138 struct sctp_laddr *wi; 6139 6140 ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0); 6141 if (ifa == NULL) { 6142 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6143 return (EADDRNOTAVAIL); 6144 } 6145 /* 6146 * Now that we have the ifa we must awaken the iterator with this 6147 * message. 
 */
	/* Allocate a work item to hand the ifa to the ADDR_WQ iterator. */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now increment the count and initialize the wi structure */
	SCTP_INCR_LADDR_COUNT();
	memset(wi, 0, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa for as long as it sits on the queue. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	SCTP_WQ_ADDR_UNLOCK();
	return (0);
}

/*
 * Protocol hook for soreceive() on SCTP sockets.  Translates the generic
 * socket-layer arguments into a sctp_sorecvmsg() call: optionally fills a
 * struct sctp_extrcvinfo (only when one of the rcvinfo-style features is
 * enabled on the endpoint and the caller supplied controlp), hands the
 * ancillary data back via *controlp in CMSG form, and duplicates the peer
 * address into *psa when requested.  Returns 0 or an errno.
 */
int
sctp_soreceive(struct socket *so,
    struct sockaddr **psa,
    struct uio *uio,
    struct mbuf **mp0,
    struct mbuf **controlp,
    int *flagsp)
{
	int error, fromlen;
	uint8_t sockbuf[256];	/* scratch space for the peer sockaddr */
	struct sockaddr *from;
	struct sctp_extrcvinfo sinfo;
	int filling_sinfo = 1;
	int flags;
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pick up the endpoint we are reading from */
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
	    (controlp == NULL)) {
		/* user does not want the sndrcv ctl */
		filling_sinfo = 0;
	}
	if (psa) {
		from = (struct sockaddr *)sockbuf;
		fromlen = sizeof(sockbuf);
		/* sa_len == 0 marks "no address filled in" below */
		from->sa_len = 0;
	} else {
		from = NULL;
		fromlen = 0;
	}

	if (filling_sinfo) {
		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
	}
	if (flagsp != NULL) {
		flags = *flagsp;
	} else {
		flags = 0;
	}
	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
	if (flagsp != NULL) {
		*flagsp = flags;
	}
	if (controlp != NULL) {
		/* copy back the sinfo in a CMSG format */
		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
			*controlp = sctp_build_ctl_nchunk(inp,
			    (struct sctp_sndrcvinfo *)&sinfo);
		} else {
			/* notifications carry no rcvinfo ancillary data */
			*controlp = NULL;
		}
	}
	if (psa) {
		/* copy back the address info */
		if (from && from->sa_len) {
			*psa = sodupsockaddr(from, M_NOWAIT);
		} else {
			*psa = NULL;
		}
	}
	return (error);
}

/*
 * Add the totaddr addresses packed back-to-back starting at addr as remote
 * addresses of stcb (the sctp_connectx() helper).  Returns the number of
 * addresses successfully added.  On a bad address (wildcard, broadcast or
 * multicast) or on sctp_add_remote_addr() failure the association is freed
 * and *error is set to EINVAL/ENOBUFS; otherwise *error is left at 0.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): unknown families leave incr at its
			 * previous value (0 on the first pass), so sa is not
			 * advanced -- presumably callers pre-validate the
			 * list via sctp_connectx_helper_find(); confirm.
			 */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}

/*
 * Walk the packed address list handed to sctp_connectx(): validate each
 * sockaddr length, count v4/v6 addresses into *num_v4/*num_v6, and return
 * any existing association found for one of the addresses (with the inp
 * reference taken); returns NULL when none exists.  On a malformed or
 * v4-mapped address sets *error = EINVAL and *bad_addr = 1.  *totaddr is
 * trimmed when an unknown family or the byte limit cuts the walk short.
 */
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    unsigned int *totaddr,
    unsigned int *num_v4, unsigned int *num_v6, int *error,
    unsigned int limit, int *bad_addr)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	unsigned int incr, at, i;

	at = 0;
	sa = addr;
	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < *totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = (unsigned int)sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			(*num_v4) += 1;
			break;
6361 #endif 6362 #ifdef INET6 6363 case AF_INET6: 6364 { 6365 struct sockaddr_in6 *sin6; 6366 6367 sin6 = (struct sockaddr_in6 *)sa; 6368 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6369 /* Must be non-mapped for connectx */ 6370 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6371 *error = EINVAL; 6372 *bad_addr = 1; 6373 return (NULL); 6374 } 6375 incr = (unsigned int)sizeof(struct sockaddr_in6); 6376 if (sa->sa_len != incr) { 6377 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6378 *error = EINVAL; 6379 *bad_addr = 1; 6380 return (NULL); 6381 } 6382 (*num_v6) += 1; 6383 break; 6384 } 6385 #endif 6386 default: 6387 *totaddr = i; 6388 incr = 0; 6389 /* we are done */ 6390 break; 6391 } 6392 if (i == *totaddr) { 6393 break; 6394 } 6395 SCTP_INP_INCR_REF(inp); 6396 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6397 if (stcb != NULL) { 6398 /* Already have or am bring up an association */ 6399 return (stcb); 6400 } else { 6401 SCTP_INP_DECR_REF(inp); 6402 } 6403 if ((at + incr) > limit) { 6404 *totaddr = i; 6405 break; 6406 } 6407 sa = (struct sockaddr *)((caddr_t)sa + incr); 6408 } 6409 return ((struct sctp_tcb *)NULL); 6410 } 6411 6412 /* 6413 * sctp_bindx(ADD) for one address. 6414 * assumes all arguments are valid/checked by caller. 6415 */ 6416 void 6417 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6418 struct sockaddr *sa, sctp_assoc_t assoc_id, 6419 uint32_t vrf_id, int *error, void *p) 6420 { 6421 struct sockaddr *addr_touse; 6422 #if defined(INET) && defined(INET6) 6423 struct sockaddr_in sin; 6424 #endif 6425 6426 /* see if we're bound all already! 
	 */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* bindx makes no sense on a wildcard-bound endpoint. */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Work with the un-mapped v4 form from here on. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* Endpoint not yet bound at all: a first bind() does the job. */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Address is free: add it to this endpoint. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* Some other endpoint already owns this address/port. */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}

/*
 * sctp_bindx(DELETE) for one address.
 * assumes all arguments are valid/checked by caller.
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	/* Scratch space for converting a v4-mapped v6 address to plain v4. */
	struct sockaddr_in sin;
#endif

	/* see if we're bound all already!
	 */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* bindx makes no sense on a wildcard-bound endpoint. */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;
#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Work with the un-mapped v4 form from here on. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}

/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;
#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;
#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				/* loopback addresses are out of scope */
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if
						    (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* Skip addresses the jail may not use. */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* Skip addresses the jail may not use. */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad
									 * link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}

#if defined(SCTP_LOCAL_TRACE_BUF)

/*
 * Append one entry to the global SCTP trace ring buffer.  The slot is
 * claimed lock-free with a CAS loop on the shared index; the entry fields
 * are then filled in without further synchronization.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* Claim a slot: advance the shared index, wrapping back to 1. */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/*
	 * NOTE(review): on wrap the entry written is entry[0] while the
	 * shared index restarts at 1, so both map slot 0 consistently.
	 */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}

#endif
/*
 * Receive callback for SCTP-over-UDP tunneled packets (RFC 6951 style
 * encapsulation).  Strips the UDP header from the mbuf chain and hands the
 * packet to the regular SCTP input path, recording the UDP source port.
 * Consumes 'm' on every path.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flag indicates that the HW checked the UDP
	 * checksum and it was valid. Since CSUM_DATA_VALID ==
	 * CSUM_SCTP_VALID this would imply that the HW also verified the
	 * SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the IP length to account for the removed UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}

#ifdef INET
/*
 * ICMP error callback for the SCTP-over-UDP tunneling socket (IPv4).
 * 'vip' points at the inner IP header quoted in the ICMP message; the
 * outer IP and ICMP headers are located backwards from it.  After
 * validating lengths, ports, and the verification tag, the error is
 * forwarded to sctp_notify() for the matching association.
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
	struct ip *outer_ip, *inner_ip;
	struct sctphdr *sh;
	struct icmp *icmp;
	struct udphdr *udp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_init_chunk *ch;
	struct sockaddr_in src, dst;
	uint8_t type, code;

	inner_ip = (struct ip *)vip;
	icmp = (struct icmp *)((caddr_t)inner_ip -
	    (sizeof(struct icmp) - sizeof(struct ip)));
	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
	/* Make sure the quoted packet covers at least UDP + 8 SCTP bytes. */
	if (ntohs(outer_ip->ip_len) <
	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
		return;
	}
	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
	sh = (struct sctphdr *)(udp + 1);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = inner_ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = inner_ip->ip_dst;
	/*
	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
	 * holds our local endpoint address. Thus we reverse the dst and the
	 * src in the lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1,
	    SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp->uh_dport != net->port) ||
		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh->v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			if (ntohs(outer_ip->ip_len) >=
			    sizeof(struct ip) +
			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				ch = (struct sctp_init_chunk *)(sh + 1);
				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = icmp->icmp_type;
		code = icmp->icmp_code;
		/* Treat port unreachable like protocol unreachable. */
		if ((type == ICMP_UNREACH) &&
		    (code == ICMP_UNREACH_PORT)) {
			code = ICMP_UNREACH_PROTOCOL;
		}
		sctp_notify(inp, stcb, net, type, code,
		    ntohs(inner_ip->ip_len),
		    (uint32_t)ntohs(icmp->icmp_nextmtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	return;
}
#endif

#ifdef INET6
/*
 * ICMPv6 error callback for the SCTP-over-UDP tunneling socket (IPv6).
 * Mirrors sctp_recv_icmp_tunneled_packet() but works from the
 * ip6ctlparam descriptor, copying the quoted headers out of the mbuf
 * rather than touching them in place.
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
	struct ip6ctlparam *ip6cp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctphdr sh;
	struct udphdr udp;
	struct sockaddr_in6 src, dst;
	uint8_t type, code;

	ip6cp = (struct ip6ctlparam *)d;
	/*
	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
	 */
	if (ip6cp->ip6c_m == NULL) {
		return;
	}
	/*
	 * Check if we can safely examine the ports and the verification tag
	 * of the SCTP common header.
	 */
	if (ip6cp->ip6c_m->m_pkthdr.len <
	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
		return;
	}
	/* Copy out the UDP header. */
	memset(&udp, 0, sizeof(struct udphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off,
	    sizeof(struct udphdr),
	    (caddr_t)&udp);
	/* Copy out the port numbers and the verification tag.
	 */
	memset(&sh, 0, sizeof(struct sctphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off + sizeof(struct udphdr),
	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
	    (caddr_t)&sh);
	memset(&src, 0, sizeof(struct sockaddr_in6));
	src.sin6_family = AF_INET6;
	src.sin6_len = sizeof(struct sockaddr_in6);
	src.sin6_port = sh.src_port;
	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	memset(&dst, 0, sizeof(struct sockaddr_in6));
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(struct sockaddr_in6);
	dst.sin6_port = sh.dest_port;
	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	/* dst/src are reversed in the lookup: dst is the remote peer. */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp.uh_dport != net->port) ||
		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh.v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			if (ip6cp->ip6c_m->m_pkthdr.len >=
			    ip6cp->ip6c_off + sizeof(struct udphdr) +
			    sizeof(struct sctphdr) +
			    sizeof(struct sctp_chunkhdr) +
			    offsetof(struct sctp_init, a_rwnd)) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				uint32_t initiate_tag;
				uint8_t chunk_type;

				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr),
				    sizeof(uint8_t),
				    (caddr_t)&chunk_type);
				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr) +
				    sizeof(struct sctp_chunkhdr),
				    sizeof(uint32_t),
				    (caddr_t)&initiate_tag);
				if ((chunk_type != SCTP_INITIATION) ||
				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = ip6cp->ip6c_icmp6->icmp6_type;
		code = ip6cp->ip6c_icmp6->icmp6_code;
		/* Map "no port" to a parameter problem for notification. */
		if ((type == ICMP6_DST_UNREACH) &&
		    (code == ICMP6_DST_UNREACH_NOPORT)) {
			type = ICMP6_PARAM_PROB;
			code = ICMP6_PARAMPROB_NEXTHEADER;
		}
		sctp6_notify(inp, stcb, net, type, code,
		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce inp's ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
}
#endif

/* Tear down the SCTP-over-UDP tunneling sockets, if they exist. */
void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}

/*
 * Create, hook up, and bind the kernel UDP tunneling sockets used for
 * SCTP-over-UDP encapsulation on the sysctl-configured port.  Returns 0
 * on success or an errno; on any failure all sockets are torn down again
 * via sctp_over_udp_stop().
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
7245 */ 7246 uint32_t 7247 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3) 7248 { 7249 if (mtu1 > 0) { 7250 if (mtu2 > 0) { 7251 if (mtu3 > 0) { 7252 return (min(mtu1, min(mtu2, mtu3))); 7253 } else { 7254 return (min(mtu1, mtu2)); 7255 } 7256 } else { 7257 if (mtu3 > 0) { 7258 return (min(mtu1, mtu3)); 7259 } else { 7260 return (mtu1); 7261 } 7262 } 7263 } else { 7264 if (mtu2 > 0) { 7265 if (mtu3 > 0) { 7266 return (min(mtu2, mtu3)); 7267 } else { 7268 return (mtu2); 7269 } 7270 } else { 7271 return (mtu3); 7272 } 7273 } 7274 } 7275 7276 void 7277 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu) 7278 { 7279 struct in_conninfo inc; 7280 7281 memset(&inc, 0, sizeof(struct in_conninfo)); 7282 inc.inc_fibnum = fibnum; 7283 switch (addr->sa.sa_family) { 7284 #ifdef INET 7285 case AF_INET: 7286 inc.inc_faddr = addr->sin.sin_addr; 7287 break; 7288 #endif 7289 #ifdef INET6 7290 case AF_INET6: 7291 inc.inc_flags |= INC_ISIPV6; 7292 inc.inc6_faddr = addr->sin6.sin6_addr; 7293 break; 7294 #endif 7295 default: 7296 return; 7297 } 7298 tcp_hc_updatemtu(&inc, (u_long)mtu); 7299 } 7300 7301 uint32_t 7302 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum) 7303 { 7304 struct in_conninfo inc; 7305 7306 memset(&inc, 0, sizeof(struct in_conninfo)); 7307 inc.inc_fibnum = fibnum; 7308 switch (addr->sa.sa_family) { 7309 #ifdef INET 7310 case AF_INET: 7311 inc.inc_faddr = addr->sin.sin_addr; 7312 break; 7313 #endif 7314 #ifdef INET6 7315 case AF_INET6: 7316 inc.inc_flags |= INC_ISIPV6; 7317 inc.inc6_faddr = addr->sin6.sin6_addr; 7318 break; 7319 #endif 7320 default: 7321 return (0); 7322 } 7323 return ((uint32_t)tcp_hc_getmtu(&inc)); 7324 } 7325