/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif


#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

/*
 * Record a socket-buffer accounting change ('incr' bytes, call site 'from')
 * in the KTR trace stream.  Compiled to a no-op unless SCTP_LOCAL_TRACE_BUF
 * is defined.  NOTE(review): sctp_clog is not memset here (unlike
 * rto_logging below), so unused bytes of the misc.log* words may contain
 * stack garbage — confirm the sb layout fully covers them.
 */
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace progress through the endpoint/association close path; 'loc' is a
 * caller-supplied location code identifying which step was reached.
 */
void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace a SACK event: old and new cumulative ack, highest TSN seen, and the
 * number of gap-ack and duplicate-TSN blocks reported.
 */
void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/* Trace the state of the TSN mapping array (base, cumulative, highest). */
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/* Trace a fast-retransmit decision (biggest TSN, biggest newly acked, TSN). */
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
/* Trace one mbuf: flags, length, data pointer, and external-storage info. */
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/* Trace every mbuf in a chain by walking SCTP_BUF_NEXT and logging each. */
void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

/*
 * Trace deletion of a queued-to-read entry from a stream's reassembly
 * queue; 'poschk' is the (optional) neighbouring entry used for context.
 */
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace a congestion-window event: cwnd value, flight size, pseudo-cumack
 * state of 'net' (if any), plus queue occupancy (clamped to 255).
 */
void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	/* For the PRESEND event this field carries the peer's rwnd instead. */
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Snapshot which SCTP locks the current thread owns (tcb, inp, create,
 * global info, and socket buffer mutexes) into one trace record.
 * NOTE(review): sock_lock and sockrcvbuf_lock both read so_rcv.sb_mtx —
 * looks like a copy/paste duplicate; confirm against the intended socket
 * lock before relying on the sock_lock field.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace a max-burst limit event: the error code, current burst, flight
 * size, and queue occupancy (clamped to 255) for 'net'.
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/* Trace the peer's receive window against a pending send size + overhead. */
void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/* Like sctp_log_rwnd() but also records the advertised a_rwnd value. */
void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
/* Trace output-queue byte/mbuf accounting changes. */
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
#endif

/* Trace four arbitrary 32-bit values tagged with a call-site code. */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}

/*
 * Trace a socket-wakeup event: wakeup count, flight/queue counters
 * (clamped to 0xff), PCB wakeup flags, and send-buffer flags.
 */
void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if
	    (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the defered mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		/* 0xff marks "no socket attached". */
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/*
 * Trace a sender-block event: queued bytes, queue counts, peer rwnd,
 * flight size (in KiB), and the length of the send that blocked.
 */
void
sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

/* Stub: stat-log extraction is handled externally (e.g. via ktrdump). */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
/* Circular audit trail of (event, detail) byte pairs, and its write index. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

/*
 * Dump the audit ring in order (oldest first): first from the current
 * index to the end, then from the start up to the index.  Certain marker
 * codes (0xe0/01, 0xf0, 0xc0/01) force a line break; otherwise 14 pairs
 * are printed per line.
 */
static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}

/*
 * Consistency-audit the association's flight accounting: recompute the
 * retransmit count, in-flight bytes, and per-net flight sizes from the
 * sent queue, record mismatches in the audit ring (0xAF markers), correct
 * the stored counters, and print a report if anything was off.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

/* Append one (event, detail) pair to the circular audit trail. */
void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	/* Association-wide timers, then the per-destination ones. */
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
	}
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8166,
	17912,
	32000,
	65532
};

/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4 smaller
 * or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	val &= 0xfffffffc;
	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}

/* Refill the endpoint's random-number store by hashing seed + counter. */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}

/*
 * Draw the next 32-bit value from the random store, advancing the cursor
 * with a CAS loop; refills the store when it wraps.  A nonzero
 * initial_sequence_debug overrides this with a simple counter (testing).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

/*
 * Pick a nonzero verification tag; when 'check' is set, loop until
 * sctp_is_vtag_good() accepts it for the given port pair.
 */
uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}

/* Map an internal kernel association state to the user-visible SCTP_* state. */
int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}

/*
 * Initialize a freshly allocated association from the endpoint's defaults:
 * verification tags, initial TSN, scoping, stream-out array, and mapping
 * array.  Returns 0 on success or ENOMEM.
 */
int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select
what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	/* NOTE(review): duplicate of the idata_supported assignment above. */
	asoc->idata_supported = inp->idata_supported;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			/* Auto-generate: random low 20 bits, marker in bit 31. */
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	/* All outbound sequence spaces start from the same initial TSN. */
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&inp->sctp_ep);
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimisitic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	/* This will need to be adjusted */
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	/* Address-family scoping: v6-bound sockets may also allow v4. */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = inp->sctp_frag_point;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	/* Pluggable congestion control and stream scheduling modules. */
	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1205 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size, 1206 SCTP_M_MAP); 1207 if (asoc->nr_mapping_array == NULL) { 1208 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1209 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1210 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1211 return (ENOMEM); 1212 } 1213 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size); 1214 1215 /* Now the init of the other outqueues */ 1216 TAILQ_INIT(&asoc->free_chunks); 1217 TAILQ_INIT(&asoc->control_send_queue); 1218 TAILQ_INIT(&asoc->asconf_send_queue); 1219 TAILQ_INIT(&asoc->send_queue); 1220 TAILQ_INIT(&asoc->sent_queue); 1221 TAILQ_INIT(&asoc->resetHead); 1222 asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome; 1223 TAILQ_INIT(&asoc->asconf_queue); 1224 /* authentication fields */ 1225 asoc->authinfo.random = NULL; 1226 asoc->authinfo.active_keyid = 0; 1227 asoc->authinfo.assoc_key = NULL; 1228 asoc->authinfo.assoc_keyid = 0; 1229 asoc->authinfo.recv_key = NULL; 1230 asoc->authinfo.recv_keyid = 0; 1231 LIST_INIT(&asoc->shared_keys); 1232 asoc->marked_retrans = 0; 1233 asoc->port = inp->sctp_ep.port; 1234 asoc->timoinit = 0; 1235 asoc->timodata = 0; 1236 asoc->timosack = 0; 1237 asoc->timoshutdown = 0; 1238 asoc->timoheartbeat = 0; 1239 asoc->timocookie = 0; 1240 asoc->timoshutdownack = 0; 1241 (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time); 1242 asoc->discontinuity_time = asoc->start_time; 1243 for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) { 1244 asoc->abandoned_unsent[i] = 0; 1245 asoc->abandoned_sent[i] = 0; 1246 } 1247 /* 1248 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and 1249 * freed later when the association is freed. 
 */
	return (0);
}

/*
 * Dump both TSN mapping arrays of an association to the console for
 * debugging.  Trailing all-zero bytes are trimmed from the output;
 * byte [0] is always printed (the scan stops at limit == 1).
 */
void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	/* Find the last non-zero byte of the renegable map. */
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		/* 16 bytes per output line. */
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	/* Same trimming for the non-renegable (NR) map. */
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

/*
 * Grow both TSN mapping arrays so that at least 'needed' additional bits
 * fit.  Both replacement arrays are allocated before the old ones are
 * freed, so on allocation failure the existing maps remain intact.
 * Returns 0 on success, -1 on memory exhaustion.
 */
int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	/* 'needed' is in bits; round up to bytes and add slack. */
	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	/* Zero the whole arrays, then copy the old contents over the front. */
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}


/*
 * Walk the endpoints/associations selected by iterator 'it', invoking the
 * registered callbacks (function_inp, function_assoc, function_inp_end,
 * function_atend).  Runs under the INP-info read lock and the iterator
 * lock; both are periodically dropped (see the iteration_count pause
 * below) so other threads can make progress.  'it' is freed here once
 * the walk completes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		first_in = 0;
	} else {
		/* Re-acquire the endpoint lock dropped on the previous pass. */
		SCTP_INP_RLOCK(it->inp);
	}
	/* Skip endpoints whose flags/features don't match the selector. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/*
			 * Pause to let others grab the lock: hold the stcb
			 * via its refcnt, drop everything, re-acquire, then
			 * check whether someone asked us to stop meanwhile.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

/*
 * Drain the global iterator work queue, running each queued iterator in
 * its own vnet context.  Called with the iterator WQ lock held; the lock
 * is released around each sctp_iterator_work() call and re-taken after.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);	/* frees 'it' when done */
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}


/*
 * Process the address work queue built up by rtsock address-change
 * events: move all pending entries onto a freshly allocated asconf
 * iterator and kick off an iterator over all bound-all endpoints.  On
 * allocation failure the ADDR_WQ timer is restarted to retry later; on
 * iterator-start failure the entries are either torn down (during
 * shutdown) or put back on the global queue.
 */
static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	/* Transfer every queued address event to the iterator's list. */
	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL,	/* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Freeing if we are stopping or put back on the
			 * addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				sctp_asconf_iterator_end(asc, 0);
			} else {
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}

/*
 * Callout handler for every SCTP timer type.  Validates the timer and
 * its inp/stcb/net pointers, takes the appropriate lock (TCB, INP write,
 * or the address-WQ lock), dispatches on tmr->type, and releases locks
 * and references on the way out.  tmr->stopped_from is bread-crumbed
 * with 0xa001..0xa006 markers so a stuck timer can be diagnosed.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif
	int did_output;
	int type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * A socket-less endpoint only still services the timer
		 * types listed below; anything else is dropped.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	type = tmr->type;
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	} else if (inp != NULL) {
		if (type != SCTP_TIMER_TYPE_INPKILL) {
			SCTP_INP_WLOCK(inp);
		}
	} else {
		SCTP_WQ_ADDR_LOCK();
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}

		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb, it's gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t)type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* Release whichever lock was taken above, mirroring the lock path. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WUNLOCK(inp);
	} else {
		SCTP_WQ_ADDR_UNLOCK();
	}

out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}

out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
	CURVNET_RESTORE();
}

/*
 * Arm the timer of type 't_type' for the given inp/stcb/net.  Each case
 * below selects the sctp_timer to use and computes the timeout in ticks
 * (most are derived from the destination's RTO, falling back to the
 * association's initial RTO when net->RTO is 0).  If the callout is
 * already pending it is left running unchanged.  Callers holding an
 * stcb must hold its TCB lock (asserted below).
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t to_ticks;
	struct sctp_timer *tmr;

	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default, usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * usually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint32_t jitter;

			if ((net->dest_state & SCTP_ADDR_NOHB) &&
			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				return;
			}
			if (net->RTO == 0) {
				to_ticks = stcb->asoc.initial_rto;
			} else {
				to_ticks = net->RTO;
			}
			/* Jitter the RTO by up to +/- RTO/2. */
			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
			jitter = rndval % to_ticks;
			if (jitter >= (to_ticks >> 1)) {
				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
			} else {
				to_ticks = to_ticks - jitter;
			}
			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    !(net->dest_state & SCTP_ADDR_PF)) {
				to_ticks += net->heart_beat_delay;
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &net->hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was complete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here, usually about 60
		 * minutes.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU, usually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer, usually
		 * about 3 minutes.
		 */
		if (stcb == NULL) {
			return;
		}
		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
			/* Not configured: default to 5 * max RTO. */
			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
		} else {
			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		/* NOTE: this type requires net == NULL, unlike the others. */
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		return;
		break;
	}
	/*
	 * NOTE(review): to_ticks is uint32_t, so '<= 0' here is effectively
	 * '== 0'; kept as-is since behavior is identical.
	 */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __func__, t_type, to_ticks, (void *)tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	/* tmr->self lets the handler detect stale/freed timers. */
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}

/*
 * Stop the timer of type 't_type'.  Each case selects the same
 * sctp_timer object that sctp_timer_start() would have armed; 'from'
 * records the caller's location in tmr->stopped_from for debugging.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the
endpoint here */ 2329 tmr = &inp->sctp_ep.signature_change; 2330 /* 2331 * We re-use the newcookie timer for the INP kill timer. We 2332 * must assure that we do not kill it by accident. 2333 */ 2334 break; 2335 case SCTP_TIMER_TYPE_ASOCKILL: 2336 /* 2337 * Stop the asoc kill timer. 2338 */ 2339 if (stcb == NULL) { 2340 return; 2341 } 2342 tmr = &stcb->asoc.strreset_timer; 2343 break; 2344 2345 case SCTP_TIMER_TYPE_INPKILL: 2346 /* 2347 * The inp is setup to die. We re-use the signature_chage 2348 * timer since that has stopped and we are in the GONE 2349 * state. 2350 */ 2351 tmr = &inp->sctp_ep.signature_change; 2352 break; 2353 case SCTP_TIMER_TYPE_PATHMTURAISE: 2354 if ((stcb == NULL) || (net == NULL)) { 2355 return; 2356 } 2357 tmr = &net->pmtu_timer; 2358 break; 2359 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2360 if ((stcb == NULL) || (net == NULL)) { 2361 return; 2362 } 2363 tmr = &net->rxt_timer; 2364 break; 2365 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2366 if (stcb == NULL) { 2367 return; 2368 } 2369 tmr = &stcb->asoc.shut_guard_timer; 2370 break; 2371 case SCTP_TIMER_TYPE_STRRESET: 2372 if (stcb == NULL) { 2373 return; 2374 } 2375 tmr = &stcb->asoc.strreset_timer; 2376 break; 2377 case SCTP_TIMER_TYPE_ASCONF: 2378 if (stcb == NULL) { 2379 return; 2380 } 2381 tmr = &stcb->asoc.asconf_timer; 2382 break; 2383 case SCTP_TIMER_TYPE_PRIM_DELETED: 2384 if (stcb == NULL) { 2385 return; 2386 } 2387 tmr = &stcb->asoc.delete_prim_timer; 2388 break; 2389 case SCTP_TIMER_TYPE_AUTOCLOSE: 2390 if (stcb == NULL) { 2391 return; 2392 } 2393 tmr = &stcb->asoc.autoclose_timer; 2394 break; 2395 default: 2396 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n", 2397 __func__, t_type); 2398 break; 2399 } 2400 if (tmr == NULL) { 2401 return; 2402 } 2403 if ((tmr->type != t_type) && tmr->type) { 2404 /* 2405 * Ok we have a timer that is under joint use. Cookie timer 2406 * per chance with the SEND timer. We therefore are NOT 2407 * running the timer that the caller wants stopped. 
So just 2408 * return. 2409 */ 2410 return; 2411 } 2412 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2413 stcb->asoc.num_send_timers_up--; 2414 if (stcb->asoc.num_send_timers_up < 0) { 2415 stcb->asoc.num_send_timers_up = 0; 2416 } 2417 } 2418 tmr->self = NULL; 2419 tmr->stopped_from = from; 2420 (void)SCTP_OS_TIMER_STOP(&tmr->timer); 2421 return; 2422 } 2423 2424 uint32_t 2425 sctp_calculate_len(struct mbuf *m) 2426 { 2427 uint32_t tlen = 0; 2428 struct mbuf *at; 2429 2430 at = m; 2431 while (at) { 2432 tlen += SCTP_BUF_LEN(at); 2433 at = SCTP_BUF_NEXT(at); 2434 } 2435 return (tlen); 2436 } 2437 2438 void 2439 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2440 struct sctp_association *asoc, uint32_t mtu) 2441 { 2442 /* 2443 * Reset the P-MTU size on this association, this involves changing 2444 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2445 * allow the DF flag to be cleared. 2446 */ 2447 struct sctp_tmit_chunk *chk; 2448 unsigned int eff_mtu, ovh; 2449 2450 asoc->smallest_mtu = mtu; 2451 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2452 ovh = SCTP_MIN_OVERHEAD; 2453 } else { 2454 ovh = SCTP_MIN_V4_OVERHEAD; 2455 } 2456 eff_mtu = mtu - ovh; 2457 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2458 if (chk->send_size > eff_mtu) { 2459 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2460 } 2461 } 2462 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2463 if (chk->send_size > eff_mtu) { 2464 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2465 } 2466 } 2467 } 2468 2469 2470 /* 2471 * given an association and starting time of the current RTT period return 2472 * RTO in number of msecs net should point to the current network 2473 */ 2474 2475 uint32_t 2476 sctp_calculate_rto(struct sctp_tcb *stcb, 2477 struct sctp_association *asoc, 2478 struct sctp_nets *net, 2479 struct timeval *old, 2480 int rtt_from_sack) 2481 { 2482 /*- 2483 * given an association and the starting time of the current RTT 2484 * period (in value1/value2) return RTO in 
	 * number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now;

	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t)1000000 * (uint64_t)now.tv_sec +
	    (uint64_t)now.tv_usec;
	/* compute rtt in ms */
	rtt = (int32_t)(net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}

	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* rtt becomes the (signed) error term against srtt */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}

/*
 * return a pointer to a contiguous piece of data from the given mbuf chain
 * starting at 'off' for 'len' bytes. If the desired piece spans more than
 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
 * is >= 'len' returns NULL if there isn't 'len' bytes in the chain.
 */
caddr_t
sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
{
	uint32_t count;
	uint8_t *ptr;

	ptr = in_ptr;
	if ((off < 0) || (len <= 0))
		return (NULL);

	/* find the desired start location */
	while ((m != NULL) && (off > 0)) {
		if (off < SCTP_BUF_LEN(m))
			break;
		off -= SCTP_BUF_LEN(m);
		m = SCTP_BUF_NEXT(m);
	}
	if (m == NULL)
		return (NULL);

	/* is the current mbuf large enough (eg. contiguous)? */
	if ((SCTP_BUF_LEN(m) - off) >= len) {
		/* No copy needed: hand back a pointer into the mbuf. */
		return (mtod(m, caddr_t)+off);
	} else {
		/* else, it spans more than one mbuf, so save a temp copy... */
		while ((m != NULL) && (len > 0)) {
			count = min(SCTP_BUF_LEN(m) - off, len);
			memcpy(ptr, mtod(m, caddr_t)+off, count);
			len -= count;
			ptr += count;
			off = 0;
			m = SCTP_BUF_NEXT(m);
		}
		if ((m == NULL) && (len > 0))
			return (NULL);
		else
			return ((caddr_t)in_ptr);
	}
}



struct sctp_paramhdr *
sctp_get_next_param(struct mbuf *m,
    int offset,
    struct sctp_paramhdr *pull,
    int pull_limit)
{
	/* This just provides a typed signature to Peter's Pull routine */
	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
	    (uint8_t *)pull));
}


/*
 * Append padlen (at most 3) zero bytes to mbuf m, growing the chain with
 * a fresh mbuf when there is no trailing space.  Returns the mbuf holding
 * the pad, or NULL on failure.
 */
struct mbuf *
sctp_add_pad_tombuf(struct mbuf *m, int padlen)
{
	struct mbuf *m_last;
	caddr_t dp;

	if (padlen > 3) {
		return (NULL);
	}
	if (padlen <= M_TRAILINGSPACE(m)) {
		/*
		 * The easy way.  We hope the majority of the time we hit
		 * here :)
		 */
		m_last = m;
	} else {
		/* Hard way we must grow the mbuf chain */
		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
		if (m_last == NULL) {
			return (NULL);
		}
		SCTP_BUF_LEN(m_last) = 0;
		SCTP_BUF_NEXT(m_last) = NULL;
		SCTP_BUF_NEXT(m) = m_last;
	}
	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
	SCTP_BUF_LEN(m_last) += padlen;
	memset(dp, 0, padlen);
	return (m_last);
}

struct mbuf *
sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
{
	/* find the last mbuf in chain and pad it */
	struct mbuf *m_at;

	if (last_mbuf != NULL) {
		return (sctp_add_pad_tombuf(last_mbuf, padval));
	} else {
		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
			if (SCTP_BUF_NEXT(m_at) == NULL) {
				return (sctp_add_pad_tombuf(m_at, padval));
			}
		}
	}
	return (NULL);
}

/*
 * Queue an SCTP_ASSOC_CHANGE notification to the application and, for
 * 1-to-1 style sockets, also set so_error when the association is lost.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
			/*
			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
			 * contiguous.
			 */
			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
				abort_len = SCTP_CHUNK_BUFFER_SIZE;
			}
		} else {
			abort_len = 0;
		}
		/* COMM_UP/RESTART carry feature flags; LOST carries the ABORT. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
		SOCK_UNLOCK(stcb->sctp_socket);
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Drop the TCB lock around taking the socket lock (lock order). */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}

/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for the peer address sa.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}


/*
 * Queue an SCTP_SEND_FAILED (old) or SCTP_SEND_FAILED_EVENT (new)
 * notification for chunk chk; on success the chunk's data mbufs are
 * stolen into the notification (chk->data becomes NULL).
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			/* Padding is whatever send_size exceeds the chunk by (< 4). */
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}


/*
 * Like sctp_notify_send_failed(), but for a stream-queue-pending message
 * that never became a chunk; steals sp->data into the notification.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int notifhdr_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->sid;
		if (sp->some_taken) {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->sid;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf =
	    m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}



/*
 * Queue an SCTP_ADAPTATION_INDICATION notification carrying the peer's
 * adaptation layer indication.
 */
static void
sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_adaptation_event *sai;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
		/* event not enabled */
		return;
	}

	/*
	 * NOTE(review): the allocation is sized with the old struct
	 * sctp_adaption_event name while the event is built as struct
	 * sctp_adaptation_event -- presumably the two layouts are
	 * identical; confirm against the uio headers.
	 */
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	sai = mtod(m_notify, struct sctp_adaptation_event *);
	memset(sai, 0, sizeof(struct sctp_adaptation_event));
	sai->sai_type = SCTP_ADAPTATION_INDICATION;
	sai->sai_flags = 0;
	sai->sai_length = sizeof(struct sctp_adaptation_event);
	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
	sai->sai_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}

/* This always must be called with the read-queue LOCKED in the INP */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* val packs the stream in the high 16 bits, the sequence in the low. */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	/* Insert right after the entry being partially delivered, if any. */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Drop the TCB lock around taking the socket lock (lock order). */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}

static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup!
*/ 3336 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3337 struct socket *so; 3338 3339 so = SCTP_INP_SO(stcb->sctp_ep); 3340 atomic_add_int(&stcb->asoc.refcnt, 1); 3341 SCTP_TCB_UNLOCK(stcb); 3342 SCTP_SOCKET_LOCK(so, 1); 3343 SCTP_TCB_LOCK(stcb); 3344 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3345 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3346 SCTP_SOCKET_UNLOCK(so, 1); 3347 return; 3348 } 3349 #endif 3350 socantsendmore(stcb->sctp_socket); 3351 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3352 SCTP_SOCKET_UNLOCK(so, 1); 3353 #endif 3354 } 3355 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3356 /* event not enabled */ 3357 return; 3358 } 3359 3360 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3361 if (m_notify == NULL) 3362 /* no space left */ 3363 return; 3364 sse = mtod(m_notify, struct sctp_shutdown_event *); 3365 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3366 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3367 sse->sse_flags = 0; 3368 sse->sse_length = sizeof(struct sctp_shutdown_event); 3369 sse->sse_assoc_id = sctp_get_associd(stcb); 3370 3371 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3372 SCTP_BUF_NEXT(m_notify) = NULL; 3373 3374 /* append to socket */ 3375 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3376 0, 0, stcb->asoc.context, 0, 0, 0, 3377 m_notify); 3378 if (control == NULL) { 3379 /* no memory */ 3380 sctp_m_freem(m_notify); 3381 return; 3382 } 3383 control->length = SCTP_BUF_LEN(m_notify); 3384 control->spec_flags = M_NOTIFICATION; 3385 /* not that we need this */ 3386 control->tail_mbuf = m_notify; 3387 sctp_add_to_readq(stcb->sctp_ep, stcb, 3388 control, 3389 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3390 } 3391 3392 static void 3393 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3394 int so_locked 3395 #if !defined(__APPLE__) && 
!defined(SCTP_SO_LOCK_TESTING) 3396 SCTP_UNUSED 3397 #endif 3398 ) 3399 { 3400 struct mbuf *m_notify; 3401 struct sctp_sender_dry_event *event; 3402 struct sctp_queued_to_read *control; 3403 3404 if ((stcb == NULL) || 3405 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3406 /* event not enabled */ 3407 return; 3408 } 3409 3410 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3411 if (m_notify == NULL) { 3412 /* no space left */ 3413 return; 3414 } 3415 SCTP_BUF_LEN(m_notify) = 0; 3416 event = mtod(m_notify, struct sctp_sender_dry_event *); 3417 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3418 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3419 event->sender_dry_flags = 0; 3420 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3421 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3422 3423 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3424 SCTP_BUF_NEXT(m_notify) = NULL; 3425 3426 /* append to socket */ 3427 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3428 0, 0, stcb->asoc.context, 0, 0, 0, 3429 m_notify); 3430 if (control == NULL) { 3431 /* no memory */ 3432 sctp_m_freem(m_notify); 3433 return; 3434 } 3435 control->length = SCTP_BUF_LEN(m_notify); 3436 control->spec_flags = M_NOTIFICATION; 3437 /* not that we need this */ 3438 control->tail_mbuf = m_notify; 3439 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3440 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3441 } 3442 3443 3444 void 3445 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3446 { 3447 struct mbuf *m_notify; 3448 struct sctp_queued_to_read *control; 3449 struct sctp_stream_change_event *stradd; 3450 3451 if ((stcb == NULL) || 3452 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3453 /* event not enabled */ 3454 return; 3455 } 3456 if 
((stcb->asoc.peer_req_out) && flag) { 3457 /* Peer made the request, don't tell the local user */ 3458 stcb->asoc.peer_req_out = 0; 3459 return; 3460 } 3461 stcb->asoc.peer_req_out = 0; 3462 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3463 if (m_notify == NULL) 3464 /* no space left */ 3465 return; 3466 SCTP_BUF_LEN(m_notify) = 0; 3467 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3468 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3469 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3470 stradd->strchange_flags = flag; 3471 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3472 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3473 stradd->strchange_instrms = numberin; 3474 stradd->strchange_outstrms = numberout; 3475 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3476 SCTP_BUF_NEXT(m_notify) = NULL; 3477 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3478 /* no space */ 3479 sctp_m_freem(m_notify); 3480 return; 3481 } 3482 /* append to socket */ 3483 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3484 0, 0, stcb->asoc.context, 0, 0, 0, 3485 m_notify); 3486 if (control == NULL) { 3487 /* no memory */ 3488 sctp_m_freem(m_notify); 3489 return; 3490 } 3491 control->length = SCTP_BUF_LEN(m_notify); 3492 control->spec_flags = M_NOTIFICATION; 3493 /* not that we need this */ 3494 control->tail_mbuf = m_notify; 3495 sctp_add_to_readq(stcb->sctp_ep, stcb, 3496 control, 3497 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3498 } 3499 3500 void 3501 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3502 { 3503 struct mbuf *m_notify; 3504 struct sctp_queued_to_read *control; 3505 struct sctp_assoc_reset_event *strasoc; 3506 3507 if ((stcb == NULL) || 3508 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, 
SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3509 /* event not enabled */ 3510 return; 3511 } 3512 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3513 if (m_notify == NULL) 3514 /* no space left */ 3515 return; 3516 SCTP_BUF_LEN(m_notify) = 0; 3517 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3518 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3519 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3520 strasoc->assocreset_flags = flag; 3521 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3522 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3523 strasoc->assocreset_local_tsn = sending_tsn; 3524 strasoc->assocreset_remote_tsn = recv_tsn; 3525 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3526 SCTP_BUF_NEXT(m_notify) = NULL; 3527 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3528 /* no space */ 3529 sctp_m_freem(m_notify); 3530 return; 3531 } 3532 /* append to socket */ 3533 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3534 0, 0, stcb->asoc.context, 0, 0, 0, 3535 m_notify); 3536 if (control == NULL) { 3537 /* no memory */ 3538 sctp_m_freem(m_notify); 3539 return; 3540 } 3541 control->length = SCTP_BUF_LEN(m_notify); 3542 control->spec_flags = M_NOTIFICATION; 3543 /* not that we need this */ 3544 control->tail_mbuf = m_notify; 3545 sctp_add_to_readq(stcb->sctp_ep, stcb, 3546 control, 3547 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3548 } 3549 3550 3551 3552 static void 3553 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3554 int number_entries, uint16_t *list, int flag) 3555 { 3556 struct mbuf *m_notify; 3557 struct sctp_queued_to_read *control; 3558 struct sctp_stream_reset_event *strreset; 3559 int len; 3560 3561 if ((stcb == NULL) || 3562 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3563 /* event not enabled */ 3564 
return; 3565 } 3566 3567 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3568 if (m_notify == NULL) 3569 /* no space left */ 3570 return; 3571 SCTP_BUF_LEN(m_notify) = 0; 3572 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3573 if (len > M_TRAILINGSPACE(m_notify)) { 3574 /* never enough room */ 3575 sctp_m_freem(m_notify); 3576 return; 3577 } 3578 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3579 memset(strreset, 0, len); 3580 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3581 strreset->strreset_flags = flag; 3582 strreset->strreset_length = len; 3583 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3584 if (number_entries) { 3585 int i; 3586 3587 for (i = 0; i < number_entries; i++) { 3588 strreset->strreset_stream_list[i] = ntohs(list[i]); 3589 } 3590 } 3591 SCTP_BUF_LEN(m_notify) = len; 3592 SCTP_BUF_NEXT(m_notify) = NULL; 3593 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3594 /* no space */ 3595 sctp_m_freem(m_notify); 3596 return; 3597 } 3598 /* append to socket */ 3599 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3600 0, 0, stcb->asoc.context, 0, 0, 0, 3601 m_notify); 3602 if (control == NULL) { 3603 /* no memory */ 3604 sctp_m_freem(m_notify); 3605 return; 3606 } 3607 control->length = SCTP_BUF_LEN(m_notify); 3608 control->spec_flags = M_NOTIFICATION; 3609 /* not that we need this */ 3610 control->tail_mbuf = m_notify; 3611 sctp_add_to_readq(stcb->sctp_ep, stcb, 3612 control, 3613 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3614 } 3615 3616 3617 static void 3618 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3619 { 3620 struct mbuf *m_notify; 3621 struct sctp_remote_error *sre; 3622 struct sctp_queued_to_read *control; 3623 unsigned int notif_len; 3624 uint16_t chunk_len; 3625 3626 if ((stcb == NULL) || 3627 
sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3628 return; 3629 } 3630 if (chunk != NULL) { 3631 chunk_len = ntohs(chunk->ch.chunk_length); 3632 /* 3633 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3634 * contiguous. 3635 */ 3636 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 3637 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 3638 } 3639 } else { 3640 chunk_len = 0; 3641 } 3642 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 3643 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3644 if (m_notify == NULL) { 3645 /* Retry with smaller value. */ 3646 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 3647 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3648 if (m_notify == NULL) { 3649 return; 3650 } 3651 } 3652 SCTP_BUF_NEXT(m_notify) = NULL; 3653 sre = mtod(m_notify, struct sctp_remote_error *); 3654 memset(sre, 0, notif_len); 3655 sre->sre_type = SCTP_REMOTE_ERROR; 3656 sre->sre_flags = 0; 3657 sre->sre_length = sizeof(struct sctp_remote_error); 3658 sre->sre_error = error; 3659 sre->sre_assoc_id = sctp_get_associd(stcb); 3660 if (notif_len > sizeof(struct sctp_remote_error)) { 3661 memcpy(sre->sre_data, chunk, chunk_len); 3662 sre->sre_length += chunk_len; 3663 } 3664 SCTP_BUF_LEN(m_notify) = sre->sre_length; 3665 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3666 0, 0, stcb->asoc.context, 0, 0, 0, 3667 m_notify); 3668 if (control != NULL) { 3669 control->length = SCTP_BUF_LEN(m_notify); 3670 control->spec_flags = M_NOTIFICATION; 3671 /* not that we need this */ 3672 control->tail_mbuf = m_notify; 3673 sctp_add_to_readq(stcb->sctp_ep, stcb, 3674 control, 3675 &stcb->sctp_socket->so_rcv, 1, 3676 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3677 } else { 3678 sctp_m_freem(m_notify); 3679 } 3680 } 3681 3682 3683 void 3684 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 3685 uint32_t error, void *data, int so_locked 3686 #if 
!defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3687 SCTP_UNUSED 3688 #endif 3689 ) 3690 { 3691 if ((stcb == NULL) || 3692 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3693 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3694 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3695 /* If the socket is gone we are out of here */ 3696 return; 3697 } 3698 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 3699 return; 3700 } 3701 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 3702 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 3703 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 3704 (notification == SCTP_NOTIFY_INTERFACE_UP) || 3705 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 3706 /* Don't report these in front states */ 3707 return; 3708 } 3709 } 3710 switch (notification) { 3711 case SCTP_NOTIFY_ASSOC_UP: 3712 if (stcb->asoc.assoc_up_sent == 0) { 3713 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 3714 stcb->asoc.assoc_up_sent = 1; 3715 } 3716 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 3717 sctp_notify_adaptation_layer(stcb); 3718 } 3719 if (stcb->asoc.auth_supported == 0) { 3720 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 3721 NULL, so_locked); 3722 } 3723 break; 3724 case SCTP_NOTIFY_ASSOC_DOWN: 3725 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 3726 break; 3727 case SCTP_NOTIFY_INTERFACE_DOWN: 3728 { 3729 struct sctp_nets *net; 3730 3731 net = (struct sctp_nets *)data; 3732 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 3733 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 3734 break; 3735 } 3736 case SCTP_NOTIFY_INTERFACE_UP: 3737 { 3738 struct sctp_nets *net; 3739 3740 net = (struct sctp_nets *)data; 3741 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 3742 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 3743 break; 3744 } 3745 case 
SCTP_NOTIFY_INTERFACE_CONFIRMED: 3746 { 3747 struct sctp_nets *net; 3748 3749 net = (struct sctp_nets *)data; 3750 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 3751 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 3752 break; 3753 } 3754 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 3755 sctp_notify_send_failed2(stcb, error, 3756 (struct sctp_stream_queue_pending *)data, so_locked); 3757 break; 3758 case SCTP_NOTIFY_SENT_DG_FAIL: 3759 sctp_notify_send_failed(stcb, 1, error, 3760 (struct sctp_tmit_chunk *)data, so_locked); 3761 break; 3762 case SCTP_NOTIFY_UNSENT_DG_FAIL: 3763 sctp_notify_send_failed(stcb, 0, error, 3764 (struct sctp_tmit_chunk *)data, so_locked); 3765 break; 3766 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 3767 { 3768 uint32_t val; 3769 3770 val = *((uint32_t *)data); 3771 3772 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 3773 break; 3774 } 3775 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 3776 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 3777 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 3778 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked); 3779 } else { 3780 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked); 3781 } 3782 break; 3783 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 3784 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 3785 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 3786 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked); 3787 } else { 3788 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked); 3789 } 3790 break; 3791 case SCTP_NOTIFY_ASSOC_RESTART: 3792 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked); 3793 if (stcb->asoc.auth_supported == 0) { 3794 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 3795 NULL, so_locked); 3796 } 3797 break; 3798 case SCTP_NOTIFY_STR_RESET_SEND: 3799 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 
SCTP_STREAM_RESET_OUTGOING_SSN); 3800 break; 3801 case SCTP_NOTIFY_STR_RESET_RECV: 3802 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 3803 break; 3804 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 3805 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 3806 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 3807 break; 3808 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 3809 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 3810 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 3811 break; 3812 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 3813 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 3814 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 3815 break; 3816 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 3817 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 3818 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 3819 break; 3820 case SCTP_NOTIFY_ASCONF_ADD_IP: 3821 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 3822 error, so_locked); 3823 break; 3824 case SCTP_NOTIFY_ASCONF_DELETE_IP: 3825 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 3826 error, so_locked); 3827 break; 3828 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 3829 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 3830 error, so_locked); 3831 break; 3832 case SCTP_NOTIFY_PEER_SHUTDOWN: 3833 sctp_notify_shutdown_event(stcb); 3834 break; 3835 case SCTP_NOTIFY_AUTH_NEW_KEY: 3836 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 3837 (uint16_t)(uintptr_t)data, 3838 so_locked); 3839 break; 3840 case SCTP_NOTIFY_AUTH_FREE_KEY: 3841 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 3842 (uint16_t)(uintptr_t)data, 3843 so_locked); 3844 break; 3845 case SCTP_NOTIFY_NO_PEER_AUTH: 3846 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 3847 (uint16_t)(uintptr_t)data, 3848 so_locked); 3849 break; 3850 case SCTP_NOTIFY_SENDER_DRY: 3851 sctp_notify_sender_dry_event(stcb, 
so_locked); 3852 break; 3853 case SCTP_NOTIFY_REMOTE_ERROR: 3854 sctp_notify_remote_error(stcb, error, data); 3855 break; 3856 default: 3857 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 3858 __func__, notification, notification); 3859 break; 3860 } /* end switch */ 3861 } 3862 3863 void 3864 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked 3865 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3866 SCTP_UNUSED 3867 #endif 3868 ) 3869 { 3870 struct sctp_association *asoc; 3871 struct sctp_stream_out *outs; 3872 struct sctp_tmit_chunk *chk, *nchk; 3873 struct sctp_stream_queue_pending *sp, *nsp; 3874 int i; 3875 3876 if (stcb == NULL) { 3877 return; 3878 } 3879 asoc = &stcb->asoc; 3880 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 3881 /* already being freed */ 3882 return; 3883 } 3884 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3885 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3886 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 3887 return; 3888 } 3889 /* now through all the gunk freeing chunks */ 3890 if (holds_lock == 0) { 3891 SCTP_TCB_SEND_LOCK(stcb); 3892 } 3893 /* sent queue SHOULD be empty */ 3894 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 3895 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 3896 asoc->sent_queue_cnt--; 3897 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 3898 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 3899 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 3900 #ifdef INVARIANTS 3901 } else { 3902 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 3903 #endif 3904 } 3905 } 3906 if (chk->data != NULL) { 3907 sctp_free_bufspace(stcb, asoc, chk, 1); 3908 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 3909 error, chk, so_locked); 3910 if (chk->data) { 3911 sctp_m_freem(chk->data); 3912 chk->data = NULL; 3913 } 3914 } 3915 sctp_free_a_chunk(stcb, chk, so_locked); 3916 /* sa_ignore 
FREED_MEMORY */ 3917 } 3918 /* pending send queue SHOULD be empty */ 3919 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 3920 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 3921 asoc->send_queue_cnt--; 3922 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 3923 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 3924 #ifdef INVARIANTS 3925 } else { 3926 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 3927 #endif 3928 } 3929 if (chk->data != NULL) { 3930 sctp_free_bufspace(stcb, asoc, chk, 1); 3931 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 3932 error, chk, so_locked); 3933 if (chk->data) { 3934 sctp_m_freem(chk->data); 3935 chk->data = NULL; 3936 } 3937 } 3938 sctp_free_a_chunk(stcb, chk, so_locked); 3939 /* sa_ignore FREED_MEMORY */ 3940 } 3941 for (i = 0; i < asoc->streamoutcnt; i++) { 3942 /* For each stream */ 3943 outs = &asoc->strmout[i]; 3944 /* clean up any sends there */ 3945 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 3946 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 3947 TAILQ_REMOVE(&outs->outqueue, sp, next); 3948 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock); 3949 sctp_free_spbufspace(stcb, asoc, sp); 3950 if (sp->data) { 3951 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 3952 error, (void *)sp, so_locked); 3953 if (sp->data) { 3954 sctp_m_freem(sp->data); 3955 sp->data = NULL; 3956 sp->tail_mbuf = NULL; 3957 sp->length = 0; 3958 } 3959 } 3960 if (sp->net) { 3961 sctp_free_remote_addr(sp->net); 3962 sp->net = NULL; 3963 } 3964 /* Free the chunk */ 3965 sctp_free_a_strmoq(stcb, sp, so_locked); 3966 /* sa_ignore FREED_MEMORY */ 3967 } 3968 } 3969 3970 if (holds_lock == 0) { 3971 SCTP_TCB_SEND_UNLOCK(stcb); 3972 } 3973 } 3974 3975 void 3976 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error, 3977 struct sctp_abort_chunk *abort, int so_locked 3978 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3979 
SCTP_UNUSED 3980 #endif 3981 ) 3982 { 3983 if (stcb == NULL) { 3984 return; 3985 } 3986 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 3987 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 3988 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 3989 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 3990 } 3991 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3992 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3993 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3994 return; 3995 } 3996 /* Tell them we lost the asoc */ 3997 sctp_report_all_outbound(stcb, error, 1, so_locked); 3998 if (from_peer) { 3999 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4000 } else { 4001 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4002 } 4003 } 4004 4005 void 4006 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4007 struct mbuf *m, int iphlen, 4008 struct sockaddr *src, struct sockaddr *dst, 4009 struct sctphdr *sh, struct mbuf *op_err, 4010 uint8_t mflowtype, uint32_t mflowid, 4011 uint32_t vrf_id, uint16_t port) 4012 { 4013 uint32_t vtag; 4014 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4015 struct socket *so; 4016 #endif 4017 4018 vtag = 0; 4019 if (stcb != NULL) { 4020 vtag = stcb->asoc.peer_vtag; 4021 vrf_id = stcb->asoc.vrf_id; 4022 } 4023 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4024 mflowtype, mflowid, inp->fibnum, 4025 vrf_id, port); 4026 if (stcb != NULL) { 4027 /* We have a TCB to abort, send notification too */ 4028 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 4029 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4030 /* Ok, now lets free it */ 4031 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4032 so = SCTP_INP_SO(inp); 4033 atomic_add_int(&stcb->asoc.refcnt, 1); 4034 SCTP_TCB_UNLOCK(stcb); 4035 SCTP_SOCKET_LOCK(so, 1); 4036 SCTP_TCB_LOCK(stcb); 4037 
atomic_subtract_int(&stcb->asoc.refcnt, 1); 4038 #endif 4039 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4040 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4041 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4042 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4043 } 4044 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4045 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4046 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4047 SCTP_SOCKET_UNLOCK(so, 1); 4048 #endif 4049 } 4050 } 4051 #ifdef SCTP_ASOCLOG_OF_TSNS 4052 void 4053 sctp_print_out_track_log(struct sctp_tcb *stcb) 4054 { 4055 #ifdef NOSIY_PRINTS 4056 int i; 4057 4058 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4059 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4060 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4061 SCTP_PRINTF("None rcvd\n"); 4062 goto none_in; 4063 } 4064 if (stcb->asoc.tsn_in_wrapped) { 4065 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4066 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4067 stcb->asoc.in_tsnlog[i].tsn, 4068 stcb->asoc.in_tsnlog[i].strm, 4069 stcb->asoc.in_tsnlog[i].seq, 4070 stcb->asoc.in_tsnlog[i].flgs, 4071 stcb->asoc.in_tsnlog[i].sz); 4072 } 4073 } 4074 if (stcb->asoc.tsn_in_at) { 4075 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4076 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4077 stcb->asoc.in_tsnlog[i].tsn, 4078 stcb->asoc.in_tsnlog[i].strm, 4079 stcb->asoc.in_tsnlog[i].seq, 4080 stcb->asoc.in_tsnlog[i].flgs, 4081 stcb->asoc.in_tsnlog[i].sz); 4082 } 4083 } 4084 none_in: 4085 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4086 if ((stcb->asoc.tsn_out_at == 0) && 4087 (stcb->asoc.tsn_out_wrapped == 0)) { 4088 SCTP_PRINTF("None sent\n"); 4089 } 4090 if (stcb->asoc.tsn_out_wrapped) { 4091 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4092 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4093 stcb->asoc.out_tsnlog[i].tsn, 4094 stcb->asoc.out_tsnlog[i].strm, 4095 
stcb->asoc.out_tsnlog[i].seq, 4096 stcb->asoc.out_tsnlog[i].flgs, 4097 stcb->asoc.out_tsnlog[i].sz); 4098 } 4099 } 4100 if (stcb->asoc.tsn_out_at) { 4101 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4102 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4103 stcb->asoc.out_tsnlog[i].tsn, 4104 stcb->asoc.out_tsnlog[i].strm, 4105 stcb->asoc.out_tsnlog[i].seq, 4106 stcb->asoc.out_tsnlog[i].flgs, 4107 stcb->asoc.out_tsnlog[i].sz); 4108 } 4109 } 4110 #endif 4111 } 4112 #endif 4113 4114 void 4115 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4116 struct mbuf *op_err, 4117 int so_locked 4118 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 4119 SCTP_UNUSED 4120 #endif 4121 ) 4122 { 4123 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4124 struct socket *so; 4125 #endif 4126 4127 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4128 so = SCTP_INP_SO(inp); 4129 #endif 4130 if (stcb == NULL) { 4131 /* Got to have a TCB */ 4132 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4133 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4134 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4135 SCTP_CALLED_DIRECTLY_NOCMPSET); 4136 } 4137 } 4138 return; 4139 } else { 4140 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4141 } 4142 /* notify the peer */ 4143 sctp_send_abort_tcb(stcb, op_err, so_locked); 4144 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4145 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4146 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4147 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4148 } 4149 /* notify the ulp */ 4150 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4151 sctp_abort_notification(stcb, 0, 0, NULL, so_locked); 4152 } 4153 /* now free the asoc */ 4154 #ifdef SCTP_ASOCLOG_OF_TSNS 4155 sctp_print_out_track_log(stcb); 4156 #endif 4157 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4158 if (!so_locked) { 4159 atomic_add_int(&stcb->asoc.refcnt, 1); 4160 SCTP_TCB_UNLOCK(stcb); 4161 
SCTP_SOCKET_LOCK(so, 1); 4162 SCTP_TCB_LOCK(stcb); 4163 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4164 } 4165 #endif 4166 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4167 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4168 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4169 if (!so_locked) { 4170 SCTP_SOCKET_UNLOCK(so, 1); 4171 } 4172 #endif 4173 } 4174 4175 void 4176 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4177 struct sockaddr *src, struct sockaddr *dst, 4178 struct sctphdr *sh, struct sctp_inpcb *inp, 4179 struct mbuf *cause, 4180 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4181 uint32_t vrf_id, uint16_t port) 4182 { 4183 struct sctp_chunkhdr *ch, chunk_buf; 4184 unsigned int chk_length; 4185 int contains_init_chunk; 4186 4187 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4188 /* Generate a TO address for future reference */ 4189 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4190 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4191 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4192 SCTP_CALLED_DIRECTLY_NOCMPSET); 4193 } 4194 } 4195 contains_init_chunk = 0; 4196 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4197 sizeof(*ch), (uint8_t *)&chunk_buf); 4198 while (ch != NULL) { 4199 chk_length = ntohs(ch->chunk_length); 4200 if (chk_length < sizeof(*ch)) { 4201 /* break to abort land */ 4202 break; 4203 } 4204 switch (ch->chunk_type) { 4205 case SCTP_INIT: 4206 contains_init_chunk = 1; 4207 break; 4208 case SCTP_PACKET_DROPPED: 4209 /* we don't respond to pkt-dropped */ 4210 return; 4211 case SCTP_ABORT_ASSOCIATION: 4212 /* we don't respond with an ABORT to an ABORT */ 4213 return; 4214 case SCTP_SHUTDOWN_COMPLETE: 4215 /* 4216 * we ignore it since we are not waiting for it and 4217 * peer is gone 4218 */ 4219 return; 4220 case SCTP_SHUTDOWN_ACK: 4221 sctp_send_shutdown_complete2(src, dst, sh, 4222 mflowtype, mflowid, fibnum, 4223 vrf_id, port); 4224 return; 4225 default: 4226 break; 4227 } 4228 offset += 
		    SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *)&chunk_buf);
	}
	/*
	 * sctp_blackhole sysctl: 0 = always send the ABORT; 1 = suppress it
	 * only when the packet contained an INIT; any other value = never
	 * send it.
	 */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    mflowtype, mflowid, fibnum,
		    vrf_id, port);
	}
}

/*
 * check the inbound datagram to make sure there is not an abort inside it,
 * if there is return 1, else return 0.  As a side effect, if an INIT chunk
 * is seen, *vtagfill is set to the peer's initiate-tag.
 */
int
sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
{
	struct sctp_chunkhdr *ch;
	/* chunk_buf is large enough for both a chunkhdr and an init chunk */
	struct sctp_init_chunk *init_chk, chunk_buf;
	int offset;
	unsigned int chk_length;

	offset = iphlen + sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
	    (uint8_t *)&chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* packet is probably corrupt */
			break;
		}
		/* we seem to be ok, is it an abort? */
		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
			/* yep, tell them */
			return (1);
		}
		if (ch->chunk_type == SCTP_INITIATION) {
			/* need to update the Vtag */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
			if (init_chk != NULL) {
				*vtagfill = ntohl(init_chk->init.initiate_tag);
			}
		}
		/* Nope, move to the next chunk */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *)&chunk_buf);
	}
	return (0);
}

/*
 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
 * set (i.e. it's 0) so, create this function to compare link local scopes
 */
#ifdef INET6
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	struct sockaddr_in6 a, b;

	/* save copies; sa6_recoverscope() modifies its argument */
	a = *addr1;
	b = *addr2;

	if (a.sin6_scope_id == 0)
		if (sa6_recoverscope(&a)) {
			/* can't get scope, so can't match */
			return (0);
		}
	if (b.sin6_scope_id == 0)
		if (sa6_recoverscope(&b)) {
			/* can't get scope, so can't match */
			return (0);
		}
	if (a.sin6_scope_id != b.sin6_scope_id)
		return (0);

	return (1);
}

/*
 * returns a sockaddr_in6 with embedded scope recovered and removed.
 * May return either the original addr or store (with scope recovered);
 * callers must use the return value, not addr.
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
#endif

/*
 * are the two addresses the same?
currently a "scopeless" check returns: 1
 * if same, 0 if not
 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{

	/* must be valid */
	if (sa1 == NULL || sa2 == NULL)
		return (0);

	/* must be the same family */
	if (sa1->sa_family != sa2->sa_family)
		return (0);

	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			/* IPv6 addresses */
			struct sockaddr_in6 *sin6_1, *sin6_2;

			sin6_1 = (struct sockaddr_in6 *)sa1;
			sin6_2 = (struct sockaddr_in6 *)sa2;
			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
			    sin6_2));
		}
#endif
#ifdef INET
	case AF_INET:
		{
			/* IPv4 addresses: compare the raw 32-bit address only */
			struct sockaddr_in *sin_1, *sin_2;

			sin_1 = (struct sockaddr_in *)sa1;
			sin_2 = (struct sockaddr_in *)sa2;
			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
		}
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}

/*
 * Debug helper: print a sockaddr (IPv4 or IPv6) via SCTP_PRINTF.
 */
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
			break;
		}
#endif
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;
			unsigned char *p;

			sin = (struct sockaddr_in *)sa;
			p = (unsigned char *)&sin->sin_addr;
			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
			break;
		}
#endif
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}

/*
 * Move all read-queue entries that belong to stcb from old_inp's socket to
 * new_inp's socket (used by peeloff/accept).  Socket-buffer accounting is
 * released on the old socket and charged to the new one, mbuf by mbuf.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			/* release sockbuf accounting on the old socket */
			m = control->data;
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		/* charge the data to the new socket's receive buffer */
		m = control->data;
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}

/*
 * Wake up a reader sleeping on inp's socket.  On platforms that require
 * the socket lock (__APPLE__/SCTP_SO_LOCK_TESTING) and so_locked == 0,
 * the TCB lock is dropped and re-taken around acquiring the socket lock,
 * holding a refcount so the TCB cannot be freed meanwhile.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket went away while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}

/*
 * Append a control (message) to the endpoint's read queue and charge its
 * mbuf chain to the socket buffer so select()/read() see the data.
 * "end" marks the message as complete (end_added); zero-length mbufs are
 * stripped from the chain on the way in.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader is gone; drop the message instead of queueing it */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications are not counted as received user messages */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* charge this mbuf against the socket buffer */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}

/*************HOLD THIS COMMENT FOR PATCH FILE OF
 *************ALTERNATE ROUTING CODE
 */

/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
 *************ALTERNATE ROUTING CODE
 */

/*
 * Build an SCTP error-cause mbuf carrying the given cause code and the
 * (non-empty) info string.  Returns NULL for code 0, NULL info, info too
 * long for SCTP_MAX_CAUSE_LENGTH, or mbuf allocation failure.
 */
struct mbuf *
sctp_generate_cause(uint16_t code, char *info)
{
	struct mbuf *m;
	struct sctp_gen_error_cause *cause;
	size_t info_len;
	uint16_t len;

	if ((code == 0) || (info == NULL)) {
		return (NULL);
	}
	info_len = strlen(info);
	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
		return (NULL);
	}
	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (m != NULL) {
		SCTP_BUF_LEN(m) = len;
		cause = mtod(m, struct sctp_gen_error_cause *);
		cause->code = htons(code);
		cause->length = htons(len);
		memcpy(cause->info, info, info_len);
	}
	return (m);
}

/*
 * Build a "No User Data" error cause for the given TSN.
 * Returns NULL only on mbuf allocation failure.
 */
struct mbuf *
sctp_generate_no_user_data_cause(uint32_t tsn)
{
	struct mbuf *m;
	struct sctp_error_no_user_data *no_user_data_cause;
	uint16_t len;

	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (m != NULL) {
		SCTP_BUF_LEN(m) = len;
		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
		no_user_data_cause->cause.length = htons(len);
		no_user_data_cause->tsn = htonl(tsn);
	}
	return (m);
}

#ifdef SCTP_MBCNT_LOGGING
/*
 * Release output-queue accounting for chunk tp1 (chk_cnt chunks):
 * decrement chunks_on_out_queue and total_output_queue_size (clamped at
 * zero), and for TCP-model sockets also shrink so_snd.sb_cc.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero rather than underflowing the unsigned gauge */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}

#endif

/*
 * Abandon a PR-SCTP message starting at chunk tp1 ("sent" selects the
 * sent- vs. unsent-abandoned statistics and notification type).  All
 * fragments of the message are marked SCTP_FORWARD_TSN_SKIP, their data
 * freed and flight/window accounting undone; returns the number of
 * book_size bytes released.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint32_t mid;
	uint16_t sid;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	sid = tp1->rec.data.sid;
	mid = tp1->rec.data.mid;
	/* a non-FIRST fragment is counted as "sent" even if sent == 0 */
	if (sent || !(tp1->rec.data.rcv_flags &
	    SCTP_DATA_FIRST_FRAG)) {
		stcb->asoc.abandoned_sent[0]++;
		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	} else {
		stcb->asoc.abandoned_unsent[0]++;
		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	}
	/*
	 * Walk forward from tp1 marking every fragment of this message
	 * until we hit the LAST fragment (or an unfragmented chunk).
	 */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			/* give the bytes back to the peer's receive window */
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we ae done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.sid != sid) ||
			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
				/* different message; stop scanning */
				break;
			}
			/*
			 * save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one we
			 * don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/*
			 * on to the sent queue so we can wait for it to be
			 * passed by.
			 */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there is stuff left on the
		 * stream out queue.. yuck.
		 */
		SCTP_TCB_SEND_LOCK(stcb);
		strq = &stcb->asoc.strmout[sid];
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp != NULL) {
			sp->discard_rest = 1;
			/*
			 * We may need to put a chunk on the queue that
			 * holds the TSN that would have been sent with the
			 * LAST bit.
			 */
			if (chk == NULL) {
				/* Yep, we have to */
				sctp_alloc_a_chunk(stcb, chk);
				if (chk == NULL) {
					/*
					 * we are hosed. All we can do is
					 * nothing.. which will cause an
					 * abort if the peer is paying
					 * attention.
					 */
					goto oh_well;
				}
				memset(chk, 0, sizeof(*chk));
				chk->rec.data.rcv_flags = 0;
				chk->sent = SCTP_FORWARD_TSN_SKIP;
				chk->asoc = &stcb->asoc;
				if (stcb->asoc.idata_supported == 0) {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = 0;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				} else {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = strq->next_mid_unordered;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				}
				chk->rec.data.sid = sp->sid;
				chk->rec.data.ppid = sp->ppid;
				chk->rec.data.context = sp->context;
				chk->flags = sp->act_flags;
				chk->whoTo = NULL;
				/* consume the next TSN for the placeholder */
				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
				strq->chunks_on_queues++;
				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
				stcb->asoc.sent_queue_cnt++;
				stcb->asoc.pr_sctp_cnt++;
			}
			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
			if (sp->sinfo_flags & SCTP_UNORDERED) {
				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
			}
			if (stcb->asoc.idata_supported == 0) {
				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
					strq->next_mid_ordered++;
				}
			} else {
				if (sp->sinfo_flags & SCTP_UNORDERED) {
					strq->next_mid_unordered++;
				} else {
					strq->next_mid_ordered++;
				}
			}
	oh_well:
			if (sp->data) {
				/*
				 * Pull any data to free up the SB and allow
				 * sender to "add more" while we will throw
				 * away :-)
				 */
				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
				ret_sz += sp->length;
				do_wakeup_routine = 1;
				sp->some_taken = 1;
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
				sp->length = 0;
			}
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* drop TCB lock to take the socket lock first */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}

/*
 * checks to see if the given address, sa, is one that is currently known by
 * the kernel note: can't distinguish the same address on multiple interfaces
 * and doesn't handle multiple addresses with different zone/scope id's note:
 * ifa_ifwithaddr() compares the entire sockaddr struct
 */
struct sctp_ifa *
sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
    int holds_lock)
{
	struct sctp_laddr *laddr;

	if (holds_lock == 0) {
		SCTP_INP_RLOCK(inp);
	}

	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == NULL)
			continue;
		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    laddr->ifa->address.sin.sin_addr.s_addr) {
				/* found him. */
				if (holds_lock == 0) {
					SCTP_INP_RUNLOCK(inp);
				}
				return (laddr->ifa);
				/* NOTE(review): unreachable after return */
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &laddr->ifa->address.sin6)) {
				/* found him.
				 */
				if (holds_lock == 0) {
					SCTP_INP_RUNLOCK(inp);
				}
				return (laddr->ifa);
				/* NOTE(review): unreachable after return */
				break;
			}
		}
#endif
	}
	if (holds_lock == 0) {
		SCTP_INP_RUNLOCK(inp);
	}
	return (NULL);
}

/*
 * Hash a sockaddr into a 32-bit bucket value: IPv4 folds the address with
 * its upper halfword; IPv6 sums the four 32-bit words then folds.
 * Unknown families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t hash_of_addr;

			sin6 = (struct sockaddr_in6 *)addr;
			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3]);
			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
			return (hash_of_addr);
		}
#endif
	default:
		break;
	}
	return (0);
}

/*
 * Look up the sctp_ifa for addr in the given VRF's address hash table.
 * Returns NULL if the VRF or the address is unknown.  Takes/drops the
 * global address rlock unless the caller already holds it.
 */
struct sctp_ifa *
sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
{
	struct sctp_ifa *sctp_ifap;
	struct sctp_vrf *vrf;
	struct sctp_ifalist *hash_head;
	uint32_t hash_of_addr;

	if (holds_lock == 0)
		SCTP_IPI_ADDR_RLOCK();

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();
		return (NULL);
	}

	hash_of_addr = sctp_get_ifa_hash_val(addr);

	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
	/*
	 * NOTE(review): hash_head is the address of an array element and
	 * can never be NULL; this branch is dead code kept for safety.
	 */
	if (hash_head == NULL) {
		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
		sctp_print_address(addr);
		SCTP_PRINTF("No such bucket for address\n");
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();

		return (NULL);
	}
	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    sctp_ifap->address.sin.sin_addr.s_addr) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				/* NOTE(review): unreachable after return */
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &sctp_ifap->address.sin6)) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				/* NOTE(review): unreachable after return */
				break;
			}
		}
#endif
	}
	if (holds_lock == 0)
		SCTP_IPI_ADDR_RUNLOCK();
	return (NULL);
}

/*
 * The user has consumed data from the receive socket buffer; decide
 * whether the receive window has opened enough (>= rwnd_req) to warrant
 * sending a window-update SACK, and send it if so.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* pin the TCB while we poke at it */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		if (hold_rlock) {
			/* drop the read lock before taking the TCB lock */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* re-take the read lock if we dropped it above */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}

	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}

int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5216 * On the way out we may send out any combination of: 5217 * MSG_NOTIFICATION MSG_EOR 5218 * 5219 */ 5220 struct sctp_inpcb *inp = NULL; 5221 int my_len = 0; 5222 int cp_len = 0, error = 0; 5223 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5224 struct mbuf *m = NULL; 5225 struct sctp_tcb *stcb = NULL; 5226 int wakeup_read_socket = 0; 5227 int freecnt_applied = 0; 5228 int out_flags = 0, in_flags = 0; 5229 int block_allowed = 1; 5230 uint32_t freed_so_far = 0; 5231 uint32_t copied_so_far = 0; 5232 int in_eeor_mode = 0; 5233 int no_rcv_needed = 0; 5234 uint32_t rwnd_req = 0; 5235 int hold_sblock = 0; 5236 int hold_rlock = 0; 5237 ssize_t slen = 0; 5238 uint32_t held_length = 0; 5239 int sockbuf_lock = 0; 5240 5241 if (uio == NULL) { 5242 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5243 return (EINVAL); 5244 } 5245 5246 if (msg_flags) { 5247 in_flags = *msg_flags; 5248 if (in_flags & MSG_PEEK) 5249 SCTP_STAT_INCR(sctps_read_peeks); 5250 } else { 5251 in_flags = 0; 5252 } 5253 slen = uio->uio_resid; 5254 5255 /* Pull in and set up our int flags */ 5256 if (in_flags & MSG_OOB) { 5257 /* Out of band's NOT supported */ 5258 return (EOPNOTSUPP); 5259 } 5260 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5261 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5262 return (EINVAL); 5263 } 5264 if ((in_flags & (MSG_DONTWAIT 5265 | MSG_NBIO 5266 )) || 5267 SCTP_SO_IS_NBIO(so)) { 5268 block_allowed = 0; 5269 } 5270 /* setup the endpoint */ 5271 inp = (struct sctp_inpcb *)so->so_pcb; 5272 if (inp == NULL) { 5273 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5274 return (EFAULT); 5275 } 5276 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5277 /* Must be at least a MTU's worth */ 5278 if (rwnd_req < SCTP_MIN_RWND) 5279 rwnd_req = SCTP_MIN_RWND; 5280 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5281 if (SCTP_BASE_SYSCTL(sctp_logging_level) & 
SCTP_RECV_RWND_LOGGING_ENABLE) { 5282 sctp_misc_ints(SCTP_SORECV_ENTER, 5283 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5284 } 5285 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5286 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5287 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5288 } 5289 5290 5291 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0)); 5292 if (error) { 5293 goto release_unlocked; 5294 } 5295 sockbuf_lock = 1; 5296 restart: 5297 5298 5299 restart_nosblocks: 5300 if (hold_sblock == 0) { 5301 SOCKBUF_LOCK(&so->so_rcv); 5302 hold_sblock = 1; 5303 } 5304 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5305 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5306 goto out; 5307 } 5308 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5309 if (so->so_error) { 5310 error = so->so_error; 5311 if ((in_flags & MSG_PEEK) == 0) 5312 so->so_error = 0; 5313 goto out; 5314 } else { 5315 if (so->so_rcv.sb_cc == 0) { 5316 /* indicate EOF */ 5317 error = 0; 5318 goto out; 5319 } 5320 } 5321 } 5322 if (so->so_rcv.sb_cc <= held_length) { 5323 if (so->so_error) { 5324 error = so->so_error; 5325 if ((in_flags & MSG_PEEK) == 0) { 5326 so->so_error = 0; 5327 } 5328 goto out; 5329 } 5330 if ((so->so_rcv.sb_cc == 0) && 5331 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5332 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5333 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5334 /* 5335 * For active open side clear flags for 5336 * re-use passive open is blocked by 5337 * connect. 
5338 */ 5339 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5340 /* 5341 * You were aborted, passive side 5342 * always hits here 5343 */ 5344 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5345 error = ECONNRESET; 5346 } 5347 so->so_state &= ~(SS_ISCONNECTING | 5348 SS_ISDISCONNECTING | 5349 SS_ISCONFIRMING | 5350 SS_ISCONNECTED); 5351 if (error == 0) { 5352 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5353 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5354 error = ENOTCONN; 5355 } 5356 } 5357 goto out; 5358 } 5359 } 5360 if (block_allowed) { 5361 error = sbwait(&so->so_rcv); 5362 if (error) { 5363 goto out; 5364 } 5365 held_length = 0; 5366 goto restart_nosblocks; 5367 } else { 5368 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5369 error = EWOULDBLOCK; 5370 goto out; 5371 } 5372 } 5373 if (hold_sblock == 1) { 5374 SOCKBUF_UNLOCK(&so->so_rcv); 5375 hold_sblock = 0; 5376 } 5377 /* we possibly have data we can read */ 5378 /* sa_ignore FREED_MEMORY */ 5379 control = TAILQ_FIRST(&inp->read_queue); 5380 if (control == NULL) { 5381 /* 5382 * This could be happening since the appender did the 5383 * increment but as not yet did the tailq insert onto the 5384 * read_queue 5385 */ 5386 if (hold_rlock == 0) { 5387 SCTP_INP_READ_LOCK(inp); 5388 } 5389 control = TAILQ_FIRST(&inp->read_queue); 5390 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5391 #ifdef INVARIANTS 5392 panic("Huh, its non zero and nothing on control?"); 5393 #endif 5394 so->so_rcv.sb_cc = 0; 5395 } 5396 SCTP_INP_READ_UNLOCK(inp); 5397 hold_rlock = 0; 5398 goto restart; 5399 } 5400 5401 if ((control->length == 0) && 5402 (control->do_not_ref_stcb)) { 5403 /* 5404 * Clean up code for freeing assoc that left behind a 5405 * pdapi.. maybe a peer in EEOR that just closed after 5406 * sending and never indicated a EOR. 
5407 */ 5408 if (hold_rlock == 0) { 5409 hold_rlock = 1; 5410 SCTP_INP_READ_LOCK(inp); 5411 } 5412 control->held_length = 0; 5413 if (control->data) { 5414 /* Hmm there is data here .. fix */ 5415 struct mbuf *m_tmp; 5416 int cnt = 0; 5417 5418 m_tmp = control->data; 5419 while (m_tmp) { 5420 cnt += SCTP_BUF_LEN(m_tmp); 5421 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5422 control->tail_mbuf = m_tmp; 5423 control->end_added = 1; 5424 } 5425 m_tmp = SCTP_BUF_NEXT(m_tmp); 5426 } 5427 control->length = cnt; 5428 } else { 5429 /* remove it */ 5430 TAILQ_REMOVE(&inp->read_queue, control, next); 5431 /* Add back any hiddend data */ 5432 sctp_free_remote_addr(control->whoFrom); 5433 sctp_free_a_readq(stcb, control); 5434 } 5435 if (hold_rlock) { 5436 hold_rlock = 0; 5437 SCTP_INP_READ_UNLOCK(inp); 5438 } 5439 goto restart; 5440 } 5441 if ((control->length == 0) && 5442 (control->end_added == 1)) { 5443 /* 5444 * Do we also need to check for (control->pdapi_aborted == 5445 * 1)? 5446 */ 5447 if (hold_rlock == 0) { 5448 hold_rlock = 1; 5449 SCTP_INP_READ_LOCK(inp); 5450 } 5451 TAILQ_REMOVE(&inp->read_queue, control, next); 5452 if (control->data) { 5453 #ifdef INVARIANTS 5454 panic("control->data not null but control->length == 0"); 5455 #else 5456 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5457 sctp_m_freem(control->data); 5458 control->data = NULL; 5459 #endif 5460 } 5461 if (control->aux_data) { 5462 sctp_m_free(control->aux_data); 5463 control->aux_data = NULL; 5464 } 5465 #ifdef INVARIANTS 5466 if (control->on_strm_q) { 5467 panic("About to free ctl:%p so:%p and its in %d", 5468 control, so, control->on_strm_q); 5469 } 5470 #endif 5471 sctp_free_remote_addr(control->whoFrom); 5472 sctp_free_a_readq(stcb, control); 5473 if (hold_rlock) { 5474 hold_rlock = 0; 5475 SCTP_INP_READ_UNLOCK(inp); 5476 } 5477 goto restart; 5478 } 5479 if (control->length == 0) { 5480 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5481 (filling_sinfo)) { 5482 /* find a more suitable one then this */ 5483 ctl = TAILQ_NEXT(control, next); 5484 while (ctl) { 5485 if ((ctl->stcb != control->stcb) && (ctl->length) && 5486 (ctl->some_taken || 5487 (ctl->spec_flags & M_NOTIFICATION) || 5488 ((ctl->do_not_ref_stcb == 0) && 5489 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5490 ) { 5491 /*- 5492 * If we have a different TCB next, and there is data 5493 * present. If we have already taken some (pdapi), OR we can 5494 * ref the tcb and no delivery as started on this stream, we 5495 * take it. Note we allow a notification on a different 5496 * assoc to be delivered.. 5497 */ 5498 control = ctl; 5499 goto found_one; 5500 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5501 (ctl->length) && 5502 ((ctl->some_taken) || 5503 ((ctl->do_not_ref_stcb == 0) && 5504 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5505 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5506 /*- 5507 * If we have the same tcb, and there is data present, and we 5508 * have the strm interleave feature present. Then if we have 5509 * taken some (pdapi) or we can refer to tht tcb AND we have 5510 * not started a delivery for this stream, we can take it. 
5511 * Note we do NOT allow a notificaiton on the same assoc to 5512 * be delivered. 5513 */ 5514 control = ctl; 5515 goto found_one; 5516 } 5517 ctl = TAILQ_NEXT(ctl, next); 5518 } 5519 } 5520 /* 5521 * if we reach here, not suitable replacement is available 5522 * <or> fragment interleave is NOT on. So stuff the sb_cc 5523 * into the our held count, and its time to sleep again. 5524 */ 5525 held_length = so->so_rcv.sb_cc; 5526 control->held_length = so->so_rcv.sb_cc; 5527 goto restart; 5528 } 5529 /* Clear the held length since there is something to read */ 5530 control->held_length = 0; 5531 found_one: 5532 /* 5533 * If we reach here, control has a some data for us to read off. 5534 * Note that stcb COULD be NULL. 5535 */ 5536 if (hold_rlock == 0) { 5537 hold_rlock = 1; 5538 SCTP_INP_READ_LOCK(inp); 5539 } 5540 control->some_taken++; 5541 stcb = control->stcb; 5542 if (stcb) { 5543 if ((control->do_not_ref_stcb == 0) && 5544 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5545 if (freecnt_applied == 0) 5546 stcb = NULL; 5547 } else if (control->do_not_ref_stcb == 0) { 5548 /* you can't free it on me please */ 5549 /* 5550 * The lock on the socket buffer protects us so the 5551 * free code will stop. But since we used the 5552 * socketbuf lock and the sender uses the tcb_lock 5553 * to increment, we need to use the atomic add to 5554 * the refcnt 5555 */ 5556 if (freecnt_applied) { 5557 #ifdef INVARIANTS 5558 panic("refcnt already incremented"); 5559 #else 5560 SCTP_PRINTF("refcnt already incremented?\n"); 5561 #endif 5562 } else { 5563 atomic_add_int(&stcb->asoc.refcnt, 1); 5564 freecnt_applied = 1; 5565 } 5566 /* 5567 * Setup to remember how much we have not yet told 5568 * the peer our rwnd has opened up. Note we grab the 5569 * value from the tcb from last time. Note too that 5570 * sack sending clears this when a sack is sent, 5571 * which is fine. 
Once we hit the rwnd_req, we then 5572 * will go to the sctp_user_rcvd() that will not 5573 * lock until it KNOWs it MUST send a WUP-SACK. 5574 */ 5575 freed_so_far = stcb->freed_by_sorcv_sincelast; 5576 stcb->freed_by_sorcv_sincelast = 0; 5577 } 5578 } 5579 if (stcb && 5580 ((control->spec_flags & M_NOTIFICATION) == 0) && 5581 control->do_not_ref_stcb == 0) { 5582 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5583 } 5584 5585 /* First lets get off the sinfo and sockaddr info */ 5586 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5587 sinfo->sinfo_stream = control->sinfo_stream; 5588 sinfo->sinfo_ssn = (uint16_t)control->mid; 5589 sinfo->sinfo_flags = control->sinfo_flags; 5590 sinfo->sinfo_ppid = control->sinfo_ppid; 5591 sinfo->sinfo_context = control->sinfo_context; 5592 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5593 sinfo->sinfo_tsn = control->sinfo_tsn; 5594 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5595 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5596 nxt = TAILQ_NEXT(control, next); 5597 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5598 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5599 struct sctp_extrcvinfo *s_extra; 5600 5601 s_extra = (struct sctp_extrcvinfo *)sinfo; 5602 if ((nxt) && 5603 (nxt->length)) { 5604 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5605 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5606 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5607 } 5608 if (nxt->spec_flags & M_NOTIFICATION) { 5609 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5610 } 5611 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5612 s_extra->serinfo_next_length = nxt->length; 5613 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5614 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5615 if (nxt->tail_mbuf != NULL) { 5616 if (nxt->end_added) { 5617 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5618 } 5619 } 5620 } else { 5621 /* 5622 * we explicitly 0 this, since the 
memcpy 5623 * got some other things beyond the older 5624 * sinfo_ that is on the control's structure 5625 * :-D 5626 */ 5627 nxt = NULL; 5628 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5629 s_extra->serinfo_next_aid = 0; 5630 s_extra->serinfo_next_length = 0; 5631 s_extra->serinfo_next_ppid = 0; 5632 s_extra->serinfo_next_stream = 0; 5633 } 5634 } 5635 /* 5636 * update off the real current cum-ack, if we have an stcb. 5637 */ 5638 if ((control->do_not_ref_stcb == 0) && stcb) 5639 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5640 /* 5641 * mask off the high bits, we keep the actual chunk bits in 5642 * there. 5643 */ 5644 sinfo->sinfo_flags &= 0x00ff; 5645 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5646 sinfo->sinfo_flags |= SCTP_UNORDERED; 5647 } 5648 } 5649 #ifdef SCTP_ASOCLOG_OF_TSNS 5650 { 5651 int index, newindex; 5652 struct sctp_pcbtsn_rlog *entry; 5653 5654 do { 5655 index = inp->readlog_index; 5656 newindex = index + 1; 5657 if (newindex >= SCTP_READ_LOG_SIZE) { 5658 newindex = 0; 5659 } 5660 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5661 entry = &inp->readlog[index]; 5662 entry->vtag = control->sinfo_assoc_id; 5663 entry->strm = control->sinfo_stream; 5664 entry->seq = (uint16_t)control->mid; 5665 entry->sz = control->length; 5666 entry->flgs = control->sinfo_flags; 5667 } 5668 #endif 5669 if ((fromlen > 0) && (from != NULL)) { 5670 union sctp_sockstore store; 5671 size_t len; 5672 5673 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5674 #ifdef INET6 5675 case AF_INET6: 5676 len = sizeof(struct sockaddr_in6); 5677 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5678 store.sin6.sin6_port = control->port_from; 5679 break; 5680 #endif 5681 #ifdef INET 5682 case AF_INET: 5683 #ifdef INET6 5684 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5685 len = sizeof(struct sockaddr_in6); 5686 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5687 &store.sin6); 5688 store.sin6.sin6_port = 
control->port_from; 5689 } else { 5690 len = sizeof(struct sockaddr_in); 5691 store.sin = control->whoFrom->ro._l_addr.sin; 5692 store.sin.sin_port = control->port_from; 5693 } 5694 #else 5695 len = sizeof(struct sockaddr_in); 5696 store.sin = control->whoFrom->ro._l_addr.sin; 5697 store.sin.sin_port = control->port_from; 5698 #endif 5699 break; 5700 #endif 5701 default: 5702 len = 0; 5703 break; 5704 } 5705 memcpy(from, &store, min((size_t)fromlen, len)); 5706 #ifdef INET6 5707 { 5708 struct sockaddr_in6 lsa6, *from6; 5709 5710 from6 = (struct sockaddr_in6 *)from; 5711 sctp_recover_scope_mac(from6, (&lsa6)); 5712 } 5713 #endif 5714 } 5715 if (hold_rlock) { 5716 SCTP_INP_READ_UNLOCK(inp); 5717 hold_rlock = 0; 5718 } 5719 if (hold_sblock) { 5720 SOCKBUF_UNLOCK(&so->so_rcv); 5721 hold_sblock = 0; 5722 } 5723 /* now copy out what data we can */ 5724 if (mp == NULL) { 5725 /* copy out each mbuf in the chain up to length */ 5726 get_more_data: 5727 m = control->data; 5728 while (m) { 5729 /* Move out all we can */ 5730 cp_len = (int)uio->uio_resid; 5731 my_len = (int)SCTP_BUF_LEN(m); 5732 if (cp_len > my_len) { 5733 /* not enough in this buf */ 5734 cp_len = my_len; 5735 } 5736 if (hold_rlock) { 5737 SCTP_INP_READ_UNLOCK(inp); 5738 hold_rlock = 0; 5739 } 5740 if (cp_len > 0) 5741 error = uiomove(mtod(m, char *), cp_len, uio); 5742 /* re-read */ 5743 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5744 goto release; 5745 } 5746 5747 if ((control->do_not_ref_stcb == 0) && stcb && 5748 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5749 no_rcv_needed = 1; 5750 } 5751 if (error) { 5752 /* error we are out of here */ 5753 goto release; 5754 } 5755 SCTP_INP_READ_LOCK(inp); 5756 hold_rlock = 1; 5757 if (cp_len == SCTP_BUF_LEN(m)) { 5758 if ((SCTP_BUF_NEXT(m) == NULL) && 5759 (control->end_added)) { 5760 out_flags |= MSG_EOR; 5761 if ((control->do_not_ref_stcb == 0) && 5762 (control->stcb != NULL) && 5763 ((control->spec_flags & M_NOTIFICATION) == 0)) 5764 
control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5765 } 5766 if (control->spec_flags & M_NOTIFICATION) { 5767 out_flags |= MSG_NOTIFICATION; 5768 } 5769 /* we ate up the mbuf */ 5770 if (in_flags & MSG_PEEK) { 5771 /* just looking */ 5772 m = SCTP_BUF_NEXT(m); 5773 copied_so_far += cp_len; 5774 } else { 5775 /* dispose of the mbuf */ 5776 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5777 sctp_sblog(&so->so_rcv, 5778 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5779 } 5780 sctp_sbfree(control, stcb, &so->so_rcv, m); 5781 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5782 sctp_sblog(&so->so_rcv, 5783 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5784 } 5785 copied_so_far += cp_len; 5786 freed_so_far += cp_len; 5787 freed_so_far += MSIZE; 5788 atomic_subtract_int(&control->length, cp_len); 5789 control->data = sctp_m_free(m); 5790 m = control->data; 5791 /* 5792 * been through it all, must hold sb 5793 * lock ok to null tail 5794 */ 5795 if (control->data == NULL) { 5796 #ifdef INVARIANTS 5797 if ((control->end_added == 0) || 5798 (TAILQ_NEXT(control, next) == NULL)) { 5799 /* 5800 * If the end is not 5801 * added, OR the 5802 * next is NOT null 5803 * we MUST have the 5804 * lock. 5805 */ 5806 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 5807 panic("Hmm we don't own the lock?"); 5808 } 5809 } 5810 #endif 5811 control->tail_mbuf = NULL; 5812 #ifdef INVARIANTS 5813 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 5814 panic("end_added, nothing left and no MSG_EOR"); 5815 } 5816 #endif 5817 } 5818 } 5819 } else { 5820 /* Do we need to trim the mbuf? 
*/ 5821 if (control->spec_flags & M_NOTIFICATION) { 5822 out_flags |= MSG_NOTIFICATION; 5823 } 5824 if ((in_flags & MSG_PEEK) == 0) { 5825 SCTP_BUF_RESV_UF(m, cp_len); 5826 SCTP_BUF_LEN(m) -= cp_len; 5827 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5828 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5829 } 5830 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5831 if ((control->do_not_ref_stcb == 0) && 5832 stcb) { 5833 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5834 } 5835 copied_so_far += cp_len; 5836 freed_so_far += cp_len; 5837 freed_so_far += MSIZE; 5838 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5839 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5840 SCTP_LOG_SBRESULT, 0); 5841 } 5842 atomic_subtract_int(&control->length, cp_len); 5843 } else { 5844 copied_so_far += cp_len; 5845 } 5846 } 5847 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 5848 break; 5849 } 5850 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5851 (control->do_not_ref_stcb == 0) && 5852 (freed_so_far >= rwnd_req)) { 5853 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5854 } 5855 } /* end while(m) */ 5856 /* 5857 * At this point we have looked at it all and we either have 5858 * a MSG_EOR/or read all the user wants... <OR> 5859 * control->length == 0. 5860 */ 5861 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 5862 /* we are done with this control */ 5863 if (control->length == 0) { 5864 if (control->data) { 5865 #ifdef INVARIANTS 5866 panic("control->data not null at read eor?"); 5867 #else 5868 SCTP_PRINTF("Strange, data left in the control buffer .. 
invarients would panic?\n"); 5869 sctp_m_freem(control->data); 5870 control->data = NULL; 5871 #endif 5872 } 5873 done_with_control: 5874 if (hold_rlock == 0) { 5875 SCTP_INP_READ_LOCK(inp); 5876 hold_rlock = 1; 5877 } 5878 TAILQ_REMOVE(&inp->read_queue, control, next); 5879 /* Add back any hiddend data */ 5880 if (control->held_length) { 5881 held_length = 0; 5882 control->held_length = 0; 5883 wakeup_read_socket = 1; 5884 } 5885 if (control->aux_data) { 5886 sctp_m_free(control->aux_data); 5887 control->aux_data = NULL; 5888 } 5889 no_rcv_needed = control->do_not_ref_stcb; 5890 sctp_free_remote_addr(control->whoFrom); 5891 control->data = NULL; 5892 #ifdef INVARIANTS 5893 if (control->on_strm_q) { 5894 panic("About to free ctl:%p so:%p and its in %d", 5895 control, so, control->on_strm_q); 5896 } 5897 #endif 5898 sctp_free_a_readq(stcb, control); 5899 control = NULL; 5900 if ((freed_so_far >= rwnd_req) && 5901 (no_rcv_needed == 0)) 5902 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5903 5904 } else { 5905 /* 5906 * The user did not read all of this 5907 * message, turn off the returned MSG_EOR 5908 * since we are leaving more behind on the 5909 * control to read. 5910 */ 5911 #ifdef INVARIANTS 5912 if (control->end_added && 5913 (control->data == NULL) && 5914 (control->tail_mbuf == NULL)) { 5915 panic("Gak, control->length is corrupt?"); 5916 } 5917 #endif 5918 no_rcv_needed = control->do_not_ref_stcb; 5919 out_flags &= ~MSG_EOR; 5920 } 5921 } 5922 if (out_flags & MSG_EOR) { 5923 goto release; 5924 } 5925 if ((uio->uio_resid == 0) || 5926 ((in_eeor_mode) && 5927 (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) { 5928 goto release; 5929 } 5930 /* 5931 * If I hit here the receiver wants more and this message is 5932 * NOT done (pd-api). So two questions. Can we block? if not 5933 * we are done. Did the user NOT set MSG_WAITALL? 
5934 */ 5935 if (block_allowed == 0) { 5936 goto release; 5937 } 5938 /* 5939 * We need to wait for more data a few things: - We don't 5940 * sbunlock() so we don't get someone else reading. - We 5941 * must be sure to account for the case where what is added 5942 * is NOT to our control when we wakeup. 5943 */ 5944 5945 /* 5946 * Do we need to tell the transport a rwnd update might be 5947 * needed before we go to sleep? 5948 */ 5949 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5950 ((freed_so_far >= rwnd_req) && 5951 (control->do_not_ref_stcb == 0) && 5952 (no_rcv_needed == 0))) { 5953 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5954 } 5955 wait_some_more: 5956 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5957 goto release; 5958 } 5959 5960 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5961 goto release; 5962 5963 if (hold_rlock == 1) { 5964 SCTP_INP_READ_UNLOCK(inp); 5965 hold_rlock = 0; 5966 } 5967 if (hold_sblock == 0) { 5968 SOCKBUF_LOCK(&so->so_rcv); 5969 hold_sblock = 1; 5970 } 5971 if ((copied_so_far) && (control->length == 0) && 5972 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 5973 goto release; 5974 } 5975 if (so->so_rcv.sb_cc <= control->held_length) { 5976 error = sbwait(&so->so_rcv); 5977 if (error) { 5978 goto release; 5979 } 5980 control->held_length = 0; 5981 } 5982 if (hold_sblock) { 5983 SOCKBUF_UNLOCK(&so->so_rcv); 5984 hold_sblock = 0; 5985 } 5986 if (control->length == 0) { 5987 /* still nothing here */ 5988 if (control->end_added == 1) { 5989 /* he aborted, or is done i.e.did a shutdown */ 5990 out_flags |= MSG_EOR; 5991 if (control->pdapi_aborted) { 5992 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5993 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5994 5995 out_flags |= MSG_TRUNC; 5996 } else { 5997 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5998 
control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5999 } 6000 goto done_with_control; 6001 } 6002 if (so->so_rcv.sb_cc > held_length) { 6003 control->held_length = so->so_rcv.sb_cc; 6004 held_length = 0; 6005 } 6006 goto wait_some_more; 6007 } else if (control->data == NULL) { 6008 /* 6009 * we must re-sync since data is probably being 6010 * added 6011 */ 6012 SCTP_INP_READ_LOCK(inp); 6013 if ((control->length > 0) && (control->data == NULL)) { 6014 /* 6015 * big trouble.. we have the lock and its 6016 * corrupt? 6017 */ 6018 #ifdef INVARIANTS 6019 panic("Impossible data==NULL length !=0"); 6020 #endif 6021 out_flags |= MSG_EOR; 6022 out_flags |= MSG_TRUNC; 6023 control->length = 0; 6024 SCTP_INP_READ_UNLOCK(inp); 6025 goto done_with_control; 6026 } 6027 SCTP_INP_READ_UNLOCK(inp); 6028 /* We will fall around to get more data */ 6029 } 6030 goto get_more_data; 6031 } else { 6032 /*- 6033 * Give caller back the mbuf chain, 6034 * store in uio_resid the length 6035 */ 6036 wakeup_read_socket = 0; 6037 if ((control->end_added == 0) || 6038 (TAILQ_NEXT(control, next) == NULL)) { 6039 /* Need to get rlock */ 6040 if (hold_rlock == 0) { 6041 SCTP_INP_READ_LOCK(inp); 6042 hold_rlock = 1; 6043 } 6044 } 6045 if (control->end_added) { 6046 out_flags |= MSG_EOR; 6047 if ((control->do_not_ref_stcb == 0) && 6048 (control->stcb != NULL) && 6049 ((control->spec_flags & M_NOTIFICATION) == 0)) 6050 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6051 } 6052 if (control->spec_flags & M_NOTIFICATION) { 6053 out_flags |= MSG_NOTIFICATION; 6054 } 6055 uio->uio_resid = control->length; 6056 *mp = control->data; 6057 m = control->data; 6058 while (m) { 6059 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6060 sctp_sblog(&so->so_rcv, 6061 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6062 } 6063 sctp_sbfree(control, stcb, &so->so_rcv, m); 6064 freed_so_far += SCTP_BUF_LEN(m); 6065 freed_so_far += MSIZE; 6066 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6067 sctp_sblog(&so->so_rcv, 6068 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6069 } 6070 m = SCTP_BUF_NEXT(m); 6071 } 6072 control->data = control->tail_mbuf = NULL; 6073 control->length = 0; 6074 if (out_flags & MSG_EOR) { 6075 /* Done with this control */ 6076 goto done_with_control; 6077 } 6078 } 6079 release: 6080 if (hold_rlock == 1) { 6081 SCTP_INP_READ_UNLOCK(inp); 6082 hold_rlock = 0; 6083 } 6084 if (hold_sblock == 1) { 6085 SOCKBUF_UNLOCK(&so->so_rcv); 6086 hold_sblock = 0; 6087 } 6088 6089 sbunlock(&so->so_rcv); 6090 sockbuf_lock = 0; 6091 6092 release_unlocked: 6093 if (hold_sblock) { 6094 SOCKBUF_UNLOCK(&so->so_rcv); 6095 hold_sblock = 0; 6096 } 6097 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6098 if ((freed_so_far >= rwnd_req) && 6099 (control && (control->do_not_ref_stcb == 0)) && 6100 (no_rcv_needed == 0)) 6101 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6102 } 6103 out: 6104 if (msg_flags) { 6105 *msg_flags = out_flags; 6106 } 6107 if (((out_flags & MSG_EOR) == 0) && 6108 ((in_flags & MSG_PEEK) == 0) && 6109 (sinfo) && 6110 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6111 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6112 struct sctp_extrcvinfo *s_extra; 6113 6114 s_extra = (struct sctp_extrcvinfo *)sinfo; 6115 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6116 } 6117 if (hold_rlock == 1) { 6118 SCTP_INP_READ_UNLOCK(inp); 6119 } 6120 if (hold_sblock) { 6121 SOCKBUF_UNLOCK(&so->so_rcv); 6122 } 6123 if (sockbuf_lock) { 6124 sbunlock(&so->so_rcv); 6125 } 6126 6127 if (freecnt_applied) { 6128 /* 6129 * The lock on the socket buffer protects us so the free 6130 * code will stop. 
But since we used the socketbuf lock and 6131 * the sender uses the tcb_lock to increment, we need to use 6132 * the atomic add to the refcnt. 6133 */ 6134 if (stcb == NULL) { 6135 #ifdef INVARIANTS 6136 panic("stcb for refcnt has gone NULL?"); 6137 goto stage_left; 6138 #else 6139 goto stage_left; 6140 #endif 6141 } 6142 /* Save the value back for next time */ 6143 stcb->freed_by_sorcv_sincelast = freed_so_far; 6144 atomic_add_int(&stcb->asoc.refcnt, -1); 6145 } 6146 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6147 if (stcb) { 6148 sctp_misc_ints(SCTP_SORECV_DONE, 6149 freed_so_far, 6150 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6151 stcb->asoc.my_rwnd, 6152 so->so_rcv.sb_cc); 6153 } else { 6154 sctp_misc_ints(SCTP_SORECV_DONE, 6155 freed_so_far, 6156 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6157 0, 6158 so->so_rcv.sb_cc); 6159 } 6160 } 6161 stage_left: 6162 if (wakeup_read_socket) { 6163 sctp_sorwakeup(inp, so); 6164 } 6165 return (error); 6166 } 6167 6168 6169 #ifdef SCTP_MBUF_LOGGING 6170 struct mbuf * 6171 sctp_m_free(struct mbuf *m) 6172 { 6173 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6174 sctp_log_mb(m, SCTP_MBUF_IFREE); 6175 } 6176 return (m_free(m)); 6177 } 6178 6179 void 6180 sctp_m_freem(struct mbuf *mb) 6181 { 6182 while (mb != NULL) 6183 mb = sctp_m_free(mb); 6184 } 6185 6186 #endif 6187 6188 int 6189 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6190 { 6191 /* 6192 * Given a local address. For all associations that holds the 6193 * address, request a peer-set-primary. 6194 */ 6195 struct sctp_ifa *ifa; 6196 struct sctp_laddr *wi; 6197 6198 ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0); 6199 if (ifa == NULL) { 6200 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6201 return (EADDRNOTAVAIL); 6202 } 6203 /* 6204 * Now that we have the ifa we must awaken the iterator with this 6205 * message. 
/*
 * Request a peer-set-primary for every association that uses the given
 * local address.  Looks the address up in the VRF's ifa table, then queues
 * a work item for the address work-queue iterator, which performs the
 * actual per-association processing asynchronously.
 *
 * Returns 0 on success, EADDRNOTAVAIL if the address is not a local
 * address in this VRF, or ENOMEM if the work item cannot be allocated.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	memset(wi, 0, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/*
	 * Hold a reference on the ifa for the queued work item;
	 * NOTE(review): presumably the addr-wq iterator drops this
	 * reference when it consumes the item — confirm in the iterator.
	 */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	/* Kick the address work-queue timer so the item gets processed. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	SCTP_WQ_ADDR_UNLOCK();
	return (0);
}
6275 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6276 } 6277 if (flagsp != NULL) { 6278 flags = *flagsp; 6279 } else { 6280 flags = 0; 6281 } 6282 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6283 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6284 if (flagsp != NULL) { 6285 *flagsp = flags; 6286 } 6287 if (controlp != NULL) { 6288 /* copy back the sinfo in a CMSG format */ 6289 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6290 *controlp = sctp_build_ctl_nchunk(inp, 6291 (struct sctp_sndrcvinfo *)&sinfo); 6292 } else { 6293 *controlp = NULL; 6294 } 6295 } 6296 if (psa) { 6297 /* copy back the address info */ 6298 if (from && from->sa_len) { 6299 *psa = sodupsockaddr(from, M_NOWAIT); 6300 } else { 6301 *psa = NULL; 6302 } 6303 } 6304 return (error); 6305 } 6306 6307 6308 6309 6310 6311 int 6312 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6313 int totaddr, int *error) 6314 { 6315 int added = 0; 6316 int i; 6317 struct sctp_inpcb *inp; 6318 struct sockaddr *sa; 6319 size_t incr = 0; 6320 #ifdef INET 6321 struct sockaddr_in *sin; 6322 #endif 6323 #ifdef INET6 6324 struct sockaddr_in6 *sin6; 6325 #endif 6326 6327 sa = addr; 6328 inp = stcb->sctp_ep; 6329 *error = 0; 6330 for (i = 0; i < totaddr; i++) { 6331 switch (sa->sa_family) { 6332 #ifdef INET 6333 case AF_INET: 6334 incr = sizeof(struct sockaddr_in); 6335 sin = (struct sockaddr_in *)sa; 6336 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6337 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6338 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6339 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6340 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6341 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6342 *error = EINVAL; 6343 goto out_now; 6344 } 6345 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6346 SCTP_DONOT_SETSCOPE, 6347 SCTP_ADDR_IS_CONFIRMED)) { 6348 /* assoc gone no un-lock */ 6349 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, 
/*
 * Add 'totaddr' packed sockaddrs (starting at 'addr', laid end-to-end)
 * to the association as confirmed remote addresses.
 *
 * On a bad address (wildcard/broadcast/multicast) or on a failure from
 * sctp_add_remote_addr(), the association is freed, *error is set
 * (EINVAL or ENOBUFS) and the walk stops.  Returns the number of
 * addresses successfully added.
 *
 * NOTE(review): for an unsupported sa_family the default case leaves
 * 'incr' at its previous value (0 on the first iteration), so the loop
 * would re-examine the same bytes — presumably callers have already
 * validated families via sctp_connectx_helper_find(); confirm.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast peers */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast v6 peers */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			break;
		}
		/* advance to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6419 #endif 6420 #ifdef INET6 6421 case AF_INET6: 6422 { 6423 struct sockaddr_in6 *sin6; 6424 6425 sin6 = (struct sockaddr_in6 *)sa; 6426 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6427 /* Must be non-mapped for connectx */ 6428 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6429 *error = EINVAL; 6430 *bad_addr = 1; 6431 return (NULL); 6432 } 6433 incr = (unsigned int)sizeof(struct sockaddr_in6); 6434 if (sa->sa_len != incr) { 6435 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6436 *error = EINVAL; 6437 *bad_addr = 1; 6438 return (NULL); 6439 } 6440 (*num_v6) += 1; 6441 break; 6442 } 6443 #endif 6444 default: 6445 *totaddr = i; 6446 incr = 0; 6447 /* we are done */ 6448 break; 6449 } 6450 if (i == *totaddr) { 6451 break; 6452 } 6453 SCTP_INP_INCR_REF(inp); 6454 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6455 if (stcb != NULL) { 6456 /* Already have or am bring up an association */ 6457 return (stcb); 6458 } else { 6459 SCTP_INP_DECR_REF(inp); 6460 } 6461 if ((at + incr) > limit) { 6462 *totaddr = i; 6463 break; 6464 } 6465 sa = (struct sockaddr *)((caddr_t)sa + incr); 6466 } 6467 return ((struct sctp_tcb *)NULL); 6468 } 6469 6470 /* 6471 * sctp_bindx(ADD) for one address. 6472 * assumes all arguments are valid/checked by caller. 6473 */ 6474 void 6475 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6476 struct sockaddr *sa, sctp_assoc_t assoc_id, 6477 uint32_t vrf_id, int *error, void *p) 6478 { 6479 struct sockaddr *addr_touse; 6480 #if defined(INET) && defined(INET6) 6481 struct sockaddr_in sin; 6482 #endif 6483 6484 /* see if we're bound all already! 
*/
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* A bound-all endpoint cannot take additional explicit binds. */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/*
			 * Rewrite the v4-mapped v6 address as a real IPv4
			 * sockaddr and operate on that instead.
			 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* Endpoint not yet bound at all: this becomes the primary bind. */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks
	 * required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		/*
		 * sin_port overlays sin6_port at the same offset, so this
		 * cast is used for port checking regardless of family.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Address is free: clear the port and add it. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* Some other endpoint owns this address/port. */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}

/*
 * sctp_bindx(DELETE) for one address.
 * assumes all arguments are valid/checked by caller.
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	/* Scratch space for converting a v4-mapped v6 address to plain v4. */
	struct sockaddr_in sin;
#endif

	/* see if we're bound all already!
*/
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* A bound-all endpoint has no explicit binds to delete. */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;
#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/*
			 * Rewrite the v4-mapped v6 address as a real IPv4
			 * sockaddr, mirroring sctp_bindx_add_address().
			 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}

/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;
#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;
#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	/* Hold the address-list read lock for the whole walk. */
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}

	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if
(sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* Jail visibility check. */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* Jail visibility check. */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad
									 * link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}

#if defined(SCTP_LOCAL_TRACE_BUF)

/*
 * Append one entry to the global circular trace buffer.  A CAS loop
 * reserves a slot index without any lock; the entry fields are then
 * filled in outside the loop.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/*
	 * Atomically claim the current index and advance it; on wrap the
	 * shared index restarts at 1 while this writer reuses slot 0
	 * (saveindex is reset below).
	 */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}

#endif
/*
 * Receive hook for the kernel UDP tunneling sockets (registered via
 * udp_set_kernel_tunneling() in sctp_over_udp_start()): strips the UDP
 * encapsulation header and re-injects the inner SCTP packet into the
 * normal input path, remembering the encapsulating source port.
 * Consumes the mbuf on every path.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
*/
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
	 * checksum and it was valid. Since CSUM_DATA_VALID ==
	 * CSUM_SCTP_VALID this would imply that the HW also verified the
	 * SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the declared IP payload by the stripped UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		/* Shrink the declared payload by the stripped UDP header. */
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}

#ifdef INET
/*
 * ICMP error hook for the IPv4 UDP tunneling socket.  Validates that the
 * quoted packet really belongs to one of our UDP-encapsulated SCTP
 * associations (ports and verification tag / INIT initiate-tag) before
 * forwarding the error to sctp_notify().
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
	struct ip *outer_ip, *inner_ip;
	struct sctphdr *sh;
	struct icmp *icmp;
	struct udphdr *udp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_init_chunk *ch;
	struct sockaddr_in src, dst;
	uint8_t type, code;

	inner_ip = (struct ip *)vip;
	/* Back up from the quoted inner IP header to the ICMP header. */
	icmp = (struct icmp *)((caddr_t)inner_ip -
	    (sizeof(struct icmp) - sizeof(struct ip)));
	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
	/* Make sure the quoted packet carries at least UDP + 8 bytes of SCTP. */
	if (ntohs(outer_ip->ip_len) <
	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
		return;
	}
	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
	sh = (struct sctphdr *)(udp + 1);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = inner_ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = inner_ip->ip_dst;
	/*
	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
	 * holds our local endpoint address. Thus we reverse the dst and the
	 * src in the lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1,
	    SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp->uh_dport != net->port) ||
		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh->v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/* Zero v_tag: only an INIT may legitimately carry it. */
			if (ntohs(outer_ip->ip_len) >=
			    sizeof(struct ip) +
			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				ch = (struct sctp_init_chunk *)(sh + 1);
				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = icmp->icmp_type;
		code = icmp->icmp_code;
		/*
		 * Port unreachable from the tunnel endpoint means the peer
		 * does not speak SCTP-over-UDP; report it as a protocol
		 * problem instead.
		 */
		if ((type == ICMP_UNREACH) &&
		    (code == ICMP_UNREACH_PORT)) {
			code = ICMP_UNREACH_PROTOCOL;
		}
		/* sctp_notify() consumes the stcb lock. */
		sctp_notify(inp, stcb, net, type, code,
		    ntohs(inner_ip->ip_len),
		    (uint32_t)ntohs(icmp->icmp_nextmtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	return;
}
#endif

#ifdef INET6
/*
 * ICMPv6 error hook for the IPv6 UDP tunneling socket.  Mirrors
 * sctp_recv_icmp_tunneled_packet(), but works from the mbuf via
 * m_copydata() since the quoted packet may not be contiguous.
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
	struct ip6ctlparam *ip6cp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctphdr sh;
	struct udphdr udp;
	struct sockaddr_in6 src, dst;
	uint8_t type, code;

	ip6cp = (struct ip6ctlparam *)d;
	/*
	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
	 */
	if (ip6cp->ip6c_m == NULL) {
		return;
	}
	/*
	 * Check if we can safely examine the ports and the verification tag
	 * of the SCTP common header.
	 */
	if (ip6cp->ip6c_m->m_pkthdr.len <
	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
		return;
	}
	/* Copy out the UDP header. */
	memset(&udp, 0, sizeof(struct udphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off,
	    sizeof(struct udphdr),
	    (caddr_t)&udp);
	/* Copy out the port numbers and the verification tag.
*/
	memset(&sh, 0, sizeof(struct sctphdr));
	/* Only src_port, dest_port and v_tag are copied; checksum stays 0. */
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off + sizeof(struct udphdr),
	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
	    (caddr_t)&sh);
	memset(&src, 0, sizeof(struct sockaddr_in6));
	src.sin6_family = AF_INET6;
	src.sin6_len = sizeof(struct sockaddr_in6);
	src.sin6_port = sh.src_port;
	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	memset(&dst, 0, sizeof(struct sockaddr_in6));
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(struct sockaddr_in6);
	dst.sin6_port = sh.dest_port;
	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	inp = NULL;
	net = NULL;
	/* dst/src are swapped: the failed packet's dest is the peer. */
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp.uh_dport != net->port) ||
		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh.v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			if (ip6cp->ip6c_m->m_pkthdr.len >=
			    ip6cp->ip6c_off + sizeof(struct udphdr) +
			    sizeof(struct sctphdr) +
			    sizeof(struct sctp_chunkhdr) +
			    offsetof(struct sctp_init, a_rwnd)) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				uint32_t initiate_tag;
				uint8_t chunk_type;

				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr),
				    sizeof(uint8_t),
				    (caddr_t)&chunk_type);
				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr) +
				    sizeof(struct sctp_chunkhdr),
				    sizeof(uint32_t),
				    (caddr_t)&initiate_tag);
				if ((chunk_type != SCTP_INITIATION) ||
				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = ip6cp->ip6c_icmp6->icmp6_type;
		code = ip6cp->ip6c_icmp6->icmp6_code;
		/*
		 * Port unreachable from the tunnel endpoint means the peer
		 * does not speak SCTP-over-UDP; report a next-header
		 * parameter problem instead.
		 */
		if ((type == ICMP6_DST_UNREACH) &&
		    (code == ICMP6_DST_UNREACH_NOPORT)) {
			type = ICMP6_PARAM_PROB;
			code = ICMP6_PARAMPROB_NEXTHEADER;
		}
		/* sctp6_notify() consumes the stcb lock. */
		sctp6_notify(inp, stcb, net, type, code,
		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce inp's ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
}
#endif

/*
 * Close the UDP tunneling sockets, if open.
 */
void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
*/
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}

/*
 * Create and bind the kernel UDP tunneling sockets (one per address
 * family) and register the tunneled-packet and ICMP error hooks.
 * Returns 0 on success or an errno; on any failure all sockets opened
 * so far are torn down via sctp_over_udp_stop().
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port.
	 */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
7304 */ 7305 uint32_t 7306 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3) 7307 { 7308 if (mtu1 > 0) { 7309 if (mtu2 > 0) { 7310 if (mtu3 > 0) { 7311 return (min(mtu1, min(mtu2, mtu3))); 7312 } else { 7313 return (min(mtu1, mtu2)); 7314 } 7315 } else { 7316 if (mtu3 > 0) { 7317 return (min(mtu1, mtu3)); 7318 } else { 7319 return (mtu1); 7320 } 7321 } 7322 } else { 7323 if (mtu2 > 0) { 7324 if (mtu3 > 0) { 7325 return (min(mtu2, mtu3)); 7326 } else { 7327 return (mtu2); 7328 } 7329 } else { 7330 return (mtu3); 7331 } 7332 } 7333 } 7334 7335 void 7336 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu) 7337 { 7338 struct in_conninfo inc; 7339 7340 memset(&inc, 0, sizeof(struct in_conninfo)); 7341 inc.inc_fibnum = fibnum; 7342 switch (addr->sa.sa_family) { 7343 #ifdef INET 7344 case AF_INET: 7345 inc.inc_faddr = addr->sin.sin_addr; 7346 break; 7347 #endif 7348 #ifdef INET6 7349 case AF_INET6: 7350 inc.inc_flags |= INC_ISIPV6; 7351 inc.inc6_faddr = addr->sin6.sin6_addr; 7352 break; 7353 #endif 7354 default: 7355 return; 7356 } 7357 tcp_hc_updatemtu(&inc, (u_long)mtu); 7358 } 7359 7360 uint32_t 7361 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum) 7362 { 7363 struct in_conninfo inc; 7364 7365 memset(&inc, 0, sizeof(struct in_conninfo)); 7366 inc.inc_fibnum = fibnum; 7367 switch (addr->sa.sa_family) { 7368 #ifdef INET 7369 case AF_INET: 7370 inc.inc_faddr = addr->sin.sin_addr; 7371 break; 7372 #endif 7373 #ifdef INET6 7374 case AF_INET6: 7375 inc.inc_flags |= INC_ISIPV6; 7376 inc.inc6_faddr = addr->sin6.sin6_addr; 7377 break; 7378 #endif 7379 default: 7380 return (0); 7381 } 7382 return ((uint32_t)tcp_hc_getmtu(&inc)); 7383 } 7384