/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}

void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code the time to be positive and
 * know that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
	uint64_t temp;
	uint32_t ticks;

	if (hz == 1000) {
		ticks = msecs;
	} else {
		temp = (((uint64_t)msecs * hz) + 999) / 1000;
		if (temp > UINT32_MAX) {
			ticks = UINT32_MAX;
		} else {
			ticks = (uint32_t)temp;
		}
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t msecs;

	if (hz == 1000) {
		msecs = ticks;
	} else {
		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
		if (temp > UINT32_MAX) {
			msecs = UINT32_MAX;
		} else {
			msecs = (uint32_t)temp;
		}
	}
	return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
	uint64_t temp;
	uint32_t ticks;

	temp = (uint64_t)secs * hz;
	if (temp > UINT32_MAX) {
		ticks = UINT32_MAX;
	} else {
		ticks = (uint32_t)temp;
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t secs;

	temp = ((uint64_t)ticks + (hz - 1)) / hz;
	if (temp > UINT32_MAX) {
		secs = UINT32_MAX;
	} else {
		secs = (uint32_t)temp;
	}
	return (secs);
}
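
/*
 * Worked example of the round-up behaviour above (illustrative only,
 * assuming a kernel running with hz = 128): sctp_msecs_to_ticks(1)
 * yields (1 * 128 + 999) / 1000 = 1 tick rather than 0, and
 * sctp_ticks_to_msecs(1) yields (1 * 1000 + 127) / 128 = 8 msecs.
 * Any positive input therefore maps to a positive result, which is
 * what the callers rely on; results that would overflow are clamped
 * to UINT32_MAX.
 */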

/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};
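
/*
 * Illustrative examples for the two lookup helpers below (not an
 * exhaustive specification): with the table above,
 * sctp_get_prev_mtu(1500) returns 1492 and sctp_get_next_mtu(1500)
 * returns 1536. A value at or below 68 is simply rounded down to a
 * multiple of 4 by sctp_get_prev_mtu(), and a value at or above 65532
 * is rounded down to a multiple of 4 by sctp_get_next_mtu().
 */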

/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4
 * smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	val &= 0xfffffffc;
	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 hash over our good random numbers and
	 * our counter. The result becomes our good random numbers and we
	 * then set up to give these out. Note that we do no locking to
	 * protect this. This is OK, since if competing callers get here we
	 * will just mix more gobbledygook into the random store, which is
	 * what we want. There is a danger that two callers will use the
	 * same random numbers, but that's OK too since that is random as
	 * well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}
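
/*
 * Note on sctp_select_a_tag() (descriptive only): the loop above never
 * returns 0, since a verification tag of zero is reserved, and when
 * 'check' is set each candidate is additionally vetted against the
 * current time with sctp_is_vtag_good() before it is accepted.
 */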

int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&inp->sctp_ep);
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimistic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	/* This will need to be adjusted */
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = inp->sctp_frag_point;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}

static void
sctp_iterator_work(struct sctp_iterator *it)
{
	struct epoch_tracker et;
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

	NET_EPOCH_ENTER(et);
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		NET_EPOCH_EXIT(et);
		return;
	}
select_a_new_ep:
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		    ("%s: stcb %p does not belong to inp %p, but inp %p",
		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}

static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL,	/* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Freeing if we are stopping or put back on the
			 * addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				sctp_asconf_iterator_end(asc, 0);
			} else {
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}
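
/*
 * Note on sctp_handle_addr_wq() (descriptive only): it drains the
 * global address work queue onto a private list and hands that list
 * to the PCB iterator, so address additions and deletions reported by
 * the routing socket are applied to every matching endpoint
 * asynchronously. If the iterator cannot be started, the entries are
 * either released (when the stack is shutting down) or put back on
 * the work queue for a later attempt.
 */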

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */

void
sctp_timeout_handler(void *t)
{
	struct epoch_tracker et;
	struct timeval tv;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
	int type;
	int i, secret;
	bool did_output, released_asoc_reference;

	/*
	 * If inp, stcb or net are not NULL, then references to these were
	 * added when the timer was started, and must be released before
	 * this function returns.
	 */
	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	NET_EPOCH_ENTER(et);
	released_asoc_reference = false;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	KASSERT(tmr->self == NULL || tmr->self == tmr,
	    ("sctp_timeout_handler: tmr->self corrupted"));
	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
	    ("sctp_timeout_handler: invalid timer type %d", tmr->type));
	type = tmr->type;
	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
	    type, stcb, stcb->sctp_ep));
	tmr->stopped_from = 0xa001;
	if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to CLOSED association.\n",
		    type);
		goto out_decr;
	}
	tmr->stopped_from = 0xa002;
	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to not being active.\n",
		    type);
		goto out_decr;
	}

	tmr->stopped_from = 0xa003;
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/*
		 * Release reference so that association can be freed if
		 * necessary below. This is safe now that we have acquired
		 * the lock.
		 */
		atomic_add_int(&stcb->asoc.refcnt, -1);
		released_asoc_reference = true;
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d handler exiting due to CLOSED association.\n",
			    type);
			goto out;
		}
	} else if (inp != NULL) {
		SCTP_INP_WLOCK(inp);
	} else {
		SCTP_WQ_ADDR_LOCK();
	}

	/* Record in stopped_from which timeout occurred. */
	tmr->stopped_from = type;
	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_SEND:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		did_output = true;
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * Safeguard. If there are chunks on the sent queue
			 * but no timers running, something is wrong... so
			 * we start a timer on the first chunk on the sent
			 * queue on whatever net it is sent to.
			 */
1834 */ 1835 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1836 if (chk->whoTo != NULL) { 1837 break; 1838 } 1839 } 1840 if (chk != NULL) { 1841 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1842 } 1843 } 1844 break; 1845 case SCTP_TIMER_TYPE_INIT: 1846 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1847 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1848 type, inp, stcb, net)); 1849 SCTP_STAT_INCR(sctps_timoinit); 1850 stcb->asoc.timoinit++; 1851 if (sctp_t1init_timer(inp, stcb, net)) { 1852 /* no need to unlock on tcb its gone */ 1853 goto out_decr; 1854 } 1855 did_output = false; 1856 break; 1857 case SCTP_TIMER_TYPE_RECV: 1858 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1859 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1860 type, inp, stcb, net)); 1861 SCTP_STAT_INCR(sctps_timosack); 1862 stcb->asoc.timosack++; 1863 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1864 #ifdef SCTP_AUDITING_ENABLED 1865 sctp_auditing(4, inp, stcb, NULL); 1866 #endif 1867 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1868 did_output = true; 1869 break; 1870 case SCTP_TIMER_TYPE_SHUTDOWN: 1871 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1872 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1873 type, inp, stcb, net)); 1874 SCTP_STAT_INCR(sctps_timoshutdown); 1875 stcb->asoc.timoshutdown++; 1876 if (sctp_shutdown_timer(inp, stcb, net)) { 1877 /* no need to unlock on tcb its gone */ 1878 goto out_decr; 1879 } 1880 #ifdef SCTP_AUDITING_ENABLED 1881 sctp_auditing(4, inp, stcb, net); 1882 #endif 1883 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1884 did_output = true; 1885 break; 1886 case SCTP_TIMER_TYPE_HEARTBEAT: 1887 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1888 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1889 type, inp, stcb, net)); 1890 SCTP_STAT_INCR(sctps_timoheartbeat); 1891 stcb->asoc.timoheartbeat++; 1892 if (sctp_heartbeat_timer(inp, stcb, net)) { 1893 /* no need to unlock on tcb its gone */ 1894 goto out_decr; 1895 } 1896 #ifdef SCTP_AUDITING_ENABLED 1897 sctp_auditing(4, inp, stcb, net); 1898 #endif 1899 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1900 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1901 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1902 did_output = true; 1903 } else { 1904 did_output = false; 1905 } 1906 break; 1907 case SCTP_TIMER_TYPE_COOKIE: 1908 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1909 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1910 type, inp, stcb, net)); 1911 SCTP_STAT_INCR(sctps_timocookie); 1912 stcb->asoc.timocookie++; 1913 if (sctp_cookie_timer(inp, stcb, net)) { 1914 /* no need to unlock on tcb its gone */ 1915 goto out_decr; 1916 } 1917 #ifdef SCTP_AUDITING_ENABLED 1918 sctp_auditing(4, inp, stcb, net); 1919 #endif 1920 /* 1921 * We consider T3 and Cookie timer pretty much the same with 1922 * respect to where from in chunk_output. 
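* Both therefore pass SCTP_OUTPUT_FROM_T3 as the from-argument to
* sctp_chunk_output() below.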
1923 */ 1924 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1925 did_output = true; 1926 break; 1927 case SCTP_TIMER_TYPE_NEWCOOKIE: 1928 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1929 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1930 type, inp, stcb, net)); 1931 SCTP_STAT_INCR(sctps_timosecret); 1932 (void)SCTP_GETTIME_TIMEVAL(&tv); 1933 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1934 inp->sctp_ep.last_secret_number = 1935 inp->sctp_ep.current_secret_number; 1936 inp->sctp_ep.current_secret_number++; 1937 if (inp->sctp_ep.current_secret_number >= 1938 SCTP_HOW_MANY_SECRETS) { 1939 inp->sctp_ep.current_secret_number = 0; 1940 } 1941 secret = (int)inp->sctp_ep.current_secret_number; 1942 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1943 inp->sctp_ep.secret_key[secret][i] = 1944 sctp_select_initial_TSN(&inp->sctp_ep); 1945 } 1946 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1947 did_output = false; 1948 break; 1949 case SCTP_TIMER_TYPE_PATHMTURAISE: 1950 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1951 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1952 type, inp, stcb, net)); 1953 SCTP_STAT_INCR(sctps_timopathmtu); 1954 sctp_pathmtu_timer(inp, stcb, net); 1955 did_output = false; 1956 break; 1957 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1958 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1959 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1960 type, inp, stcb, net)); 1961 if (sctp_shutdownack_timer(inp, stcb, net)) { 1962 /* no need to unlock on tcb its gone */ 1963 goto out_decr; 1964 } 1965 SCTP_STAT_INCR(sctps_timoshutdownack); 1966 stcb->asoc.timoshutdownack++; 1967 #ifdef SCTP_AUDITING_ENABLED 1968 sctp_auditing(4, inp, stcb, net); 1969 #endif 1970 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1971 did_output = true; 1972 break; 1973 case SCTP_TIMER_TYPE_ASCONF: 1974 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1975 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1976 type, inp, stcb, net)); 1977 SCTP_STAT_INCR(sctps_timoasconf); 1978 if (sctp_asconf_timer(inp, stcb, net)) { 1979 /* no need to unlock on tcb its gone */ 1980 goto out_decr; 1981 } 1982 #ifdef SCTP_AUDITING_ENABLED 1983 sctp_auditing(4, inp, stcb, net); 1984 #endif 1985 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 1986 did_output = true; 1987 break; 1988 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1989 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1990 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1991 type, inp, stcb, net)); 1992 SCTP_STAT_INCR(sctps_timoshutdownguard); 1993 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 1994 "Shutdown guard timer expired"); 1995 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 1996 /* no need to unlock on tcb its gone */ 1997 goto out_decr; 1998 case SCTP_TIMER_TYPE_AUTOCLOSE: 1999 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2000 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2001 type, inp, stcb, net)); 2002 SCTP_STAT_INCR(sctps_timoautoclose); 2003 sctp_autoclose_timer(inp, stcb); 2004 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2005 did_output = true; 2006 break; 2007 case SCTP_TIMER_TYPE_STRRESET: 2008 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2009 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2010 type, inp, stcb, net)); 2011 SCTP_STAT_INCR(sctps_timostrmrst); 2012 if (sctp_strreset_timer(inp, stcb)) { 
2013 /* no need to unlock on tcb its gone */ 2014 goto out_decr; 2015 } 2016 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2017 did_output = true; 2018 break; 2019 case SCTP_TIMER_TYPE_INPKILL: 2020 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2021 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2022 type, inp, stcb, net)); 2023 SCTP_STAT_INCR(sctps_timoinpkill); 2024 /* 2025 * special case, take away our increment since WE are the 2026 * killer 2027 */ 2028 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2029 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2030 SCTP_INP_DECR_REF(inp); 2031 SCTP_INP_WUNLOCK(inp); 2032 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2033 SCTP_CALLED_FROM_INPKILL_TIMER); 2034 inp = NULL; 2035 goto out_decr; 2036 case SCTP_TIMER_TYPE_ASOCKILL: 2037 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2038 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2039 type, inp, stcb, net)); 2040 SCTP_STAT_INCR(sctps_timoassockill); 2041 /* Can we free it yet? */ 2042 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2043 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2044 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2045 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2046 /* 2047 * free asoc, always unlocks (or destroy's) so prevent 2048 * duplicate unlock or unlock of a free mtx :-0 2049 */ 2050 stcb = NULL; 2051 goto out_decr; 2052 case SCTP_TIMER_TYPE_ADDR_WQ: 2053 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2054 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2055 type, inp, stcb, net)); 2056 sctp_handle_addr_wq(); 2057 did_output = true; 2058 break; 2059 case SCTP_TIMER_TYPE_PRIM_DELETED: 2060 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2061 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2062 type, inp, stcb, net)); 2063 SCTP_STAT_INCR(sctps_timodelprim); 2064 sctp_delete_prim_timer(inp, stcb); 2065 did_output = false; 2066 break; 2067 default: 2068 #ifdef INVARIANTS 2069 panic("Unknown timer type %d", type); 2070 #else 2071 goto out; 2072 #endif 2073 } 2074 #ifdef SCTP_AUDITING_ENABLED 2075 sctp_audit_log(0xF1, (uint8_t)type); 2076 if (inp != NULL) 2077 sctp_auditing(5, inp, stcb, net); 2078 #endif 2079 if (did_output && (stcb != NULL)) { 2080 /* 2081 * Now we need to clean up the control chunk chain if an 2082 * ECNE is on it. It must be marked as UNSENT again so next 2083 * call will continue to send it until such time that we get 2084 * a CWR, to remove it. It is, however, less likely that we 2085 * will find a ecn echo on the chain though. 2086 */ 2087 sctp_fix_ecn_echo(&stcb->asoc); 2088 } 2089 out: 2090 if (stcb != NULL) { 2091 SCTP_TCB_UNLOCK(stcb); 2092 } else if (inp != NULL) { 2093 SCTP_INP_WUNLOCK(inp); 2094 } else { 2095 SCTP_WQ_ADDR_UNLOCK(); 2096 } 2097 2098 out_decr: 2099 /* These reference counts were incremented in sctp_timer_start(). */ 2100 if (inp != NULL) { 2101 SCTP_INP_DECR_REF(inp); 2102 } 2103 if ((stcb != NULL) && !released_asoc_reference) { 2104 atomic_add_int(&stcb->asoc.refcnt, -1); 2105 } 2106 if (net != NULL) { 2107 sctp_free_remote_addr(net); 2108 } 2109 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2110 CURVNET_RESTORE(); 2111 NET_EPOCH_EXIT(et); 2112 } 2113 2114 /*- 2115 * The following table shows which parameters must be provided 2116 * when calling sctp_timer_start(). For parameters not being 2117 * provided, NULL must be used. 
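* The same requirements are enforced at run time by the per-type argument
* checks in the switch statement below: a violation panics under INVARIANTS
* and otherwise causes the call to return without starting a timer.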
2118 * 2119 * |Name |inp |stcb|net | 2120 * |-----------------------------|----|----|----| 2121 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2122 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2123 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2124 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2125 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2126 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2127 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2128 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2129 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2130 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2131 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2132 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2133 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2134 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2135 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2136 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2137 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2138 * 2139 */ 2140 2141 void 2142 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2143 struct sctp_nets *net) 2144 { 2145 struct sctp_timer *tmr; 2146 uint32_t to_ticks; 2147 uint32_t rndval, jitter; 2148 2149 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2150 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2151 t_type, stcb, stcb->sctp_ep)); 2152 tmr = NULL; 2153 if (stcb != NULL) { 2154 SCTP_TCB_LOCK_ASSERT(stcb); 2155 } else if (inp != NULL) { 2156 SCTP_INP_WLOCK_ASSERT(inp); 2157 } else { 2158 SCTP_WQ_ADDR_LOCK_ASSERT(); 2159 } 2160 if (stcb != NULL) { 2161 /* 2162 * Don't restart timer on association that's about to be 2163 * killed. 2164 */ 2165 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2166 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2167 SCTPDBG(SCTP_DEBUG_TIMER2, 2168 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2169 t_type, inp, stcb, net); 2170 return; 2171 } 2172 /* Don't restart timer on net that's been removed. */ 2173 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2174 SCTPDBG(SCTP_DEBUG_TIMER2, 2175 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2176 t_type, inp, stcb, net); 2177 return; 2178 } 2179 } 2180 switch (t_type) { 2181 case SCTP_TIMER_TYPE_SEND: 2182 /* Here we use the RTO timer. */ 2183 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2184 #ifdef INVARIANTS 2185 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2186 t_type, inp, stcb, net); 2187 #else 2188 return; 2189 #endif 2190 } 2191 tmr = &net->rxt_timer; 2192 if (net->RTO == 0) { 2193 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2194 } else { 2195 to_ticks = sctp_msecs_to_ticks(net->RTO); 2196 } 2197 break; 2198 case SCTP_TIMER_TYPE_INIT: 2199 /* 2200 * Here we use the INIT timer default usually about 1 2201 * second. 2202 */ 2203 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2204 #ifdef INVARIANTS 2205 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2206 t_type, inp, stcb, net); 2207 #else 2208 return; 2209 #endif 2210 } 2211 tmr = &net->rxt_timer; 2212 if (net->RTO == 0) { 2213 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2214 } else { 2215 to_ticks = sctp_msecs_to_ticks(net->RTO); 2216 } 2217 break; 2218 case SCTP_TIMER_TYPE_RECV: 2219 /* 2220 * Here we use the Delayed-Ack timer value from the inp, 2221 * ususually about 200ms. 
2222 */ 2223 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2224 #ifdef INVARIANTS 2225 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2226 t_type, inp, stcb, net); 2227 #else 2228 return; 2229 #endif 2230 } 2231 tmr = &stcb->asoc.dack_timer; 2232 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2233 break; 2234 case SCTP_TIMER_TYPE_SHUTDOWN: 2235 /* Here we use the RTO of the destination. */ 2236 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2237 #ifdef INVARIANTS 2238 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2239 t_type, inp, stcb, net); 2240 #else 2241 return; 2242 #endif 2243 } 2244 tmr = &net->rxt_timer; 2245 if (net->RTO == 0) { 2246 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2247 } else { 2248 to_ticks = sctp_msecs_to_ticks(net->RTO); 2249 } 2250 break; 2251 case SCTP_TIMER_TYPE_HEARTBEAT: 2252 /* 2253 * The net is used here so that we can add in the RTO. Even 2254 * though we use a different timer. We also add the HB timer 2255 * PLUS a random jitter. 2256 */ 2257 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2258 #ifdef INVARIANTS 2259 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2260 t_type, inp, stcb, net); 2261 #else 2262 return; 2263 #endif 2264 } 2265 if ((net->dest_state & SCTP_ADDR_NOHB) && 2266 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 2267 SCTPDBG(SCTP_DEBUG_TIMER2, 2268 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2269 t_type, inp, stcb, net); 2270 return; 2271 } 2272 tmr = &net->hb_timer; 2273 if (net->RTO == 0) { 2274 to_ticks = stcb->asoc.initial_rto; 2275 } else { 2276 to_ticks = net->RTO; 2277 } 2278 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2279 jitter = rndval % to_ticks; 2280 to_ticks >>= 1; 2281 if (jitter < (UINT32_MAX - to_ticks)) { 2282 to_ticks += jitter; 2283 } else { 2284 to_ticks = UINT32_MAX; 2285 } 2286 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2287 !(net->dest_state & SCTP_ADDR_PF)) { 2288 if (net->heart_beat_delay < (UINT32_MAX - to_ticks)) { 2289 to_ticks += net->heart_beat_delay; 2290 } else { 2291 to_ticks = UINT32_MAX; 2292 } 2293 } 2294 /* 2295 * Now we must convert the to_ticks that are now in ms to 2296 * ticks. 2297 */ 2298 to_ticks = sctp_msecs_to_ticks(to_ticks); 2299 break; 2300 case SCTP_TIMER_TYPE_COOKIE: 2301 /* 2302 * Here we can use the RTO timer from the network since one 2303 * RTT was complete. If a retransmission happened then we 2304 * will be using the RTO initial value. 2305 */ 2306 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2307 #ifdef INVARIANTS 2308 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2309 t_type, inp, stcb, net); 2310 #else 2311 return; 2312 #endif 2313 } 2314 tmr = &net->rxt_timer; 2315 if (net->RTO == 0) { 2316 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2317 } else { 2318 to_ticks = sctp_msecs_to_ticks(net->RTO); 2319 } 2320 break; 2321 case SCTP_TIMER_TYPE_NEWCOOKIE: 2322 /* 2323 * Nothing needed but the endpoint here ususually about 60 2324 * minutes. 
2325 */ 2326 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2327 #ifdef INVARIANTS 2328 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2329 t_type, inp, stcb, net); 2330 #else 2331 return; 2332 #endif 2333 } 2334 tmr = &inp->sctp_ep.signature_change; 2335 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2336 break; 2337 case SCTP_TIMER_TYPE_PATHMTURAISE: 2338 /* 2339 * Here we use the value found in the EP for PMTUD, 2340 * ususually about 10 minutes. 2341 */ 2342 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2343 #ifdef INVARIANTS 2344 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2345 t_type, inp, stcb, net); 2346 #else 2347 return; 2348 #endif 2349 } 2350 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2351 SCTPDBG(SCTP_DEBUG_TIMER2, 2352 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2353 t_type, inp, stcb, net); 2354 return; 2355 } 2356 tmr = &net->pmtu_timer; 2357 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2358 break; 2359 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2360 /* Here we use the RTO of the destination. */ 2361 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2362 #ifdef INVARIANTS 2363 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2364 t_type, inp, stcb, net); 2365 #else 2366 return; 2367 #endif 2368 } 2369 tmr = &net->rxt_timer; 2370 if (net->RTO == 0) { 2371 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2372 } else { 2373 to_ticks = sctp_msecs_to_ticks(net->RTO); 2374 } 2375 break; 2376 case SCTP_TIMER_TYPE_ASCONF: 2377 /* 2378 * Here the timer comes from the stcb but its value is from 2379 * the net's RTO. 2380 */ 2381 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2382 #ifdef INVARIANTS 2383 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2384 t_type, inp, stcb, net); 2385 #else 2386 return; 2387 #endif 2388 } 2389 tmr = &stcb->asoc.asconf_timer; 2390 if (net->RTO == 0) { 2391 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2392 } else { 2393 to_ticks = sctp_msecs_to_ticks(net->RTO); 2394 } 2395 break; 2396 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2397 /* 2398 * Here we use the endpoints shutdown guard timer usually 2399 * about 3 minutes. 2400 */ 2401 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2402 #ifdef INVARIANTS 2403 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2404 t_type, inp, stcb, net); 2405 #else 2406 return; 2407 #endif 2408 } 2409 tmr = &stcb->asoc.shut_guard_timer; 2410 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2411 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2412 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2413 } else { 2414 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2415 } 2416 } else { 2417 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2418 } 2419 break; 2420 case SCTP_TIMER_TYPE_AUTOCLOSE: 2421 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2422 #ifdef INVARIANTS 2423 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2424 t_type, inp, stcb, net); 2425 #else 2426 return; 2427 #endif 2428 } 2429 tmr = &stcb->asoc.autoclose_timer; 2430 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2431 break; 2432 case SCTP_TIMER_TYPE_STRRESET: 2433 /* 2434 * Here the timer comes from the stcb but its value is from 2435 * the net's RTO. 
2436 */ 2437 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2438 #ifdef INVARIANTS 2439 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2440 t_type, inp, stcb, net); 2441 #else 2442 return; 2443 #endif 2444 } 2445 tmr = &stcb->asoc.strreset_timer; 2446 if (net->RTO == 0) { 2447 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2448 } else { 2449 to_ticks = sctp_msecs_to_ticks(net->RTO); 2450 } 2451 break; 2452 case SCTP_TIMER_TYPE_INPKILL: 2453 /* 2454 * The inp is setup to die. We re-use the signature_chage 2455 * timer since that has stopped and we are in the GONE 2456 * state. 2457 */ 2458 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2459 #ifdef INVARIANTS 2460 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2461 t_type, inp, stcb, net); 2462 #else 2463 return; 2464 #endif 2465 } 2466 tmr = &inp->sctp_ep.signature_change; 2467 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2468 break; 2469 case SCTP_TIMER_TYPE_ASOCKILL: 2470 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2471 #ifdef INVARIANTS 2472 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2473 t_type, inp, stcb, net); 2474 #else 2475 return; 2476 #endif 2477 } 2478 tmr = &stcb->asoc.strreset_timer; 2479 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2480 break; 2481 case SCTP_TIMER_TYPE_ADDR_WQ: 2482 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2483 #ifdef INVARIANTS 2484 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2485 t_type, inp, stcb, net); 2486 #else 2487 return; 2488 #endif 2489 } 2490 /* Only 1 tick away :-) */ 2491 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2492 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2493 break; 2494 case SCTP_TIMER_TYPE_PRIM_DELETED: 2495 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2496 #ifdef INVARIANTS 2497 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2498 t_type, inp, stcb, net); 2499 #else 2500 return; 2501 #endif 2502 } 2503 tmr = &stcb->asoc.delete_prim_timer; 2504 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2505 break; 2506 default: 2507 #ifdef INVARIANTS 2508 panic("Unknown timer type %d", t_type); 2509 #else 2510 return; 2511 #endif 2512 } 2513 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2514 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2515 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2516 /* 2517 * We do NOT allow you to have it already running. If it is, 2518 * we leave the current one up unchanged. 2519 */ 2520 SCTPDBG(SCTP_DEBUG_TIMER2, 2521 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2522 t_type, inp, stcb, net); 2523 return; 2524 } 2525 /* At this point we can proceed. */ 2526 if (t_type == SCTP_TIMER_TYPE_SEND) { 2527 stcb->asoc.num_send_timers_up++; 2528 } 2529 tmr->stopped_from = 0; 2530 tmr->type = t_type; 2531 tmr->ep = (void *)inp; 2532 tmr->tcb = (void *)stcb; 2533 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2534 tmr->net = NULL; 2535 } else { 2536 tmr->net = (void *)net; 2537 } 2538 tmr->self = (void *)tmr; 2539 tmr->vnet = (void *)curvnet; 2540 tmr->ticks = sctp_get_tick_count(); 2541 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2542 SCTPDBG(SCTP_DEBUG_TIMER2, 2543 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2544 t_type, to_ticks, inp, stcb, net); 2545 /* 2546 * If this is a newly scheduled callout, as opposed to a 2547 * rescheduled one, increment relevant reference counts. 
2548 */ 2549 if (tmr->ep != NULL) { 2550 SCTP_INP_INCR_REF(inp); 2551 } 2552 if (tmr->tcb != NULL) { 2553 atomic_add_int(&stcb->asoc.refcnt, 1); 2554 } 2555 if (tmr->net != NULL) { 2556 atomic_add_int(&net->ref_count, 1); 2557 } 2558 } else { 2559 /* 2560 * This should not happen, since we checked for pending 2561 * above. 2562 */ 2563 SCTPDBG(SCTP_DEBUG_TIMER2, 2564 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2565 t_type, to_ticks, inp, stcb, net); 2566 } 2567 return; 2568 } 2569 2570 /*- 2571 * The following table shows which parameters must be provided 2572 * when calling sctp_timer_stop(). For parameters not being 2573 * provided, NULL must be used. 2574 * 2575 * |Name |inp |stcb|net | 2576 * |-----------------------------|----|----|----| 2577 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2578 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2579 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2580 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2581 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2582 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2583 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2584 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2585 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2586 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2587 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2588 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2589 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2590 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2591 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2592 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2593 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2594 * 2595 */ 2596 2597 void 2598 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2599 struct sctp_nets *net, uint32_t from) 2600 { 2601 struct sctp_timer *tmr; 2602 2603 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2604 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2605 t_type, stcb, stcb->sctp_ep)); 2606 if (stcb != NULL) { 2607 SCTP_TCB_LOCK_ASSERT(stcb); 2608 } else if (inp != NULL) { 2609 SCTP_INP_WLOCK_ASSERT(inp); 2610 } else { 2611 SCTP_WQ_ADDR_LOCK_ASSERT(); 2612 } 2613 tmr = NULL; 2614 switch (t_type) { 2615 case SCTP_TIMER_TYPE_SEND: 2616 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2617 #ifdef INVARIANTS 2618 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2619 t_type, inp, stcb, net); 2620 #else 2621 return; 2622 #endif 2623 } 2624 tmr = &net->rxt_timer; 2625 break; 2626 case SCTP_TIMER_TYPE_INIT: 2627 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2628 #ifdef INVARIANTS 2629 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2630 t_type, inp, stcb, net); 2631 #else 2632 return; 2633 #endif 2634 } 2635 tmr = &net->rxt_timer; 2636 break; 2637 case SCTP_TIMER_TYPE_RECV: 2638 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2639 #ifdef INVARIANTS 2640 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2641 t_type, inp, stcb, net); 2642 #else 2643 return; 2644 #endif 2645 } 2646 tmr = &stcb->asoc.dack_timer; 2647 break; 2648 case SCTP_TIMER_TYPE_SHUTDOWN: 2649 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2650 #ifdef INVARIANTS 2651 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2652 t_type, inp, stcb, net); 2653 #else 2654 return; 2655 #endif 2656 } 2657 tmr = &net->rxt_timer; 2658 break; 2659 case SCTP_TIMER_TYPE_HEARTBEAT: 2660 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2661 #ifdef INVARIANTS 2662 panic("sctp_timer_stop of type %d: inp = 
%p, stcb = %p, net = %p", 2663 t_type, inp, stcb, net); 2664 #else 2665 return; 2666 #endif 2667 } 2668 tmr = &net->hb_timer; 2669 break; 2670 case SCTP_TIMER_TYPE_COOKIE: 2671 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2672 #ifdef INVARIANTS 2673 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2674 t_type, inp, stcb, net); 2675 #else 2676 return; 2677 #endif 2678 } 2679 tmr = &net->rxt_timer; 2680 break; 2681 case SCTP_TIMER_TYPE_NEWCOOKIE: 2682 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2683 #ifdef INVARIANTS 2684 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2685 t_type, inp, stcb, net); 2686 #else 2687 return; 2688 #endif 2689 } 2690 tmr = &inp->sctp_ep.signature_change; 2691 break; 2692 case SCTP_TIMER_TYPE_PATHMTURAISE: 2693 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2694 #ifdef INVARIANTS 2695 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2696 t_type, inp, stcb, net); 2697 #else 2698 return; 2699 #endif 2700 } 2701 tmr = &net->pmtu_timer; 2702 break; 2703 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2704 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2705 #ifdef INVARIANTS 2706 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2707 t_type, inp, stcb, net); 2708 #else 2709 return; 2710 #endif 2711 } 2712 tmr = &net->rxt_timer; 2713 break; 2714 case SCTP_TIMER_TYPE_ASCONF: 2715 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2716 #ifdef INVARIANTS 2717 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2718 t_type, inp, stcb, net); 2719 #else 2720 return; 2721 #endif 2722 } 2723 tmr = &stcb->asoc.asconf_timer; 2724 break; 2725 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2726 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2727 #ifdef INVARIANTS 2728 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2729 t_type, inp, stcb, net); 2730 #else 2731 return; 2732 #endif 2733 } 2734 tmr = &stcb->asoc.shut_guard_timer; 2735 break; 2736 case SCTP_TIMER_TYPE_AUTOCLOSE: 2737 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2738 #ifdef INVARIANTS 2739 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2740 t_type, inp, stcb, net); 2741 #else 2742 return; 2743 #endif 2744 } 2745 tmr = &stcb->asoc.autoclose_timer; 2746 break; 2747 case SCTP_TIMER_TYPE_STRRESET: 2748 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2749 #ifdef INVARIANTS 2750 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2751 t_type, inp, stcb, net); 2752 #else 2753 return; 2754 #endif 2755 } 2756 tmr = &stcb->asoc.strreset_timer; 2757 break; 2758 case SCTP_TIMER_TYPE_INPKILL: 2759 /* 2760 * The inp is setup to die. We re-use the signature_chage 2761 * timer since that has stopped and we are in the GONE 2762 * state. 
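* The signature_change timer is shared between the NEWCOOKIE and INPKILL
* uses; the tmr->type check after the switch below prevents stopping a
* shared timer that is currently running for the other user.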
2763 */ 2764 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2765 #ifdef INVARIANTS 2766 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2767 t_type, inp, stcb, net); 2768 #else 2769 return; 2770 #endif 2771 } 2772 tmr = &inp->sctp_ep.signature_change; 2773 break; 2774 case SCTP_TIMER_TYPE_ASOCKILL: 2775 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2776 #ifdef INVARIANTS 2777 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2778 t_type, inp, stcb, net); 2779 #else 2780 return; 2781 #endif 2782 } 2783 tmr = &stcb->asoc.strreset_timer; 2784 break; 2785 case SCTP_TIMER_TYPE_ADDR_WQ: 2786 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2787 #ifdef INVARIANTS 2788 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2789 t_type, inp, stcb, net); 2790 #else 2791 return; 2792 #endif 2793 } 2794 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2795 break; 2796 case SCTP_TIMER_TYPE_PRIM_DELETED: 2797 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2798 #ifdef INVARIANTS 2799 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2800 t_type, inp, stcb, net); 2801 #else 2802 return; 2803 #endif 2804 } 2805 tmr = &stcb->asoc.delete_prim_timer; 2806 break; 2807 default: 2808 #ifdef INVARIANTS 2809 panic("Unknown timer type %d", t_type); 2810 #else 2811 return; 2812 #endif 2813 } 2814 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2815 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2816 (tmr->type != t_type)) { 2817 /* 2818 * Ok we have a timer that is under joint use. Cookie timer 2819 * per chance with the SEND timer. We therefore are NOT 2820 * running the timer that the caller wants stopped. So just 2821 * return. 2822 */ 2823 SCTPDBG(SCTP_DEBUG_TIMER2, 2824 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2825 t_type, inp, stcb, net); 2826 return; 2827 } 2828 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2829 stcb->asoc.num_send_timers_up--; 2830 if (stcb->asoc.num_send_timers_up < 0) { 2831 stcb->asoc.num_send_timers_up = 0; 2832 } 2833 } 2834 tmr->self = NULL; 2835 tmr->stopped_from = from; 2836 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2837 KASSERT(tmr->ep == inp, 2838 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2839 t_type, inp, tmr->ep)); 2840 KASSERT(tmr->tcb == stcb, 2841 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2842 t_type, stcb, tmr->tcb)); 2843 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2844 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2845 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2846 t_type, net, tmr->net)); 2847 SCTPDBG(SCTP_DEBUG_TIMER2, 2848 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2849 t_type, inp, stcb, net); 2850 /* 2851 * If the timer was actually stopped, decrement reference 2852 * counts that were incremented in sctp_timer_start(). 2853 */ 2854 if (tmr->ep != NULL) { 2855 SCTP_INP_DECR_REF(inp); 2856 tmr->ep = NULL; 2857 } 2858 if (tmr->tcb != NULL) { 2859 atomic_add_int(&stcb->asoc.refcnt, -1); 2860 tmr->tcb = NULL; 2861 } 2862 if (tmr->net != NULL) { 2863 /* 2864 * Can't use net, since it doesn't work for 2865 * SCTP_TIMER_TYPE_ASCONF. 
2866 */ 2867 sctp_free_remote_addr((struct sctp_nets *)tmr->net); 2868 tmr->net = NULL; 2869 } 2870 } else { 2871 SCTPDBG(SCTP_DEBUG_TIMER2, 2872 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2873 t_type, inp, stcb, net); 2874 } 2875 return; 2876 } 2877 2878 uint32_t 2879 sctp_calculate_len(struct mbuf *m) 2880 { 2881 uint32_t tlen = 0; 2882 struct mbuf *at; 2883 2884 at = m; 2885 while (at) { 2886 tlen += SCTP_BUF_LEN(at); 2887 at = SCTP_BUF_NEXT(at); 2888 } 2889 return (tlen); 2890 } 2891 2892 void 2893 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2894 struct sctp_association *asoc, uint32_t mtu) 2895 { 2896 /* 2897 * Reset the P-MTU size on this association, this involves changing 2898 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2899 * allow the DF flag to be cleared. 2900 */ 2901 struct sctp_tmit_chunk *chk; 2902 unsigned int eff_mtu, ovh; 2903 2904 asoc->smallest_mtu = mtu; 2905 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2906 ovh = SCTP_MIN_OVERHEAD; 2907 } else { 2908 ovh = SCTP_MIN_V4_OVERHEAD; 2909 } 2910 eff_mtu = mtu - ovh; 2911 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2912 if (chk->send_size > eff_mtu) { 2913 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2914 } 2915 } 2916 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2917 if (chk->send_size > eff_mtu) { 2918 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2919 } 2920 } 2921 } 2922 2923 /* 2924 * Given an association and starting time of the current RTT period, update 2925 * RTO in number of msecs. net should point to the current network. 2926 * Return 1, if an RTO update was performed, return 0 if no update was 2927 * performed due to invalid starting point. 2928 */ 2929 2930 int 2931 sctp_calculate_rto(struct sctp_tcb *stcb, 2932 struct sctp_association *asoc, 2933 struct sctp_nets *net, 2934 struct timeval *old, 2935 int rtt_from_sack) 2936 { 2937 struct timeval now; 2938 uint64_t rtt_us; /* RTT in us */ 2939 int32_t rtt; /* RTT in ms */ 2940 uint32_t new_rto; 2941 int first_measure = 0; 2942 2943 /************************/ 2944 /* 1. calculate new RTT */ 2945 /************************/ 2946 /* get the current time */ 2947 if (stcb->asoc.use_precise_time) { 2948 (void)SCTP_GETPTIME_TIMEVAL(&now); 2949 } else { 2950 (void)SCTP_GETTIME_TIMEVAL(&now); 2951 } 2952 if ((old->tv_sec > now.tv_sec) || 2953 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2954 /* The starting point is in the future. */ 2955 return (0); 2956 } 2957 timevalsub(&now, old); 2958 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2959 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2960 /* The RTT is larger than a sane value. */ 2961 return (0); 2962 } 2963 /* store the current RTT in us */ 2964 net->rtt = rtt_us; 2965 /* compute rtt in ms */ 2966 rtt = (int32_t)(net->rtt / 1000); 2967 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2968 /* 2969 * Tell the CC module that a new update has just occurred 2970 * from a sack 2971 */ 2972 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2973 } 2974 /* 2975 * Do we need to determine the lan? We do this only on sacks i.e. 2976 * RTT being determined from data not non-data (HB/INIT->INITACK). 2977 */ 2978 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2979 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2980 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2981 net->lan_type = SCTP_LAN_INTERNET; 2982 } else { 2983 net->lan_type = SCTP_LAN_LOCAL; 2984 } 2985 } 2986 2987 /***************************/ 2988 /* 2. 
update RTTVAR & SRTT */ 2989 /***************************/ 2990 /*- 2991 * Compute the scaled average lastsa and the 2992 * scaled variance lastsv as described in van Jacobson 2993 * Paper "Congestion Avoidance and Control", Annex A. 2994 * 2995 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2996 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2997 */ 2998 if (net->RTO_measured) { 2999 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 3000 net->lastsa += rtt; 3001 if (rtt < 0) { 3002 rtt = -rtt; 3003 } 3004 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 3005 net->lastsv += rtt; 3006 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3007 rto_logging(net, SCTP_LOG_RTTVAR); 3008 } 3009 } else { 3010 /* First RTO measurment */ 3011 net->RTO_measured = 1; 3012 first_measure = 1; 3013 net->lastsa = rtt << SCTP_RTT_SHIFT; 3014 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3015 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3016 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3017 } 3018 } 3019 if (net->lastsv == 0) { 3020 net->lastsv = SCTP_CLOCK_GRANULARITY; 3021 } 3022 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3023 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3024 (stcb->asoc.sat_network_lockout == 0)) { 3025 stcb->asoc.sat_network = 1; 3026 } else if ((!first_measure) && stcb->asoc.sat_network) { 3027 stcb->asoc.sat_network = 0; 3028 stcb->asoc.sat_network_lockout = 1; 3029 } 3030 /* bound it, per C6/C7 in Section 5.3.1 */ 3031 if (new_rto < stcb->asoc.minrto) { 3032 new_rto = stcb->asoc.minrto; 3033 } 3034 if (new_rto > stcb->asoc.maxrto) { 3035 new_rto = stcb->asoc.maxrto; 3036 } 3037 net->RTO = new_rto; 3038 return (1); 3039 } 3040 3041 /* 3042 * return a pointer to a contiguous piece of data from the given mbuf chain 3043 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3044 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 3045 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 3046 */ 3047 caddr_t 3048 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3049 { 3050 uint32_t count; 3051 uint8_t *ptr; 3052 3053 ptr = in_ptr; 3054 if ((off < 0) || (len <= 0)) 3055 return (NULL); 3056 3057 /* find the desired start location */ 3058 while ((m != NULL) && (off > 0)) { 3059 if (off < SCTP_BUF_LEN(m)) 3060 break; 3061 off -= SCTP_BUF_LEN(m); 3062 m = SCTP_BUF_NEXT(m); 3063 } 3064 if (m == NULL) 3065 return (NULL); 3066 3067 /* is the current mbuf large enough (eg. contiguous)? */ 3068 if ((SCTP_BUF_LEN(m) - off) >= len) { 3069 return (mtod(m, caddr_t)+off); 3070 } else { 3071 /* else, it spans more than one mbuf, so save a temp copy... 
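* The loop below copies min(len, bytes left in the current mbuf) from each
* mbuf in turn into the caller-supplied buffer and returns NULL if the chain
* is exhausted before len bytes have been gathered.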
*/ 3072 while ((m != NULL) && (len > 0)) { 3073 count = min(SCTP_BUF_LEN(m) - off, len); 3074 memcpy(ptr, mtod(m, caddr_t)+off, count); 3075 len -= count; 3076 ptr += count; 3077 off = 0; 3078 m = SCTP_BUF_NEXT(m); 3079 } 3080 if ((m == NULL) && (len > 0)) 3081 return (NULL); 3082 else 3083 return ((caddr_t)in_ptr); 3084 } 3085 } 3086 3087 struct sctp_paramhdr * 3088 sctp_get_next_param(struct mbuf *m, 3089 int offset, 3090 struct sctp_paramhdr *pull, 3091 int pull_limit) 3092 { 3093 /* This just provides a typed signature to Peter's Pull routine */ 3094 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3095 (uint8_t *)pull)); 3096 } 3097 3098 struct mbuf * 3099 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3100 { 3101 struct mbuf *m_last; 3102 caddr_t dp; 3103 3104 if (padlen > 3) { 3105 return (NULL); 3106 } 3107 if (padlen <= M_TRAILINGSPACE(m)) { 3108 /* 3109 * The easy way. We hope the majority of the time we hit 3110 * here :) 3111 */ 3112 m_last = m; 3113 } else { 3114 /* Hard way we must grow the mbuf chain */ 3115 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3116 if (m_last == NULL) { 3117 return (NULL); 3118 } 3119 SCTP_BUF_LEN(m_last) = 0; 3120 SCTP_BUF_NEXT(m_last) = NULL; 3121 SCTP_BUF_NEXT(m) = m_last; 3122 } 3123 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3124 SCTP_BUF_LEN(m_last) += padlen; 3125 memset(dp, 0, padlen); 3126 return (m_last); 3127 } 3128 3129 struct mbuf * 3130 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3131 { 3132 /* find the last mbuf in chain and pad it */ 3133 struct mbuf *m_at; 3134 3135 if (last_mbuf != NULL) { 3136 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3137 } else { 3138 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3139 if (SCTP_BUF_NEXT(m_at) == NULL) { 3140 return (sctp_add_pad_tombuf(m_at, padval)); 3141 } 3142 } 3143 } 3144 return (NULL); 3145 } 3146 3147 static void 3148 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3149 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked) 3150 { 3151 struct mbuf *m_notify; 3152 struct sctp_assoc_change *sac; 3153 struct sctp_queued_to_read *control; 3154 unsigned int notif_len; 3155 uint16_t abort_len; 3156 unsigned int i; 3157 3158 if (stcb == NULL) { 3159 return; 3160 } 3161 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3162 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3163 if (abort != NULL) { 3164 abort_len = ntohs(abort->ch.chunk_length); 3165 /* 3166 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3167 * contiguous. 3168 */ 3169 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3170 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3171 } 3172 } else { 3173 abort_len = 0; 3174 } 3175 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3176 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3177 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3178 notif_len += abort_len; 3179 } 3180 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3181 if (m_notify == NULL) { 3182 /* Retry with smaller value. 
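* The retry drops the optional sac_info payload (the supported-features
* list or the ABORT cause) and allocates just the base sctp_assoc_change
* notification.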
*/ 3183 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3184 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3185 if (m_notify == NULL) { 3186 goto set_error; 3187 } 3188 } 3189 SCTP_BUF_NEXT(m_notify) = NULL; 3190 sac = mtod(m_notify, struct sctp_assoc_change *); 3191 memset(sac, 0, notif_len); 3192 sac->sac_type = SCTP_ASSOC_CHANGE; 3193 sac->sac_flags = 0; 3194 sac->sac_length = sizeof(struct sctp_assoc_change); 3195 sac->sac_state = state; 3196 sac->sac_error = error; 3197 /* XXX verify these stream counts */ 3198 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3199 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3200 sac->sac_assoc_id = sctp_get_associd(stcb); 3201 if (notif_len > sizeof(struct sctp_assoc_change)) { 3202 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3203 i = 0; 3204 if (stcb->asoc.prsctp_supported == 1) { 3205 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3206 } 3207 if (stcb->asoc.auth_supported == 1) { 3208 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3209 } 3210 if (stcb->asoc.asconf_supported == 1) { 3211 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3212 } 3213 if (stcb->asoc.idata_supported == 1) { 3214 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3215 } 3216 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3217 if (stcb->asoc.reconfig_supported == 1) { 3218 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3219 } 3220 sac->sac_length += i; 3221 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3222 memcpy(sac->sac_info, abort, abort_len); 3223 sac->sac_length += abort_len; 3224 } 3225 } 3226 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3227 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3228 0, 0, stcb->asoc.context, 0, 0, 0, 3229 m_notify); 3230 if (control != NULL) { 3231 control->length = SCTP_BUF_LEN(m_notify); 3232 control->spec_flags = M_NOTIFICATION; 3233 /* not that we need this */ 3234 control->tail_mbuf = m_notify; 3235 sctp_add_to_readq(stcb->sctp_ep, stcb, 3236 control, 3237 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3238 so_locked); 3239 } else { 3240 sctp_m_freem(m_notify); 3241 } 3242 } 3243 /* 3244 * For 1-to-1 style sockets, we send up and error when an ABORT 3245 * comes in. 
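* For aborts from the peer, so_error is set to ECONNREFUSED while the
* association is still in COOKIE_WAIT and to ECONNRESET otherwise; for
* locally generated aborts it is set to ETIMEDOUT while the association is
* still being set up and to ECONNABORTED otherwise.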
3246 */ 3247 set_error: 3248 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3249 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3250 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3251 SOCK_LOCK(stcb->sctp_socket); 3252 if (from_peer) { 3253 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3254 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3255 stcb->sctp_socket->so_error = ECONNREFUSED; 3256 } else { 3257 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3258 stcb->sctp_socket->so_error = ECONNRESET; 3259 } 3260 } else { 3261 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 3262 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 3263 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3264 stcb->sctp_socket->so_error = ETIMEDOUT; 3265 } else { 3266 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3267 stcb->sctp_socket->so_error = ECONNABORTED; 3268 } 3269 } 3270 SOCK_UNLOCK(stcb->sctp_socket); 3271 } 3272 /* Wake ANY sleepers */ 3273 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3274 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3275 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3276 socantrcvmore(stcb->sctp_socket); 3277 } 3278 sorwakeup(stcb->sctp_socket); 3279 sowwakeup(stcb->sctp_socket); 3280 } 3281 3282 static void 3283 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3284 struct sockaddr *sa, uint32_t error, int so_locked) 3285 { 3286 struct mbuf *m_notify; 3287 struct sctp_paddr_change *spc; 3288 struct sctp_queued_to_read *control; 3289 3290 if ((stcb == NULL) || 3291 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3292 /* event not enabled */ 3293 return; 3294 } 3295 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3296 if (m_notify == NULL) 3297 return; 3298 SCTP_BUF_LEN(m_notify) = 0; 3299 spc = mtod(m_notify, struct sctp_paddr_change *); 3300 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3301 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3302 spc->spc_flags = 0; 3303 spc->spc_length = sizeof(struct sctp_paddr_change); 3304 switch (sa->sa_family) { 3305 #ifdef INET 3306 case AF_INET: 3307 #ifdef INET6 3308 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3309 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3310 (struct sockaddr_in6 *)&spc->spc_aaddr); 3311 } else { 3312 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3313 } 3314 #else 3315 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3316 #endif 3317 break; 3318 #endif 3319 #ifdef INET6 3320 case AF_INET6: 3321 { 3322 struct sockaddr_in6 *sin6; 3323 3324 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3325 3326 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3327 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3328 if (sin6->sin6_scope_id == 0) { 3329 /* recover scope_id for user */ 3330 (void)sa6_recoverscope(sin6); 3331 } else { 3332 /* clear embedded scope_id for user */ 3333 in6_clearscope(&sin6->sin6_addr); 3334 } 3335 } 3336 break; 3337 } 3338 #endif 3339 default: 3340 /* TSNH */ 3341 break; 3342 } 3343 spc->spc_state = state; 3344 spc->spc_error = error; 3345 spc->spc_assoc_id = sctp_get_associd(stcb); 3346 3347 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3348 SCTP_BUF_NEXT(m_notify) = NULL; 3349 3350 /* append to socket */ 3351 control = sctp_build_readq_entry(stcb, 
stcb->asoc.primary_destination, 3352 0, 0, stcb->asoc.context, 0, 0, 0, 3353 m_notify); 3354 if (control == NULL) { 3355 /* no memory */ 3356 sctp_m_freem(m_notify); 3357 return; 3358 } 3359 control->length = SCTP_BUF_LEN(m_notify); 3360 control->spec_flags = M_NOTIFICATION; 3361 /* not that we need this */ 3362 control->tail_mbuf = m_notify; 3363 sctp_add_to_readq(stcb->sctp_ep, stcb, 3364 control, 3365 &stcb->sctp_socket->so_rcv, 1, 3366 SCTP_READ_LOCK_NOT_HELD, 3367 so_locked); 3368 } 3369 3370 static void 3371 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3372 struct sctp_tmit_chunk *chk, int so_locked) 3373 { 3374 struct mbuf *m_notify; 3375 struct sctp_send_failed *ssf; 3376 struct sctp_send_failed_event *ssfe; 3377 struct sctp_queued_to_read *control; 3378 struct sctp_chunkhdr *chkhdr; 3379 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3380 3381 if ((stcb == NULL) || 3382 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3383 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3384 /* event not enabled */ 3385 return; 3386 } 3387 3388 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3389 notifhdr_len = sizeof(struct sctp_send_failed_event); 3390 } else { 3391 notifhdr_len = sizeof(struct sctp_send_failed); 3392 } 3393 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3394 if (m_notify == NULL) 3395 /* no space left */ 3396 return; 3397 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3398 if (stcb->asoc.idata_supported) { 3399 chkhdr_len = sizeof(struct sctp_idata_chunk); 3400 } else { 3401 chkhdr_len = sizeof(struct sctp_data_chunk); 3402 } 3403 /* Use some defaults in case we can't access the chunk header */ 3404 if (chk->send_size >= chkhdr_len) { 3405 payload_len = chk->send_size - chkhdr_len; 3406 } else { 3407 payload_len = 0; 3408 } 3409 padding_len = 0; 3410 if (chk->data != NULL) { 3411 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3412 if (chkhdr != NULL) { 3413 chk_len = ntohs(chkhdr->chunk_length); 3414 if ((chk_len >= chkhdr_len) && 3415 (chk->send_size >= chk_len) && 3416 (chk->send_size - chk_len < 4)) { 3417 padding_len = chk->send_size - chk_len; 3418 payload_len = chk->send_size - chkhdr_len - padding_len; 3419 } 3420 } 3421 } 3422 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3423 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3424 memset(ssfe, 0, notifhdr_len); 3425 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3426 if (sent) { 3427 ssfe->ssfe_flags = SCTP_DATA_SENT; 3428 } else { 3429 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3430 } 3431 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3432 ssfe->ssfe_error = error; 3433 /* not exactly what the user sent in, but should be close :) */ 3434 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3435 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3436 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3437 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3438 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3439 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3440 } else { 3441 ssf = mtod(m_notify, struct sctp_send_failed *); 3442 memset(ssf, 0, notifhdr_len); 3443 ssf->ssf_type = SCTP_SEND_FAILED; 3444 if (sent) { 3445 ssf->ssf_flags = SCTP_DATA_SENT; 3446 } else { 3447 ssf->ssf_flags = SCTP_DATA_UNSENT; 3448 } 3449 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3450 ssf->ssf_error = 
error; 3451 /* not exactly what the user sent in, but should be close :) */ 3452 ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3453 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3454 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3455 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3456 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3457 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3458 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3459 } 3460 if (chk->data != NULL) { 3461 /* Trim off the sctp chunk header (it should be there) */ 3462 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3463 m_adj(chk->data, chkhdr_len); 3464 m_adj(chk->data, -padding_len); 3465 sctp_mbuf_crush(chk->data); 3466 chk->send_size -= (chkhdr_len + padding_len); 3467 } 3468 } 3469 SCTP_BUF_NEXT(m_notify) = chk->data; 3470 /* Steal off the mbuf */ 3471 chk->data = NULL; 3472 /* 3473 * For this case, we check the actual socket buffer, since the assoc 3474 * is going away we don't want to overfill the socket buffer for a 3475 * non-reader 3476 */ 3477 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3478 sctp_m_freem(m_notify); 3479 return; 3480 } 3481 /* append to socket */ 3482 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3483 0, 0, stcb->asoc.context, 0, 0, 0, 3484 m_notify); 3485 if (control == NULL) { 3486 /* no memory */ 3487 sctp_m_freem(m_notify); 3488 return; 3489 } 3490 control->length = SCTP_BUF_LEN(m_notify); 3491 control->spec_flags = M_NOTIFICATION; 3492 /* not that we need this */ 3493 control->tail_mbuf = m_notify; 3494 sctp_add_to_readq(stcb->sctp_ep, stcb, 3495 control, 3496 &stcb->sctp_socket->so_rcv, 1, 3497 SCTP_READ_LOCK_NOT_HELD, 3498 so_locked); 3499 } 3500 3501 static void 3502 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3503 struct sctp_stream_queue_pending *sp, int so_locked) 3504 { 3505 struct mbuf *m_notify; 3506 struct sctp_send_failed *ssf; 3507 struct sctp_send_failed_event *ssfe; 3508 struct sctp_queued_to_read *control; 3509 int notifhdr_len; 3510 3511 if ((stcb == NULL) || 3512 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3513 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3514 /* event not enabled */ 3515 return; 3516 } 3517 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3518 notifhdr_len = sizeof(struct sctp_send_failed_event); 3519 } else { 3520 notifhdr_len = sizeof(struct sctp_send_failed); 3521 } 3522 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3523 if (m_notify == NULL) { 3524 /* no space left */ 3525 return; 3526 } 3527 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3528 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3529 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3530 memset(ssfe, 0, notifhdr_len); 3531 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3532 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3533 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3534 ssfe->ssfe_error = error; 3535 /* not exactly what the user sent in, but should be close :) */ 3536 ssfe->ssfe_info.snd_sid = sp->sid; 3537 if (sp->some_taken) { 3538 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3539 } else { 3540 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3541 } 3542 ssfe->ssfe_info.snd_ppid = sp->ppid; 3543 ssfe->ssfe_info.snd_context = sp->context; 3544 ssfe->ssfe_info.snd_assoc_id = 
sctp_get_associd(stcb); 3545 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3546 } else { 3547 ssf = mtod(m_notify, struct sctp_send_failed *); 3548 memset(ssf, 0, notifhdr_len); 3549 ssf->ssf_type = SCTP_SEND_FAILED; 3550 ssf->ssf_flags = SCTP_DATA_UNSENT; 3551 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3552 ssf->ssf_error = error; 3553 /* not exactly what the user sent in, but should be close :) */ 3554 ssf->ssf_info.sinfo_stream = sp->sid; 3555 ssf->ssf_info.sinfo_ssn = 0; 3556 if (sp->some_taken) { 3557 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3558 } else { 3559 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3560 } 3561 ssf->ssf_info.sinfo_ppid = sp->ppid; 3562 ssf->ssf_info.sinfo_context = sp->context; 3563 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3564 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3565 } 3566 SCTP_BUF_NEXT(m_notify) = sp->data; 3567 3568 /* Steal off the mbuf */ 3569 sp->data = NULL; 3570 /* 3571 * For this case, we check the actual socket buffer, since the assoc 3572 * is going away we don't want to overfill the socket buffer for a 3573 * non-reader 3574 */ 3575 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3576 sctp_m_freem(m_notify); 3577 return; 3578 } 3579 /* append to socket */ 3580 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3581 0, 0, stcb->asoc.context, 0, 0, 0, 3582 m_notify); 3583 if (control == NULL) { 3584 /* no memory */ 3585 sctp_m_freem(m_notify); 3586 return; 3587 } 3588 control->length = SCTP_BUF_LEN(m_notify); 3589 control->spec_flags = M_NOTIFICATION; 3590 /* not that we need this */ 3591 control->tail_mbuf = m_notify; 3592 sctp_add_to_readq(stcb->sctp_ep, stcb, 3593 control, 3594 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3595 } 3596 3597 static void 3598 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3599 { 3600 struct mbuf *m_notify; 3601 struct sctp_adaptation_event *sai; 3602 struct sctp_queued_to_read *control; 3603 3604 if ((stcb == NULL) || 3605 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3606 /* event not enabled */ 3607 return; 3608 } 3609 3610 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3611 if (m_notify == NULL) 3612 /* no space left */ 3613 return; 3614 SCTP_BUF_LEN(m_notify) = 0; 3615 sai = mtod(m_notify, struct sctp_adaptation_event *); 3616 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3617 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3618 sai->sai_flags = 0; 3619 sai->sai_length = sizeof(struct sctp_adaptation_event); 3620 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3621 sai->sai_assoc_id = sctp_get_associd(stcb); 3622 3623 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3624 SCTP_BUF_NEXT(m_notify) = NULL; 3625 3626 /* append to socket */ 3627 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3628 0, 0, stcb->asoc.context, 0, 0, 0, 3629 m_notify); 3630 if (control == NULL) { 3631 /* no memory */ 3632 sctp_m_freem(m_notify); 3633 return; 3634 } 3635 control->length = SCTP_BUF_LEN(m_notify); 3636 control->spec_flags = M_NOTIFICATION; 3637 /* not that we need this */ 3638 control->tail_mbuf = m_notify; 3639 sctp_add_to_readq(stcb->sctp_ep, stcb, 3640 control, 3641 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3642 } 3643 3644 /* This always must be called with the read-queue LOCKED in the INP */ 3645 static void 3646 
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3647 uint32_t val, int so_locked) 3648 { 3649 struct mbuf *m_notify; 3650 struct sctp_pdapi_event *pdapi; 3651 struct sctp_queued_to_read *control; 3652 struct sockbuf *sb; 3653 3654 if ((stcb == NULL) || 3655 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3656 /* event not enabled */ 3657 return; 3658 } 3659 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3660 return; 3661 } 3662 3663 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3664 if (m_notify == NULL) 3665 /* no space left */ 3666 return; 3667 SCTP_BUF_LEN(m_notify) = 0; 3668 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3669 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3670 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3671 pdapi->pdapi_flags = 0; 3672 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3673 pdapi->pdapi_indication = error; 3674 pdapi->pdapi_stream = (val >> 16); 3675 pdapi->pdapi_seq = (val & 0x0000ffff); 3676 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3677 3678 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3679 SCTP_BUF_NEXT(m_notify) = NULL; 3680 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3681 0, 0, stcb->asoc.context, 0, 0, 0, 3682 m_notify); 3683 if (control == NULL) { 3684 /* no memory */ 3685 sctp_m_freem(m_notify); 3686 return; 3687 } 3688 control->length = SCTP_BUF_LEN(m_notify); 3689 control->spec_flags = M_NOTIFICATION; 3690 /* not that we need this */ 3691 control->tail_mbuf = m_notify; 3692 sb = &stcb->sctp_socket->so_rcv; 3693 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3694 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3695 } 3696 sctp_sballoc(stcb, sb, m_notify); 3697 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3698 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3699 } 3700 control->end_added = 1; 3701 if (stcb->asoc.control_pdapi) 3702 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3703 else { 3704 /* we really should not see this case */ 3705 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3706 } 3707 if (stcb->sctp_ep && stcb->sctp_socket) { 3708 /* This should always be the case */ 3709 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3710 } 3711 } 3712 3713 static void 3714 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3715 { 3716 struct mbuf *m_notify; 3717 struct sctp_shutdown_event *sse; 3718 struct sctp_queued_to_read *control; 3719 3720 /* 3721 * For TCP model AND UDP connected sockets we will send an error up 3722 * when an SHUTDOWN completes 3723 */ 3724 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3725 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3726 /* mark socket closed for read/write and wakeup! 
*/ 3727 socantsendmore(stcb->sctp_socket); 3728 } 3729 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3730 /* event not enabled */ 3731 return; 3732 } 3733 3734 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3735 if (m_notify == NULL) 3736 /* no space left */ 3737 return; 3738 sse = mtod(m_notify, struct sctp_shutdown_event *); 3739 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3740 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3741 sse->sse_flags = 0; 3742 sse->sse_length = sizeof(struct sctp_shutdown_event); 3743 sse->sse_assoc_id = sctp_get_associd(stcb); 3744 3745 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3746 SCTP_BUF_NEXT(m_notify) = NULL; 3747 3748 /* append to socket */ 3749 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3750 0, 0, stcb->asoc.context, 0, 0, 0, 3751 m_notify); 3752 if (control == NULL) { 3753 /* no memory */ 3754 sctp_m_freem(m_notify); 3755 return; 3756 } 3757 control->length = SCTP_BUF_LEN(m_notify); 3758 control->spec_flags = M_NOTIFICATION; 3759 /* not that we need this */ 3760 control->tail_mbuf = m_notify; 3761 sctp_add_to_readq(stcb->sctp_ep, stcb, 3762 control, 3763 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3764 } 3765 3766 static void 3767 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3768 int so_locked) 3769 { 3770 struct mbuf *m_notify; 3771 struct sctp_sender_dry_event *event; 3772 struct sctp_queued_to_read *control; 3773 3774 if ((stcb == NULL) || 3775 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3776 /* event not enabled */ 3777 return; 3778 } 3779 3780 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3781 if (m_notify == NULL) { 3782 /* no space left */ 3783 return; 3784 } 3785 SCTP_BUF_LEN(m_notify) = 0; 3786 event = mtod(m_notify, struct sctp_sender_dry_event *); 3787 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3788 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3789 event->sender_dry_flags = 0; 3790 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3791 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3792 3793 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3794 SCTP_BUF_NEXT(m_notify) = NULL; 3795 3796 /* append to socket */ 3797 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3798 0, 0, stcb->asoc.context, 0, 0, 0, 3799 m_notify); 3800 if (control == NULL) { 3801 /* no memory */ 3802 sctp_m_freem(m_notify); 3803 return; 3804 } 3805 control->length = SCTP_BUF_LEN(m_notify); 3806 control->spec_flags = M_NOTIFICATION; 3807 /* not that we need this */ 3808 control->tail_mbuf = m_notify; 3809 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3810 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3811 } 3812 3813 void 3814 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3815 { 3816 struct mbuf *m_notify; 3817 struct sctp_queued_to_read *control; 3818 struct sctp_stream_change_event *stradd; 3819 3820 if ((stcb == NULL) || 3821 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3822 /* event not enabled */ 3823 return; 3824 } 3825 if ((stcb->asoc.peer_req_out) && flag) { 3826 /* Peer made the request, don't tell the local user */ 3827 stcb->asoc.peer_req_out = 0; 3828 return; 3829 } 3830 stcb->asoc.peer_req_out = 0; 3831 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3832 if (m_notify == NULL) 3833 /* no space left */ 3834 return; 3835 SCTP_BUF_LEN(m_notify) = 0; 3836 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3837 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3838 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3839 stradd->strchange_flags = flag; 3840 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3841 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3842 stradd->strchange_instrms = numberin; 3843 stradd->strchange_outstrms = numberout; 3844 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3845 SCTP_BUF_NEXT(m_notify) = NULL; 3846 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3847 /* no space */ 3848 sctp_m_freem(m_notify); 3849 return; 3850 } 3851 /* append to socket */ 3852 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3853 0, 0, stcb->asoc.context, 0, 0, 0, 3854 m_notify); 3855 if (control == NULL) { 3856 /* no memory */ 3857 sctp_m_freem(m_notify); 3858 return; 3859 } 3860 control->length = SCTP_BUF_LEN(m_notify); 3861 control->spec_flags = M_NOTIFICATION; 3862 /* not that we need this */ 3863 control->tail_mbuf = m_notify; 3864 sctp_add_to_readq(stcb->sctp_ep, stcb, 3865 control, 3866 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3867 } 3868 3869 void 3870 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3871 { 3872 struct mbuf *m_notify; 3873 struct sctp_queued_to_read *control; 3874 struct sctp_assoc_reset_event *strasoc; 3875 3876 if ((stcb == NULL) || 3877 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3878 /* event not enabled */ 3879 return; 3880 } 3881 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3882 if (m_notify == NULL) 3883 /* no space left */ 3884 return; 3885 SCTP_BUF_LEN(m_notify) = 0; 3886 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3887 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3888 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3889 strasoc->assocreset_flags = flag; 3890 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3891 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3892 strasoc->assocreset_local_tsn = sending_tsn; 3893 strasoc->assocreset_remote_tsn = recv_tsn; 3894 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3895 SCTP_BUF_NEXT(m_notify) = NULL; 3896 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3897 /* no space */ 3898 sctp_m_freem(m_notify); 3899 return; 3900 } 3901 /* append to socket */ 3902 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3903 0, 0, stcb->asoc.context, 0, 0, 0, 3904 m_notify); 3905 if (control == NULL) { 3906 /* no memory */ 3907 sctp_m_freem(m_notify); 3908 return; 3909 } 3910 control->length = SCTP_BUF_LEN(m_notify); 3911 control->spec_flags = M_NOTIFICATION; 3912 /* not that we need this */ 3913 control->tail_mbuf = m_notify; 3914 sctp_add_to_readq(stcb->sctp_ep, stcb, 3915 control, 3916 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3917 } 3918 3919 static void 3920 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3921 int number_entries, uint16_t *list, int flag) 3922 { 3923 struct mbuf *m_notify; 3924 struct sctp_queued_to_read 
*control; 3925 struct sctp_stream_reset_event *strreset; 3926 int len; 3927 3928 if ((stcb == NULL) || 3929 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3930 /* event not enabled */ 3931 return; 3932 } 3933 3934 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3935 if (m_notify == NULL) 3936 /* no space left */ 3937 return; 3938 SCTP_BUF_LEN(m_notify) = 0; 3939 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3940 if (len > M_TRAILINGSPACE(m_notify)) { 3941 /* never enough room */ 3942 sctp_m_freem(m_notify); 3943 return; 3944 } 3945 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3946 memset(strreset, 0, len); 3947 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3948 strreset->strreset_flags = flag; 3949 strreset->strreset_length = len; 3950 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3951 if (number_entries) { 3952 int i; 3953 3954 for (i = 0; i < number_entries; i++) { 3955 strreset->strreset_stream_list[i] = ntohs(list[i]); 3956 } 3957 } 3958 SCTP_BUF_LEN(m_notify) = len; 3959 SCTP_BUF_NEXT(m_notify) = NULL; 3960 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3961 /* no space */ 3962 sctp_m_freem(m_notify); 3963 return; 3964 } 3965 /* append to socket */ 3966 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3967 0, 0, stcb->asoc.context, 0, 0, 0, 3968 m_notify); 3969 if (control == NULL) { 3970 /* no memory */ 3971 sctp_m_freem(m_notify); 3972 return; 3973 } 3974 control->length = SCTP_BUF_LEN(m_notify); 3975 control->spec_flags = M_NOTIFICATION; 3976 /* not that we need this */ 3977 control->tail_mbuf = m_notify; 3978 sctp_add_to_readq(stcb->sctp_ep, stcb, 3979 control, 3980 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3981 } 3982 3983 static void 3984 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3985 { 3986 struct mbuf *m_notify; 3987 struct sctp_remote_error *sre; 3988 struct sctp_queued_to_read *control; 3989 unsigned int notif_len; 3990 uint16_t chunk_len; 3991 3992 if ((stcb == NULL) || 3993 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3994 return; 3995 } 3996 if (chunk != NULL) { 3997 chunk_len = ntohs(chunk->ch.chunk_length); 3998 /* 3999 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 4000 * contiguous. 4001 */ 4002 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 4003 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4004 } 4005 } else { 4006 chunk_len = 0; 4007 } 4008 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4009 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4010 if (m_notify == NULL) { 4011 /* Retry with smaller value. 
*/ 4012 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4013 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4014 if (m_notify == NULL) { 4015 return; 4016 } 4017 } 4018 SCTP_BUF_NEXT(m_notify) = NULL; 4019 sre = mtod(m_notify, struct sctp_remote_error *); 4020 memset(sre, 0, notif_len); 4021 sre->sre_type = SCTP_REMOTE_ERROR; 4022 sre->sre_flags = 0; 4023 sre->sre_length = sizeof(struct sctp_remote_error); 4024 sre->sre_error = error; 4025 sre->sre_assoc_id = sctp_get_associd(stcb); 4026 if (notif_len > sizeof(struct sctp_remote_error)) { 4027 memcpy(sre->sre_data, chunk, chunk_len); 4028 sre->sre_length += chunk_len; 4029 } 4030 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4031 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4032 0, 0, stcb->asoc.context, 0, 0, 0, 4033 m_notify); 4034 if (control != NULL) { 4035 control->length = SCTP_BUF_LEN(m_notify); 4036 control->spec_flags = M_NOTIFICATION; 4037 /* not that we need this */ 4038 control->tail_mbuf = m_notify; 4039 sctp_add_to_readq(stcb->sctp_ep, stcb, 4040 control, 4041 &stcb->sctp_socket->so_rcv, 1, 4042 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4043 } else { 4044 sctp_m_freem(m_notify); 4045 } 4046 } 4047 4048 void 4049 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4050 uint32_t error, void *data, int so_locked) 4051 { 4052 if ((stcb == NULL) || 4053 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4054 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4055 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4056 /* If the socket is gone we are out of here */ 4057 return; 4058 } 4059 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4060 return; 4061 } 4062 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4063 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4064 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4065 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4066 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4067 /* Don't report these in front states */ 4068 return; 4069 } 4070 } 4071 switch (notification) { 4072 case SCTP_NOTIFY_ASSOC_UP: 4073 if (stcb->asoc.assoc_up_sent == 0) { 4074 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 4075 stcb->asoc.assoc_up_sent = 1; 4076 } 4077 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4078 sctp_notify_adaptation_layer(stcb); 4079 } 4080 if (stcb->asoc.auth_supported == 0) { 4081 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4082 NULL, so_locked); 4083 } 4084 break; 4085 case SCTP_NOTIFY_ASSOC_DOWN: 4086 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 4087 break; 4088 case SCTP_NOTIFY_INTERFACE_DOWN: 4089 { 4090 struct sctp_nets *net; 4091 4092 net = (struct sctp_nets *)data; 4093 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4094 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4095 break; 4096 } 4097 case SCTP_NOTIFY_INTERFACE_UP: 4098 { 4099 struct sctp_nets *net; 4100 4101 net = (struct sctp_nets *)data; 4102 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4103 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4104 break; 4105 } 4106 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4107 { 4108 struct sctp_nets *net; 4109 4110 net = (struct sctp_nets *)data; 4111 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4112 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4113 break; 4114 } 4115 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4116 
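/*
 * SCTP_NOTIFY_SPECIAL_SP_FAIL reports a message that was still sitting on
 * the stream output queue as a struct sctp_stream_queue_pending and was
 * never broken into chunks, hence the sctp_notify_send_failed2() path;
 * the SENT/UNSENT_DG_FAIL cases below cover struct sctp_tmit_chunk
 * entries that already reached the send or sent queue.
 */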
sctp_notify_send_failed2(stcb, error, 4117 (struct sctp_stream_queue_pending *)data, so_locked); 4118 break; 4119 case SCTP_NOTIFY_SENT_DG_FAIL: 4120 sctp_notify_send_failed(stcb, 1, error, 4121 (struct sctp_tmit_chunk *)data, so_locked); 4122 break; 4123 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4124 sctp_notify_send_failed(stcb, 0, error, 4125 (struct sctp_tmit_chunk *)data, so_locked); 4126 break; 4127 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4128 { 4129 uint32_t val; 4130 4131 val = *((uint32_t *)data); 4132 4133 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4134 break; 4135 } 4136 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4137 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4138 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4139 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked); 4140 } else { 4141 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked); 4142 } 4143 break; 4144 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4145 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4146 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4147 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked); 4148 } else { 4149 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked); 4150 } 4151 break; 4152 case SCTP_NOTIFY_ASSOC_RESTART: 4153 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked); 4154 if (stcb->asoc.auth_supported == 0) { 4155 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4156 NULL, so_locked); 4157 } 4158 break; 4159 case SCTP_NOTIFY_STR_RESET_SEND: 4160 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4161 break; 4162 case SCTP_NOTIFY_STR_RESET_RECV: 4163 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4164 break; 4165 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4166 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4167 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4168 break; 4169 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4170 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4171 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4172 break; 4173 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4174 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4175 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4176 break; 4177 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4178 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4179 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4180 break; 4181 case SCTP_NOTIFY_ASCONF_ADD_IP: 4182 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4183 error, so_locked); 4184 break; 4185 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4186 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4187 error, so_locked); 4188 break; 4189 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4190 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4191 error, so_locked); 4192 break; 4193 case SCTP_NOTIFY_PEER_SHUTDOWN: 4194 sctp_notify_shutdown_event(stcb); 4195 break; 4196 case SCTP_NOTIFY_AUTH_NEW_KEY: 4197 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4198 (uint16_t)(uintptr_t)data, 4199 so_locked); 4200 break; 4201 case SCTP_NOTIFY_AUTH_FREE_KEY: 4202 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4203 (uint16_t)(uintptr_t)data, 4204 so_locked); 4205 break; 4206 case SCTP_NOTIFY_NO_PEER_AUTH: 4207 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4208 
(uint16_t)(uintptr_t)data, 4209 so_locked); 4210 break; 4211 case SCTP_NOTIFY_SENDER_DRY: 4212 sctp_notify_sender_dry_event(stcb, so_locked); 4213 break; 4214 case SCTP_NOTIFY_REMOTE_ERROR: 4215 sctp_notify_remote_error(stcb, error, data); 4216 break; 4217 default: 4218 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4219 __func__, notification, notification); 4220 break; 4221 } /* end switch */ 4222 } 4223 4224 void 4225 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4226 { 4227 struct sctp_association *asoc; 4228 struct sctp_stream_out *outs; 4229 struct sctp_tmit_chunk *chk, *nchk; 4230 struct sctp_stream_queue_pending *sp, *nsp; 4231 int i; 4232 4233 if (stcb == NULL) { 4234 return; 4235 } 4236 asoc = &stcb->asoc; 4237 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4238 /* already being freed */ 4239 return; 4240 } 4241 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4242 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4243 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4244 return; 4245 } 4246 /* now through all the gunk freeing chunks */ 4247 /* sent queue SHOULD be empty */ 4248 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4249 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4250 asoc->sent_queue_cnt--; 4251 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4252 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4253 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4254 #ifdef INVARIANTS 4255 } else { 4256 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4257 #endif 4258 } 4259 } 4260 if (chk->data != NULL) { 4261 sctp_free_bufspace(stcb, asoc, chk, 1); 4262 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4263 error, chk, so_locked); 4264 if (chk->data) { 4265 sctp_m_freem(chk->data); 4266 chk->data = NULL; 4267 } 4268 } 4269 sctp_free_a_chunk(stcb, chk, so_locked); 4270 /* sa_ignore FREED_MEMORY */ 4271 } 4272 /* pending send queue SHOULD be empty */ 4273 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4274 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4275 asoc->send_queue_cnt--; 4276 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4277 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4278 #ifdef INVARIANTS 4279 } else { 4280 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4281 #endif 4282 } 4283 if (chk->data != NULL) { 4284 sctp_free_bufspace(stcb, asoc, chk, 1); 4285 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4286 error, chk, so_locked); 4287 if (chk->data) { 4288 sctp_m_freem(chk->data); 4289 chk->data = NULL; 4290 } 4291 } 4292 sctp_free_a_chunk(stcb, chk, so_locked); 4293 /* sa_ignore FREED_MEMORY */ 4294 } 4295 for (i = 0; i < asoc->streamoutcnt; i++) { 4296 /* For each stream */ 4297 outs = &asoc->strmout[i]; 4298 /* clean up any sends there */ 4299 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4300 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4301 TAILQ_REMOVE(&outs->outqueue, sp, next); 4302 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1); 4303 sctp_free_spbufspace(stcb, asoc, sp); 4304 if (sp->data) { 4305 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4306 error, (void *)sp, so_locked); 4307 if (sp->data) { 4308 sctp_m_freem(sp->data); 4309 sp->data = NULL; 4310 sp->tail_mbuf = NULL; 4311 sp->length = 0; 4312 } 4313 } 4314 if (sp->net) { 4315 sctp_free_remote_addr(sp->net); 4316 sp->net = NULL; 4317 } 4318 /* Free the chunk */ 4319 
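/*
 * The data mbufs, the ULP notification and the net reference were all
 * dealt with above; sctp_free_a_strmoq() is only left to return the
 * sctp_stream_queue_pending structure itself (and drop any auth key
 * reference it may still hold).
 */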
sctp_free_a_strmoq(stcb, sp, so_locked); 4320 /* sa_ignore FREED_MEMORY */ 4321 } 4322 } 4323 } 4324 4325 void 4326 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error, 4327 struct sctp_abort_chunk *abort, int so_locked) 4328 { 4329 if (stcb == NULL) { 4330 return; 4331 } 4332 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4333 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4334 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4335 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 4336 } 4337 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4338 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4339 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4340 return; 4341 } 4342 SCTP_TCB_SEND_LOCK(stcb); 4343 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4344 /* Tell them we lost the asoc */ 4345 sctp_report_all_outbound(stcb, error, so_locked); 4346 SCTP_TCB_SEND_UNLOCK(stcb); 4347 if (from_peer) { 4348 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4349 } else { 4350 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4351 } 4352 } 4353 4354 void 4355 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4356 struct mbuf *m, int iphlen, 4357 struct sockaddr *src, struct sockaddr *dst, 4358 struct sctphdr *sh, struct mbuf *op_err, 4359 uint8_t mflowtype, uint32_t mflowid, 4360 uint32_t vrf_id, uint16_t port) 4361 { 4362 uint32_t vtag; 4363 4364 vtag = 0; 4365 if (stcb != NULL) { 4366 vtag = stcb->asoc.peer_vtag; 4367 vrf_id = stcb->asoc.vrf_id; 4368 } 4369 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4370 mflowtype, mflowid, inp->fibnum, 4371 vrf_id, port); 4372 if (stcb != NULL) { 4373 /* We have a TCB to abort, send notification too */ 4374 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 4375 /* Ok, now lets free it */ 4376 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4377 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4378 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4379 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4380 } 4381 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4382 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4383 } 4384 } 4385 #ifdef SCTP_ASOCLOG_OF_TSNS 4386 void 4387 sctp_print_out_track_log(struct sctp_tcb *stcb) 4388 { 4389 #ifdef NOSIY_PRINTS 4390 int i; 4391 4392 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4393 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4394 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4395 SCTP_PRINTF("None rcvd\n"); 4396 goto none_in; 4397 } 4398 if (stcb->asoc.tsn_in_wrapped) { 4399 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4400 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4401 stcb->asoc.in_tsnlog[i].tsn, 4402 stcb->asoc.in_tsnlog[i].strm, 4403 stcb->asoc.in_tsnlog[i].seq, 4404 stcb->asoc.in_tsnlog[i].flgs, 4405 stcb->asoc.in_tsnlog[i].sz); 4406 } 4407 } 4408 if (stcb->asoc.tsn_in_at) { 4409 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4410 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4411 stcb->asoc.in_tsnlog[i].tsn, 4412 stcb->asoc.in_tsnlog[i].strm, 4413 stcb->asoc.in_tsnlog[i].seq, 4414 stcb->asoc.in_tsnlog[i].flgs, 4415 stcb->asoc.in_tsnlog[i].sz); 4416 } 4417 } 4418 none_in: 4419 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4420 if ((stcb->asoc.tsn_out_at == 0) && 4421 (stcb->asoc.tsn_out_wrapped == 0)) { 4422 SCTP_PRINTF("None sent\n"); 4423 } 4424 if 
(stcb->asoc.tsn_out_wrapped) { 4425 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4426 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4427 stcb->asoc.out_tsnlog[i].tsn, 4428 stcb->asoc.out_tsnlog[i].strm, 4429 stcb->asoc.out_tsnlog[i].seq, 4430 stcb->asoc.out_tsnlog[i].flgs, 4431 stcb->asoc.out_tsnlog[i].sz); 4432 } 4433 } 4434 if (stcb->asoc.tsn_out_at) { 4435 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4436 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4437 stcb->asoc.out_tsnlog[i].tsn, 4438 stcb->asoc.out_tsnlog[i].strm, 4439 stcb->asoc.out_tsnlog[i].seq, 4440 stcb->asoc.out_tsnlog[i].flgs, 4441 stcb->asoc.out_tsnlog[i].sz); 4442 } 4443 } 4444 #endif 4445 } 4446 #endif 4447 4448 void 4449 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4450 struct mbuf *op_err, 4451 int so_locked) 4452 { 4453 4454 if (stcb == NULL) { 4455 /* Got to have a TCB */ 4456 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4457 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4458 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4459 SCTP_CALLED_DIRECTLY_NOCMPSET); 4460 } 4461 } 4462 return; 4463 } 4464 /* notify the peer */ 4465 sctp_send_abort_tcb(stcb, op_err, so_locked); 4466 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4467 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4468 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4469 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4470 } 4471 /* notify the ulp */ 4472 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4473 sctp_abort_notification(stcb, 0, 0, NULL, so_locked); 4474 } 4475 /* now free the asoc */ 4476 #ifdef SCTP_ASOCLOG_OF_TSNS 4477 sctp_print_out_track_log(stcb); 4478 #endif 4479 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4480 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4481 } 4482 4483 void 4484 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4485 struct sockaddr *src, struct sockaddr *dst, 4486 struct sctphdr *sh, struct sctp_inpcb *inp, 4487 struct mbuf *cause, 4488 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4489 uint32_t vrf_id, uint16_t port) 4490 { 4491 struct sctp_chunkhdr *ch, chunk_buf; 4492 unsigned int chk_length; 4493 int contains_init_chunk; 4494 4495 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4496 /* Generate a TO address for future reference */ 4497 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4498 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4499 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4500 SCTP_CALLED_DIRECTLY_NOCMPSET); 4501 } 4502 } 4503 contains_init_chunk = 0; 4504 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4505 sizeof(*ch), (uint8_t *)&chunk_buf); 4506 while (ch != NULL) { 4507 chk_length = ntohs(ch->chunk_length); 4508 if (chk_length < sizeof(*ch)) { 4509 /* break to abort land */ 4510 break; 4511 } 4512 switch (ch->chunk_type) { 4513 case SCTP_INIT: 4514 contains_init_chunk = 1; 4515 break; 4516 case SCTP_PACKET_DROPPED: 4517 /* we don't respond to pkt-dropped */ 4518 return; 4519 case SCTP_ABORT_ASSOCIATION: 4520 /* we don't respond with an ABORT to an ABORT */ 4521 return; 4522 case SCTP_SHUTDOWN_COMPLETE: 4523 /* 4524 * we ignore it since we are not waiting for it and 4525 * peer is gone 4526 */ 4527 return; 4528 case SCTP_SHUTDOWN_ACK: 4529 sctp_send_shutdown_complete2(src, dst, sh, 4530 mflowtype, mflowid, fibnum, 4531 vrf_id, port); 4532 return; 4533 default: 4534 break; 4535 } 4536 offset += SCTP_SIZE32(chk_length); 4537 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4538 sizeof(*ch), (uint8_t 
*)&chunk_buf); 4539 } 4540 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4541 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4542 (contains_init_chunk == 0))) { 4543 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4544 mflowtype, mflowid, fibnum, 4545 vrf_id, port); 4546 } 4547 } 4548 4549 /* 4550 * check the inbound datagram to make sure there is not an abort inside it, 4551 * if there is return 1, else return 0. 4552 */ 4553 int 4554 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill) 4555 { 4556 struct sctp_chunkhdr *ch; 4557 struct sctp_init_chunk *init_chk, chunk_buf; 4558 int offset; 4559 unsigned int chk_length; 4560 4561 offset = iphlen + sizeof(struct sctphdr); 4562 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4563 (uint8_t *)&chunk_buf); 4564 while (ch != NULL) { 4565 chk_length = ntohs(ch->chunk_length); 4566 if (chk_length < sizeof(*ch)) { 4567 /* packet is probably corrupt */ 4568 break; 4569 } 4570 /* we seem to be ok, is it an abort? */ 4571 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4572 /* yep, tell them */ 4573 return (1); 4574 } 4575 if (ch->chunk_type == SCTP_INITIATION) { 4576 /* need to update the Vtag */ 4577 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4578 offset, sizeof(*init_chk), (uint8_t *)&chunk_buf); 4579 if (init_chk != NULL) { 4580 *vtagfill = ntohl(init_chk->init.initiate_tag); 4581 } 4582 } 4583 /* Nope, move to the next chunk */ 4584 offset += SCTP_SIZE32(chk_length); 4585 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4586 sizeof(*ch), (uint8_t *)&chunk_buf); 4587 } 4588 return (0); 4589 } 4590 4591 /* 4592 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4593 * set (i.e. it's 0) so, create this function to compare link local scopes 4594 */ 4595 #ifdef INET6 4596 uint32_t 4597 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4598 { 4599 struct sockaddr_in6 a, b; 4600 4601 /* save copies */ 4602 a = *addr1; 4603 b = *addr2; 4604 4605 if (a.sin6_scope_id == 0) 4606 if (sa6_recoverscope(&a)) { 4607 /* can't get scope, so can't match */ 4608 return (0); 4609 } 4610 if (b.sin6_scope_id == 0) 4611 if (sa6_recoverscope(&b)) { 4612 /* can't get scope, so can't match */ 4613 return (0); 4614 } 4615 if (a.sin6_scope_id != b.sin6_scope_id) 4616 return (0); 4617 4618 return (1); 4619 } 4620 4621 /* 4622 * returns a sockaddr_in6 with embedded scope recovered and removed 4623 */ 4624 struct sockaddr_in6 * 4625 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4626 { 4627 /* check and strip embedded scope junk */ 4628 if (addr->sin6_family == AF_INET6) { 4629 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4630 if (addr->sin6_scope_id == 0) { 4631 *store = *addr; 4632 if (!sa6_recoverscope(store)) { 4633 /* use the recovered scope */ 4634 addr = store; 4635 } 4636 } else { 4637 /* else, return the original "to" addr */ 4638 in6_clearscope(&addr->sin6_addr); 4639 } 4640 } 4641 } 4642 return (addr); 4643 } 4644 #endif 4645 4646 /* 4647 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4648 * if same, 0 if not 4649 */ 4650 int 4651 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4652 { 4653 4654 /* must be valid */ 4655 if (sa1 == NULL || sa2 == NULL) 4656 return (0); 4657 4658 /* must be the same family */ 4659 if (sa1->sa_family != sa2->sa_family) 4660 return (0); 4661 4662 switch (sa1->sa_family) { 4663 #ifdef INET6 4664 case AF_INET6: 4665 { 4666 /* IPv6 addresses */ 4667 struct sockaddr_in6 *sin6_1, *sin6_2; 4668 4669 sin6_1 = (struct sockaddr_in6 *)sa1; 4670 sin6_2 = (struct sockaddr_in6 *)sa2; 4671 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4672 sin6_2)); 4673 } 4674 #endif 4675 #ifdef INET 4676 case AF_INET: 4677 { 4678 /* IPv4 addresses */ 4679 struct sockaddr_in *sin_1, *sin_2; 4680 4681 sin_1 = (struct sockaddr_in *)sa1; 4682 sin_2 = (struct sockaddr_in *)sa2; 4683 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4684 } 4685 #endif 4686 default: 4687 /* we don't do these... */ 4688 return (0); 4689 } 4690 } 4691 4692 void 4693 sctp_print_address(struct sockaddr *sa) 4694 { 4695 #ifdef INET6 4696 char ip6buf[INET6_ADDRSTRLEN]; 4697 #endif 4698 4699 switch (sa->sa_family) { 4700 #ifdef INET6 4701 case AF_INET6: 4702 { 4703 struct sockaddr_in6 *sin6; 4704 4705 sin6 = (struct sockaddr_in6 *)sa; 4706 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4707 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4708 ntohs(sin6->sin6_port), 4709 sin6->sin6_scope_id); 4710 break; 4711 } 4712 #endif 4713 #ifdef INET 4714 case AF_INET: 4715 { 4716 struct sockaddr_in *sin; 4717 unsigned char *p; 4718 4719 sin = (struct sockaddr_in *)sa; 4720 p = (unsigned char *)&sin->sin_addr; 4721 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4722 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4723 break; 4724 } 4725 #endif 4726 default: 4727 SCTP_PRINTF("?\n"); 4728 break; 4729 } 4730 } 4731 4732 void 4733 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4734 struct sctp_inpcb *new_inp, 4735 struct sctp_tcb *stcb, 4736 int waitflags) 4737 { 4738 /* 4739 * go through our old INP and pull off any control structures that 4740 * belong to stcb and move them to the new inp. 4741 */ 4742 struct socket *old_so, *new_so; 4743 struct sctp_queued_to_read *control, *nctl; 4744 struct sctp_readhead tmp_queue; 4745 struct mbuf *m; 4746 int error = 0; 4747 4748 old_so = old_inp->sctp_socket; 4749 new_so = new_inp->sctp_socket; 4750 TAILQ_INIT(&tmp_queue); 4751 error = sblock(&old_so->so_rcv, waitflags); 4752 if (error) { 4753 /* 4754 * Gak, can't get sblock, we have a problem. data will be 4755 * left stranded.. and we don't dare look at it since the 4756 * other thread may be reading something. Oh well, it's a 4757 * screwed up app that does a peeloff OR an accept while 4758 * reading from the main socket... actually it's only the 4759 * peeloff() case, since I think read will fail on a 4760 * listening socket.. 4761 */ 4762 return; 4763 } 4764 /* lock the socket buffers */ 4765 SCTP_INP_READ_LOCK(old_inp); 4766 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4767 /* Pull off all for our target stcb */ 4768 if (control->stcb == stcb) { 4769 /* remove it, we want it */ 4770 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4771 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4772 m = control->data; 4773 while (m) { 4774 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4775 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ?
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4776 } 4777 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4778 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4779 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4780 } 4781 m = SCTP_BUF_NEXT(m); 4782 } 4783 } 4784 } 4785 SCTP_INP_READ_UNLOCK(old_inp); 4786 /* Remove the sb-lock on the old socket */ 4787 4788 sbunlock(&old_so->so_rcv); 4789 /* Now we move them over to the new socket buffer */ 4790 SCTP_INP_READ_LOCK(new_inp); 4791 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4792 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4793 m = control->data; 4794 while (m) { 4795 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4796 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4797 } 4798 sctp_sballoc(stcb, &new_so->so_rcv, m); 4799 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4800 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4801 } 4802 m = SCTP_BUF_NEXT(m); 4803 } 4804 } 4805 SCTP_INP_READ_UNLOCK(new_inp); 4806 } 4807 4808 void 4809 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4810 struct sctp_tcb *stcb, 4811 int so_locked 4812 SCTP_UNUSED 4813 ) 4814 { 4815 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4816 sctp_sorwakeup(inp, inp->sctp_socket); 4817 } 4818 } 4819 4820 void 4821 sctp_add_to_readq(struct sctp_inpcb *inp, 4822 struct sctp_tcb *stcb, 4823 struct sctp_queued_to_read *control, 4824 struct sockbuf *sb, 4825 int end, 4826 int inp_read_lock_held, 4827 int so_locked) 4828 { 4829 /* 4830 * Here we must place the control on the end of the socket read 4831 * queue AND increment sb_cc so that select will work properly on 4832 * read. 4833 */ 4834 struct mbuf *m, *prev = NULL; 4835 4836 if (inp == NULL) { 4837 /* Gak, TSNH!! */ 4838 #ifdef INVARIANTS 4839 panic("Gak, inp NULL on add_to_readq"); 4840 #endif 4841 return; 4842 } 4843 if (inp_read_lock_held == 0) 4844 SCTP_INP_READ_LOCK(inp); 4845 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4846 if (!control->on_strm_q) { 4847 sctp_free_remote_addr(control->whoFrom); 4848 if (control->data) { 4849 sctp_m_freem(control->data); 4850 control->data = NULL; 4851 } 4852 sctp_free_a_readq(stcb, control); 4853 } 4854 if (inp_read_lock_held == 0) 4855 SCTP_INP_READ_UNLOCK(inp); 4856 return; 4857 } 4858 if (!(control->spec_flags & M_NOTIFICATION)) { 4859 atomic_add_int(&inp->total_recvs, 1); 4860 if (!control->do_not_ref_stcb) { 4861 atomic_add_int(&stcb->total_recvs, 1); 4862 } 4863 } 4864 m = control->data; 4865 control->held_length = 0; 4866 control->length = 0; 4867 while (m) { 4868 if (SCTP_BUF_LEN(m) == 0) { 4869 /* Skip mbufs with NO length */ 4870 if (prev == NULL) { 4871 /* First one */ 4872 control->data = sctp_m_free(m); 4873 m = control->data; 4874 } else { 4875 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4876 m = SCTP_BUF_NEXT(prev); 4877 } 4878 if (m == NULL) { 4879 control->tail_mbuf = prev; 4880 } 4881 continue; 4882 } 4883 prev = m; 4884 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4885 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4886 } 4887 sctp_sballoc(stcb, sb, m); 4888 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4889 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 4890 } 4891 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4892 m = SCTP_BUF_NEXT(m); 4893 } 4894 if (prev != NULL) { 4895 control->tail_mbuf = prev; 4896 } else { 4897 /* Everything got collapsed out?? */ 4898 if (!control->on_strm_q) { 4899 sctp_free_remote_addr(control->whoFrom); 4900 sctp_free_a_readq(stcb, control); 4901 } 4902 if (inp_read_lock_held == 0) 4903 SCTP_INP_READ_UNLOCK(inp); 4904 return; 4905 } 4906 if (end) { 4907 control->end_added = 1; 4908 } 4909 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4910 control->on_read_q = 1; 4911 if (inp_read_lock_held == 0) 4912 SCTP_INP_READ_UNLOCK(inp); 4913 if (inp && inp->sctp_socket) { 4914 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4915 } 4916 } 4917 4918 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4919 *************ALTERNATE ROUTING CODE 4920 */ 4921 4922 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4923 *************ALTERNATE ROUTING CODE 4924 */ 4925 4926 struct mbuf * 4927 sctp_generate_cause(uint16_t code, char *info) 4928 { 4929 struct mbuf *m; 4930 struct sctp_gen_error_cause *cause; 4931 size_t info_len; 4932 uint16_t len; 4933 4934 if ((code == 0) || (info == NULL)) { 4935 return (NULL); 4936 } 4937 info_len = strlen(info); 4938 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4939 return (NULL); 4940 } 4941 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4942 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4943 if (m != NULL) { 4944 SCTP_BUF_LEN(m) = len; 4945 cause = mtod(m, struct sctp_gen_error_cause *); 4946 cause->code = htons(code); 4947 cause->length = htons(len); 4948 memcpy(cause->info, info, info_len); 4949 } 4950 return (m); 4951 } 4952 4953 struct mbuf * 4954 sctp_generate_no_user_data_cause(uint32_t tsn) 4955 { 4956 struct mbuf *m; 4957 struct sctp_error_no_user_data *no_user_data_cause; 4958 uint16_t len; 4959 4960 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 4961 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4962 if (m != NULL) { 4963 SCTP_BUF_LEN(m) = len; 4964 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 4965 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 4966 no_user_data_cause->cause.length = htons(len); 4967 no_user_data_cause->tsn = htonl(tsn); 4968 } 4969 return (m); 4970 } 4971 4972 #ifdef SCTP_MBCNT_LOGGING 4973 void 4974 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4975 struct sctp_tmit_chunk *tp1, int chk_cnt) 4976 { 4977 if (tp1->data == NULL) { 4978 return; 4979 } 4980 asoc->chunks_on_out_queue -= chk_cnt; 4981 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 4982 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4983 asoc->total_output_queue_size, 4984 tp1->book_size, 4985 0, 4986 tp1->mbcnt); 4987 } 4988 if (asoc->total_output_queue_size >= tp1->book_size) { 4989 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4990 } else { 4991 asoc->total_output_queue_size = 0; 4992 } 4993 4994 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4995 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4996 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 4997 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 4998 } else { 4999 stcb->sctp_socket->so_snd.sb_cc = 0; 5000 } 5001 } 5002 } 5003 5004 #endif 5005 5006 int 5007 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5008 uint8_t sent, int so_locked) 
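/*
 * Abandon the PR-SCTP message that tp1 belongs to: account for the
 * abandonment, mark every fragment of the message SCTP_FORWARD_TSN_SKIP,
 * release its buffer space and tell the ULP.  Fragments still on the send
 * queue are moved to the sent queue, and anything left on the stream
 * output queue is discarded, queueing a placeholder chunk when one is
 * needed to carry the TSN of the LAST_FRAG so a FORWARD-TSN can cover the
 * whole message.  Returns the amount of book-kept space released.
 */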
5009 { 5010 struct sctp_stream_out *strq; 5011 struct sctp_tmit_chunk *chk = NULL, *tp2; 5012 struct sctp_stream_queue_pending *sp; 5013 uint32_t mid; 5014 uint16_t sid; 5015 uint8_t foundeom = 0; 5016 int ret_sz = 0; 5017 int notdone; 5018 int do_wakeup_routine = 0; 5019 5020 sid = tp1->rec.data.sid; 5021 mid = tp1->rec.data.mid; 5022 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5023 stcb->asoc.abandoned_sent[0]++; 5024 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5025 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5026 #if defined(SCTP_DETAILED_STR_STATS) 5027 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5028 #endif 5029 } else { 5030 stcb->asoc.abandoned_unsent[0]++; 5031 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5032 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5033 #if defined(SCTP_DETAILED_STR_STATS) 5034 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5035 #endif 5036 } 5037 do { 5038 ret_sz += tp1->book_size; 5039 if (tp1->data != NULL) { 5040 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5041 sctp_flight_size_decrease(tp1); 5042 sctp_total_flight_decrease(stcb, tp1); 5043 } 5044 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5045 stcb->asoc.peers_rwnd += tp1->send_size; 5046 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5047 if (sent) { 5048 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5049 } else { 5050 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5051 } 5052 if (tp1->data) { 5053 sctp_m_freem(tp1->data); 5054 tp1->data = NULL; 5055 } 5056 do_wakeup_routine = 1; 5057 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5058 stcb->asoc.sent_queue_cnt_removeable--; 5059 } 5060 } 5061 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5062 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5063 SCTP_DATA_NOT_FRAG) { 5064 /* not frag'ed, we are done */ 5065 notdone = 0; 5066 foundeom = 1; 5067 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5068 /* end of frag, we are done */ 5069 notdone = 0; 5070 foundeom = 1; 5071 } else { 5072 /* 5073 * It's a begin or middle piece, we must mark all of 5074 * it 5075 */ 5076 notdone = 1; 5077 tp1 = TAILQ_NEXT(tp1, sctp_next); 5078 } 5079 } while (tp1 && notdone); 5080 if (foundeom == 0) { 5081 /* 5082 * The multi-part message was scattered across the send and 5083 * sent queue. 5084 */ 5085 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5086 if ((tp1->rec.data.sid != sid) || 5087 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5088 break; 5089 } 5090 /* 5091 * save to chk in case we have some on stream out 5092 * queue. If so and we have an un-transmitted one we 5093 * don't have to fudge the TSN. 5094 */ 5095 chk = tp1; 5096 ret_sz += tp1->book_size; 5097 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5098 if (sent) { 5099 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5100 } else { 5101 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5102 } 5103 if (tp1->data) { 5104 sctp_m_freem(tp1->data); 5105 tp1->data = NULL; 5106 } 5107 /* No flight involved here; book the size to 0 */ 5108 tp1->book_size = 0; 5109 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5110 foundeom = 1; 5111 } 5112 do_wakeup_routine = 1; 5113 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5114 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5115 /* 5116 * on to the sent queue so we can wait for it to be 5117 * passed by.
5118 */ 5119 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5120 sctp_next); 5121 stcb->asoc.send_queue_cnt--; 5122 stcb->asoc.sent_queue_cnt++; 5123 } 5124 } 5125 if (foundeom == 0) { 5126 /* 5127 * Still no eom found. That means there is stuff left on the 5128 * stream out queue.. yuck. 5129 */ 5130 SCTP_TCB_SEND_LOCK(stcb); 5131 strq = &stcb->asoc.strmout[sid]; 5132 sp = TAILQ_FIRST(&strq->outqueue); 5133 if (sp != NULL) { 5134 sp->discard_rest = 1; 5135 /* 5136 * We may need to put a chunk on the queue that 5137 * holds the TSN that would have been sent with the 5138 * LAST bit. 5139 */ 5140 if (chk == NULL) { 5141 /* Yep, we have to */ 5142 sctp_alloc_a_chunk(stcb, chk); 5143 if (chk == NULL) { 5144 /* 5145 * we are hosed. All we can do is 5146 * nothing.. which will cause an 5147 * abort if the peer is paying 5148 * attention. 5149 */ 5150 goto oh_well; 5151 } 5152 memset(chk, 0, sizeof(*chk)); 5153 chk->rec.data.rcv_flags = 0; 5154 chk->sent = SCTP_FORWARD_TSN_SKIP; 5155 chk->asoc = &stcb->asoc; 5156 if (stcb->asoc.idata_supported == 0) { 5157 if (sp->sinfo_flags & SCTP_UNORDERED) { 5158 chk->rec.data.mid = 0; 5159 } else { 5160 chk->rec.data.mid = strq->next_mid_ordered; 5161 } 5162 } else { 5163 if (sp->sinfo_flags & SCTP_UNORDERED) { 5164 chk->rec.data.mid = strq->next_mid_unordered; 5165 } else { 5166 chk->rec.data.mid = strq->next_mid_ordered; 5167 } 5168 } 5169 chk->rec.data.sid = sp->sid; 5170 chk->rec.data.ppid = sp->ppid; 5171 chk->rec.data.context = sp->context; 5172 chk->flags = sp->act_flags; 5173 chk->whoTo = NULL; 5174 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5175 strq->chunks_on_queues++; 5176 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5177 stcb->asoc.sent_queue_cnt++; 5178 stcb->asoc.pr_sctp_cnt++; 5179 } 5180 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5181 if (sp->sinfo_flags & SCTP_UNORDERED) { 5182 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5183 } 5184 if (stcb->asoc.idata_supported == 0) { 5185 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5186 strq->next_mid_ordered++; 5187 } 5188 } else { 5189 if (sp->sinfo_flags & SCTP_UNORDERED) { 5190 strq->next_mid_unordered++; 5191 } else { 5192 strq->next_mid_ordered++; 5193 } 5194 } 5195 oh_well: 5196 if (sp->data) { 5197 /* 5198 * Pull any data to free up the SB and allow 5199 * sender to "add more" while we will throw 5200 * away :-) 5201 */ 5202 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5203 ret_sz += sp->length; 5204 do_wakeup_routine = 1; 5205 sp->some_taken = 1; 5206 sctp_m_freem(sp->data); 5207 sp->data = NULL; 5208 sp->tail_mbuf = NULL; 5209 sp->length = 0; 5210 } 5211 } 5212 SCTP_TCB_SEND_UNLOCK(stcb); 5213 } 5214 if (do_wakeup_routine) { 5215 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5216 } 5217 return (ret_sz); 5218 } 5219 5220 /* 5221 * checks to see if the given address, sa, is one that is currently known by 5222 * the kernel note: can't distinguish the same address on multiple interfaces 5223 * and doesn't handle multiple addresses with different zone/scope id's note: 5224 * ifa_ifwithaddr() compares the entire sockaddr struct 5225 */ 5226 struct sctp_ifa * 5227 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5228 int holds_lock) 5229 { 5230 struct sctp_laddr *laddr; 5231 5232 if (holds_lock == 0) { 5233 SCTP_INP_RLOCK(inp); 5234 } 5235 5236 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5237 if (laddr->ifa == NULL) 5238 continue; 5239 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5240 continue; 5241 
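/*
 * Family-specific match: for INET only sin_addr is compared, for INET6
 * SCTP6_ARE_ADDR_EQUAL() compares the whole IPv6 address.  As the comment
 * above notes, entries that differ only in zone/scope id are not told
 * apart here.
 */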
#ifdef INET 5242 if (addr->sa_family == AF_INET) { 5243 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5244 laddr->ifa->address.sin.sin_addr.s_addr) { 5245 /* found him. */ 5246 break; 5247 } 5248 } 5249 #endif 5250 #ifdef INET6 5251 if (addr->sa_family == AF_INET6) { 5252 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5253 &laddr->ifa->address.sin6)) { 5254 /* found him. */ 5255 break; 5256 } 5257 } 5258 #endif 5259 } 5260 if (holds_lock == 0) { 5261 SCTP_INP_RUNLOCK(inp); 5262 } 5263 if (laddr != NULL) { 5264 return (laddr->ifa); 5265 } else { 5266 return (NULL); 5267 } 5268 } 5269 5270 uint32_t 5271 sctp_get_ifa_hash_val(struct sockaddr *addr) 5272 { 5273 switch (addr->sa_family) { 5274 #ifdef INET 5275 case AF_INET: 5276 { 5277 struct sockaddr_in *sin; 5278 5279 sin = (struct sockaddr_in *)addr; 5280 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5281 } 5282 #endif 5283 #ifdef INET6 5284 case AF_INET6: 5285 { 5286 struct sockaddr_in6 *sin6; 5287 uint32_t hash_of_addr; 5288 5289 sin6 = (struct sockaddr_in6 *)addr; 5290 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5291 sin6->sin6_addr.s6_addr32[1] + 5292 sin6->sin6_addr.s6_addr32[2] + 5293 sin6->sin6_addr.s6_addr32[3]); 5294 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5295 return (hash_of_addr); 5296 } 5297 #endif 5298 default: 5299 break; 5300 } 5301 return (0); 5302 } 5303 5304 struct sctp_ifa * 5305 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5306 { 5307 struct sctp_ifa *sctp_ifap; 5308 struct sctp_vrf *vrf; 5309 struct sctp_ifalist *hash_head; 5310 uint32_t hash_of_addr; 5311 5312 if (holds_lock == 0) { 5313 SCTP_IPI_ADDR_RLOCK(); 5314 } else { 5315 SCTP_IPI_ADDR_LOCK_ASSERT(); 5316 } 5317 5318 vrf = sctp_find_vrf(vrf_id); 5319 if (vrf == NULL) { 5320 if (holds_lock == 0) 5321 SCTP_IPI_ADDR_RUNLOCK(); 5322 return (NULL); 5323 } 5324 5325 hash_of_addr = sctp_get_ifa_hash_val(addr); 5326 5327 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5328 if (hash_head == NULL) { 5329 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5330 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5331 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5332 sctp_print_address(addr); 5333 SCTP_PRINTF("No such bucket for address\n"); 5334 if (holds_lock == 0) 5335 SCTP_IPI_ADDR_RUNLOCK(); 5336 5337 return (NULL); 5338 } 5339 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5340 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5341 continue; 5342 #ifdef INET 5343 if (addr->sa_family == AF_INET) { 5344 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5345 sctp_ifap->address.sin.sin_addr.s_addr) { 5346 /* found him. */ 5347 break; 5348 } 5349 } 5350 #endif 5351 #ifdef INET6 5352 if (addr->sa_family == AF_INET6) { 5353 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5354 &sctp_ifap->address.sin6)) { 5355 /* found him. */ 5356 break; 5357 } 5358 } 5359 #endif 5360 } 5361 if (holds_lock == 0) 5362 SCTP_IPI_ADDR_RUNLOCK(); 5363 return (sctp_ifap); 5364 } 5365 5366 static void 5367 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5368 uint32_t rwnd_req) 5369 { 5370 /* User pulled some data, do we need a rwnd update? 
*/ 5371 struct epoch_tracker et; 5372 int r_unlocked = 0; 5373 uint32_t dif, rwnd; 5374 struct socket *so = NULL; 5375 5376 if (stcb == NULL) 5377 return; 5378 5379 atomic_add_int(&stcb->asoc.refcnt, 1); 5380 5381 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5382 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5383 /* Pre-check If we are freeing no update */ 5384 goto no_lock; 5385 } 5386 SCTP_INP_INCR_REF(stcb->sctp_ep); 5387 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5388 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5389 goto out; 5390 } 5391 so = stcb->sctp_socket; 5392 if (so == NULL) { 5393 goto out; 5394 } 5395 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5396 /* Have you have freed enough to look */ 5397 *freed_so_far = 0; 5398 /* Yep, its worth a look and the lock overhead */ 5399 5400 /* Figure out what the rwnd would be */ 5401 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5402 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5403 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5404 } else { 5405 dif = 0; 5406 } 5407 if (dif >= rwnd_req) { 5408 if (hold_rlock) { 5409 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5410 r_unlocked = 1; 5411 } 5412 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5413 /* 5414 * One last check before we allow the guy possibly 5415 * to get in. There is a race, where the guy has not 5416 * reached the gate. In that case 5417 */ 5418 goto out; 5419 } 5420 SCTP_TCB_LOCK(stcb); 5421 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5422 /* No reports here */ 5423 SCTP_TCB_UNLOCK(stcb); 5424 goto out; 5425 } 5426 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5427 NET_EPOCH_ENTER(et); 5428 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5429 5430 sctp_chunk_output(stcb->sctp_ep, stcb, 5431 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5432 /* make sure no timer is running */ 5433 NET_EPOCH_EXIT(et); 5434 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5435 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5436 SCTP_TCB_UNLOCK(stcb); 5437 } else { 5438 /* Update how much we have pending */ 5439 stcb->freed_by_sorcv_sincelast = dif; 5440 } 5441 out: 5442 if (so && r_unlocked && hold_rlock) { 5443 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5444 } 5445 5446 SCTP_INP_DECR_REF(stcb->sctp_ep); 5447 no_lock: 5448 atomic_add_int(&stcb->asoc.refcnt, -1); 5449 return; 5450 } 5451 5452 int 5453 sctp_sorecvmsg(struct socket *so, 5454 struct uio *uio, 5455 struct mbuf **mp, 5456 struct sockaddr *from, 5457 int fromlen, 5458 int *msg_flags, 5459 struct sctp_sndrcvinfo *sinfo, 5460 int filling_sinfo) 5461 { 5462 /* 5463 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5464 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5465 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
5466 * On the way out we may send out any combination of: 5467 * MSG_NOTIFICATION MSG_EOR 5468 * 5469 */ 5470 struct sctp_inpcb *inp = NULL; 5471 ssize_t my_len = 0; 5472 ssize_t cp_len = 0; 5473 int error = 0; 5474 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5475 struct mbuf *m = NULL; 5476 struct sctp_tcb *stcb = NULL; 5477 int wakeup_read_socket = 0; 5478 int freecnt_applied = 0; 5479 int out_flags = 0, in_flags = 0; 5480 int block_allowed = 1; 5481 uint32_t freed_so_far = 0; 5482 ssize_t copied_so_far = 0; 5483 int in_eeor_mode = 0; 5484 int no_rcv_needed = 0; 5485 uint32_t rwnd_req = 0; 5486 int hold_sblock = 0; 5487 int hold_rlock = 0; 5488 ssize_t slen = 0; 5489 uint32_t held_length = 0; 5490 int sockbuf_lock = 0; 5491 5492 if (uio == NULL) { 5493 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5494 return (EINVAL); 5495 } 5496 5497 if (msg_flags) { 5498 in_flags = *msg_flags; 5499 if (in_flags & MSG_PEEK) 5500 SCTP_STAT_INCR(sctps_read_peeks); 5501 } else { 5502 in_flags = 0; 5503 } 5504 slen = uio->uio_resid; 5505 5506 /* Pull in and set up our int flags */ 5507 if (in_flags & MSG_OOB) { 5508 /* Out of band's NOT supported */ 5509 return (EOPNOTSUPP); 5510 } 5511 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5512 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5513 return (EINVAL); 5514 } 5515 if ((in_flags & (MSG_DONTWAIT 5516 | MSG_NBIO 5517 )) || 5518 SCTP_SO_IS_NBIO(so)) { 5519 block_allowed = 0; 5520 } 5521 /* setup the endpoint */ 5522 inp = (struct sctp_inpcb *)so->so_pcb; 5523 if (inp == NULL) { 5524 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5525 return (EFAULT); 5526 } 5527 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5528 /* Must be at least a MTU's worth */ 5529 if (rwnd_req < SCTP_MIN_RWND) 5530 rwnd_req = SCTP_MIN_RWND; 5531 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5532 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5533 sctp_misc_ints(SCTP_SORECV_ENTER, 5534 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5535 } 5536 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5537 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5538 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5539 } 5540 5541 error = sblock(&so->so_rcv, (block_allowed ? 
SBL_WAIT : 0)); 5542 if (error) { 5543 goto release_unlocked; 5544 } 5545 sockbuf_lock = 1; 5546 restart: 5547 5548 restart_nosblocks: 5549 if (hold_sblock == 0) { 5550 SOCKBUF_LOCK(&so->so_rcv); 5551 hold_sblock = 1; 5552 } 5553 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5554 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5555 goto out; 5556 } 5557 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5558 if (so->so_error) { 5559 error = so->so_error; 5560 if ((in_flags & MSG_PEEK) == 0) 5561 so->so_error = 0; 5562 goto out; 5563 } else { 5564 if (so->so_rcv.sb_cc == 0) { 5565 /* indicate EOF */ 5566 error = 0; 5567 goto out; 5568 } 5569 } 5570 } 5571 if (so->so_rcv.sb_cc <= held_length) { 5572 if (so->so_error) { 5573 error = so->so_error; 5574 if ((in_flags & MSG_PEEK) == 0) { 5575 so->so_error = 0; 5576 } 5577 goto out; 5578 } 5579 if ((so->so_rcv.sb_cc == 0) && 5580 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5581 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5582 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5583 /* 5584 * For active open side clear flags for 5585 * re-use passive open is blocked by 5586 * connect. 5587 */ 5588 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5589 /* 5590 * You were aborted, passive side 5591 * always hits here 5592 */ 5593 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5594 error = ECONNRESET; 5595 } 5596 so->so_state &= ~(SS_ISCONNECTING | 5597 SS_ISDISCONNECTING | 5598 SS_ISCONFIRMING | 5599 SS_ISCONNECTED); 5600 if (error == 0) { 5601 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5602 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5603 error = ENOTCONN; 5604 } 5605 } 5606 goto out; 5607 } 5608 } 5609 if (block_allowed) { 5610 error = sbwait(&so->so_rcv); 5611 if (error) { 5612 goto out; 5613 } 5614 held_length = 0; 5615 goto restart_nosblocks; 5616 } else { 5617 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5618 error = EWOULDBLOCK; 5619 goto out; 5620 } 5621 } 5622 if (hold_sblock == 1) { 5623 SOCKBUF_UNLOCK(&so->so_rcv); 5624 hold_sblock = 0; 5625 } 5626 /* we possibly have data we can read */ 5627 /* sa_ignore FREED_MEMORY */ 5628 control = TAILQ_FIRST(&inp->read_queue); 5629 if (control == NULL) { 5630 /* 5631 * This could be happening since the appender did the 5632 * increment but as not yet did the tailq insert onto the 5633 * read_queue 5634 */ 5635 if (hold_rlock == 0) { 5636 SCTP_INP_READ_LOCK(inp); 5637 } 5638 control = TAILQ_FIRST(&inp->read_queue); 5639 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5640 #ifdef INVARIANTS 5641 panic("Huh, its non zero and nothing on control?"); 5642 #endif 5643 so->so_rcv.sb_cc = 0; 5644 } 5645 SCTP_INP_READ_UNLOCK(inp); 5646 hold_rlock = 0; 5647 goto restart; 5648 } 5649 5650 if ((control->length == 0) && 5651 (control->do_not_ref_stcb)) { 5652 /* 5653 * Clean up code for freeing assoc that left behind a 5654 * pdapi.. maybe a peer in EEOR that just closed after 5655 * sending and never indicated a EOR. 5656 */ 5657 if (hold_rlock == 0) { 5658 hold_rlock = 1; 5659 SCTP_INP_READ_LOCK(inp); 5660 } 5661 control->held_length = 0; 5662 if (control->data) { 5663 /* Hmm there is data here .. 
fix */ 5664 struct mbuf *m_tmp; 5665 int cnt = 0; 5666 5667 m_tmp = control->data; 5668 while (m_tmp) { 5669 cnt += SCTP_BUF_LEN(m_tmp); 5670 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5671 control->tail_mbuf = m_tmp; 5672 control->end_added = 1; 5673 } 5674 m_tmp = SCTP_BUF_NEXT(m_tmp); 5675 } 5676 control->length = cnt; 5677 } else { 5678 /* remove it */ 5679 TAILQ_REMOVE(&inp->read_queue, control, next); 5680 /* Add back any hiddend data */ 5681 sctp_free_remote_addr(control->whoFrom); 5682 sctp_free_a_readq(stcb, control); 5683 } 5684 if (hold_rlock) { 5685 hold_rlock = 0; 5686 SCTP_INP_READ_UNLOCK(inp); 5687 } 5688 goto restart; 5689 } 5690 if ((control->length == 0) && 5691 (control->end_added == 1)) { 5692 /* 5693 * Do we also need to check for (control->pdapi_aborted == 5694 * 1)? 5695 */ 5696 if (hold_rlock == 0) { 5697 hold_rlock = 1; 5698 SCTP_INP_READ_LOCK(inp); 5699 } 5700 TAILQ_REMOVE(&inp->read_queue, control, next); 5701 if (control->data) { 5702 #ifdef INVARIANTS 5703 panic("control->data not null but control->length == 0"); 5704 #else 5705 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n"); 5706 sctp_m_freem(control->data); 5707 control->data = NULL; 5708 #endif 5709 } 5710 if (control->aux_data) { 5711 sctp_m_free(control->aux_data); 5712 control->aux_data = NULL; 5713 } 5714 #ifdef INVARIANTS 5715 if (control->on_strm_q) { 5716 panic("About to free ctl:%p so:%p and its in %d", 5717 control, so, control->on_strm_q); 5718 } 5719 #endif 5720 sctp_free_remote_addr(control->whoFrom); 5721 sctp_free_a_readq(stcb, control); 5722 if (hold_rlock) { 5723 hold_rlock = 0; 5724 SCTP_INP_READ_UNLOCK(inp); 5725 } 5726 goto restart; 5727 } 5728 if (control->length == 0) { 5729 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5730 (filling_sinfo)) { 5731 /* find a more suitable one then this */ 5732 ctl = TAILQ_NEXT(control, next); 5733 while (ctl) { 5734 if ((ctl->stcb != control->stcb) && (ctl->length) && 5735 (ctl->some_taken || 5736 (ctl->spec_flags & M_NOTIFICATION) || 5737 ((ctl->do_not_ref_stcb == 0) && 5738 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5739 ) { 5740 /*- 5741 * If we have a different TCB next, and there is data 5742 * present. If we have already taken some (pdapi), OR we can 5743 * ref the tcb and no delivery as started on this stream, we 5744 * take it. Note we allow a notification on a different 5745 * assoc to be delivered.. 5746 */ 5747 control = ctl; 5748 goto found_one; 5749 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5750 (ctl->length) && 5751 ((ctl->some_taken) || 5752 ((ctl->do_not_ref_stcb == 0) && 5753 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5754 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5755 /*- 5756 * If we have the same tcb, and there is data present, and we 5757 * have the strm interleave feature present. Then if we have 5758 * taken some (pdapi) or we can refer to tht tcb AND we have 5759 * not started a delivery for this stream, we can take it. 5760 * Note we do NOT allow a notificaiton on the same assoc to 5761 * be delivered. 5762 */ 5763 control = ctl; 5764 goto found_one; 5765 } 5766 ctl = TAILQ_NEXT(ctl, next); 5767 } 5768 } 5769 /* 5770 * if we reach here, not suitable replacement is available 5771 * <or> fragment interleave is NOT on. So stuff the sb_cc 5772 * into the our held count, and its time to sleep again. 
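 * Recording sb_cc in held_length lets the sleep logic tell whether any
 * new data has arrived beyond what is already queued.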
5773 */ 5774 held_length = so->so_rcv.sb_cc; 5775 control->held_length = so->so_rcv.sb_cc; 5776 goto restart; 5777 } 5778 /* Clear the held length since there is something to read */ 5779 control->held_length = 0; 5780 found_one: 5781 /* 5782 * If we reach here, control has a some data for us to read off. 5783 * Note that stcb COULD be NULL. 5784 */ 5785 if (hold_rlock == 0) { 5786 hold_rlock = 1; 5787 SCTP_INP_READ_LOCK(inp); 5788 } 5789 control->some_taken++; 5790 stcb = control->stcb; 5791 if (stcb) { 5792 if ((control->do_not_ref_stcb == 0) && 5793 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5794 if (freecnt_applied == 0) 5795 stcb = NULL; 5796 } else if (control->do_not_ref_stcb == 0) { 5797 /* you can't free it on me please */ 5798 /* 5799 * The lock on the socket buffer protects us so the 5800 * free code will stop. But since we used the 5801 * socketbuf lock and the sender uses the tcb_lock 5802 * to increment, we need to use the atomic add to 5803 * the refcnt 5804 */ 5805 if (freecnt_applied) { 5806 #ifdef INVARIANTS 5807 panic("refcnt already incremented"); 5808 #else 5809 SCTP_PRINTF("refcnt already incremented?\n"); 5810 #endif 5811 } else { 5812 atomic_add_int(&stcb->asoc.refcnt, 1); 5813 freecnt_applied = 1; 5814 } 5815 /* 5816 * Setup to remember how much we have not yet told 5817 * the peer our rwnd has opened up. Note we grab the 5818 * value from the tcb from last time. Note too that 5819 * sack sending clears this when a sack is sent, 5820 * which is fine. Once we hit the rwnd_req, we then 5821 * will go to the sctp_user_rcvd() that will not 5822 * lock until it KNOWs it MUST send a WUP-SACK. 5823 */ 5824 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5825 stcb->freed_by_sorcv_sincelast = 0; 5826 } 5827 } 5828 if (stcb && 5829 ((control->spec_flags & M_NOTIFICATION) == 0) && 5830 control->do_not_ref_stcb == 0) { 5831 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5832 } 5833 5834 /* First lets get off the sinfo and sockaddr info */ 5835 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5836 sinfo->sinfo_stream = control->sinfo_stream; 5837 sinfo->sinfo_ssn = (uint16_t)control->mid; 5838 sinfo->sinfo_flags = control->sinfo_flags; 5839 sinfo->sinfo_ppid = control->sinfo_ppid; 5840 sinfo->sinfo_context = control->sinfo_context; 5841 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5842 sinfo->sinfo_tsn = control->sinfo_tsn; 5843 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5844 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5845 nxt = TAILQ_NEXT(control, next); 5846 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5847 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5848 struct sctp_extrcvinfo *s_extra; 5849 5850 s_extra = (struct sctp_extrcvinfo *)sinfo; 5851 if ((nxt) && 5852 (nxt->length)) { 5853 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5854 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5855 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5856 } 5857 if (nxt->spec_flags & M_NOTIFICATION) { 5858 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5859 } 5860 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5861 s_extra->serinfo_next_length = nxt->length; 5862 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5863 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5864 if (nxt->tail_mbuf != NULL) { 5865 if (nxt->end_added) { 5866 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5867 } 5868 } 5869 } else { 5870 /* 5871 * we explicitly 0 this, since the memcpy 5872 * got some other things 
beyond the older 5873 * sinfo_ that is on the control's structure 5874 * :-D 5875 */ 5876 nxt = NULL; 5877 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5878 s_extra->serinfo_next_aid = 0; 5879 s_extra->serinfo_next_length = 0; 5880 s_extra->serinfo_next_ppid = 0; 5881 s_extra->serinfo_next_stream = 0; 5882 } 5883 } 5884 /* 5885 * update off the real current cum-ack, if we have an stcb. 5886 */ 5887 if ((control->do_not_ref_stcb == 0) && stcb) 5888 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5889 /* 5890 * mask off the high bits, we keep the actual chunk bits in 5891 * there. 5892 */ 5893 sinfo->sinfo_flags &= 0x00ff; 5894 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5895 sinfo->sinfo_flags |= SCTP_UNORDERED; 5896 } 5897 } 5898 #ifdef SCTP_ASOCLOG_OF_TSNS 5899 { 5900 int index, newindex; 5901 struct sctp_pcbtsn_rlog *entry; 5902 5903 do { 5904 index = inp->readlog_index; 5905 newindex = index + 1; 5906 if (newindex >= SCTP_READ_LOG_SIZE) { 5907 newindex = 0; 5908 } 5909 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5910 entry = &inp->readlog[index]; 5911 entry->vtag = control->sinfo_assoc_id; 5912 entry->strm = control->sinfo_stream; 5913 entry->seq = (uint16_t)control->mid; 5914 entry->sz = control->length; 5915 entry->flgs = control->sinfo_flags; 5916 } 5917 #endif 5918 if ((fromlen > 0) && (from != NULL)) { 5919 union sctp_sockstore store; 5920 size_t len; 5921 5922 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5923 #ifdef INET6 5924 case AF_INET6: 5925 len = sizeof(struct sockaddr_in6); 5926 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5927 store.sin6.sin6_port = control->port_from; 5928 break; 5929 #endif 5930 #ifdef INET 5931 case AF_INET: 5932 #ifdef INET6 5933 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5934 len = sizeof(struct sockaddr_in6); 5935 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5936 &store.sin6); 5937 store.sin6.sin6_port = control->port_from; 5938 } else { 5939 len = sizeof(struct sockaddr_in); 5940 store.sin = control->whoFrom->ro._l_addr.sin; 5941 store.sin.sin_port = control->port_from; 5942 } 5943 #else 5944 len = sizeof(struct sockaddr_in); 5945 store.sin = control->whoFrom->ro._l_addr.sin; 5946 store.sin.sin_port = control->port_from; 5947 #endif 5948 break; 5949 #endif 5950 default: 5951 len = 0; 5952 break; 5953 } 5954 memcpy(from, &store, min((size_t)fromlen, len)); 5955 #ifdef INET6 5956 { 5957 struct sockaddr_in6 lsa6, *from6; 5958 5959 from6 = (struct sockaddr_in6 *)from; 5960 sctp_recover_scope_mac(from6, (&lsa6)); 5961 } 5962 #endif 5963 } 5964 if (hold_rlock) { 5965 SCTP_INP_READ_UNLOCK(inp); 5966 hold_rlock = 0; 5967 } 5968 if (hold_sblock) { 5969 SOCKBUF_UNLOCK(&so->so_rcv); 5970 hold_sblock = 0; 5971 } 5972 /* now copy out what data we can */ 5973 if (mp == NULL) { 5974 /* copy out each mbuf in the chain up to length */ 5975 get_more_data: 5976 m = control->data; 5977 while (m) { 5978 /* Move out all we can */ 5979 cp_len = uio->uio_resid; 5980 my_len = SCTP_BUF_LEN(m); 5981 if (cp_len > my_len) { 5982 /* not enough in this buf */ 5983 cp_len = my_len; 5984 } 5985 if (hold_rlock) { 5986 SCTP_INP_READ_UNLOCK(inp); 5987 hold_rlock = 0; 5988 } 5989 if (cp_len > 0) 5990 error = uiomove(mtod(m, char *), (int)cp_len, uio); 5991 /* re-read */ 5992 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5993 goto release; 5994 } 5995 5996 if ((control->do_not_ref_stcb == 0) && stcb && 5997 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5998 no_rcv_needed = 1; 5999 } 6000 if 
(error) { 6001 /* error we are out of here */ 6002 goto release; 6003 } 6004 SCTP_INP_READ_LOCK(inp); 6005 hold_rlock = 1; 6006 if (cp_len == SCTP_BUF_LEN(m)) { 6007 if ((SCTP_BUF_NEXT(m) == NULL) && 6008 (control->end_added)) { 6009 out_flags |= MSG_EOR; 6010 if ((control->do_not_ref_stcb == 0) && 6011 (control->stcb != NULL) && 6012 ((control->spec_flags & M_NOTIFICATION) == 0)) 6013 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6014 } 6015 if (control->spec_flags & M_NOTIFICATION) { 6016 out_flags |= MSG_NOTIFICATION; 6017 } 6018 /* we ate up the mbuf */ 6019 if (in_flags & MSG_PEEK) { 6020 /* just looking */ 6021 m = SCTP_BUF_NEXT(m); 6022 copied_so_far += cp_len; 6023 } else { 6024 /* dispose of the mbuf */ 6025 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6026 sctp_sblog(&so->so_rcv, 6027 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6028 } 6029 sctp_sbfree(control, stcb, &so->so_rcv, m); 6030 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6031 sctp_sblog(&so->so_rcv, 6032 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6033 } 6034 copied_so_far += cp_len; 6035 freed_so_far += (uint32_t)cp_len; 6036 freed_so_far += MSIZE; 6037 atomic_subtract_int(&control->length, cp_len); 6038 control->data = sctp_m_free(m); 6039 m = control->data; 6040 /* 6041 * been through it all, must hold sb 6042 * lock ok to null tail 6043 */ 6044 if (control->data == NULL) { 6045 #ifdef INVARIANTS 6046 if ((control->end_added == 0) || 6047 (TAILQ_NEXT(control, next) == NULL)) { 6048 /* 6049 * If the end is not 6050 * added, OR the 6051 * next is NOT null 6052 * we MUST have the 6053 * lock. 6054 */ 6055 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6056 panic("Hmm we don't own the lock?"); 6057 } 6058 } 6059 #endif 6060 control->tail_mbuf = NULL; 6061 #ifdef INVARIANTS 6062 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6063 panic("end_added, nothing left and no MSG_EOR"); 6064 } 6065 #endif 6066 } 6067 } 6068 } else { 6069 /* Do we need to trim the mbuf? */ 6070 if (control->spec_flags & M_NOTIFICATION) { 6071 out_flags |= MSG_NOTIFICATION; 6072 } 6073 if ((in_flags & MSG_PEEK) == 0) { 6074 SCTP_BUF_RESV_UF(m, cp_len); 6075 SCTP_BUF_LEN(m) -= (int)cp_len; 6076 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6077 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6078 } 6079 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 6080 if ((control->do_not_ref_stcb == 0) && 6081 stcb) { 6082 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 6083 } 6084 copied_so_far += cp_len; 6085 freed_so_far += (uint32_t)cp_len; 6086 freed_so_far += MSIZE; 6087 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6088 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6089 SCTP_LOG_SBRESULT, 0); 6090 } 6091 atomic_subtract_int(&control->length, cp_len); 6092 } else { 6093 copied_so_far += cp_len; 6094 } 6095 } 6096 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6097 break; 6098 } 6099 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6100 (control->do_not_ref_stcb == 0) && 6101 (freed_so_far >= rwnd_req)) { 6102 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6103 } 6104 } /* end while(m) */ 6105 /* 6106 * At this point we have looked at it all and we either have 6107 * a MSG_EOR/or read all the user wants... <OR> 6108 * control->length == 0. 
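 * If MSG_EOR is set and we are not peeking, the control block is
 * released below once it is fully drained; if the user left data
 * behind, MSG_EOR is cleared instead and the control stays on the
 * read queue.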
6109 */ 6110 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6111 /* we are done with this control */ 6112 if (control->length == 0) { 6113 if (control->data) { 6114 #ifdef INVARIANTS 6115 panic("control->data not null at read eor?"); 6116 #else 6117 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n"); 6118 sctp_m_freem(control->data); 6119 control->data = NULL; 6120 #endif 6121 } 6122 done_with_control: 6123 if (hold_rlock == 0) { 6124 SCTP_INP_READ_LOCK(inp); 6125 hold_rlock = 1; 6126 } 6127 TAILQ_REMOVE(&inp->read_queue, control, next); 6128 /* Add back any hiddend data */ 6129 if (control->held_length) { 6130 held_length = 0; 6131 control->held_length = 0; 6132 wakeup_read_socket = 1; 6133 } 6134 if (control->aux_data) { 6135 sctp_m_free(control->aux_data); 6136 control->aux_data = NULL; 6137 } 6138 no_rcv_needed = control->do_not_ref_stcb; 6139 sctp_free_remote_addr(control->whoFrom); 6140 control->data = NULL; 6141 #ifdef INVARIANTS 6142 if (control->on_strm_q) { 6143 panic("About to free ctl:%p so:%p and its in %d", 6144 control, so, control->on_strm_q); 6145 } 6146 #endif 6147 sctp_free_a_readq(stcb, control); 6148 control = NULL; 6149 if ((freed_so_far >= rwnd_req) && 6150 (no_rcv_needed == 0)) 6151 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6152 6153 } else { 6154 /* 6155 * The user did not read all of this 6156 * message, turn off the returned MSG_EOR 6157 * since we are leaving more behind on the 6158 * control to read. 6159 */ 6160 #ifdef INVARIANTS 6161 if (control->end_added && 6162 (control->data == NULL) && 6163 (control->tail_mbuf == NULL)) { 6164 panic("Gak, control->length is corrupt?"); 6165 } 6166 #endif 6167 no_rcv_needed = control->do_not_ref_stcb; 6168 out_flags &= ~MSG_EOR; 6169 } 6170 } 6171 if (out_flags & MSG_EOR) { 6172 goto release; 6173 } 6174 if ((uio->uio_resid == 0) || 6175 ((in_eeor_mode) && 6176 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6177 goto release; 6178 } 6179 /* 6180 * If I hit here the receiver wants more and this message is 6181 * NOT done (pd-api). So two questions. Can we block? if not 6182 * we are done. Did the user NOT set MSG_WAITALL? 6183 */ 6184 if (block_allowed == 0) { 6185 goto release; 6186 } 6187 /* 6188 * We need to wait for more data a few things: - We don't 6189 * sbunlock() so we don't get someone else reading. - We 6190 * must be sure to account for the case where what is added 6191 * is NOT to our control when we wakeup. 6192 */ 6193 6194 /* 6195 * Do we need to tell the transport a rwnd update might be 6196 * needed before we go to sleep? 
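 * This mirrors the check in the copy loop: not peeking, the stcb is
 * still referenced, no_rcv_needed is clear, and at least rwnd_req
 * bytes have been freed.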
6197 */ 6198 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6199 ((freed_so_far >= rwnd_req) && 6200 (control->do_not_ref_stcb == 0) && 6201 (no_rcv_needed == 0))) { 6202 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6203 } 6204 wait_some_more: 6205 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6206 goto release; 6207 } 6208 6209 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6210 goto release; 6211 6212 if (hold_rlock == 1) { 6213 SCTP_INP_READ_UNLOCK(inp); 6214 hold_rlock = 0; 6215 } 6216 if (hold_sblock == 0) { 6217 SOCKBUF_LOCK(&so->so_rcv); 6218 hold_sblock = 1; 6219 } 6220 if ((copied_so_far) && (control->length == 0) && 6221 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6222 goto release; 6223 } 6224 if (so->so_rcv.sb_cc <= control->held_length) { 6225 error = sbwait(&so->so_rcv); 6226 if (error) { 6227 goto release; 6228 } 6229 control->held_length = 0; 6230 } 6231 if (hold_sblock) { 6232 SOCKBUF_UNLOCK(&so->so_rcv); 6233 hold_sblock = 0; 6234 } 6235 if (control->length == 0) { 6236 /* still nothing here */ 6237 if (control->end_added == 1) { 6238 /* he aborted, or is done i.e.did a shutdown */ 6239 out_flags |= MSG_EOR; 6240 if (control->pdapi_aborted) { 6241 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6242 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6243 6244 out_flags |= MSG_TRUNC; 6245 } else { 6246 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6247 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6248 } 6249 goto done_with_control; 6250 } 6251 if (so->so_rcv.sb_cc > held_length) { 6252 control->held_length = so->so_rcv.sb_cc; 6253 held_length = 0; 6254 } 6255 goto wait_some_more; 6256 } else if (control->data == NULL) { 6257 /* 6258 * we must re-sync since data is probably being 6259 * added 6260 */ 6261 SCTP_INP_READ_LOCK(inp); 6262 if ((control->length > 0) && (control->data == NULL)) { 6263 /* 6264 * big trouble.. we have the lock and its 6265 * corrupt? 6266 */ 6267 #ifdef INVARIANTS 6268 panic("Impossible data==NULL length !=0"); 6269 #endif 6270 out_flags |= MSG_EOR; 6271 out_flags |= MSG_TRUNC; 6272 control->length = 0; 6273 SCTP_INP_READ_UNLOCK(inp); 6274 goto done_with_control; 6275 } 6276 SCTP_INP_READ_UNLOCK(inp); 6277 /* We will fall around to get more data */ 6278 } 6279 goto get_more_data; 6280 } else { 6281 /*- 6282 * Give caller back the mbuf chain, 6283 * store in uio_resid the length 6284 */ 6285 wakeup_read_socket = 0; 6286 if ((control->end_added == 0) || 6287 (TAILQ_NEXT(control, next) == NULL)) { 6288 /* Need to get rlock */ 6289 if (hold_rlock == 0) { 6290 SCTP_INP_READ_LOCK(inp); 6291 hold_rlock = 1; 6292 } 6293 } 6294 if (control->end_added) { 6295 out_flags |= MSG_EOR; 6296 if ((control->do_not_ref_stcb == 0) && 6297 (control->stcb != NULL) && 6298 ((control->spec_flags & M_NOTIFICATION) == 0)) 6299 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6300 } 6301 if (control->spec_flags & M_NOTIFICATION) { 6302 out_flags |= MSG_NOTIFICATION; 6303 } 6304 uio->uio_resid = control->length; 6305 *mp = control->data; 6306 m = control->data; 6307 while (m) { 6308 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6309 sctp_sblog(&so->so_rcv, 6310 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6311 } 6312 sctp_sbfree(control, stcb, &so->so_rcv, m); 6313 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6314 freed_so_far += MSIZE; 6315 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6316 sctp_sblog(&so->so_rcv, 6317 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6318 } 6319 m = SCTP_BUF_NEXT(m); 6320 } 6321 control->data = control->tail_mbuf = NULL; 6322 control->length = 0; 6323 if (out_flags & MSG_EOR) { 6324 /* Done with this control */ 6325 goto done_with_control; 6326 } 6327 } 6328 release: 6329 if (hold_rlock == 1) { 6330 SCTP_INP_READ_UNLOCK(inp); 6331 hold_rlock = 0; 6332 } 6333 if (hold_sblock == 1) { 6334 SOCKBUF_UNLOCK(&so->so_rcv); 6335 hold_sblock = 0; 6336 } 6337 6338 sbunlock(&so->so_rcv); 6339 sockbuf_lock = 0; 6340 6341 release_unlocked: 6342 if (hold_sblock) { 6343 SOCKBUF_UNLOCK(&so->so_rcv); 6344 hold_sblock = 0; 6345 } 6346 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6347 if ((freed_so_far >= rwnd_req) && 6348 (control && (control->do_not_ref_stcb == 0)) && 6349 (no_rcv_needed == 0)) 6350 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6351 } 6352 out: 6353 if (msg_flags) { 6354 *msg_flags = out_flags; 6355 } 6356 if (((out_flags & MSG_EOR) == 0) && 6357 ((in_flags & MSG_PEEK) == 0) && 6358 (sinfo) && 6359 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6360 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6361 struct sctp_extrcvinfo *s_extra; 6362 6363 s_extra = (struct sctp_extrcvinfo *)sinfo; 6364 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6365 } 6366 if (hold_rlock == 1) { 6367 SCTP_INP_READ_UNLOCK(inp); 6368 } 6369 if (hold_sblock) { 6370 SOCKBUF_UNLOCK(&so->so_rcv); 6371 } 6372 if (sockbuf_lock) { 6373 sbunlock(&so->so_rcv); 6374 } 6375 6376 if (freecnt_applied) { 6377 /* 6378 * The lock on the socket buffer protects us so the free 6379 * code will stop. But since we used the socketbuf lock and 6380 * the sender uses the tcb_lock to increment, we need to use 6381 * the atomic add to the refcnt. 6382 */ 6383 if (stcb == NULL) { 6384 #ifdef INVARIANTS 6385 panic("stcb for refcnt has gone NULL?"); 6386 goto stage_left; 6387 #else 6388 goto stage_left; 6389 #endif 6390 } 6391 /* Save the value back for next time */ 6392 stcb->freed_by_sorcv_sincelast = freed_so_far; 6393 atomic_add_int(&stcb->asoc.refcnt, -1); 6394 } 6395 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6396 if (stcb) { 6397 sctp_misc_ints(SCTP_SORECV_DONE, 6398 freed_so_far, 6399 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6400 stcb->asoc.my_rwnd, 6401 so->so_rcv.sb_cc); 6402 } else { 6403 sctp_misc_ints(SCTP_SORECV_DONE, 6404 freed_so_far, 6405 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6406 0, 6407 so->so_rcv.sb_cc); 6408 } 6409 } 6410 stage_left: 6411 if (wakeup_read_socket) { 6412 sctp_sorwakeup(inp, so); 6413 } 6414 return (error); 6415 } 6416 6417 #ifdef SCTP_MBUF_LOGGING 6418 struct mbuf * 6419 sctp_m_free(struct mbuf *m) 6420 { 6421 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6422 sctp_log_mb(m, SCTP_MBUF_IFREE); 6423 } 6424 return (m_free(m)); 6425 } 6426 6427 void 6428 sctp_m_freem(struct mbuf *mb) 6429 { 6430 while (mb != NULL) 6431 mb = sctp_m_free(mb); 6432 } 6433 6434 #endif 6435 6436 int 6437 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6438 { 6439 /* 6440 * Given a local address. For all associations that holds the 6441 * address, request a peer-set-primary. 
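 * The request is handled asynchronously: a work item carrying the ifa
 * is queued on the address work queue and the ADDR_WQ timer wakes the
 * iterator that issues the actual set-primary requests.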
6442 */ 6443 struct sctp_ifa *ifa; 6444 struct sctp_laddr *wi; 6445 6446 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6447 if (ifa == NULL) { 6448 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6449 return (EADDRNOTAVAIL); 6450 } 6451 /* 6452 * Now that we have the ifa we must awaken the iterator with this 6453 * message. 6454 */ 6455 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6456 if (wi == NULL) { 6457 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6458 return (ENOMEM); 6459 } 6460 /* Now incr the count and int wi structure */ 6461 SCTP_INCR_LADDR_COUNT(); 6462 memset(wi, 0, sizeof(*wi)); 6463 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6464 wi->ifa = ifa; 6465 wi->action = SCTP_SET_PRIM_ADDR; 6466 atomic_add_int(&ifa->refcount, 1); 6467 6468 /* Now add it to the work queue */ 6469 SCTP_WQ_ADDR_LOCK(); 6470 /* 6471 * Should this really be a tailq? As it is we will process the 6472 * newest first :-0 6473 */ 6474 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6475 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6476 (struct sctp_inpcb *)NULL, 6477 (struct sctp_tcb *)NULL, 6478 (struct sctp_nets *)NULL); 6479 SCTP_WQ_ADDR_UNLOCK(); 6480 return (0); 6481 } 6482 6483 int 6484 sctp_soreceive(struct socket *so, 6485 struct sockaddr **psa, 6486 struct uio *uio, 6487 struct mbuf **mp0, 6488 struct mbuf **controlp, 6489 int *flagsp) 6490 { 6491 int error, fromlen; 6492 uint8_t sockbuf[256]; 6493 struct sockaddr *from; 6494 struct sctp_extrcvinfo sinfo; 6495 int filling_sinfo = 1; 6496 int flags; 6497 struct sctp_inpcb *inp; 6498 6499 inp = (struct sctp_inpcb *)so->so_pcb; 6500 /* pickup the assoc we are reading from */ 6501 if (inp == NULL) { 6502 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6503 return (EINVAL); 6504 } 6505 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6506 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6507 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6508 (controlp == NULL)) { 6509 /* user does not want the sndrcv ctl */ 6510 filling_sinfo = 0; 6511 } 6512 if (psa) { 6513 from = (struct sockaddr *)sockbuf; 6514 fromlen = sizeof(sockbuf); 6515 from->sa_len = 0; 6516 } else { 6517 from = NULL; 6518 fromlen = 0; 6519 } 6520 6521 if (filling_sinfo) { 6522 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6523 } 6524 if (flagsp != NULL) { 6525 flags = *flagsp; 6526 } else { 6527 flags = 0; 6528 } 6529 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6530 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6531 if (flagsp != NULL) { 6532 *flagsp = flags; 6533 } 6534 if (controlp != NULL) { 6535 /* copy back the sinfo in a CMSG format */ 6536 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6537 *controlp = sctp_build_ctl_nchunk(inp, 6538 (struct sctp_sndrcvinfo *)&sinfo); 6539 } else { 6540 *controlp = NULL; 6541 } 6542 } 6543 if (psa) { 6544 /* copy back the address info */ 6545 if (from && from->sa_len) { 6546 *psa = sodupsockaddr(from, M_NOWAIT); 6547 } else { 6548 *psa = NULL; 6549 } 6550 } 6551 return (error); 6552 } 6553 6554 int 6555 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6556 int totaddr, int *error) 6557 { 6558 int added = 0; 6559 int i; 6560 struct sctp_inpcb *inp; 6561 struct sockaddr *sa; 6562 size_t incr = 0; 6563 #ifdef INET 6564 struct sockaddr_in *sin; 6565 #endif 6566 #ifdef INET6 6567 struct sockaddr_in6 *sin6; 6568 #endif 6569 6570 sa = addr; 6571 inp = 
stcb->sctp_ep; 6572 *error = 0; 6573 for (i = 0; i < totaddr; i++) { 6574 switch (sa->sa_family) { 6575 #ifdef INET 6576 case AF_INET: 6577 incr = sizeof(struct sockaddr_in); 6578 sin = (struct sockaddr_in *)sa; 6579 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6580 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6581 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6582 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6583 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6584 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6585 *error = EINVAL; 6586 goto out_now; 6587 } 6588 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6589 SCTP_DONOT_SETSCOPE, 6590 SCTP_ADDR_IS_CONFIRMED)) { 6591 /* assoc gone no un-lock */ 6592 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6593 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6594 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6595 *error = ENOBUFS; 6596 goto out_now; 6597 } 6598 added++; 6599 break; 6600 #endif 6601 #ifdef INET6 6602 case AF_INET6: 6603 incr = sizeof(struct sockaddr_in6); 6604 sin6 = (struct sockaddr_in6 *)sa; 6605 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6606 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6607 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6608 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6609 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6610 *error = EINVAL; 6611 goto out_now; 6612 } 6613 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6614 SCTP_DONOT_SETSCOPE, 6615 SCTP_ADDR_IS_CONFIRMED)) { 6616 /* assoc gone no un-lock */ 6617 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6618 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6619 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6620 *error = ENOBUFS; 6621 goto out_now; 6622 } 6623 added++; 6624 break; 6625 #endif 6626 default: 6627 break; 6628 } 6629 sa = (struct sockaddr *)((caddr_t)sa + incr); 6630 } 6631 out_now: 6632 return (added); 6633 } 6634 6635 int 6636 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6637 unsigned int totaddr, 6638 unsigned int *num_v4, unsigned int *num_v6, 6639 unsigned int limit) 6640 { 6641 struct sockaddr *sa; 6642 struct sctp_tcb *stcb; 6643 unsigned int incr, at, i; 6644 6645 at = 0; 6646 sa = addr; 6647 *num_v6 = *num_v4 = 0; 6648 /* account and validate addresses */ 6649 if (totaddr == 0) { 6650 return (EINVAL); 6651 } 6652 for (i = 0; i < totaddr; i++) { 6653 if (at + sizeof(struct sockaddr) > limit) { 6654 return (EINVAL); 6655 } 6656 switch (sa->sa_family) { 6657 #ifdef INET 6658 case AF_INET: 6659 incr = (unsigned int)sizeof(struct sockaddr_in); 6660 if (sa->sa_len != incr) { 6661 return (EINVAL); 6662 } 6663 (*num_v4) += 1; 6664 break; 6665 #endif 6666 #ifdef INET6 6667 case AF_INET6: 6668 { 6669 struct sockaddr_in6 *sin6; 6670 6671 sin6 = (struct sockaddr_in6 *)sa; 6672 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6673 /* Must be non-mapped for connectx */ 6674 return (EINVAL); 6675 } 6676 incr = (unsigned int)sizeof(struct sockaddr_in6); 6677 if (sa->sa_len != incr) { 6678 return (EINVAL); 6679 } 6680 (*num_v6) += 1; 6681 break; 6682 } 6683 #endif 6684 default: 6685 return (EINVAL); 6686 } 6687 if ((at + incr) > limit) { 6688 return (EINVAL); 6689 } 6690 SCTP_INP_INCR_REF(inp); 6691 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6692 if (stcb != NULL) { 6693 SCTP_TCB_UNLOCK(stcb); 6694 return (EALREADY); 6695 } else { 6696 SCTP_INP_DECR_REF(inp); 6697 } 6698 at += incr; 6699 sa = (struct sockaddr *)((caddr_t)sa + 
incr); 6700 } 6701 return (0); 6702 } 6703 6704 /* 6705 * sctp_bindx(ADD) for one address. 6706 * assumes all arguments are valid/checked by caller. 6707 */ 6708 void 6709 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6710 struct sockaddr *sa, uint32_t vrf_id, int *error, 6711 void *p) 6712 { 6713 #if defined(INET) && defined(INET6) 6714 struct sockaddr_in sin; 6715 #endif 6716 #ifdef INET6 6717 struct sockaddr_in6 *sin6; 6718 #endif 6719 #ifdef INET 6720 struct sockaddr_in *sinp; 6721 #endif 6722 struct sockaddr *addr_to_use; 6723 struct sctp_inpcb *lep; 6724 uint16_t port; 6725 6726 /* see if we're bound all already! */ 6727 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6728 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6729 *error = EINVAL; 6730 return; 6731 } 6732 switch (sa->sa_family) { 6733 #ifdef INET6 6734 case AF_INET6: 6735 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6736 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6737 *error = EINVAL; 6738 return; 6739 } 6740 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6741 /* can only bind v6 on PF_INET6 sockets */ 6742 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6743 *error = EINVAL; 6744 return; 6745 } 6746 sin6 = (struct sockaddr_in6 *)sa; 6747 port = sin6->sin6_port; 6748 #ifdef INET 6749 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6750 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6751 SCTP_IPV6_V6ONLY(inp)) { 6752 /* can't bind v4-mapped on PF_INET sockets */ 6753 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6754 *error = EINVAL; 6755 return; 6756 } 6757 in6_sin6_2_sin(&sin, sin6); 6758 addr_to_use = (struct sockaddr *)&sin; 6759 } else { 6760 addr_to_use = sa; 6761 } 6762 #else 6763 addr_to_use = sa; 6764 #endif 6765 break; 6766 #endif 6767 #ifdef INET 6768 case AF_INET: 6769 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6770 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6771 *error = EINVAL; 6772 return; 6773 } 6774 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6775 SCTP_IPV6_V6ONLY(inp)) { 6776 /* can't bind v4 on PF_INET sockets */ 6777 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6778 *error = EINVAL; 6779 return; 6780 } 6781 sinp = (struct sockaddr_in *)sa; 6782 port = sinp->sin_port; 6783 addr_to_use = sa; 6784 break; 6785 #endif 6786 default: 6787 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6788 *error = EINVAL; 6789 return; 6790 } 6791 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6792 if (p == NULL) { 6793 /* Can't get proc for Net/Open BSD */ 6794 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6795 *error = EINVAL; 6796 return; 6797 } 6798 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6799 return; 6800 } 6801 /* Validate the incoming port. */ 6802 if ((port != 0) && (port != inp->sctp_lport)) { 6803 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6804 *error = EINVAL; 6805 return; 6806 } 6807 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6808 if (lep == NULL) { 6809 /* add the address */ 6810 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6811 SCTP_ADD_IP_ADDRESS, vrf_id); 6812 } else { 6813 if (lep != inp) { 6814 *error = EADDRINUSE; 6815 } 6816 SCTP_INP_DECR_REF(lep); 6817 } 6818 } 6819 6820 /* 6821 * sctp_bindx(DELETE) for one address. 6822 * assumes all arguments are valid/checked by caller. 
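 * Unlike the ADD case no endpoint lookup or bind is needed; after the
 * address-family sanity checks the address goes straight to
 * sctp_addr_mgmt_ep_sa() with SCTP_DEL_IP_ADDRESS.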
6823 */ 6824 void 6825 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6826 struct sockaddr *sa, uint32_t vrf_id, int *error) 6827 { 6828 struct sockaddr *addr_to_use; 6829 #if defined(INET) && defined(INET6) 6830 struct sockaddr_in6 *sin6; 6831 struct sockaddr_in sin; 6832 #endif 6833 6834 /* see if we're bound all already! */ 6835 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6836 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6837 *error = EINVAL; 6838 return; 6839 } 6840 switch (sa->sa_family) { 6841 #ifdef INET6 6842 case AF_INET6: 6843 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6844 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6845 *error = EINVAL; 6846 return; 6847 } 6848 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6849 /* can only bind v6 on PF_INET6 sockets */ 6850 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6851 *error = EINVAL; 6852 return; 6853 } 6854 #ifdef INET 6855 sin6 = (struct sockaddr_in6 *)sa; 6856 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6857 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6858 SCTP_IPV6_V6ONLY(inp)) { 6859 /* can't bind mapped-v4 on PF_INET sockets */ 6860 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6861 *error = EINVAL; 6862 return; 6863 } 6864 in6_sin6_2_sin(&sin, sin6); 6865 addr_to_use = (struct sockaddr *)&sin; 6866 } else { 6867 addr_to_use = sa; 6868 } 6869 #else 6870 addr_to_use = sa; 6871 #endif 6872 break; 6873 #endif 6874 #ifdef INET 6875 case AF_INET: 6876 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6877 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6878 *error = EINVAL; 6879 return; 6880 } 6881 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6882 SCTP_IPV6_V6ONLY(inp)) { 6883 /* can't bind v4 on PF_INET sockets */ 6884 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6885 *error = EINVAL; 6886 return; 6887 } 6888 addr_to_use = sa; 6889 break; 6890 #endif 6891 default: 6892 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6893 *error = EINVAL; 6894 return; 6895 } 6896 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6897 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6898 vrf_id); 6899 } 6900 6901 /* 6902 * returns the valid local address count for an assoc, taking into account 6903 * all scoping rules 6904 */ 6905 int 6906 sctp_local_addr_count(struct sctp_tcb *stcb) 6907 { 6908 int loopback_scope; 6909 #if defined(INET) 6910 int ipv4_local_scope, ipv4_addr_legal; 6911 #endif 6912 #if defined(INET6) 6913 int local_scope, site_scope, ipv6_addr_legal; 6914 #endif 6915 struct sctp_vrf *vrf; 6916 struct sctp_ifn *sctp_ifn; 6917 struct sctp_ifa *sctp_ifa; 6918 int count = 0; 6919 6920 /* Turn on all the appropriate scopes */ 6921 loopback_scope = stcb->asoc.scope.loopback_scope; 6922 #if defined(INET) 6923 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6924 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6925 #endif 6926 #if defined(INET6) 6927 local_scope = stcb->asoc.scope.local_scope; 6928 site_scope = stcb->asoc.scope.site_scope; 6929 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6930 #endif 6931 SCTP_IPI_ADDR_RLOCK(); 6932 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6933 if (vrf == NULL) { 6934 /* no vrf, no addresses */ 6935 SCTP_IPI_ADDR_RUNLOCK(); 6936 return (0); 6937 } 6938 6939 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6940 /* 6941 * bound all case: go through all ifns on the vrf 6942 */ 6943 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6944 if ((loopback_scope == 0) && 6945 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6946 continue; 6947 } 6948 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6949 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6950 continue; 6951 switch (sctp_ifa->address.sa.sa_family) { 6952 #ifdef INET 6953 case AF_INET: 6954 if (ipv4_addr_legal) { 6955 struct sockaddr_in *sin; 6956 6957 sin = &sctp_ifa->address.sin; 6958 if (sin->sin_addr.s_addr == 0) { 6959 /* 6960 * skip unspecified 6961 * addrs 6962 */ 6963 continue; 6964 } 6965 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 6966 &sin->sin_addr) != 0) { 6967 continue; 6968 } 6969 if ((ipv4_local_scope == 0) && 6970 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 6971 continue; 6972 } 6973 /* count this one */ 6974 count++; 6975 } else { 6976 continue; 6977 } 6978 break; 6979 #endif 6980 #ifdef INET6 6981 case AF_INET6: 6982 if (ipv6_addr_legal) { 6983 struct sockaddr_in6 *sin6; 6984 6985 sin6 = &sctp_ifa->address.sin6; 6986 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 6987 continue; 6988 } 6989 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 6990 &sin6->sin6_addr) != 0) { 6991 continue; 6992 } 6993 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 6994 if (local_scope == 0) 6995 continue; 6996 if (sin6->sin6_scope_id == 0) { 6997 if (sa6_recoverscope(sin6) != 0) 6998 /* 6999 * 7000 * bad 7001 * link 7002 * 7003 * local 7004 * 7005 * address 7006 */ 7007 continue; 7008 } 7009 } 7010 if ((site_scope == 0) && 7011 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7012 continue; 7013 } 7014 /* count this one */ 7015 count++; 7016 } 7017 break; 7018 #endif 7019 default: 7020 /* TSNH */ 7021 break; 7022 } 7023 } 7024 } 7025 } else { 7026 /* 7027 * subset bound case 7028 */ 7029 struct sctp_laddr *laddr; 7030 7031 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7032 sctp_nxt_addr) { 7033 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7034 continue; 7035 } 7036 /* count this one */ 7037 count++; 7038 } 7039 } 7040 SCTP_IPI_ADDR_RUNLOCK(); 7041 return (count); 7042 } 7043 7044 #if defined(SCTP_LOCAL_TRACE_BUF) 7045 7046 void 7047 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7048 { 7049 uint32_t saveindex, newindex; 7050 7051 do { 7052 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7053 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7054 newindex = 1; 7055 } else { 7056 newindex = saveindex + 1; 7057 } 7058 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7059 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7060 saveindex = 0; 7061 } 7062 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7063 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7064 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7065 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7066 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7067 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7068 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7069 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7070 } 7071 7072 #endif 7073 static void 7074 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7075 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7076 { 7077 struct ip *iph; 7078 #ifdef INET6 7079 struct ip6_hdr *ip6; 7080 #endif 7081 struct mbuf *sp, *last; 7082 struct udphdr *uhdr; 7083 uint16_t port; 7084 7085 if ((m->m_flags & M_PKTHDR) == 0) { 7086 /* Can't handle one that is not a pkt hdr */ 7087 goto out; 7088 } 7089 /* Pull the src port */ 7090 iph = mtod(m, struct ip *); 7091 uhdr = (struct udphdr *)((caddr_t)iph + off); 7092 port = uhdr->uh_sport; 7093 /* 7094 * Split out the mbuf chain. Leave the IP header in m, place the 7095 * rest in the sp. 7096 */ 7097 sp = m_split(m, off, M_NOWAIT); 7098 if (sp == NULL) { 7099 /* Gak, drop packet, we can't do a split */ 7100 goto out; 7101 } 7102 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7103 /* Gak, packet can't have an SCTP header in it - too small */ 7104 m_freem(sp); 7105 goto out; 7106 } 7107 /* Now pull up the UDP header and SCTP header together */ 7108 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7109 if (sp == NULL) { 7110 /* Gak pullup failed */ 7111 goto out; 7112 } 7113 /* Trim out the UDP header */ 7114 m_adj(sp, sizeof(struct udphdr)); 7115 7116 /* Now reconstruct the mbuf chain */ 7117 for (last = m; last->m_next; last = last->m_next); 7118 last->m_next = sp; 7119 m->m_pkthdr.len += sp->m_pkthdr.len; 7120 /* 7121 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7122 * checksum and it was valid. Since CSUM_DATA_VALID == 7123 * CSUM_SCTP_VALID this would imply that the HW also verified the 7124 * SCTP checksum. Therefore, clear the bit. 
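 * With the flag cleared the SCTP input path should fall back to
 * verifying the CRC32c itself instead of trusting the (UDP-only)
 * hardware result.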
7125 */ 7126 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7127 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7128 m->m_pkthdr.len, 7129 if_name(m->m_pkthdr.rcvif), 7130 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7131 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7132 iph = mtod(m, struct ip *); 7133 switch (iph->ip_v) { 7134 #ifdef INET 7135 case IPVERSION: 7136 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7137 sctp_input_with_port(m, off, port); 7138 break; 7139 #endif 7140 #ifdef INET6 7141 case IPV6_VERSION >> 4: 7142 ip6 = mtod(m, struct ip6_hdr *); 7143 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7144 sctp6_input_with_port(&m, &off, port); 7145 break; 7146 #endif 7147 default: 7148 goto out; 7149 break; 7150 } 7151 return; 7152 out: 7153 m_freem(m); 7154 } 7155 7156 #ifdef INET 7157 static void 7158 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7159 { 7160 struct ip *outer_ip, *inner_ip; 7161 struct sctphdr *sh; 7162 struct icmp *icmp; 7163 struct udphdr *udp; 7164 struct sctp_inpcb *inp; 7165 struct sctp_tcb *stcb; 7166 struct sctp_nets *net; 7167 struct sctp_init_chunk *ch; 7168 struct sockaddr_in src, dst; 7169 uint8_t type, code; 7170 7171 inner_ip = (struct ip *)vip; 7172 icmp = (struct icmp *)((caddr_t)inner_ip - 7173 (sizeof(struct icmp) - sizeof(struct ip))); 7174 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7175 if (ntohs(outer_ip->ip_len) < 7176 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7177 return; 7178 } 7179 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7180 sh = (struct sctphdr *)(udp + 1); 7181 memset(&src, 0, sizeof(struct sockaddr_in)); 7182 src.sin_family = AF_INET; 7183 src.sin_len = sizeof(struct sockaddr_in); 7184 src.sin_port = sh->src_port; 7185 src.sin_addr = inner_ip->ip_src; 7186 memset(&dst, 0, sizeof(struct sockaddr_in)); 7187 dst.sin_family = AF_INET; 7188 dst.sin_len = sizeof(struct sockaddr_in); 7189 dst.sin_port = sh->dest_port; 7190 dst.sin_addr = inner_ip->ip_dst; 7191 /* 7192 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7193 * holds our local endpoint address. Thus we reverse the dst and the 7194 * src in the lookup. 7195 */ 7196 inp = NULL; 7197 net = NULL; 7198 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7199 (struct sockaddr *)&src, 7200 &inp, &net, 1, 7201 SCTP_DEFAULT_VRFID); 7202 if ((stcb != NULL) && 7203 (net != NULL) && 7204 (inp != NULL)) { 7205 /* Check the UDP port numbers */ 7206 if ((udp->uh_dport != net->port) || 7207 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7208 SCTP_TCB_UNLOCK(stcb); 7209 return; 7210 } 7211 /* Check the verification tag */ 7212 if (ntohl(sh->v_tag) != 0) { 7213 /* 7214 * This must be the verification tag used for 7215 * sending out packets. We don't consider packets 7216 * reflecting the verification tag. 7217 */ 7218 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7219 SCTP_TCB_UNLOCK(stcb); 7220 return; 7221 } 7222 } else { 7223 if (ntohs(outer_ip->ip_len) >= 7224 sizeof(struct ip) + 7225 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7226 /* 7227 * In this case we can check if we got an 7228 * INIT chunk and if the initiate tag 7229 * matches. 
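 * The extra 20 bytes required by the length check above cover the
 * 12-byte common header, the 4-byte chunk header and the 4-byte
 * initiate tag.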
7230 */ 7231 ch = (struct sctp_init_chunk *)(sh + 1); 7232 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7233 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7234 SCTP_TCB_UNLOCK(stcb); 7235 return; 7236 } 7237 } else { 7238 SCTP_TCB_UNLOCK(stcb); 7239 return; 7240 } 7241 } 7242 type = icmp->icmp_type; 7243 code = icmp->icmp_code; 7244 if ((type == ICMP_UNREACH) && 7245 (code == ICMP_UNREACH_PORT)) { 7246 code = ICMP_UNREACH_PROTOCOL; 7247 } 7248 sctp_notify(inp, stcb, net, type, code, 7249 ntohs(inner_ip->ip_len), 7250 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7251 } else { 7252 if ((stcb == NULL) && (inp != NULL)) { 7253 /* reduce ref-count */ 7254 SCTP_INP_WLOCK(inp); 7255 SCTP_INP_DECR_REF(inp); 7256 SCTP_INP_WUNLOCK(inp); 7257 } 7258 if (stcb) { 7259 SCTP_TCB_UNLOCK(stcb); 7260 } 7261 } 7262 return; 7263 } 7264 #endif 7265 7266 #ifdef INET6 7267 static void 7268 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7269 { 7270 struct ip6ctlparam *ip6cp; 7271 struct sctp_inpcb *inp; 7272 struct sctp_tcb *stcb; 7273 struct sctp_nets *net; 7274 struct sctphdr sh; 7275 struct udphdr udp; 7276 struct sockaddr_in6 src, dst; 7277 uint8_t type, code; 7278 7279 ip6cp = (struct ip6ctlparam *)d; 7280 /* 7281 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7282 */ 7283 if (ip6cp->ip6c_m == NULL) { 7284 return; 7285 } 7286 /* 7287 * Check if we can safely examine the ports and the verification tag 7288 * of the SCTP common header. 7289 */ 7290 if (ip6cp->ip6c_m->m_pkthdr.len < 7291 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7292 return; 7293 } 7294 /* Copy out the UDP header. */ 7295 memset(&udp, 0, sizeof(struct udphdr)); 7296 m_copydata(ip6cp->ip6c_m, 7297 ip6cp->ip6c_off, 7298 sizeof(struct udphdr), 7299 (caddr_t)&udp); 7300 /* Copy out the port numbers and the verification tag. */ 7301 memset(&sh, 0, sizeof(struct sctphdr)); 7302 m_copydata(ip6cp->ip6c_m, 7303 ip6cp->ip6c_off + sizeof(struct udphdr), 7304 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7305 (caddr_t)&sh); 7306 memset(&src, 0, sizeof(struct sockaddr_in6)); 7307 src.sin6_family = AF_INET6; 7308 src.sin6_len = sizeof(struct sockaddr_in6); 7309 src.sin6_port = sh.src_port; 7310 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7311 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7312 return; 7313 } 7314 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7315 dst.sin6_family = AF_INET6; 7316 dst.sin6_len = sizeof(struct sockaddr_in6); 7317 dst.sin6_port = sh.dest_port; 7318 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7319 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7320 return; 7321 } 7322 inp = NULL; 7323 net = NULL; 7324 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7325 (struct sockaddr *)&src, 7326 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7327 if ((stcb != NULL) && 7328 (net != NULL) && 7329 (inp != NULL)) { 7330 /* Check the UDP port numbers */ 7331 if ((udp.uh_dport != net->port) || 7332 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7333 SCTP_TCB_UNLOCK(stcb); 7334 return; 7335 } 7336 /* Check the verification tag */ 7337 if (ntohl(sh.v_tag) != 0) { 7338 /* 7339 * This must be the verification tag used for 7340 * sending out packets. We don't consider packets 7341 * reflecting the verification tag. 
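 * A zero tag is only accepted when the quoted packet is an INIT whose
 * initiate tag matches our own verification tag; that is checked
 * below.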
7342 */ 7343 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7344 SCTP_TCB_UNLOCK(stcb); 7345 return; 7346 } 7347 } else { 7348 if (ip6cp->ip6c_m->m_pkthdr.len >= 7349 ip6cp->ip6c_off + sizeof(struct udphdr) + 7350 sizeof(struct sctphdr) + 7351 sizeof(struct sctp_chunkhdr) + 7352 offsetof(struct sctp_init, a_rwnd)) { 7353 /* 7354 * In this case we can check if we got an 7355 * INIT chunk and if the initiate tag 7356 * matches. 7357 */ 7358 uint32_t initiate_tag; 7359 uint8_t chunk_type; 7360 7361 m_copydata(ip6cp->ip6c_m, 7362 ip6cp->ip6c_off + 7363 sizeof(struct udphdr) + 7364 sizeof(struct sctphdr), 7365 sizeof(uint8_t), 7366 (caddr_t)&chunk_type); 7367 m_copydata(ip6cp->ip6c_m, 7368 ip6cp->ip6c_off + 7369 sizeof(struct udphdr) + 7370 sizeof(struct sctphdr) + 7371 sizeof(struct sctp_chunkhdr), 7372 sizeof(uint32_t), 7373 (caddr_t)&initiate_tag); 7374 if ((chunk_type != SCTP_INITIATION) || 7375 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7376 SCTP_TCB_UNLOCK(stcb); 7377 return; 7378 } 7379 } else { 7380 SCTP_TCB_UNLOCK(stcb); 7381 return; 7382 } 7383 } 7384 type = ip6cp->ip6c_icmp6->icmp6_type; 7385 code = ip6cp->ip6c_icmp6->icmp6_code; 7386 if ((type == ICMP6_DST_UNREACH) && 7387 (code == ICMP6_DST_UNREACH_NOPORT)) { 7388 type = ICMP6_PARAM_PROB; 7389 code = ICMP6_PARAMPROB_NEXTHEADER; 7390 } 7391 sctp6_notify(inp, stcb, net, type, code, 7392 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7393 } else { 7394 if ((stcb == NULL) && (inp != NULL)) { 7395 /* reduce inp's ref-count */ 7396 SCTP_INP_WLOCK(inp); 7397 SCTP_INP_DECR_REF(inp); 7398 SCTP_INP_WUNLOCK(inp); 7399 } 7400 if (stcb) { 7401 SCTP_TCB_UNLOCK(stcb); 7402 } 7403 } 7404 } 7405 #endif 7406 7407 void 7408 sctp_over_udp_stop(void) 7409 { 7410 /* 7411 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7412 * for writting! 7413 */ 7414 #ifdef INET 7415 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7416 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7417 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7418 } 7419 #endif 7420 #ifdef INET6 7421 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7422 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7423 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7424 } 7425 #endif 7426 } 7427 7428 int 7429 sctp_over_udp_start(void) 7430 { 7431 uint16_t port; 7432 int ret; 7433 #ifdef INET 7434 struct sockaddr_in sin; 7435 #endif 7436 #ifdef INET6 7437 struct sockaddr_in6 sin6; 7438 #endif 7439 /* 7440 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7441 * for writting! 7442 */ 7443 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7444 if (ntohs(port) == 0) { 7445 /* Must have a port set */ 7446 return (EINVAL); 7447 } 7448 #ifdef INET 7449 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7450 /* Already running -- must stop first */ 7451 return (EALREADY); 7452 } 7453 #endif 7454 #ifdef INET6 7455 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7456 /* Already running -- must stop first */ 7457 return (EALREADY); 7458 } 7459 #endif 7460 #ifdef INET 7461 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7462 SOCK_DGRAM, IPPROTO_UDP, 7463 curthread->td_ucred, curthread))) { 7464 sctp_over_udp_stop(); 7465 return (ret); 7466 } 7467 /* Call the special UDP hook. */ 7468 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7469 sctp_recv_udp_tunneled_packet, 7470 sctp_recv_icmp_tunneled_packet, 7471 NULL))) { 7472 sctp_over_udp_stop(); 7473 return (ret); 7474 } 7475 /* Ok, we have a socket, bind it to the port. 
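 * The same socreate()/udp_set_kernel_tunneling()/sobind() sequence is
 * repeated below for the IPv6 tunneling socket.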
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 * For example, sctp_min_mtu(0, 1400, 1280) returns 1280.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}

void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}
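
/*
 * Set the primary state of the association: the bits covered by
 * SCTP_STATE_MASK are replaced, while substate flags are preserved.
 * Entering one of the SHUTDOWN states clears the SHUTDOWN_PENDING
 * substate, and (with KDTRACE_HOOKS) a state__change probe is fired
 * for real state transitions.
 */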
void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	    (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

/*
 * Add substate flags to the association without touching the primary
 * state; (with KDTRACE_HOOKS) the state__change probe is fired when the
 * ABOUT_TO_BE_FREED or SHUTDOWN_PENDING flag is newly set.
 */
void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}