/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}

void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet,
				    lnet->flight_size, tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code that a time is positive and
 * know that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
	uint64_t temp;
	uint32_t ticks;

	if (hz == 1000) {
		ticks = msecs;
	} else {
		temp = (((uint64_t)msecs * hz) + 999) / 1000;
		if (temp > UINT32_MAX) {
			ticks = UINT32_MAX;
		} else {
			ticks = (uint32_t)temp;
		}
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t msecs;

	if (hz == 1000) {
		msecs = ticks;
	} else {
		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
		if (temp > UINT32_MAX) {
			msecs = UINT32_MAX;
		} else {
			msecs = (uint32_t)temp;
		}
	}
	return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
	uint64_t temp;
	uint32_t ticks;

	temp = (uint64_t)secs * hz;
	if (temp > UINT32_MAX) {
		ticks = UINT32_MAX;
	} else {
		ticks = (uint32_t)temp;
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t secs;

	temp = ((uint64_t)ticks + (hz - 1)) / hz;
	if (temp > UINT32_MAX) {
		secs = UINT32_MAX;
	} else {
		secs = (uint32_t)temp;
	}
	return (secs);
}

/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
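 * Only the delayed SACK, stream reset, ASCONF, autoclose, path MTU raise,
 * and heartbeat timers are stopped here; the retransmission timers used
 * for the shutdown exchange itself are left alone.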
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};

/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
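 * For example, sctp_get_prev_mtu(1500) returns 1492, while
 * sctp_get_prev_mtu(50) returns 48 (the largest multiple of 4 <= 50).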
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4
 * smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	val &= 0xfffffffc;
	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbledygook in the random store, which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use a random selection process to
	 * get the initial stream sequence number, using RFC 1750 as a good
	 * guideline.
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}

int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero.
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer.
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&inp->sctp_ep);
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimistic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	/* This will need to be adjusted */
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = inp->sctp_frag_point;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * The inbound side must be set to 0xffff. Also NOTE that
		 * when we get the INIT-ACK back (for the INIT sender) we
		 * MUST reduce the count (streamoutcnt), but first check if
		 * we sent to any of the upper streams that were dropped (if
		 * some were). Those that were dropped must be notified to
		 * the upper layer as failed to send.
		 */
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}

static void
sctp_iterator_work(struct sctp_iterator *it)
{
	struct epoch_tracker et;
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

	NET_EPOCH_ENTER(et);
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		NET_EPOCH_EXIT(et);
		return;
	}
select_a_new_ep:
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
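	/* Skip ahead to an endpoint matching the iterator's PCB flags and features. */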
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		    ("%s: stcb %p does not belong to inp %p, but inp %p",
		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
		/* now let's work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}

static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL,	/* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Freeing if we are stopping or put back on the
			 * addr_wq.
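			 * Work items put back on the addr_wq are retried
			 * the next time the address work queue is serviced.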
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				sctp_asconf_iterator_end(asc, 0);
			} else {
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */

void
sctp_timeout_handler(void *t)
{
	struct epoch_tracker et;
	struct timeval tv;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
	int type;
	int i, secret;
	bool did_output, released_asoc_reference;

	/*
	 * If inp, stcb or net are not NULL, then references to these were
	 * added when the timer was started, and must be released before
	 * this function returns.
	 */
	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	NET_EPOCH_ENTER(et);
	released_asoc_reference = false;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks...
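	 * The timer must point back to itself, its type must be valid, and
	 * any stcb must belong to the inp that scheduled the timer.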
	 */
	KASSERT(tmr->self == NULL || tmr->self == tmr,
	    ("sctp_timeout_handler: tmr->self corrupted"));
	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
	    ("sctp_timeout_handler: invalid timer type %d", tmr->type));
	type = tmr->type;
	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
	    type, stcb, stcb->sctp_ep));
	tmr->stopped_from = 0xa001;
	if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to CLOSED association.\n",
		    type);
		goto out_decr;
	}
	tmr->stopped_from = 0xa002;
	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to not being active.\n",
		    type);
		goto out_decr;
	}

	tmr->stopped_from = 0xa003;
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/*
		 * Release reference so that association can be freed if
		 * necessary below. This is safe now that we have acquired
		 * the lock.
		 */
		atomic_add_int(&stcb->asoc.refcnt, -1);
		released_asoc_reference = true;
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d handler exiting due to CLOSED association.\n",
			    type);
			goto out;
		}
	} else if (inp != NULL) {
		SCTP_INP_WLOCK(inp);
	} else {
		SCTP_WQ_ADDR_LOCK();
	}

	/* Record in stopped_from which timeout occurred. */
	tmr->stopped_from = type;
	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_SEND:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		did_output = true;
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * Safeguard. If there are some on the sent queue
			 * somewhere but no timers running, something is
			 * wrong... so we start a timer on the first chunk
			 * on the sent queue on whatever net it is sent to.
1834 */ 1835 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1836 if (chk->whoTo != NULL) { 1837 break; 1838 } 1839 } 1840 if (chk != NULL) { 1841 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1842 } 1843 } 1844 break; 1845 case SCTP_TIMER_TYPE_INIT: 1846 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1847 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1848 type, inp, stcb, net)); 1849 SCTP_STAT_INCR(sctps_timoinit); 1850 stcb->asoc.timoinit++; 1851 if (sctp_t1init_timer(inp, stcb, net)) { 1852 /* no need to unlock on tcb its gone */ 1853 goto out_decr; 1854 } 1855 did_output = false; 1856 break; 1857 case SCTP_TIMER_TYPE_RECV: 1858 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1859 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1860 type, inp, stcb, net)); 1861 SCTP_STAT_INCR(sctps_timosack); 1862 stcb->asoc.timosack++; 1863 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1864 #ifdef SCTP_AUDITING_ENABLED 1865 sctp_auditing(4, inp, stcb, NULL); 1866 #endif 1867 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1868 did_output = true; 1869 break; 1870 case SCTP_TIMER_TYPE_SHUTDOWN: 1871 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1872 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1873 type, inp, stcb, net)); 1874 SCTP_STAT_INCR(sctps_timoshutdown); 1875 stcb->asoc.timoshutdown++; 1876 if (sctp_shutdown_timer(inp, stcb, net)) { 1877 /* no need to unlock on tcb its gone */ 1878 goto out_decr; 1879 } 1880 #ifdef SCTP_AUDITING_ENABLED 1881 sctp_auditing(4, inp, stcb, net); 1882 #endif 1883 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1884 did_output = true; 1885 break; 1886 case SCTP_TIMER_TYPE_HEARTBEAT: 1887 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1888 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1889 type, inp, stcb, net)); 1890 SCTP_STAT_INCR(sctps_timoheartbeat); 1891 stcb->asoc.timoheartbeat++; 1892 if (sctp_heartbeat_timer(inp, stcb, net)) { 1893 /* no need to unlock on tcb its gone */ 1894 goto out_decr; 1895 } 1896 #ifdef SCTP_AUDITING_ENABLED 1897 sctp_auditing(4, inp, stcb, net); 1898 #endif 1899 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1900 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1901 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1902 did_output = true; 1903 } else { 1904 did_output = false; 1905 } 1906 break; 1907 case SCTP_TIMER_TYPE_COOKIE: 1908 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1909 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1910 type, inp, stcb, net)); 1911 SCTP_STAT_INCR(sctps_timocookie); 1912 stcb->asoc.timocookie++; 1913 if (sctp_cookie_timer(inp, stcb, net)) { 1914 /* no need to unlock on tcb its gone */ 1915 goto out_decr; 1916 } 1917 #ifdef SCTP_AUDITING_ENABLED 1918 sctp_auditing(4, inp, stcb, net); 1919 #endif 1920 /* 1921 * We consider T3 and Cookie timer pretty much the same with 1922 * respect to where from in chunk_output. 
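 * (Both therefore pass SCTP_OUTPUT_FROM_T3 as the reason, as the call below does.)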
1923 */ 1924 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1925 did_output = true; 1926 break; 1927 case SCTP_TIMER_TYPE_NEWCOOKIE: 1928 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1929 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1930 type, inp, stcb, net)); 1931 SCTP_STAT_INCR(sctps_timosecret); 1932 (void)SCTP_GETTIME_TIMEVAL(&tv); 1933 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1934 inp->sctp_ep.last_secret_number = 1935 inp->sctp_ep.current_secret_number; 1936 inp->sctp_ep.current_secret_number++; 1937 if (inp->sctp_ep.current_secret_number >= 1938 SCTP_HOW_MANY_SECRETS) { 1939 inp->sctp_ep.current_secret_number = 0; 1940 } 1941 secret = (int)inp->sctp_ep.current_secret_number; 1942 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1943 inp->sctp_ep.secret_key[secret][i] = 1944 sctp_select_initial_TSN(&inp->sctp_ep); 1945 } 1946 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1947 did_output = false; 1948 break; 1949 case SCTP_TIMER_TYPE_PATHMTURAISE: 1950 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1951 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1952 type, inp, stcb, net)); 1953 SCTP_STAT_INCR(sctps_timopathmtu); 1954 sctp_pathmtu_timer(inp, stcb, net); 1955 did_output = false; 1956 break; 1957 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1958 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1959 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1960 type, inp, stcb, net)); 1961 if (sctp_shutdownack_timer(inp, stcb, net)) { 1962 /* no need to unlock on tcb its gone */ 1963 goto out_decr; 1964 } 1965 SCTP_STAT_INCR(sctps_timoshutdownack); 1966 stcb->asoc.timoshutdownack++; 1967 #ifdef SCTP_AUDITING_ENABLED 1968 sctp_auditing(4, inp, stcb, net); 1969 #endif 1970 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1971 did_output = true; 1972 break; 1973 case SCTP_TIMER_TYPE_ASCONF: 1974 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1975 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1976 type, inp, stcb, net)); 1977 SCTP_STAT_INCR(sctps_timoasconf); 1978 if (sctp_asconf_timer(inp, stcb, net)) { 1979 /* no need to unlock on tcb its gone */ 1980 goto out_decr; 1981 } 1982 #ifdef SCTP_AUDITING_ENABLED 1983 sctp_auditing(4, inp, stcb, net); 1984 #endif 1985 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 1986 did_output = true; 1987 break; 1988 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1989 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1990 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1991 type, inp, stcb, net)); 1992 SCTP_STAT_INCR(sctps_timoshutdownguard); 1993 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 1994 "Shutdown guard timer expired"); 1995 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 1996 /* no need to unlock on tcb its gone */ 1997 goto out_decr; 1998 case SCTP_TIMER_TYPE_AUTOCLOSE: 1999 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2000 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2001 type, inp, stcb, net)); 2002 SCTP_STAT_INCR(sctps_timoautoclose); 2003 sctp_autoclose_timer(inp, stcb); 2004 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2005 did_output = true; 2006 break; 2007 case SCTP_TIMER_TYPE_STRRESET: 2008 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2009 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2010 type, inp, stcb, net)); 2011 SCTP_STAT_INCR(sctps_timostrmrst); 2012 if (sctp_strreset_timer(inp, stcb)) { 
2013 /* no need to unlock on tcb its gone */ 2014 goto out_decr; 2015 } 2016 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2017 did_output = true; 2018 break; 2019 case SCTP_TIMER_TYPE_INPKILL: 2020 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2021 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2022 type, inp, stcb, net)); 2023 SCTP_STAT_INCR(sctps_timoinpkill); 2024 /* 2025 * special case, take away our increment since WE are the 2026 * killer 2027 */ 2028 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2029 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2030 SCTP_INP_DECR_REF(inp); 2031 SCTP_INP_WUNLOCK(inp); 2032 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2033 SCTP_CALLED_FROM_INPKILL_TIMER); 2034 inp = NULL; 2035 goto out_no_decr; 2036 case SCTP_TIMER_TYPE_ASOCKILL: 2037 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2038 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2039 type, inp, stcb, net)); 2040 SCTP_STAT_INCR(sctps_timoassockill); 2041 /* Can we free it yet? */ 2042 SCTP_INP_DECR_REF(inp); 2043 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2044 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2045 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2046 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2047 /* 2048 * free asoc, always unlocks (or destroy's) so prevent 2049 * duplicate unlock or unlock of a free mtx :-0 2050 */ 2051 stcb = NULL; 2052 goto out_no_decr; 2053 case SCTP_TIMER_TYPE_ADDR_WQ: 2054 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2055 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2056 type, inp, stcb, net)); 2057 sctp_handle_addr_wq(); 2058 did_output = true; 2059 break; 2060 case SCTP_TIMER_TYPE_PRIM_DELETED: 2061 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2062 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2063 type, inp, stcb, net)); 2064 SCTP_STAT_INCR(sctps_timodelprim); 2065 sctp_delete_prim_timer(inp, stcb); 2066 did_output = false; 2067 break; 2068 default: 2069 #ifdef INVARIANTS 2070 panic("Unknown timer type %d", type); 2071 #else 2072 goto out; 2073 #endif 2074 } 2075 #ifdef SCTP_AUDITING_ENABLED 2076 sctp_audit_log(0xF1, (uint8_t)type); 2077 if (inp != NULL) 2078 sctp_auditing(5, inp, stcb, net); 2079 #endif 2080 if (did_output && (stcb != NULL)) { 2081 /* 2082 * Now we need to clean up the control chunk chain if an 2083 * ECNE is on it. It must be marked as UNSENT again so next 2084 * call will continue to send it until such time that we get 2085 * a CWR, to remove it. It is, however, less likely that we 2086 * will find a ecn echo on the chain though. 2087 */ 2088 sctp_fix_ecn_echo(&stcb->asoc); 2089 } 2090 out: 2091 if (stcb != NULL) { 2092 SCTP_TCB_UNLOCK(stcb); 2093 } else if (inp != NULL) { 2094 SCTP_INP_WUNLOCK(inp); 2095 } else { 2096 SCTP_WQ_ADDR_UNLOCK(); 2097 } 2098 2099 out_decr: 2100 /* These reference counts were incremented in sctp_timer_start(). */ 2101 if (inp != NULL) { 2102 SCTP_INP_DECR_REF(inp); 2103 } 2104 if ((stcb != NULL) && !released_asoc_reference) { 2105 atomic_add_int(&stcb->asoc.refcnt, -1); 2106 } 2107 if (net != NULL) { 2108 sctp_free_remote_addr(net); 2109 } 2110 out_no_decr: 2111 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2112 CURVNET_RESTORE(); 2113 NET_EPOCH_EXIT(et); 2114 } 2115 2116 /*- 2117 * The following table shows which parameters must be provided 2118 * when calling sctp_timer_start(). For parameters not being 2119 * provided, NULL must be used. 
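 *
 * A minimal usage sketch (a hypothetical caller; both calls mirror ones
 * made elsewhere in this file, and the table below gives the full
 * contract). A retransmission timer needs inp, stcb, and net, while the
 * address work-queue timer takes no pointers at all:
 *
 *   sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
 *   sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, NULL, NULL, NULL);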
2120 * 2121 * |Name |inp |stcb|net | 2122 * |-----------------------------|----|----|----| 2123 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2124 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2125 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2126 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2127 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2128 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2129 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2130 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2131 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2132 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2133 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2134 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2135 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2136 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2137 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2138 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2139 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2140 * 2141 */ 2142 2143 void 2144 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2145 struct sctp_nets *net) 2146 { 2147 struct sctp_timer *tmr; 2148 uint32_t to_ticks; 2149 uint32_t rndval, jitter; 2150 2151 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2152 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2153 t_type, stcb, stcb->sctp_ep)); 2154 tmr = NULL; 2155 if (stcb != NULL) { 2156 SCTP_TCB_LOCK_ASSERT(stcb); 2157 } else if (inp != NULL) { 2158 SCTP_INP_WLOCK_ASSERT(inp); 2159 } else { 2160 SCTP_WQ_ADDR_LOCK_ASSERT(); 2161 } 2162 if (stcb != NULL) { 2163 /* 2164 * Don't restart timer on association that's about to be 2165 * killed. 2166 */ 2167 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2168 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2169 SCTPDBG(SCTP_DEBUG_TIMER2, 2170 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2171 t_type, inp, stcb, net); 2172 return; 2173 } 2174 /* Don't restart timer on net that's been removed. */ 2175 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2176 SCTPDBG(SCTP_DEBUG_TIMER2, 2177 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2178 t_type, inp, stcb, net); 2179 return; 2180 } 2181 } 2182 switch (t_type) { 2183 case SCTP_TIMER_TYPE_SEND: 2184 /* Here we use the RTO timer. */ 2185 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2186 #ifdef INVARIANTS 2187 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2188 t_type, inp, stcb, net); 2189 #else 2190 return; 2191 #endif 2192 } 2193 tmr = &net->rxt_timer; 2194 if (net->RTO == 0) { 2195 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2196 } else { 2197 to_ticks = sctp_msecs_to_ticks(net->RTO); 2198 } 2199 break; 2200 case SCTP_TIMER_TYPE_INIT: 2201 /* 2202 * Here we use the INIT timer default usually about 1 2203 * second. 2204 */ 2205 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2206 #ifdef INVARIANTS 2207 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2208 t_type, inp, stcb, net); 2209 #else 2210 return; 2211 #endif 2212 } 2213 tmr = &net->rxt_timer; 2214 if (net->RTO == 0) { 2215 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2216 } else { 2217 to_ticks = sctp_msecs_to_ticks(net->RTO); 2218 } 2219 break; 2220 case SCTP_TIMER_TYPE_RECV: 2221 /* 2222 * Here we use the Delayed-Ack timer value from the inp, 2223 * ususually about 200ms. 
2224 */ 2225 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2226 #ifdef INVARIANTS 2227 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2228 t_type, inp, stcb, net); 2229 #else 2230 return; 2231 #endif 2232 } 2233 tmr = &stcb->asoc.dack_timer; 2234 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2235 break; 2236 case SCTP_TIMER_TYPE_SHUTDOWN: 2237 /* Here we use the RTO of the destination. */ 2238 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2239 #ifdef INVARIANTS 2240 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2241 t_type, inp, stcb, net); 2242 #else 2243 return; 2244 #endif 2245 } 2246 tmr = &net->rxt_timer; 2247 if (net->RTO == 0) { 2248 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2249 } else { 2250 to_ticks = sctp_msecs_to_ticks(net->RTO); 2251 } 2252 break; 2253 case SCTP_TIMER_TYPE_HEARTBEAT: 2254 /* 2255 * The net is used here so that we can add in the RTO. Even 2256 * though we use a different timer. We also add the HB timer 2257 * PLUS a random jitter. 2258 */ 2259 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2260 #ifdef INVARIANTS 2261 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2262 t_type, inp, stcb, net); 2263 #else 2264 return; 2265 #endif 2266 } 2267 if ((net->dest_state & SCTP_ADDR_NOHB) && 2268 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 2269 SCTPDBG(SCTP_DEBUG_TIMER2, 2270 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2271 t_type, inp, stcb, net); 2272 return; 2273 } 2274 tmr = &net->hb_timer; 2275 if (net->RTO == 0) { 2276 to_ticks = stcb->asoc.initial_rto; 2277 } else { 2278 to_ticks = net->RTO; 2279 } 2280 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2281 jitter = rndval % to_ticks; 2282 if (jitter >= (to_ticks >> 1)) { 2283 to_ticks = to_ticks + (jitter - (to_ticks >> 1)); 2284 } else { 2285 to_ticks = to_ticks - jitter; 2286 } 2287 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2288 !(net->dest_state & SCTP_ADDR_PF)) { 2289 to_ticks += net->heart_beat_delay; 2290 } 2291 /* 2292 * Now we must convert the to_ticks that are now in ms to 2293 * ticks. 2294 */ 2295 to_ticks = sctp_msecs_to_ticks(to_ticks); 2296 break; 2297 case SCTP_TIMER_TYPE_COOKIE: 2298 /* 2299 * Here we can use the RTO timer from the network since one 2300 * RTT was complete. If a retransmission happened then we 2301 * will be using the RTO initial value. 2302 */ 2303 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2304 #ifdef INVARIANTS 2305 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2306 t_type, inp, stcb, net); 2307 #else 2308 return; 2309 #endif 2310 } 2311 tmr = &net->rxt_timer; 2312 if (net->RTO == 0) { 2313 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2314 } else { 2315 to_ticks = sctp_msecs_to_ticks(net->RTO); 2316 } 2317 break; 2318 case SCTP_TIMER_TYPE_NEWCOOKIE: 2319 /* 2320 * Nothing needed but the endpoint here ususually about 60 2321 * minutes. 2322 */ 2323 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2324 #ifdef INVARIANTS 2325 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2326 t_type, inp, stcb, net); 2327 #else 2328 return; 2329 #endif 2330 } 2331 tmr = &inp->sctp_ep.signature_change; 2332 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2333 break; 2334 case SCTP_TIMER_TYPE_PATHMTURAISE: 2335 /* 2336 * Here we use the value found in the EP for PMTUD, 2337 * ususually about 10 minutes. 
2338 */ 2339 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2340 #ifdef INVARIANTS 2341 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2342 t_type, inp, stcb, net); 2343 #else 2344 return; 2345 #endif 2346 } 2347 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2348 SCTPDBG(SCTP_DEBUG_TIMER2, 2349 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2350 t_type, inp, stcb, net); 2351 return; 2352 } 2353 tmr = &net->pmtu_timer; 2354 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2355 break; 2356 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2357 /* Here we use the RTO of the destination. */ 2358 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2359 #ifdef INVARIANTS 2360 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2361 t_type, inp, stcb, net); 2362 #else 2363 return; 2364 #endif 2365 } 2366 tmr = &net->rxt_timer; 2367 if (net->RTO == 0) { 2368 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2369 } else { 2370 to_ticks = sctp_msecs_to_ticks(net->RTO); 2371 } 2372 break; 2373 case SCTP_TIMER_TYPE_ASCONF: 2374 /* 2375 * Here the timer comes from the stcb but its value is from 2376 * the net's RTO. 2377 */ 2378 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2379 #ifdef INVARIANTS 2380 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2381 t_type, inp, stcb, net); 2382 #else 2383 return; 2384 #endif 2385 } 2386 tmr = &stcb->asoc.asconf_timer; 2387 if (net->RTO == 0) { 2388 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2389 } else { 2390 to_ticks = sctp_msecs_to_ticks(net->RTO); 2391 } 2392 break; 2393 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2394 /* 2395 * Here we use the endpoints shutdown guard timer usually 2396 * about 3 minutes. 2397 */ 2398 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2399 #ifdef INVARIANTS 2400 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2401 t_type, inp, stcb, net); 2402 #else 2403 return; 2404 #endif 2405 } 2406 tmr = &stcb->asoc.shut_guard_timer; 2407 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2408 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2409 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2410 } else { 2411 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2412 } 2413 } else { 2414 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2415 } 2416 break; 2417 case SCTP_TIMER_TYPE_AUTOCLOSE: 2418 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2419 #ifdef INVARIANTS 2420 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2421 t_type, inp, stcb, net); 2422 #else 2423 return; 2424 #endif 2425 } 2426 tmr = &stcb->asoc.autoclose_timer; 2427 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2428 break; 2429 case SCTP_TIMER_TYPE_STRRESET: 2430 /* 2431 * Here the timer comes from the stcb but its value is from 2432 * the net's RTO. 2433 */ 2434 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2435 #ifdef INVARIANTS 2436 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2437 t_type, inp, stcb, net); 2438 #else 2439 return; 2440 #endif 2441 } 2442 tmr = &stcb->asoc.strreset_timer; 2443 if (net->RTO == 0) { 2444 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2445 } else { 2446 to_ticks = sctp_msecs_to_ticks(net->RTO); 2447 } 2448 break; 2449 case SCTP_TIMER_TYPE_INPKILL: 2450 /* 2451 * The inp is setup to die. We re-use the signature_chage 2452 * timer since that has stopped and we are in the GONE 2453 * state. 
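 * (The timeout used below is SCTP_INP_KILL_TIMEOUT milliseconds, converted to callout ticks.)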
2454 */ 2455 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2456 #ifdef INVARIANTS 2457 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2458 t_type, inp, stcb, net); 2459 #else 2460 return; 2461 #endif 2462 } 2463 tmr = &inp->sctp_ep.signature_change; 2464 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2465 break; 2466 case SCTP_TIMER_TYPE_ASOCKILL: 2467 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2468 #ifdef INVARIANTS 2469 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2470 t_type, inp, stcb, net); 2471 #else 2472 return; 2473 #endif 2474 } 2475 tmr = &stcb->asoc.strreset_timer; 2476 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2477 break; 2478 case SCTP_TIMER_TYPE_ADDR_WQ: 2479 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2480 #ifdef INVARIANTS 2481 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2482 t_type, inp, stcb, net); 2483 #else 2484 return; 2485 #endif 2486 } 2487 /* Only 1 tick away :-) */ 2488 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2489 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2490 break; 2491 case SCTP_TIMER_TYPE_PRIM_DELETED: 2492 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2493 #ifdef INVARIANTS 2494 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2495 t_type, inp, stcb, net); 2496 #else 2497 return; 2498 #endif 2499 } 2500 tmr = &stcb->asoc.delete_prim_timer; 2501 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2502 break; 2503 default: 2504 #ifdef INVARIANTS 2505 panic("Unknown timer type %d", t_type); 2506 #else 2507 return; 2508 #endif 2509 } 2510 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2511 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2512 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2513 /* 2514 * We do NOT allow you to have it already running. If it is, 2515 * we leave the current one up unchanged. 2516 */ 2517 SCTPDBG(SCTP_DEBUG_TIMER2, 2518 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2519 t_type, inp, stcb, net); 2520 return; 2521 } 2522 /* At this point we can proceed. */ 2523 if (t_type == SCTP_TIMER_TYPE_SEND) { 2524 stcb->asoc.num_send_timers_up++; 2525 } 2526 tmr->stopped_from = 0; 2527 tmr->type = t_type; 2528 tmr->ep = (void *)inp; 2529 tmr->tcb = (void *)stcb; 2530 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2531 tmr->net = NULL; 2532 } else { 2533 tmr->net = (void *)net; 2534 } 2535 tmr->self = (void *)tmr; 2536 tmr->vnet = (void *)curvnet; 2537 tmr->ticks = sctp_get_tick_count(); 2538 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2539 SCTPDBG(SCTP_DEBUG_TIMER2, 2540 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2541 t_type, to_ticks, inp, stcb, net); 2542 /* 2543 * If this is a newly scheduled callout, as opposed to a 2544 * rescheduled one, increment relevant reference counts. 2545 */ 2546 if (tmr->ep != NULL) { 2547 SCTP_INP_INCR_REF(inp); 2548 } 2549 if (tmr->tcb != NULL) { 2550 atomic_add_int(&stcb->asoc.refcnt, 1); 2551 } 2552 if (tmr->net != NULL) { 2553 atomic_add_int(&net->ref_count, 1); 2554 } 2555 } else { 2556 /* 2557 * This should not happen, since we checked for pending 2558 * above. 2559 */ 2560 SCTPDBG(SCTP_DEBUG_TIMER2, 2561 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2562 t_type, to_ticks, inp, stcb, net); 2563 } 2564 return; 2565 } 2566 2567 /*- 2568 * The following table shows which parameters must be provided 2569 * when calling sctp_timer_stop(). 
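(For instance, the INPKILL case in sctp_timeout_handler() above stops its own timer via sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, ...), matching its Yes/No/No row in the table below.)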
For parameters not being 2570 * provided, NULL must be used. 2571 * 2572 * |Name |inp |stcb|net | 2573 * |-----------------------------|----|----|----| 2574 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2575 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2576 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2577 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2578 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2579 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2580 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2581 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2582 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2583 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2584 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2585 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2586 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2587 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2588 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2589 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2590 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2591 * 2592 */ 2593 2594 void 2595 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2596 struct sctp_nets *net, uint32_t from) 2597 { 2598 struct sctp_timer *tmr; 2599 2600 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2601 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2602 t_type, stcb, stcb->sctp_ep)); 2603 if (stcb != NULL) { 2604 SCTP_TCB_LOCK_ASSERT(stcb); 2605 } else if (inp != NULL) { 2606 SCTP_INP_WLOCK_ASSERT(inp); 2607 } else { 2608 SCTP_WQ_ADDR_LOCK_ASSERT(); 2609 } 2610 tmr = NULL; 2611 switch (t_type) { 2612 case SCTP_TIMER_TYPE_SEND: 2613 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2614 #ifdef INVARIANTS 2615 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2616 t_type, inp, stcb, net); 2617 #else 2618 return; 2619 #endif 2620 } 2621 tmr = &net->rxt_timer; 2622 break; 2623 case SCTP_TIMER_TYPE_INIT: 2624 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2625 #ifdef INVARIANTS 2626 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2627 t_type, inp, stcb, net); 2628 #else 2629 return; 2630 #endif 2631 } 2632 tmr = &net->rxt_timer; 2633 break; 2634 case SCTP_TIMER_TYPE_RECV: 2635 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2636 #ifdef INVARIANTS 2637 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2638 t_type, inp, stcb, net); 2639 #else 2640 return; 2641 #endif 2642 } 2643 tmr = &stcb->asoc.dack_timer; 2644 break; 2645 case SCTP_TIMER_TYPE_SHUTDOWN: 2646 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2647 #ifdef INVARIANTS 2648 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2649 t_type, inp, stcb, net); 2650 #else 2651 return; 2652 #endif 2653 } 2654 tmr = &net->rxt_timer; 2655 break; 2656 case SCTP_TIMER_TYPE_HEARTBEAT: 2657 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2658 #ifdef INVARIANTS 2659 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2660 t_type, inp, stcb, net); 2661 #else 2662 return; 2663 #endif 2664 } 2665 tmr = &net->hb_timer; 2666 break; 2667 case SCTP_TIMER_TYPE_COOKIE: 2668 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2669 #ifdef INVARIANTS 2670 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2671 t_type, inp, stcb, net); 2672 #else 2673 return; 2674 #endif 2675 } 2676 tmr = &net->rxt_timer; 2677 break; 2678 case SCTP_TIMER_TYPE_NEWCOOKIE: 2679 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2680 #ifdef INVARIANTS 2681 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, 
net = %p", 2682 t_type, inp, stcb, net); 2683 #else 2684 return; 2685 #endif 2686 } 2687 tmr = &inp->sctp_ep.signature_change; 2688 break; 2689 case SCTP_TIMER_TYPE_PATHMTURAISE: 2690 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2691 #ifdef INVARIANTS 2692 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2693 t_type, inp, stcb, net); 2694 #else 2695 return; 2696 #endif 2697 } 2698 tmr = &net->pmtu_timer; 2699 break; 2700 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2701 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2702 #ifdef INVARIANTS 2703 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2704 t_type, inp, stcb, net); 2705 #else 2706 return; 2707 #endif 2708 } 2709 tmr = &net->rxt_timer; 2710 break; 2711 case SCTP_TIMER_TYPE_ASCONF: 2712 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2713 #ifdef INVARIANTS 2714 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2715 t_type, inp, stcb, net); 2716 #else 2717 return; 2718 #endif 2719 } 2720 tmr = &stcb->asoc.asconf_timer; 2721 break; 2722 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2723 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2724 #ifdef INVARIANTS 2725 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2726 t_type, inp, stcb, net); 2727 #else 2728 return; 2729 #endif 2730 } 2731 tmr = &stcb->asoc.shut_guard_timer; 2732 break; 2733 case SCTP_TIMER_TYPE_AUTOCLOSE: 2734 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2735 #ifdef INVARIANTS 2736 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2737 t_type, inp, stcb, net); 2738 #else 2739 return; 2740 #endif 2741 } 2742 tmr = &stcb->asoc.autoclose_timer; 2743 break; 2744 case SCTP_TIMER_TYPE_STRRESET: 2745 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2746 #ifdef INVARIANTS 2747 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2748 t_type, inp, stcb, net); 2749 #else 2750 return; 2751 #endif 2752 } 2753 tmr = &stcb->asoc.strreset_timer; 2754 break; 2755 case SCTP_TIMER_TYPE_INPKILL: 2756 /* 2757 * The inp is setup to die. We re-use the signature_chage 2758 * timer since that has stopped and we are in the GONE 2759 * state. 
2760 */ 2761 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2762 #ifdef INVARIANTS 2763 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2764 t_type, inp, stcb, net); 2765 #else 2766 return; 2767 #endif 2768 } 2769 tmr = &inp->sctp_ep.signature_change; 2770 break; 2771 case SCTP_TIMER_TYPE_ASOCKILL: 2772 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2773 #ifdef INVARIANTS 2774 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2775 t_type, inp, stcb, net); 2776 #else 2777 return; 2778 #endif 2779 } 2780 tmr = &stcb->asoc.strreset_timer; 2781 break; 2782 case SCTP_TIMER_TYPE_ADDR_WQ: 2783 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2784 #ifdef INVARIANTS 2785 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2786 t_type, inp, stcb, net); 2787 #else 2788 return; 2789 #endif 2790 } 2791 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2792 break; 2793 case SCTP_TIMER_TYPE_PRIM_DELETED: 2794 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2795 #ifdef INVARIANTS 2796 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2797 t_type, inp, stcb, net); 2798 #else 2799 return; 2800 #endif 2801 } 2802 tmr = &stcb->asoc.delete_prim_timer; 2803 break; 2804 default: 2805 #ifdef INVARIANTS 2806 panic("Unknown timer type %d", t_type); 2807 #else 2808 return; 2809 #endif 2810 } 2811 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2812 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2813 (tmr->type != t_type)) { 2814 /* 2815 * Ok we have a timer that is under joint use. Cookie timer 2816 * per chance with the SEND timer. We therefore are NOT 2817 * running the timer that the caller wants stopped. So just 2818 * return. 2819 */ 2820 SCTPDBG(SCTP_DEBUG_TIMER2, 2821 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2822 t_type, inp, stcb, net); 2823 return; 2824 } 2825 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2826 stcb->asoc.num_send_timers_up--; 2827 if (stcb->asoc.num_send_timers_up < 0) { 2828 stcb->asoc.num_send_timers_up = 0; 2829 } 2830 } 2831 tmr->self = NULL; 2832 tmr->stopped_from = from; 2833 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2834 KASSERT(tmr->ep == inp, 2835 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2836 t_type, inp, tmr->ep)); 2837 KASSERT(tmr->tcb == stcb, 2838 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2839 t_type, stcb, tmr->tcb)); 2840 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2841 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2842 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2843 t_type, net, tmr->net)); 2844 SCTPDBG(SCTP_DEBUG_TIMER2, 2845 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2846 t_type, inp, stcb, net); 2847 /* 2848 * If the timer was actually stopped, decrement reference 2849 * counts that were incremented in sctp_timer_start(). 2850 */ 2851 if (tmr->ep != NULL) { 2852 SCTP_INP_DECR_REF(inp); 2853 tmr->ep = NULL; 2854 } 2855 if (tmr->tcb != NULL) { 2856 atomic_add_int(&stcb->asoc.refcnt, -1); 2857 tmr->tcb = NULL; 2858 } 2859 if (tmr->net != NULL) { 2860 /* 2861 * Can't use net, since it doesn't work for 2862 * SCTP_TIMER_TYPE_ASCONF. 
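 * The timer was started with a reference to whichever net was current at
 * that time (stored in tmr->net), while callers stop the ASCONF timer with
 * net == NULL, so tmr->net is the pointer whose reference must be released.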
2863 */ 2864 sctp_free_remote_addr((struct sctp_nets *)tmr->net); 2865 tmr->net = NULL; 2866 } 2867 } else { 2868 SCTPDBG(SCTP_DEBUG_TIMER2, 2869 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2870 t_type, inp, stcb, net); 2871 } 2872 return; 2873 } 2874 2875 uint32_t 2876 sctp_calculate_len(struct mbuf *m) 2877 { 2878 uint32_t tlen = 0; 2879 struct mbuf *at; 2880 2881 at = m; 2882 while (at) { 2883 tlen += SCTP_BUF_LEN(at); 2884 at = SCTP_BUF_NEXT(at); 2885 } 2886 return (tlen); 2887 } 2888 2889 void 2890 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2891 struct sctp_association *asoc, uint32_t mtu) 2892 { 2893 /* 2894 * Reset the P-MTU size on this association, this involves changing 2895 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2896 * allow the DF flag to be cleared. 2897 */ 2898 struct sctp_tmit_chunk *chk; 2899 unsigned int eff_mtu, ovh; 2900 2901 asoc->smallest_mtu = mtu; 2902 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2903 ovh = SCTP_MIN_OVERHEAD; 2904 } else { 2905 ovh = SCTP_MIN_V4_OVERHEAD; 2906 } 2907 eff_mtu = mtu - ovh; 2908 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2909 if (chk->send_size > eff_mtu) { 2910 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2911 } 2912 } 2913 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2914 if (chk->send_size > eff_mtu) { 2915 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2916 } 2917 } 2918 } 2919 2920 /* 2921 * Given an association and starting time of the current RTT period, update 2922 * RTO in number of msecs. net should point to the current network. 2923 * Return 1, if an RTO update was performed, return 0 if no update was 2924 * performed due to invalid starting point. 2925 */ 2926 2927 int 2928 sctp_calculate_rto(struct sctp_tcb *stcb, 2929 struct sctp_association *asoc, 2930 struct sctp_nets *net, 2931 struct timeval *old, 2932 int rtt_from_sack) 2933 { 2934 struct timeval now; 2935 uint64_t rtt_us; /* RTT in us */ 2936 int32_t rtt; /* RTT in ms */ 2937 uint32_t new_rto; 2938 int first_measure = 0; 2939 2940 /************************/ 2941 /* 1. calculate new RTT */ 2942 /************************/ 2943 /* get the current time */ 2944 if (stcb->asoc.use_precise_time) { 2945 (void)SCTP_GETPTIME_TIMEVAL(&now); 2946 } else { 2947 (void)SCTP_GETTIME_TIMEVAL(&now); 2948 } 2949 if ((old->tv_sec > now.tv_sec) || 2950 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2951 /* The starting point is in the future. */ 2952 return (0); 2953 } 2954 timevalsub(&now, old); 2955 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2956 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2957 /* The RTT is larger than a sane value. */ 2958 return (0); 2959 } 2960 /* store the current RTT in us */ 2961 net->rtt = rtt_us; 2962 /* compute rtt in ms */ 2963 rtt = (int32_t)(net->rtt / 1000); 2964 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2965 /* 2966 * Tell the CC module that a new update has just occurred 2967 * from a sack 2968 */ 2969 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2970 } 2971 /* 2972 * Do we need to determine the lan? We do this only on sacks i.e. 2973 * RTT being determined from data not non-data (HB/INIT->INITACK). 2974 */ 2975 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2976 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2977 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2978 net->lan_type = SCTP_LAN_INTERNET; 2979 } else { 2980 net->lan_type = SCTP_LAN_LOCAL; 2981 } 2982 } 2983 2984 /***************************/ 2985 /* 2. 
update RTTVAR & SRTT */ 2986 /***************************/ 2987 /*- 2988 * Compute the scaled average lastsa and the 2989 * scaled variance lastsv as described in Van Jacobson's 2990 * paper "Congestion Avoidance and Control", Annex A. 2991 * 2992 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2993 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2994 */ 2995 if (net->RTO_measured) { 2996 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2997 net->lastsa += rtt; 2998 if (rtt < 0) { 2999 rtt = -rtt; 3000 } 3001 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 3002 net->lastsv += rtt; 3003 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3004 rto_logging(net, SCTP_LOG_RTTVAR); 3005 } 3006 } else { 3007 /* First RTO measurement */ 3008 net->RTO_measured = 1; 3009 first_measure = 1; 3010 net->lastsa = rtt << SCTP_RTT_SHIFT; 3011 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3012 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3013 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3014 } 3015 } 3016 if (net->lastsv == 0) { 3017 net->lastsv = SCTP_CLOCK_GRANULARITY; 3018 } 3019 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3020 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3021 (stcb->asoc.sat_network_lockout == 0)) { 3022 stcb->asoc.sat_network = 1; 3023 } else if ((!first_measure) && stcb->asoc.sat_network) { 3024 stcb->asoc.sat_network = 0; 3025 stcb->asoc.sat_network_lockout = 1; 3026 } 3027 /* bound it, per C6/C7 in Section 5.3.1 */ 3028 if (new_rto < stcb->asoc.minrto) { 3029 new_rto = stcb->asoc.minrto; 3030 } 3031 if (new_rto > stcb->asoc.maxrto) { 3032 new_rto = stcb->asoc.maxrto; 3033 } 3034 net->RTO = new_rto; 3035 return (1); 3036 } 3037 3038 /* 3039 * return a pointer to a contiguous piece of data from the given mbuf chain 3040 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3041 * one mbuf, a copy is made at 'ptr'. The caller must ensure that the buffer size 3042 * is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain. 3043 */ 3044 caddr_t 3045 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3046 { 3047 uint32_t count; 3048 uint8_t *ptr; 3049 3050 ptr = in_ptr; 3051 if ((off < 0) || (len <= 0)) 3052 return (NULL); 3053 3054 /* find the desired start location */ 3055 while ((m != NULL) && (off > 0)) { 3056 if (off < SCTP_BUF_LEN(m)) 3057 break; 3058 off -= SCTP_BUF_LEN(m); 3059 m = SCTP_BUF_NEXT(m); 3060 } 3061 if (m == NULL) 3062 return (NULL); 3063 3064 /* is the current mbuf large enough (e.g., contiguous)? */ 3065 if ((SCTP_BUF_LEN(m) - off) >= len) { 3066 return (mtod(m, caddr_t)+off); 3067 } else { 3068 /* else, it spans more than one mbuf, so save a temp copy...
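 * into the caller-supplied buffer 'in_ptr', which is then returned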
*/ 3069 while ((m != NULL) && (len > 0)) { 3070 count = min(SCTP_BUF_LEN(m) - off, len); 3071 memcpy(ptr, mtod(m, caddr_t)+off, count); 3072 len -= count; 3073 ptr += count; 3074 off = 0; 3075 m = SCTP_BUF_NEXT(m); 3076 } 3077 if ((m == NULL) && (len > 0)) 3078 return (NULL); 3079 else 3080 return ((caddr_t)in_ptr); 3081 } 3082 } 3083 3084 struct sctp_paramhdr * 3085 sctp_get_next_param(struct mbuf *m, 3086 int offset, 3087 struct sctp_paramhdr *pull, 3088 int pull_limit) 3089 { 3090 /* This just provides a typed signature to Peter's Pull routine */ 3091 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3092 (uint8_t *)pull)); 3093 } 3094 3095 struct mbuf * 3096 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3097 { 3098 struct mbuf *m_last; 3099 caddr_t dp; 3100 3101 if (padlen > 3) { 3102 return (NULL); 3103 } 3104 if (padlen <= M_TRAILINGSPACE(m)) { 3105 /* 3106 * The easy way. We hope the majority of the time we hit 3107 * here :) 3108 */ 3109 m_last = m; 3110 } else { 3111 /* Hard way we must grow the mbuf chain */ 3112 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3113 if (m_last == NULL) { 3114 return (NULL); 3115 } 3116 SCTP_BUF_LEN(m_last) = 0; 3117 SCTP_BUF_NEXT(m_last) = NULL; 3118 SCTP_BUF_NEXT(m) = m_last; 3119 } 3120 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3121 SCTP_BUF_LEN(m_last) += padlen; 3122 memset(dp, 0, padlen); 3123 return (m_last); 3124 } 3125 3126 struct mbuf * 3127 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3128 { 3129 /* find the last mbuf in chain and pad it */ 3130 struct mbuf *m_at; 3131 3132 if (last_mbuf != NULL) { 3133 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3134 } else { 3135 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3136 if (SCTP_BUF_NEXT(m_at) == NULL) { 3137 return (sctp_add_pad_tombuf(m_at, padval)); 3138 } 3139 } 3140 } 3141 return (NULL); 3142 } 3143 3144 static void 3145 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3146 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked) 3147 { 3148 struct mbuf *m_notify; 3149 struct sctp_assoc_change *sac; 3150 struct sctp_queued_to_read *control; 3151 unsigned int notif_len; 3152 uint16_t abort_len; 3153 unsigned int i; 3154 3155 if (stcb == NULL) { 3156 return; 3157 } 3158 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3159 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3160 if (abort != NULL) { 3161 abort_len = ntohs(abort->ch.chunk_length); 3162 /* 3163 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3164 * contiguous. 3165 */ 3166 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3167 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3168 } 3169 } else { 3170 abort_len = 0; 3171 } 3172 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3173 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3174 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3175 notif_len += abort_len; 3176 } 3177 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3178 if (m_notify == NULL) { 3179 /* Retry with smaller value. 
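 * (fall back to just the base sctp_assoc_change header, without the optional feature/abort info)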
*/ 3180 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3181 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3182 if (m_notify == NULL) { 3183 goto set_error; 3184 } 3185 } 3186 SCTP_BUF_NEXT(m_notify) = NULL; 3187 sac = mtod(m_notify, struct sctp_assoc_change *); 3188 memset(sac, 0, notif_len); 3189 sac->sac_type = SCTP_ASSOC_CHANGE; 3190 sac->sac_flags = 0; 3191 sac->sac_length = sizeof(struct sctp_assoc_change); 3192 sac->sac_state = state; 3193 sac->sac_error = error; 3194 /* XXX verify these stream counts */ 3195 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3196 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3197 sac->sac_assoc_id = sctp_get_associd(stcb); 3198 if (notif_len > sizeof(struct sctp_assoc_change)) { 3199 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3200 i = 0; 3201 if (stcb->asoc.prsctp_supported == 1) { 3202 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3203 } 3204 if (stcb->asoc.auth_supported == 1) { 3205 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3206 } 3207 if (stcb->asoc.asconf_supported == 1) { 3208 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3209 } 3210 if (stcb->asoc.idata_supported == 1) { 3211 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3212 } 3213 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3214 if (stcb->asoc.reconfig_supported == 1) { 3215 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3216 } 3217 sac->sac_length += i; 3218 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3219 memcpy(sac->sac_info, abort, abort_len); 3220 sac->sac_length += abort_len; 3221 } 3222 } 3223 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3224 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3225 0, 0, stcb->asoc.context, 0, 0, 0, 3226 m_notify); 3227 if (control != NULL) { 3228 control->length = SCTP_BUF_LEN(m_notify); 3229 control->spec_flags = M_NOTIFICATION; 3230 /* not that we need this */ 3231 control->tail_mbuf = m_notify; 3232 sctp_add_to_readq(stcb->sctp_ep, stcb, 3233 control, 3234 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3235 so_locked); 3236 } else { 3237 sctp_m_freem(m_notify); 3238 } 3239 } 3240 /* 3241 * For 1-to-1 style sockets, we send up an error when an ABORT 3242 * comes in.
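 * The errno chosen below depends on the association state and on whether
 * the abort was initiated by the peer.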
3243 */ 3244 set_error: 3245 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3246 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3247 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3248 SOCK_LOCK(stcb->sctp_socket); 3249 if (from_peer) { 3250 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3251 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3252 stcb->sctp_socket->so_error = ECONNREFUSED; 3253 } else { 3254 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3255 stcb->sctp_socket->so_error = ECONNRESET; 3256 } 3257 } else { 3258 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 3259 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 3260 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3261 stcb->sctp_socket->so_error = ETIMEDOUT; 3262 } else { 3263 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3264 stcb->sctp_socket->so_error = ECONNABORTED; 3265 } 3266 } 3267 SOCK_UNLOCK(stcb->sctp_socket); 3268 } 3269 /* Wake ANY sleepers */ 3270 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3271 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3272 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3273 socantrcvmore(stcb->sctp_socket); 3274 } 3275 sorwakeup(stcb->sctp_socket); 3276 sowwakeup(stcb->sctp_socket); 3277 } 3278 3279 static void 3280 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3281 struct sockaddr *sa, uint32_t error, int so_locked) 3282 { 3283 struct mbuf *m_notify; 3284 struct sctp_paddr_change *spc; 3285 struct sctp_queued_to_read *control; 3286 3287 if ((stcb == NULL) || 3288 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3289 /* event not enabled */ 3290 return; 3291 } 3292 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3293 if (m_notify == NULL) 3294 return; 3295 SCTP_BUF_LEN(m_notify) = 0; 3296 spc = mtod(m_notify, struct sctp_paddr_change *); 3297 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3298 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3299 spc->spc_flags = 0; 3300 spc->spc_length = sizeof(struct sctp_paddr_change); 3301 switch (sa->sa_family) { 3302 #ifdef INET 3303 case AF_INET: 3304 #ifdef INET6 3305 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3306 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3307 (struct sockaddr_in6 *)&spc->spc_aaddr); 3308 } else { 3309 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3310 } 3311 #else 3312 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3313 #endif 3314 break; 3315 #endif 3316 #ifdef INET6 3317 case AF_INET6: 3318 { 3319 struct sockaddr_in6 *sin6; 3320 3321 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3322 3323 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3324 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3325 if (sin6->sin6_scope_id == 0) { 3326 /* recover scope_id for user */ 3327 (void)sa6_recoverscope(sin6); 3328 } else { 3329 /* clear embedded scope_id for user */ 3330 in6_clearscope(&sin6->sin6_addr); 3331 } 3332 } 3333 break; 3334 } 3335 #endif 3336 default: 3337 /* TSNH */ 3338 break; 3339 } 3340 spc->spc_state = state; 3341 spc->spc_error = error; 3342 spc->spc_assoc_id = sctp_get_associd(stcb); 3343 3344 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3345 SCTP_BUF_NEXT(m_notify) = NULL; 3346 3347 /* append to socket */ 3348 control = sctp_build_readq_entry(stcb, 
stcb->asoc.primary_destination, 3349 0, 0, stcb->asoc.context, 0, 0, 0, 3350 m_notify); 3351 if (control == NULL) { 3352 /* no memory */ 3353 sctp_m_freem(m_notify); 3354 return; 3355 } 3356 control->length = SCTP_BUF_LEN(m_notify); 3357 control->spec_flags = M_NOTIFICATION; 3358 /* not that we need this */ 3359 control->tail_mbuf = m_notify; 3360 sctp_add_to_readq(stcb->sctp_ep, stcb, 3361 control, 3362 &stcb->sctp_socket->so_rcv, 1, 3363 SCTP_READ_LOCK_NOT_HELD, 3364 so_locked); 3365 } 3366 3367 static void 3368 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3369 struct sctp_tmit_chunk *chk, int so_locked) 3370 { 3371 struct mbuf *m_notify; 3372 struct sctp_send_failed *ssf; 3373 struct sctp_send_failed_event *ssfe; 3374 struct sctp_queued_to_read *control; 3375 struct sctp_chunkhdr *chkhdr; 3376 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3377 3378 if ((stcb == NULL) || 3379 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3380 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3381 /* event not enabled */ 3382 return; 3383 } 3384 3385 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3386 notifhdr_len = sizeof(struct sctp_send_failed_event); 3387 } else { 3388 notifhdr_len = sizeof(struct sctp_send_failed); 3389 } 3390 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3391 if (m_notify == NULL) 3392 /* no space left */ 3393 return; 3394 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3395 if (stcb->asoc.idata_supported) { 3396 chkhdr_len = sizeof(struct sctp_idata_chunk); 3397 } else { 3398 chkhdr_len = sizeof(struct sctp_data_chunk); 3399 } 3400 /* Use some defaults in case we can't access the chunk header */ 3401 if (chk->send_size >= chkhdr_len) { 3402 payload_len = chk->send_size - chkhdr_len; 3403 } else { 3404 payload_len = 0; 3405 } 3406 padding_len = 0; 3407 if (chk->data != NULL) { 3408 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3409 if (chkhdr != NULL) { 3410 chk_len = ntohs(chkhdr->chunk_length); 3411 if ((chk_len >= chkhdr_len) && 3412 (chk->send_size >= chk_len) && 3413 (chk->send_size - chk_len < 4)) { 3414 padding_len = chk->send_size - chk_len; 3415 payload_len = chk->send_size - chkhdr_len - padding_len; 3416 } 3417 } 3418 } 3419 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3420 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3421 memset(ssfe, 0, notifhdr_len); 3422 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3423 if (sent) { 3424 ssfe->ssfe_flags = SCTP_DATA_SENT; 3425 } else { 3426 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3427 } 3428 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3429 ssfe->ssfe_error = error; 3430 /* not exactly what the user sent in, but should be close :) */ 3431 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3432 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3433 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3434 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3435 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3436 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3437 } else { 3438 ssf = mtod(m_notify, struct sctp_send_failed *); 3439 memset(ssf, 0, notifhdr_len); 3440 ssf->ssf_type = SCTP_SEND_FAILED; 3441 if (sent) { 3442 ssf->ssf_flags = SCTP_DATA_SENT; 3443 } else { 3444 ssf->ssf_flags = SCTP_DATA_UNSENT; 3445 } 3446 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3447 ssf->ssf_error = 
error; 3448 /* not exactly what the user sent in, but should be close :) */ 3449 ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3450 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3451 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3452 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3453 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3454 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3455 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3456 } 3457 if (chk->data != NULL) { 3458 /* Trim off the sctp chunk header (it should be there) */ 3459 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3460 m_adj(chk->data, chkhdr_len); 3461 m_adj(chk->data, -padding_len); 3462 sctp_mbuf_crush(chk->data); 3463 chk->send_size -= (chkhdr_len + padding_len); 3464 } 3465 } 3466 SCTP_BUF_NEXT(m_notify) = chk->data; 3467 /* Steal off the mbuf */ 3468 chk->data = NULL; 3469 /* 3470 * For this case, we check the actual socket buffer, since the assoc 3471 * is going away we don't want to overfill the socket buffer for a 3472 * non-reader 3473 */ 3474 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3475 sctp_m_freem(m_notify); 3476 return; 3477 } 3478 /* append to socket */ 3479 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3480 0, 0, stcb->asoc.context, 0, 0, 0, 3481 m_notify); 3482 if (control == NULL) { 3483 /* no memory */ 3484 sctp_m_freem(m_notify); 3485 return; 3486 } 3487 control->length = SCTP_BUF_LEN(m_notify); 3488 control->spec_flags = M_NOTIFICATION; 3489 /* not that we need this */ 3490 control->tail_mbuf = m_notify; 3491 sctp_add_to_readq(stcb->sctp_ep, stcb, 3492 control, 3493 &stcb->sctp_socket->so_rcv, 1, 3494 SCTP_READ_LOCK_NOT_HELD, 3495 so_locked); 3496 } 3497 3498 static void 3499 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3500 struct sctp_stream_queue_pending *sp, int so_locked) 3501 { 3502 struct mbuf *m_notify; 3503 struct sctp_send_failed *ssf; 3504 struct sctp_send_failed_event *ssfe; 3505 struct sctp_queued_to_read *control; 3506 int notifhdr_len; 3507 3508 if ((stcb == NULL) || 3509 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3510 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3511 /* event not enabled */ 3512 return; 3513 } 3514 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3515 notifhdr_len = sizeof(struct sctp_send_failed_event); 3516 } else { 3517 notifhdr_len = sizeof(struct sctp_send_failed); 3518 } 3519 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3520 if (m_notify == NULL) { 3521 /* no space left */ 3522 return; 3523 } 3524 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3525 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3526 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3527 memset(ssfe, 0, notifhdr_len); 3528 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3529 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3530 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3531 ssfe->ssfe_error = error; 3532 /* not exactly what the user sent in, but should be close :) */ 3533 ssfe->ssfe_info.snd_sid = sp->sid; 3534 if (sp->some_taken) { 3535 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3536 } else { 3537 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3538 } 3539 ssfe->ssfe_info.snd_ppid = sp->ppid; 3540 ssfe->ssfe_info.snd_context = sp->context; 3541 ssfe->ssfe_info.snd_assoc_id = 
sctp_get_associd(stcb); 3542 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3543 } else { 3544 ssf = mtod(m_notify, struct sctp_send_failed *); 3545 memset(ssf, 0, notifhdr_len); 3546 ssf->ssf_type = SCTP_SEND_FAILED; 3547 ssf->ssf_flags = SCTP_DATA_UNSENT; 3548 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3549 ssf->ssf_error = error; 3550 /* not exactly what the user sent in, but should be close :) */ 3551 ssf->ssf_info.sinfo_stream = sp->sid; 3552 ssf->ssf_info.sinfo_ssn = 0; 3553 if (sp->some_taken) { 3554 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3555 } else { 3556 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3557 } 3558 ssf->ssf_info.sinfo_ppid = sp->ppid; 3559 ssf->ssf_info.sinfo_context = sp->context; 3560 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3561 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3562 } 3563 SCTP_BUF_NEXT(m_notify) = sp->data; 3564 3565 /* Steal off the mbuf */ 3566 sp->data = NULL; 3567 /* 3568 * For this case, we check the actual socket buffer, since the assoc 3569 * is going away we don't want to overfill the socket buffer for a 3570 * non-reader 3571 */ 3572 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3573 sctp_m_freem(m_notify); 3574 return; 3575 } 3576 /* append to socket */ 3577 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3578 0, 0, stcb->asoc.context, 0, 0, 0, 3579 m_notify); 3580 if (control == NULL) { 3581 /* no memory */ 3582 sctp_m_freem(m_notify); 3583 return; 3584 } 3585 control->length = SCTP_BUF_LEN(m_notify); 3586 control->spec_flags = M_NOTIFICATION; 3587 /* not that we need this */ 3588 control->tail_mbuf = m_notify; 3589 sctp_add_to_readq(stcb->sctp_ep, stcb, 3590 control, 3591 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3592 } 3593 3594 static void 3595 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3596 { 3597 struct mbuf *m_notify; 3598 struct sctp_adaptation_event *sai; 3599 struct sctp_queued_to_read *control; 3600 3601 if ((stcb == NULL) || 3602 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3603 /* event not enabled */ 3604 return; 3605 } 3606 3607 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3608 if (m_notify == NULL) 3609 /* no space left */ 3610 return; 3611 SCTP_BUF_LEN(m_notify) = 0; 3612 sai = mtod(m_notify, struct sctp_adaptation_event *); 3613 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3614 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3615 sai->sai_flags = 0; 3616 sai->sai_length = sizeof(struct sctp_adaptation_event); 3617 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3618 sai->sai_assoc_id = sctp_get_associd(stcb); 3619 3620 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3621 SCTP_BUF_NEXT(m_notify) = NULL; 3622 3623 /* append to socket */ 3624 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3625 0, 0, stcb->asoc.context, 0, 0, 0, 3626 m_notify); 3627 if (control == NULL) { 3628 /* no memory */ 3629 sctp_m_freem(m_notify); 3630 return; 3631 } 3632 control->length = SCTP_BUF_LEN(m_notify); 3633 control->spec_flags = M_NOTIFICATION; 3634 /* not that we need this */ 3635 control->tail_mbuf = m_notify; 3636 sctp_add_to_readq(stcb->sctp_ep, stcb, 3637 control, 3638 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3639 } 3640 3641 /* This always must be called with the read-queue LOCKED in the INP */ 3642 static void 3643 
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3644 uint32_t val, int so_locked) 3645 { 3646 struct mbuf *m_notify; 3647 struct sctp_pdapi_event *pdapi; 3648 struct sctp_queued_to_read *control; 3649 struct sockbuf *sb; 3650 3651 if ((stcb == NULL) || 3652 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3653 /* event not enabled */ 3654 return; 3655 } 3656 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3657 return; 3658 } 3659 3660 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3661 if (m_notify == NULL) 3662 /* no space left */ 3663 return; 3664 SCTP_BUF_LEN(m_notify) = 0; 3665 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3666 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3667 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3668 pdapi->pdapi_flags = 0; 3669 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3670 pdapi->pdapi_indication = error; 3671 pdapi->pdapi_stream = (val >> 16); 3672 pdapi->pdapi_seq = (val & 0x0000ffff); 3673 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3674 3675 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3676 SCTP_BUF_NEXT(m_notify) = NULL; 3677 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3678 0, 0, stcb->asoc.context, 0, 0, 0, 3679 m_notify); 3680 if (control == NULL) { 3681 /* no memory */ 3682 sctp_m_freem(m_notify); 3683 return; 3684 } 3685 control->length = SCTP_BUF_LEN(m_notify); 3686 control->spec_flags = M_NOTIFICATION; 3687 /* not that we need this */ 3688 control->tail_mbuf = m_notify; 3689 sb = &stcb->sctp_socket->so_rcv; 3690 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3691 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3692 } 3693 sctp_sballoc(stcb, sb, m_notify); 3694 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3695 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3696 } 3697 control->end_added = 1; 3698 if (stcb->asoc.control_pdapi) 3699 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3700 else { 3701 /* we really should not see this case */ 3702 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3703 } 3704 if (stcb->sctp_ep && stcb->sctp_socket) { 3705 /* This should always be the case */ 3706 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3707 } 3708 } 3709 3710 static void 3711 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3712 { 3713 struct mbuf *m_notify; 3714 struct sctp_shutdown_event *sse; 3715 struct sctp_queued_to_read *control; 3716 3717 /* 3718 * For TCP model AND UDP connected sockets we will send an error up 3719 * when an SHUTDOWN completes 3720 */ 3721 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3722 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3723 /* mark socket closed for read/write and wakeup! 
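		 * (only the send side is actually marked here, via the
		 * socantsendmore() call below)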
*/ 3724 socantsendmore(stcb->sctp_socket); 3725 } 3726 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3727 /* event not enabled */ 3728 return; 3729 } 3730 3731 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3732 if (m_notify == NULL) 3733 /* no space left */ 3734 return; 3735 sse = mtod(m_notify, struct sctp_shutdown_event *); 3736 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3737 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3738 sse->sse_flags = 0; 3739 sse->sse_length = sizeof(struct sctp_shutdown_event); 3740 sse->sse_assoc_id = sctp_get_associd(stcb); 3741 3742 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3743 SCTP_BUF_NEXT(m_notify) = NULL; 3744 3745 /* append to socket */ 3746 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3747 0, 0, stcb->asoc.context, 0, 0, 0, 3748 m_notify); 3749 if (control == NULL) { 3750 /* no memory */ 3751 sctp_m_freem(m_notify); 3752 return; 3753 } 3754 control->length = SCTP_BUF_LEN(m_notify); 3755 control->spec_flags = M_NOTIFICATION; 3756 /* not that we need this */ 3757 control->tail_mbuf = m_notify; 3758 sctp_add_to_readq(stcb->sctp_ep, stcb, 3759 control, 3760 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3761 } 3762 3763 static void 3764 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3765 int so_locked) 3766 { 3767 struct mbuf *m_notify; 3768 struct sctp_sender_dry_event *event; 3769 struct sctp_queued_to_read *control; 3770 3771 if ((stcb == NULL) || 3772 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3773 /* event not enabled */ 3774 return; 3775 } 3776 3777 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3778 if (m_notify == NULL) { 3779 /* no space left */ 3780 return; 3781 } 3782 SCTP_BUF_LEN(m_notify) = 0; 3783 event = mtod(m_notify, struct sctp_sender_dry_event *); 3784 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3785 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3786 event->sender_dry_flags = 0; 3787 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3788 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3789 3790 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3791 SCTP_BUF_NEXT(m_notify) = NULL; 3792 3793 /* append to socket */ 3794 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3795 0, 0, stcb->asoc.context, 0, 0, 0, 3796 m_notify); 3797 if (control == NULL) { 3798 /* no memory */ 3799 sctp_m_freem(m_notify); 3800 return; 3801 } 3802 control->length = SCTP_BUF_LEN(m_notify); 3803 control->spec_flags = M_NOTIFICATION; 3804 /* not that we need this */ 3805 control->tail_mbuf = m_notify; 3806 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3807 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3808 } 3809 3810 void 3811 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3812 { 3813 struct mbuf *m_notify; 3814 struct sctp_queued_to_read *control; 3815 struct sctp_stream_change_event *stradd; 3816 3817 if ((stcb == NULL) || 3818 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3819 /* event not enabled */ 3820 return; 3821 } 3822 if ((stcb->asoc.peer_req_out) && flag) { 3823 /* Peer made the request, don't tell the local user */ 3824 stcb->asoc.peer_req_out = 0; 3825 return; 3826 } 3827 stcb->asoc.peer_req_out = 0; 3828 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3829 if (m_notify == NULL) 3830 /* no space left */ 3831 return; 3832 SCTP_BUF_LEN(m_notify) = 0; 3833 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3834 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3835 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3836 stradd->strchange_flags = flag; 3837 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3838 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3839 stradd->strchange_instrms = numberin; 3840 stradd->strchange_outstrms = numberout; 3841 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3842 SCTP_BUF_NEXT(m_notify) = NULL; 3843 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3844 /* no space */ 3845 sctp_m_freem(m_notify); 3846 return; 3847 } 3848 /* append to socket */ 3849 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3850 0, 0, stcb->asoc.context, 0, 0, 0, 3851 m_notify); 3852 if (control == NULL) { 3853 /* no memory */ 3854 sctp_m_freem(m_notify); 3855 return; 3856 } 3857 control->length = SCTP_BUF_LEN(m_notify); 3858 control->spec_flags = M_NOTIFICATION; 3859 /* not that we need this */ 3860 control->tail_mbuf = m_notify; 3861 sctp_add_to_readq(stcb->sctp_ep, stcb, 3862 control, 3863 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3864 } 3865 3866 void 3867 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3868 { 3869 struct mbuf *m_notify; 3870 struct sctp_queued_to_read *control; 3871 struct sctp_assoc_reset_event *strasoc; 3872 3873 if ((stcb == NULL) || 3874 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3875 /* event not enabled */ 3876 return; 3877 } 3878 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3879 if (m_notify == NULL) 3880 /* no space left */ 3881 return; 3882 SCTP_BUF_LEN(m_notify) = 0; 3883 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3884 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3885 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3886 strasoc->assocreset_flags = flag; 3887 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3888 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3889 strasoc->assocreset_local_tsn = sending_tsn; 3890 strasoc->assocreset_remote_tsn = recv_tsn; 3891 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3892 SCTP_BUF_NEXT(m_notify) = NULL; 3893 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3894 /* no space */ 3895 sctp_m_freem(m_notify); 3896 return; 3897 } 3898 /* append to socket */ 3899 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3900 0, 0, stcb->asoc.context, 0, 0, 0, 3901 m_notify); 3902 if (control == NULL) { 3903 /* no memory */ 3904 sctp_m_freem(m_notify); 3905 return; 3906 } 3907 control->length = SCTP_BUF_LEN(m_notify); 3908 control->spec_flags = M_NOTIFICATION; 3909 /* not that we need this */ 3910 control->tail_mbuf = m_notify; 3911 sctp_add_to_readq(stcb->sctp_ep, stcb, 3912 control, 3913 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3914 } 3915 3916 static void 3917 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3918 int number_entries, uint16_t *list, int flag) 3919 { 3920 struct mbuf *m_notify; 3921 struct sctp_queued_to_read 
*control; 3922 struct sctp_stream_reset_event *strreset; 3923 int len; 3924 3925 if ((stcb == NULL) || 3926 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3927 /* event not enabled */ 3928 return; 3929 } 3930 3931 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3932 if (m_notify == NULL) 3933 /* no space left */ 3934 return; 3935 SCTP_BUF_LEN(m_notify) = 0; 3936 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3937 if (len > M_TRAILINGSPACE(m_notify)) { 3938 /* never enough room */ 3939 sctp_m_freem(m_notify); 3940 return; 3941 } 3942 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3943 memset(strreset, 0, len); 3944 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3945 strreset->strreset_flags = flag; 3946 strreset->strreset_length = len; 3947 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3948 if (number_entries) { 3949 int i; 3950 3951 for (i = 0; i < number_entries; i++) { 3952 strreset->strreset_stream_list[i] = ntohs(list[i]); 3953 } 3954 } 3955 SCTP_BUF_LEN(m_notify) = len; 3956 SCTP_BUF_NEXT(m_notify) = NULL; 3957 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3958 /* no space */ 3959 sctp_m_freem(m_notify); 3960 return; 3961 } 3962 /* append to socket */ 3963 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3964 0, 0, stcb->asoc.context, 0, 0, 0, 3965 m_notify); 3966 if (control == NULL) { 3967 /* no memory */ 3968 sctp_m_freem(m_notify); 3969 return; 3970 } 3971 control->length = SCTP_BUF_LEN(m_notify); 3972 control->spec_flags = M_NOTIFICATION; 3973 /* not that we need this */ 3974 control->tail_mbuf = m_notify; 3975 sctp_add_to_readq(stcb->sctp_ep, stcb, 3976 control, 3977 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3978 } 3979 3980 static void 3981 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3982 { 3983 struct mbuf *m_notify; 3984 struct sctp_remote_error *sre; 3985 struct sctp_queued_to_read *control; 3986 unsigned int notif_len; 3987 uint16_t chunk_len; 3988 3989 if ((stcb == NULL) || 3990 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3991 return; 3992 } 3993 if (chunk != NULL) { 3994 chunk_len = ntohs(chunk->ch.chunk_length); 3995 /* 3996 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3997 * contiguous. 3998 */ 3999 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 4000 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4001 } 4002 } else { 4003 chunk_len = 0; 4004 } 4005 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4006 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4007 if (m_notify == NULL) { 4008 /* Retry with smaller value. 
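		 * That is, allocate only the fixed sctp_remote_error header
		 * and drop the copied cause data, so the notification can
		 * still be delivered under mbuf pressure.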
*/ 4009 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4010 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4011 if (m_notify == NULL) { 4012 return; 4013 } 4014 } 4015 SCTP_BUF_NEXT(m_notify) = NULL; 4016 sre = mtod(m_notify, struct sctp_remote_error *); 4017 memset(sre, 0, notif_len); 4018 sre->sre_type = SCTP_REMOTE_ERROR; 4019 sre->sre_flags = 0; 4020 sre->sre_length = sizeof(struct sctp_remote_error); 4021 sre->sre_error = error; 4022 sre->sre_assoc_id = sctp_get_associd(stcb); 4023 if (notif_len > sizeof(struct sctp_remote_error)) { 4024 memcpy(sre->sre_data, chunk, chunk_len); 4025 sre->sre_length += chunk_len; 4026 } 4027 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4028 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4029 0, 0, stcb->asoc.context, 0, 0, 0, 4030 m_notify); 4031 if (control != NULL) { 4032 control->length = SCTP_BUF_LEN(m_notify); 4033 control->spec_flags = M_NOTIFICATION; 4034 /* not that we need this */ 4035 control->tail_mbuf = m_notify; 4036 sctp_add_to_readq(stcb->sctp_ep, stcb, 4037 control, 4038 &stcb->sctp_socket->so_rcv, 1, 4039 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4040 } else { 4041 sctp_m_freem(m_notify); 4042 } 4043 } 4044 4045 void 4046 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4047 uint32_t error, void *data, int so_locked) 4048 { 4049 if ((stcb == NULL) || 4050 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4051 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4052 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4053 /* If the socket is gone we are out of here */ 4054 return; 4055 } 4056 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4057 return; 4058 } 4059 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4060 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4061 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4062 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4063 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4064 /* Don't report these in front states */ 4065 return; 4066 } 4067 } 4068 switch (notification) { 4069 case SCTP_NOTIFY_ASSOC_UP: 4070 if (stcb->asoc.assoc_up_sent == 0) { 4071 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 4072 stcb->asoc.assoc_up_sent = 1; 4073 } 4074 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4075 sctp_notify_adaptation_layer(stcb); 4076 } 4077 if (stcb->asoc.auth_supported == 0) { 4078 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4079 NULL, so_locked); 4080 } 4081 break; 4082 case SCTP_NOTIFY_ASSOC_DOWN: 4083 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 4084 break; 4085 case SCTP_NOTIFY_INTERFACE_DOWN: 4086 { 4087 struct sctp_nets *net; 4088 4089 net = (struct sctp_nets *)data; 4090 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4091 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4092 break; 4093 } 4094 case SCTP_NOTIFY_INTERFACE_UP: 4095 { 4096 struct sctp_nets *net; 4097 4098 net = (struct sctp_nets *)data; 4099 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4100 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4101 break; 4102 } 4103 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4104 { 4105 struct sctp_nets *net; 4106 4107 net = (struct sctp_nets *)data; 4108 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4109 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4110 break; 4111 } 4112 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4113 
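		/*
		 * An unsent stream-queue-pending entry failed; it is
		 * reported through the "2" variant, which builds the
		 * notification from the sctp_stream_queue_pending record
		 * rather than from a transmit chunk.  As with the other
		 * send-failure cases, the callee drops the event unless the
		 * application subscribed to it.  A minimal userland sketch
		 * of subscribing (assuming the RFC 6458 SCTP_EVENT socket
		 * option; fd is an illustrative SCTP socket):
		 *
		 *	struct sctp_event ev;
		 *
		 *	memset(&ev, 0, sizeof(ev));
		 *	ev.se_type = SCTP_SEND_FAILED_EVENT;
		 *	ev.se_on = 1;
		 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_EVENT,
		 *	    &ev, sizeof(ev));
		 */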
sctp_notify_send_failed2(stcb, error, 4114 (struct sctp_stream_queue_pending *)data, so_locked); 4115 break; 4116 case SCTP_NOTIFY_SENT_DG_FAIL: 4117 sctp_notify_send_failed(stcb, 1, error, 4118 (struct sctp_tmit_chunk *)data, so_locked); 4119 break; 4120 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4121 sctp_notify_send_failed(stcb, 0, error, 4122 (struct sctp_tmit_chunk *)data, so_locked); 4123 break; 4124 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4125 { 4126 uint32_t val; 4127 4128 val = *((uint32_t *)data); 4129 4130 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4131 break; 4132 } 4133 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4134 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4135 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4136 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked); 4137 } else { 4138 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked); 4139 } 4140 break; 4141 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4142 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4143 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4144 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked); 4145 } else { 4146 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked); 4147 } 4148 break; 4149 case SCTP_NOTIFY_ASSOC_RESTART: 4150 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked); 4151 if (stcb->asoc.auth_supported == 0) { 4152 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4153 NULL, so_locked); 4154 } 4155 break; 4156 case SCTP_NOTIFY_STR_RESET_SEND: 4157 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4158 break; 4159 case SCTP_NOTIFY_STR_RESET_RECV: 4160 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4161 break; 4162 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4163 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4164 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4165 break; 4166 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4167 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4168 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4169 break; 4170 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4171 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4172 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4173 break; 4174 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4175 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4176 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4177 break; 4178 case SCTP_NOTIFY_ASCONF_ADD_IP: 4179 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4180 error, so_locked); 4181 break; 4182 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4183 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4184 error, so_locked); 4185 break; 4186 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4187 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4188 error, so_locked); 4189 break; 4190 case SCTP_NOTIFY_PEER_SHUTDOWN: 4191 sctp_notify_shutdown_event(stcb); 4192 break; 4193 case SCTP_NOTIFY_AUTH_NEW_KEY: 4194 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4195 (uint16_t)(uintptr_t)data, 4196 so_locked); 4197 break; 4198 case SCTP_NOTIFY_AUTH_FREE_KEY: 4199 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4200 (uint16_t)(uintptr_t)data, 4201 so_locked); 4202 break; 4203 case SCTP_NOTIFY_NO_PEER_AUTH: 4204 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4205 
(uint16_t)(uintptr_t)data, 4206 so_locked); 4207 break; 4208 case SCTP_NOTIFY_SENDER_DRY: 4209 sctp_notify_sender_dry_event(stcb, so_locked); 4210 break; 4211 case SCTP_NOTIFY_REMOTE_ERROR: 4212 sctp_notify_remote_error(stcb, error, data); 4213 break; 4214 default: 4215 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4216 __func__, notification, notification); 4217 break; 4218 } /* end switch */ 4219 } 4220 4221 void 4222 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4223 { 4224 struct sctp_association *asoc; 4225 struct sctp_stream_out *outs; 4226 struct sctp_tmit_chunk *chk, *nchk; 4227 struct sctp_stream_queue_pending *sp, *nsp; 4228 int i; 4229 4230 if (stcb == NULL) { 4231 return; 4232 } 4233 asoc = &stcb->asoc; 4234 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4235 /* already being freed */ 4236 return; 4237 } 4238 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4239 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4240 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4241 return; 4242 } 4243 /* now through all the gunk freeing chunks */ 4244 /* sent queue SHOULD be empty */ 4245 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4246 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4247 asoc->sent_queue_cnt--; 4248 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4249 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4250 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4251 #ifdef INVARIANTS 4252 } else { 4253 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4254 #endif 4255 } 4256 } 4257 if (chk->data != NULL) { 4258 sctp_free_bufspace(stcb, asoc, chk, 1); 4259 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4260 error, chk, so_locked); 4261 if (chk->data) { 4262 sctp_m_freem(chk->data); 4263 chk->data = NULL; 4264 } 4265 } 4266 sctp_free_a_chunk(stcb, chk, so_locked); 4267 /* sa_ignore FREED_MEMORY */ 4268 } 4269 /* pending send queue SHOULD be empty */ 4270 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4271 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4272 asoc->send_queue_cnt--; 4273 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4274 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4275 #ifdef INVARIANTS 4276 } else { 4277 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4278 #endif 4279 } 4280 if (chk->data != NULL) { 4281 sctp_free_bufspace(stcb, asoc, chk, 1); 4282 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4283 error, chk, so_locked); 4284 if (chk->data) { 4285 sctp_m_freem(chk->data); 4286 chk->data = NULL; 4287 } 4288 } 4289 sctp_free_a_chunk(stcb, chk, so_locked); 4290 /* sa_ignore FREED_MEMORY */ 4291 } 4292 for (i = 0; i < asoc->streamoutcnt; i++) { 4293 /* For each stream */ 4294 outs = &asoc->strmout[i]; 4295 /* clean up any sends there */ 4296 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4297 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4298 TAILQ_REMOVE(&outs->outqueue, sp, next); 4299 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1); 4300 sctp_free_spbufspace(stcb, asoc, sp); 4301 if (sp->data) { 4302 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4303 error, (void *)sp, so_locked); 4304 if (sp->data) { 4305 sctp_m_freem(sp->data); 4306 sp->data = NULL; 4307 sp->tail_mbuf = NULL; 4308 sp->length = 0; 4309 } 4310 } 4311 if (sp->net) { 4312 sctp_free_remote_addr(sp->net); 4313 sp->net = NULL; 4314 } 4315 /* Free the chunk */ 4316 
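			/*
			 * (Strictly a stream-queue-pending entry rather than
			 * a transmit chunk; its data and destination
			 * reference were already released above.)
			 */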
sctp_free_a_strmoq(stcb, sp, so_locked); 4317 /* sa_ignore FREED_MEMORY */ 4318 } 4319 } 4320 } 4321 4322 void 4323 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error, 4324 struct sctp_abort_chunk *abort, int so_locked) 4325 { 4326 if (stcb == NULL) { 4327 return; 4328 } 4329 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4330 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4331 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4332 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 4333 } 4334 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4335 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4336 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4337 return; 4338 } 4339 SCTP_TCB_SEND_LOCK(stcb); 4340 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4341 /* Tell them we lost the asoc */ 4342 sctp_report_all_outbound(stcb, error, so_locked); 4343 SCTP_TCB_SEND_UNLOCK(stcb); 4344 if (from_peer) { 4345 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4346 } else { 4347 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4348 } 4349 } 4350 4351 void 4352 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4353 struct mbuf *m, int iphlen, 4354 struct sockaddr *src, struct sockaddr *dst, 4355 struct sctphdr *sh, struct mbuf *op_err, 4356 uint8_t mflowtype, uint32_t mflowid, 4357 uint32_t vrf_id, uint16_t port) 4358 { 4359 uint32_t vtag; 4360 4361 vtag = 0; 4362 if (stcb != NULL) { 4363 vtag = stcb->asoc.peer_vtag; 4364 vrf_id = stcb->asoc.vrf_id; 4365 } 4366 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4367 mflowtype, mflowid, inp->fibnum, 4368 vrf_id, port); 4369 if (stcb != NULL) { 4370 /* We have a TCB to abort, send notification too */ 4371 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 4372 /* Ok, now lets free it */ 4373 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4374 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4375 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4376 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4377 } 4378 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4379 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4380 } 4381 } 4382 #ifdef SCTP_ASOCLOG_OF_TSNS 4383 void 4384 sctp_print_out_track_log(struct sctp_tcb *stcb) 4385 { 4386 #ifdef NOSIY_PRINTS 4387 int i; 4388 4389 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4390 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4391 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4392 SCTP_PRINTF("None rcvd\n"); 4393 goto none_in; 4394 } 4395 if (stcb->asoc.tsn_in_wrapped) { 4396 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4397 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4398 stcb->asoc.in_tsnlog[i].tsn, 4399 stcb->asoc.in_tsnlog[i].strm, 4400 stcb->asoc.in_tsnlog[i].seq, 4401 stcb->asoc.in_tsnlog[i].flgs, 4402 stcb->asoc.in_tsnlog[i].sz); 4403 } 4404 } 4405 if (stcb->asoc.tsn_in_at) { 4406 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4407 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4408 stcb->asoc.in_tsnlog[i].tsn, 4409 stcb->asoc.in_tsnlog[i].strm, 4410 stcb->asoc.in_tsnlog[i].seq, 4411 stcb->asoc.in_tsnlog[i].flgs, 4412 stcb->asoc.in_tsnlog[i].sz); 4413 } 4414 } 4415 none_in: 4416 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4417 if ((stcb->asoc.tsn_out_at == 0) && 4418 (stcb->asoc.tsn_out_wrapped == 0)) { 4419 SCTP_PRINTF("None sent\n"); 4420 } 4421 if 
(stcb->asoc.tsn_out_wrapped) { 4422 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4423 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4424 stcb->asoc.out_tsnlog[i].tsn, 4425 stcb->asoc.out_tsnlog[i].strm, 4426 stcb->asoc.out_tsnlog[i].seq, 4427 stcb->asoc.out_tsnlog[i].flgs, 4428 stcb->asoc.out_tsnlog[i].sz); 4429 } 4430 } 4431 if (stcb->asoc.tsn_out_at) { 4432 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4433 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4434 stcb->asoc.out_tsnlog[i].tsn, 4435 stcb->asoc.out_tsnlog[i].strm, 4436 stcb->asoc.out_tsnlog[i].seq, 4437 stcb->asoc.out_tsnlog[i].flgs, 4438 stcb->asoc.out_tsnlog[i].sz); 4439 } 4440 } 4441 #endif 4442 } 4443 #endif 4444 4445 void 4446 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4447 struct mbuf *op_err, 4448 int so_locked) 4449 { 4450 4451 if (stcb == NULL) { 4452 /* Got to have a TCB */ 4453 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4454 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4455 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4456 SCTP_CALLED_DIRECTLY_NOCMPSET); 4457 } 4458 } 4459 return; 4460 } 4461 /* notify the peer */ 4462 sctp_send_abort_tcb(stcb, op_err, so_locked); 4463 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4464 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4465 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4466 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4467 } 4468 /* notify the ulp */ 4469 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4470 sctp_abort_notification(stcb, 0, 0, NULL, so_locked); 4471 } 4472 /* now free the asoc */ 4473 #ifdef SCTP_ASOCLOG_OF_TSNS 4474 sctp_print_out_track_log(stcb); 4475 #endif 4476 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4477 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4478 } 4479 4480 void 4481 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4482 struct sockaddr *src, struct sockaddr *dst, 4483 struct sctphdr *sh, struct sctp_inpcb *inp, 4484 struct mbuf *cause, 4485 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4486 uint32_t vrf_id, uint16_t port) 4487 { 4488 struct sctp_chunkhdr *ch, chunk_buf; 4489 unsigned int chk_length; 4490 int contains_init_chunk; 4491 4492 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4493 /* Generate a TO address for future reference */ 4494 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4495 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4496 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4497 SCTP_CALLED_DIRECTLY_NOCMPSET); 4498 } 4499 } 4500 contains_init_chunk = 0; 4501 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4502 sizeof(*ch), (uint8_t *)&chunk_buf); 4503 while (ch != NULL) { 4504 chk_length = ntohs(ch->chunk_length); 4505 if (chk_length < sizeof(*ch)) { 4506 /* break to abort land */ 4507 break; 4508 } 4509 switch (ch->chunk_type) { 4510 case SCTP_INIT: 4511 contains_init_chunk = 1; 4512 break; 4513 case SCTP_PACKET_DROPPED: 4514 /* we don't respond to pkt-dropped */ 4515 return; 4516 case SCTP_ABORT_ASSOCIATION: 4517 /* we don't respond with an ABORT to an ABORT */ 4518 return; 4519 case SCTP_SHUTDOWN_COMPLETE: 4520 /* 4521 * we ignore it since we are not waiting for it and 4522 * peer is gone 4523 */ 4524 return; 4525 case SCTP_SHUTDOWN_ACK: 4526 sctp_send_shutdown_complete2(src, dst, sh, 4527 mflowtype, mflowid, fibnum, 4528 vrf_id, port); 4529 return; 4530 default: 4531 break; 4532 } 4533 offset += SCTP_SIZE32(chk_length); 4534 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4535 sizeof(*ch), (uint8_t 
*)&chunk_buf); 4536 } 4537 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4538 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4539 (contains_init_chunk == 0))) { 4540 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4541 mflowtype, mflowid, fibnum, 4542 vrf_id, port); 4543 } 4544 } 4545 4546 /* 4547 * check the inbound datagram to make sure there is not an abort inside it, 4548 * if there is return 1, else return 0. 4549 */ 4550 int 4551 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill) 4552 { 4553 struct sctp_chunkhdr *ch; 4554 struct sctp_init_chunk *init_chk, chunk_buf; 4555 int offset; 4556 unsigned int chk_length; 4557 4558 offset = iphlen + sizeof(struct sctphdr); 4559 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4560 (uint8_t *)&chunk_buf); 4561 while (ch != NULL) { 4562 chk_length = ntohs(ch->chunk_length); 4563 if (chk_length < sizeof(*ch)) { 4564 /* packet is probably corrupt */ 4565 break; 4566 } 4567 /* we seem to be ok, is it an abort? */ 4568 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4569 /* yep, tell them */ 4570 return (1); 4571 } 4572 if (ch->chunk_type == SCTP_INITIATION) { 4573 /* need to update the Vtag */ 4574 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4575 offset, sizeof(*init_chk), (uint8_t *)&chunk_buf); 4576 if (init_chk != NULL) { 4577 *vtagfill = ntohl(init_chk->init.initiate_tag); 4578 } 4579 } 4580 /* Nope, move to the next chunk */ 4581 offset += SCTP_SIZE32(chk_length); 4582 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4583 sizeof(*ch), (uint8_t *)&chunk_buf); 4584 } 4585 return (0); 4586 } 4587 4588 /* 4589 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4590 * set (i.e. it's 0) so, create this function to compare link local scopes 4591 */ 4592 #ifdef INET6 4593 uint32_t 4594 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4595 { 4596 struct sockaddr_in6 a, b; 4597 4598 /* save copies */ 4599 a = *addr1; 4600 b = *addr2; 4601 4602 if (a.sin6_scope_id == 0) 4603 if (sa6_recoverscope(&a)) { 4604 /* can't get scope, so can't match */ 4605 return (0); 4606 } 4607 if (b.sin6_scope_id == 0) 4608 if (sa6_recoverscope(&b)) { 4609 /* can't get scope, so can't match */ 4610 return (0); 4611 } 4612 if (a.sin6_scope_id != b.sin6_scope_id) 4613 return (0); 4614 4615 return (1); 4616 } 4617 4618 /* 4619 * returns a sockaddr_in6 with embedded scope recovered and removed 4620 */ 4621 struct sockaddr_in6 * 4622 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4623 { 4624 /* check and strip embedded scope junk */ 4625 if (addr->sin6_family == AF_INET6) { 4626 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4627 if (addr->sin6_scope_id == 0) { 4628 *store = *addr; 4629 if (!sa6_recoverscope(store)) { 4630 /* use the recovered scope */ 4631 addr = store; 4632 } 4633 } else { 4634 /* else, return the original "to" addr */ 4635 in6_clearscope(&addr->sin6_addr); 4636 } 4637 } 4638 } 4639 return (addr); 4640 } 4641 #endif 4642 4643 /* 4644 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4645 * if same, 0 if not 4646 */ 4647 int 4648 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4649 { 4650 4651 /* must be valid */ 4652 if (sa1 == NULL || sa2 == NULL) 4653 return (0); 4654 4655 /* must be the same family */ 4656 if (sa1->sa_family != sa2->sa_family) 4657 return (0); 4658 4659 switch (sa1->sa_family) { 4660 #ifdef INET6 4661 case AF_INET6: 4662 { 4663 /* IPv6 addresses */ 4664 struct sockaddr_in6 *sin6_1, *sin6_2; 4665 4666 sin6_1 = (struct sockaddr_in6 *)sa1; 4667 sin6_2 = (struct sockaddr_in6 *)sa2; 4668 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4669 sin6_2)); 4670 } 4671 #endif 4672 #ifdef INET 4673 case AF_INET: 4674 { 4675 /* IPv4 addresses */ 4676 struct sockaddr_in *sin_1, *sin_2; 4677 4678 sin_1 = (struct sockaddr_in *)sa1; 4679 sin_2 = (struct sockaddr_in *)sa2; 4680 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4681 } 4682 #endif 4683 default: 4684 /* we don't do these... */ 4685 return (0); 4686 } 4687 } 4688 4689 void 4690 sctp_print_address(struct sockaddr *sa) 4691 { 4692 #ifdef INET6 4693 char ip6buf[INET6_ADDRSTRLEN]; 4694 #endif 4695 4696 switch (sa->sa_family) { 4697 #ifdef INET6 4698 case AF_INET6: 4699 { 4700 struct sockaddr_in6 *sin6; 4701 4702 sin6 = (struct sockaddr_in6 *)sa; 4703 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4704 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4705 ntohs(sin6->sin6_port), 4706 sin6->sin6_scope_id); 4707 break; 4708 } 4709 #endif 4710 #ifdef INET 4711 case AF_INET: 4712 { 4713 struct sockaddr_in *sin; 4714 unsigned char *p; 4715 4716 sin = (struct sockaddr_in *)sa; 4717 p = (unsigned char *)&sin->sin_addr; 4718 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4719 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4720 break; 4721 } 4722 #endif 4723 default: 4724 SCTP_PRINTF("?\n"); 4725 break; 4726 } 4727 } 4728 4729 void 4730 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4731 struct sctp_inpcb *new_inp, 4732 struct sctp_tcb *stcb, 4733 int waitflags) 4734 { 4735 /* 4736 * go through our old INP and pull off any control structures that 4737 * belong to stcb and move then to the new inp. 4738 */ 4739 struct socket *old_so, *new_so; 4740 struct sctp_queued_to_read *control, *nctl; 4741 struct sctp_readhead tmp_queue; 4742 struct mbuf *m; 4743 int error = 0; 4744 4745 old_so = old_inp->sctp_socket; 4746 new_so = new_inp->sctp_socket; 4747 TAILQ_INIT(&tmp_queue); 4748 error = sblock(&old_so->so_rcv, waitflags); 4749 if (error) { 4750 /* 4751 * Gak, can't get sblock, we have a problem. data will be 4752 * left stranded.. and we don't dare look at it since the 4753 * other thread may be reading something. Oh well, its a 4754 * screwed up app that does a peeloff OR a accept while 4755 * reading from the main socket... actually its only the 4756 * peeloff() case, since I think read will fail on a 4757 * listening socket.. 4758 */ 4759 return; 4760 } 4761 /* lock the socket buffers */ 4762 SCTP_INP_READ_LOCK(old_inp); 4763 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4764 /* Pull off all for out target stcb */ 4765 if (control->stcb == stcb) { 4766 /* remove it we want it */ 4767 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4768 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4769 m = control->data; 4770 while (m) { 4771 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4772 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4773 } 4774 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4775 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4776 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4777 } 4778 m = SCTP_BUF_NEXT(m); 4779 } 4780 } 4781 } 4782 SCTP_INP_READ_UNLOCK(old_inp); 4783 /* Remove the sb-lock on the old socket */ 4784 4785 sbunlock(&old_so->so_rcv); 4786 /* Now we move them over to the new socket buffer */ 4787 SCTP_INP_READ_LOCK(new_inp); 4788 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4789 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4790 m = control->data; 4791 while (m) { 4792 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4793 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4794 } 4795 sctp_sballoc(stcb, &new_so->so_rcv, m); 4796 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4797 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4798 } 4799 m = SCTP_BUF_NEXT(m); 4800 } 4801 } 4802 SCTP_INP_READ_UNLOCK(new_inp); 4803 } 4804 4805 void 4806 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4807 struct sctp_tcb *stcb, 4808 int so_locked 4809 SCTP_UNUSED 4810 ) 4811 { 4812 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4813 sctp_sorwakeup(inp, inp->sctp_socket); 4814 } 4815 } 4816 4817 void 4818 sctp_add_to_readq(struct sctp_inpcb *inp, 4819 struct sctp_tcb *stcb, 4820 struct sctp_queued_to_read *control, 4821 struct sockbuf *sb, 4822 int end, 4823 int inp_read_lock_held, 4824 int so_locked) 4825 { 4826 /* 4827 * Here we must place the control on the end of the socket read 4828 * queue AND increment sb_cc so that select will work properly on 4829 * read. 4830 */ 4831 struct mbuf *m, *prev = NULL; 4832 4833 if (inp == NULL) { 4834 /* Gak, TSNH!! */ 4835 #ifdef INVARIANTS 4836 panic("Gak, inp NULL on add_to_readq"); 4837 #endif 4838 return; 4839 } 4840 if (inp_read_lock_held == 0) 4841 SCTP_INP_READ_LOCK(inp); 4842 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4843 if (!control->on_strm_q) { 4844 sctp_free_remote_addr(control->whoFrom); 4845 if (control->data) { 4846 sctp_m_freem(control->data); 4847 control->data = NULL; 4848 } 4849 sctp_free_a_readq(stcb, control); 4850 } 4851 if (inp_read_lock_held == 0) 4852 SCTP_INP_READ_UNLOCK(inp); 4853 return; 4854 } 4855 if (!(control->spec_flags & M_NOTIFICATION)) { 4856 atomic_add_int(&inp->total_recvs, 1); 4857 if (!control->do_not_ref_stcb) { 4858 atomic_add_int(&stcb->total_recvs, 1); 4859 } 4860 } 4861 m = control->data; 4862 control->held_length = 0; 4863 control->length = 0; 4864 while (m) { 4865 if (SCTP_BUF_LEN(m) == 0) { 4866 /* Skip mbufs with NO length */ 4867 if (prev == NULL) { 4868 /* First one */ 4869 control->data = sctp_m_free(m); 4870 m = control->data; 4871 } else { 4872 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4873 m = SCTP_BUF_NEXT(prev); 4874 } 4875 if (m == NULL) { 4876 control->tail_mbuf = prev; 4877 } 4878 continue; 4879 } 4880 prev = m; 4881 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4882 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4883 } 4884 sctp_sballoc(stcb, sb, m); 4885 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4886 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 4887 } 4888 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4889 m = SCTP_BUF_NEXT(m); 4890 } 4891 if (prev != NULL) { 4892 control->tail_mbuf = prev; 4893 } else { 4894 /* Everything got collapsed out?? */ 4895 if (!control->on_strm_q) { 4896 sctp_free_remote_addr(control->whoFrom); 4897 sctp_free_a_readq(stcb, control); 4898 } 4899 if (inp_read_lock_held == 0) 4900 SCTP_INP_READ_UNLOCK(inp); 4901 return; 4902 } 4903 if (end) { 4904 control->end_added = 1; 4905 } 4906 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4907 control->on_read_q = 1; 4908 if (inp_read_lock_held == 0) 4909 SCTP_INP_READ_UNLOCK(inp); 4910 if (inp && inp->sctp_socket) { 4911 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4912 } 4913 } 4914 4915 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4916 *************ALTERNATE ROUTING CODE 4917 */ 4918 4919 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4920 *************ALTERNATE ROUTING CODE 4921 */ 4922 4923 struct mbuf * 4924 sctp_generate_cause(uint16_t code, char *info) 4925 { 4926 struct mbuf *m; 4927 struct sctp_gen_error_cause *cause; 4928 size_t info_len; 4929 uint16_t len; 4930 4931 if ((code == 0) || (info == NULL)) { 4932 return (NULL); 4933 } 4934 info_len = strlen(info); 4935 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4936 return (NULL); 4937 } 4938 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4939 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4940 if (m != NULL) { 4941 SCTP_BUF_LEN(m) = len; 4942 cause = mtod(m, struct sctp_gen_error_cause *); 4943 cause->code = htons(code); 4944 cause->length = htons(len); 4945 memcpy(cause->info, info, info_len); 4946 } 4947 return (m); 4948 } 4949 4950 struct mbuf * 4951 sctp_generate_no_user_data_cause(uint32_t tsn) 4952 { 4953 struct mbuf *m; 4954 struct sctp_error_no_user_data *no_user_data_cause; 4955 uint16_t len; 4956 4957 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 4958 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4959 if (m != NULL) { 4960 SCTP_BUF_LEN(m) = len; 4961 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 4962 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 4963 no_user_data_cause->cause.length = htons(len); 4964 no_user_data_cause->tsn = htonl(tsn); 4965 } 4966 return (m); 4967 } 4968 4969 #ifdef SCTP_MBCNT_LOGGING 4970 void 4971 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4972 struct sctp_tmit_chunk *tp1, int chk_cnt) 4973 { 4974 if (tp1->data == NULL) { 4975 return; 4976 } 4977 asoc->chunks_on_out_queue -= chk_cnt; 4978 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 4979 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4980 asoc->total_output_queue_size, 4981 tp1->book_size, 4982 0, 4983 tp1->mbcnt); 4984 } 4985 if (asoc->total_output_queue_size >= tp1->book_size) { 4986 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4987 } else { 4988 asoc->total_output_queue_size = 0; 4989 } 4990 4991 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4992 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4993 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 4994 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 4995 } else { 4996 stcb->sctp_socket->so_snd.sb_cc = 0; 4997 } 4998 } 4999 } 5000 5001 #endif 5002 5003 int 5004 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5005 uint8_t sent, int so_locked) 
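/*
 * Abandon a PR-SCTP message: every fragment belonging to the message that
 * tp1 is part of is marked SCTP_FORWARD_TSN_SKIP, the ULP is told about the
 * failed send, and the number of book-kept bytes released is returned to
 * the caller.  Fragments may sit on the sent queue, the send queue and the
 * stream out queue; all three are handled below.
 */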
5006 { 5007 struct sctp_stream_out *strq; 5008 struct sctp_tmit_chunk *chk = NULL, *tp2; 5009 struct sctp_stream_queue_pending *sp; 5010 uint32_t mid; 5011 uint16_t sid; 5012 uint8_t foundeom = 0; 5013 int ret_sz = 0; 5014 int notdone; 5015 int do_wakeup_routine = 0; 5016 5017 sid = tp1->rec.data.sid; 5018 mid = tp1->rec.data.mid; 5019 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5020 stcb->asoc.abandoned_sent[0]++; 5021 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5022 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5023 #if defined(SCTP_DETAILED_STR_STATS) 5024 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5025 #endif 5026 } else { 5027 stcb->asoc.abandoned_unsent[0]++; 5028 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5029 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5030 #if defined(SCTP_DETAILED_STR_STATS) 5031 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5032 #endif 5033 } 5034 do { 5035 ret_sz += tp1->book_size; 5036 if (tp1->data != NULL) { 5037 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5038 sctp_flight_size_decrease(tp1); 5039 sctp_total_flight_decrease(stcb, tp1); 5040 } 5041 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5042 stcb->asoc.peers_rwnd += tp1->send_size; 5043 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5044 if (sent) { 5045 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5046 } else { 5047 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5048 } 5049 if (tp1->data) { 5050 sctp_m_freem(tp1->data); 5051 tp1->data = NULL; 5052 } 5053 do_wakeup_routine = 1; 5054 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5055 stcb->asoc.sent_queue_cnt_removeable--; 5056 } 5057 } 5058 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5059 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5060 SCTP_DATA_NOT_FRAG) { 5061 /* not frag'ed we ae done */ 5062 notdone = 0; 5063 foundeom = 1; 5064 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5065 /* end of frag, we are done */ 5066 notdone = 0; 5067 foundeom = 1; 5068 } else { 5069 /* 5070 * Its a begin or middle piece, we must mark all of 5071 * it 5072 */ 5073 notdone = 1; 5074 tp1 = TAILQ_NEXT(tp1, sctp_next); 5075 } 5076 } while (tp1 && notdone); 5077 if (foundeom == 0) { 5078 /* 5079 * The multi-part message was scattered across the send and 5080 * sent queue. 5081 */ 5082 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5083 if ((tp1->rec.data.sid != sid) || 5084 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5085 break; 5086 } 5087 /* 5088 * save to chk in case we have some on stream out 5089 * queue. If so and we have an un-transmitted one we 5090 * don't have to fudge the TSN. 5091 */ 5092 chk = tp1; 5093 ret_sz += tp1->book_size; 5094 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5095 if (sent) { 5096 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5097 } else { 5098 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5099 } 5100 if (tp1->data) { 5101 sctp_m_freem(tp1->data); 5102 tp1->data = NULL; 5103 } 5104 /* No flight involved here book the size to 0 */ 5105 tp1->book_size = 0; 5106 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5107 foundeom = 1; 5108 } 5109 do_wakeup_routine = 1; 5110 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5111 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5112 /* 5113 * on to the sent queue so we can wait for it to be 5114 * passed by. 
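			 * The FORWARD-TSN machinery will then advance the
			 * cumulative ack point past it.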
5115 */ 5116 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5117 sctp_next); 5118 stcb->asoc.send_queue_cnt--; 5119 stcb->asoc.sent_queue_cnt++; 5120 } 5121 } 5122 if (foundeom == 0) { 5123 /* 5124 * Still no eom found. That means there is stuff left on the 5125 * stream out queue.. yuck. 5126 */ 5127 SCTP_TCB_SEND_LOCK(stcb); 5128 strq = &stcb->asoc.strmout[sid]; 5129 sp = TAILQ_FIRST(&strq->outqueue); 5130 if (sp != NULL) { 5131 sp->discard_rest = 1; 5132 /* 5133 * We may need to put a chunk on the queue that 5134 * holds the TSN that would have been sent with the 5135 * LAST bit. 5136 */ 5137 if (chk == NULL) { 5138 /* Yep, we have to */ 5139 sctp_alloc_a_chunk(stcb, chk); 5140 if (chk == NULL) { 5141 /* 5142 * we are hosed. All we can do is 5143 * nothing.. which will cause an 5144 * abort if the peer is paying 5145 * attention. 5146 */ 5147 goto oh_well; 5148 } 5149 memset(chk, 0, sizeof(*chk)); 5150 chk->rec.data.rcv_flags = 0; 5151 chk->sent = SCTP_FORWARD_TSN_SKIP; 5152 chk->asoc = &stcb->asoc; 5153 if (stcb->asoc.idata_supported == 0) { 5154 if (sp->sinfo_flags & SCTP_UNORDERED) { 5155 chk->rec.data.mid = 0; 5156 } else { 5157 chk->rec.data.mid = strq->next_mid_ordered; 5158 } 5159 } else { 5160 if (sp->sinfo_flags & SCTP_UNORDERED) { 5161 chk->rec.data.mid = strq->next_mid_unordered; 5162 } else { 5163 chk->rec.data.mid = strq->next_mid_ordered; 5164 } 5165 } 5166 chk->rec.data.sid = sp->sid; 5167 chk->rec.data.ppid = sp->ppid; 5168 chk->rec.data.context = sp->context; 5169 chk->flags = sp->act_flags; 5170 chk->whoTo = NULL; 5171 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5172 strq->chunks_on_queues++; 5173 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5174 stcb->asoc.sent_queue_cnt++; 5175 stcb->asoc.pr_sctp_cnt++; 5176 } 5177 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5178 if (sp->sinfo_flags & SCTP_UNORDERED) { 5179 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5180 } 5181 if (stcb->asoc.idata_supported == 0) { 5182 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5183 strq->next_mid_ordered++; 5184 } 5185 } else { 5186 if (sp->sinfo_flags & SCTP_UNORDERED) { 5187 strq->next_mid_unordered++; 5188 } else { 5189 strq->next_mid_ordered++; 5190 } 5191 } 5192 oh_well: 5193 if (sp->data) { 5194 /* 5195 * Pull any data to free up the SB and allow 5196 * sender to "add more" while we will throw 5197 * away :-) 5198 */ 5199 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5200 ret_sz += sp->length; 5201 do_wakeup_routine = 1; 5202 sp->some_taken = 1; 5203 sctp_m_freem(sp->data); 5204 sp->data = NULL; 5205 sp->tail_mbuf = NULL; 5206 sp->length = 0; 5207 } 5208 } 5209 SCTP_TCB_SEND_UNLOCK(stcb); 5210 } 5211 if (do_wakeup_routine) { 5212 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5213 } 5214 return (ret_sz); 5215 } 5216 5217 /* 5218 * checks to see if the given address, sa, is one that is currently known by 5219 * the kernel note: can't distinguish the same address on multiple interfaces 5220 * and doesn't handle multiple addresses with different zone/scope id's note: 5221 * ifa_ifwithaddr() compares the entire sockaddr struct 5222 */ 5223 struct sctp_ifa * 5224 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5225 int holds_lock) 5226 { 5227 struct sctp_laddr *laddr; 5228 5229 if (holds_lock == 0) { 5230 SCTP_INP_RLOCK(inp); 5231 } 5232 5233 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5234 if (laddr->ifa == NULL) 5235 continue; 5236 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5237 continue; 5238 
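		/*
		 * Same address family; compare the actual addresses for
		 * whichever address families are compiled in.
		 */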
#ifdef INET 5239 if (addr->sa_family == AF_INET) { 5240 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5241 laddr->ifa->address.sin.sin_addr.s_addr) { 5242 /* found him. */ 5243 break; 5244 } 5245 } 5246 #endif 5247 #ifdef INET6 5248 if (addr->sa_family == AF_INET6) { 5249 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5250 &laddr->ifa->address.sin6)) { 5251 /* found him. */ 5252 break; 5253 } 5254 } 5255 #endif 5256 } 5257 if (holds_lock == 0) { 5258 SCTP_INP_RUNLOCK(inp); 5259 } 5260 if (laddr != NULL) { 5261 return (laddr->ifa); 5262 } else { 5263 return (NULL); 5264 } 5265 } 5266 5267 uint32_t 5268 sctp_get_ifa_hash_val(struct sockaddr *addr) 5269 { 5270 switch (addr->sa_family) { 5271 #ifdef INET 5272 case AF_INET: 5273 { 5274 struct sockaddr_in *sin; 5275 5276 sin = (struct sockaddr_in *)addr; 5277 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5278 } 5279 #endif 5280 #ifdef INET6 5281 case AF_INET6: 5282 { 5283 struct sockaddr_in6 *sin6; 5284 uint32_t hash_of_addr; 5285 5286 sin6 = (struct sockaddr_in6 *)addr; 5287 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5288 sin6->sin6_addr.s6_addr32[1] + 5289 sin6->sin6_addr.s6_addr32[2] + 5290 sin6->sin6_addr.s6_addr32[3]); 5291 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5292 return (hash_of_addr); 5293 } 5294 #endif 5295 default: 5296 break; 5297 } 5298 return (0); 5299 } 5300 5301 struct sctp_ifa * 5302 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5303 { 5304 struct sctp_ifa *sctp_ifap; 5305 struct sctp_vrf *vrf; 5306 struct sctp_ifalist *hash_head; 5307 uint32_t hash_of_addr; 5308 5309 if (holds_lock == 0) { 5310 SCTP_IPI_ADDR_RLOCK(); 5311 } else { 5312 SCTP_IPI_ADDR_LOCK_ASSERT(); 5313 } 5314 5315 vrf = sctp_find_vrf(vrf_id); 5316 if (vrf == NULL) { 5317 if (holds_lock == 0) 5318 SCTP_IPI_ADDR_RUNLOCK(); 5319 return (NULL); 5320 } 5321 5322 hash_of_addr = sctp_get_ifa_hash_val(addr); 5323 5324 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5325 if (hash_head == NULL) { 5326 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5327 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5328 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5329 sctp_print_address(addr); 5330 SCTP_PRINTF("No such bucket for address\n"); 5331 if (holds_lock == 0) 5332 SCTP_IPI_ADDR_RUNLOCK(); 5333 5334 return (NULL); 5335 } 5336 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5337 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5338 continue; 5339 #ifdef INET 5340 if (addr->sa_family == AF_INET) { 5341 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5342 sctp_ifap->address.sin.sin_addr.s_addr) { 5343 /* found him. */ 5344 break; 5345 } 5346 } 5347 #endif 5348 #ifdef INET6 5349 if (addr->sa_family == AF_INET6) { 5350 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5351 &sctp_ifap->address.sin6)) { 5352 /* found him. */ 5353 break; 5354 } 5355 } 5356 #endif 5357 } 5358 if (holds_lock == 0) 5359 SCTP_IPI_ADDR_RUNLOCK(); 5360 return (sctp_ifap); 5361 } 5362 5363 static void 5364 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5365 uint32_t rwnd_req) 5366 { 5367 /* User pulled some data, do we need a rwnd update? 
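	 * Only when the window we could now advertise exceeds the last
	 * reported rwnd by at least rwnd_req (a fraction of the receive
	 * buffer, never less than a MTU's worth) is a window-update SACK
	 * pushed out, under the TCB lock.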
*/ 5368 struct epoch_tracker et; 5369 int r_unlocked = 0; 5370 uint32_t dif, rwnd; 5371 struct socket *so = NULL; 5372 5373 if (stcb == NULL) 5374 return; 5375 5376 atomic_add_int(&stcb->asoc.refcnt, 1); 5377 5378 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5379 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5380 /* Pre-check If we are freeing no update */ 5381 goto no_lock; 5382 } 5383 SCTP_INP_INCR_REF(stcb->sctp_ep); 5384 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5385 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5386 goto out; 5387 } 5388 so = stcb->sctp_socket; 5389 if (so == NULL) { 5390 goto out; 5391 } 5392 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5393 /* Have you have freed enough to look */ 5394 *freed_so_far = 0; 5395 /* Yep, its worth a look and the lock overhead */ 5396 5397 /* Figure out what the rwnd would be */ 5398 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5399 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5400 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5401 } else { 5402 dif = 0; 5403 } 5404 if (dif >= rwnd_req) { 5405 if (hold_rlock) { 5406 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5407 r_unlocked = 1; 5408 } 5409 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5410 /* 5411 * One last check before we allow the guy possibly 5412 * to get in. There is a race, where the guy has not 5413 * reached the gate. In that case 5414 */ 5415 goto out; 5416 } 5417 SCTP_TCB_LOCK(stcb); 5418 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5419 /* No reports here */ 5420 SCTP_TCB_UNLOCK(stcb); 5421 goto out; 5422 } 5423 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5424 NET_EPOCH_ENTER(et); 5425 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5426 5427 sctp_chunk_output(stcb->sctp_ep, stcb, 5428 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5429 /* make sure no timer is running */ 5430 NET_EPOCH_EXIT(et); 5431 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5432 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5433 SCTP_TCB_UNLOCK(stcb); 5434 } else { 5435 /* Update how much we have pending */ 5436 stcb->freed_by_sorcv_sincelast = dif; 5437 } 5438 out: 5439 if (so && r_unlocked && hold_rlock) { 5440 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5441 } 5442 5443 SCTP_INP_DECR_REF(stcb->sctp_ep); 5444 no_lock: 5445 atomic_add_int(&stcb->asoc.refcnt, -1); 5446 return; 5447 } 5448 5449 int 5450 sctp_sorecvmsg(struct socket *so, 5451 struct uio *uio, 5452 struct mbuf **mp, 5453 struct sockaddr *from, 5454 int fromlen, 5455 int *msg_flags, 5456 struct sctp_sndrcvinfo *sinfo, 5457 int filling_sinfo) 5458 { 5459 /* 5460 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5461 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5462 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
5463 * On the way out we may send out any combination of: 5464 * MSG_NOTIFICATION MSG_EOR 5465 * 5466 */ 5467 struct sctp_inpcb *inp = NULL; 5468 ssize_t my_len = 0; 5469 ssize_t cp_len = 0; 5470 int error = 0; 5471 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5472 struct mbuf *m = NULL; 5473 struct sctp_tcb *stcb = NULL; 5474 int wakeup_read_socket = 0; 5475 int freecnt_applied = 0; 5476 int out_flags = 0, in_flags = 0; 5477 int block_allowed = 1; 5478 uint32_t freed_so_far = 0; 5479 ssize_t copied_so_far = 0; 5480 int in_eeor_mode = 0; 5481 int no_rcv_needed = 0; 5482 uint32_t rwnd_req = 0; 5483 int hold_sblock = 0; 5484 int hold_rlock = 0; 5485 ssize_t slen = 0; 5486 uint32_t held_length = 0; 5487 int sockbuf_lock = 0; 5488 5489 if (uio == NULL) { 5490 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5491 return (EINVAL); 5492 } 5493 5494 if (msg_flags) { 5495 in_flags = *msg_flags; 5496 if (in_flags & MSG_PEEK) 5497 SCTP_STAT_INCR(sctps_read_peeks); 5498 } else { 5499 in_flags = 0; 5500 } 5501 slen = uio->uio_resid; 5502 5503 /* Pull in and set up our int flags */ 5504 if (in_flags & MSG_OOB) { 5505 /* Out of band's NOT supported */ 5506 return (EOPNOTSUPP); 5507 } 5508 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5509 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5510 return (EINVAL); 5511 } 5512 if ((in_flags & (MSG_DONTWAIT 5513 | MSG_NBIO 5514 )) || 5515 SCTP_SO_IS_NBIO(so)) { 5516 block_allowed = 0; 5517 } 5518 /* setup the endpoint */ 5519 inp = (struct sctp_inpcb *)so->so_pcb; 5520 if (inp == NULL) { 5521 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5522 return (EFAULT); 5523 } 5524 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5525 /* Must be at least a MTU's worth */ 5526 if (rwnd_req < SCTP_MIN_RWND) 5527 rwnd_req = SCTP_MIN_RWND; 5528 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5529 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5530 sctp_misc_ints(SCTP_SORECV_ENTER, 5531 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5532 } 5533 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5534 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5535 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5536 } 5537 5538 error = sblock(&so->so_rcv, (block_allowed ? 
SBL_WAIT : 0)); 5539 if (error) { 5540 goto release_unlocked; 5541 } 5542 sockbuf_lock = 1; 5543 restart: 5544 5545 restart_nosblocks: 5546 if (hold_sblock == 0) { 5547 SOCKBUF_LOCK(&so->so_rcv); 5548 hold_sblock = 1; 5549 } 5550 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5551 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5552 goto out; 5553 } 5554 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5555 if (so->so_error) { 5556 error = so->so_error; 5557 if ((in_flags & MSG_PEEK) == 0) 5558 so->so_error = 0; 5559 goto out; 5560 } else { 5561 if (so->so_rcv.sb_cc == 0) { 5562 /* indicate EOF */ 5563 error = 0; 5564 goto out; 5565 } 5566 } 5567 } 5568 if (so->so_rcv.sb_cc <= held_length) { 5569 if (so->so_error) { 5570 error = so->so_error; 5571 if ((in_flags & MSG_PEEK) == 0) { 5572 so->so_error = 0; 5573 } 5574 goto out; 5575 } 5576 if ((so->so_rcv.sb_cc == 0) && 5577 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5578 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5579 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5580 /* 5581 * For active open side clear flags for 5582 * re-use passive open is blocked by 5583 * connect. 5584 */ 5585 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5586 /* 5587 * You were aborted, passive side 5588 * always hits here 5589 */ 5590 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5591 error = ECONNRESET; 5592 } 5593 so->so_state &= ~(SS_ISCONNECTING | 5594 SS_ISDISCONNECTING | 5595 SS_ISCONFIRMING | 5596 SS_ISCONNECTED); 5597 if (error == 0) { 5598 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5599 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5600 error = ENOTCONN; 5601 } 5602 } 5603 goto out; 5604 } 5605 } 5606 if (block_allowed) { 5607 error = sbwait(&so->so_rcv); 5608 if (error) { 5609 goto out; 5610 } 5611 held_length = 0; 5612 goto restart_nosblocks; 5613 } else { 5614 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5615 error = EWOULDBLOCK; 5616 goto out; 5617 } 5618 } 5619 if (hold_sblock == 1) { 5620 SOCKBUF_UNLOCK(&so->so_rcv); 5621 hold_sblock = 0; 5622 } 5623 /* we possibly have data we can read */ 5624 /* sa_ignore FREED_MEMORY */ 5625 control = TAILQ_FIRST(&inp->read_queue); 5626 if (control == NULL) { 5627 /* 5628 * This could be happening since the appender did the 5629 * increment but as not yet did the tailq insert onto the 5630 * read_queue 5631 */ 5632 if (hold_rlock == 0) { 5633 SCTP_INP_READ_LOCK(inp); 5634 } 5635 control = TAILQ_FIRST(&inp->read_queue); 5636 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5637 #ifdef INVARIANTS 5638 panic("Huh, its non zero and nothing on control?"); 5639 #endif 5640 so->so_rcv.sb_cc = 0; 5641 } 5642 SCTP_INP_READ_UNLOCK(inp); 5643 hold_rlock = 0; 5644 goto restart; 5645 } 5646 5647 if ((control->length == 0) && 5648 (control->do_not_ref_stcb)) { 5649 /* 5650 * Clean up code for freeing assoc that left behind a 5651 * pdapi.. maybe a peer in EEOR that just closed after 5652 * sending and never indicated a EOR. 5653 */ 5654 if (hold_rlock == 0) { 5655 hold_rlock = 1; 5656 SCTP_INP_READ_LOCK(inp); 5657 } 5658 control->held_length = 0; 5659 if (control->data) { 5660 /* Hmm there is data here .. 
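the association went away before this message was finalized, so walk the chain, recompute the length and mark it complete; letting the normal read path below deliver the queued data is the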
fix */ 5661 struct mbuf *m_tmp; 5662 int cnt = 0; 5663 5664 m_tmp = control->data; 5665 while (m_tmp) { 5666 cnt += SCTP_BUF_LEN(m_tmp); 5667 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5668 control->tail_mbuf = m_tmp; 5669 control->end_added = 1; 5670 } 5671 m_tmp = SCTP_BUF_NEXT(m_tmp); 5672 } 5673 control->length = cnt; 5674 } else { 5675 /* remove it */ 5676 TAILQ_REMOVE(&inp->read_queue, control, next); 5677 /* Add back any hiddend data */ 5678 sctp_free_remote_addr(control->whoFrom); 5679 sctp_free_a_readq(stcb, control); 5680 } 5681 if (hold_rlock) { 5682 hold_rlock = 0; 5683 SCTP_INP_READ_UNLOCK(inp); 5684 } 5685 goto restart; 5686 } 5687 if ((control->length == 0) && 5688 (control->end_added == 1)) { 5689 /* 5690 * Do we also need to check for (control->pdapi_aborted == 5691 * 1)? 5692 */ 5693 if (hold_rlock == 0) { 5694 hold_rlock = 1; 5695 SCTP_INP_READ_LOCK(inp); 5696 } 5697 TAILQ_REMOVE(&inp->read_queue, control, next); 5698 if (control->data) { 5699 #ifdef INVARIANTS 5700 panic("control->data not null but control->length == 0"); 5701 #else 5702 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n"); 5703 sctp_m_freem(control->data); 5704 control->data = NULL; 5705 #endif 5706 } 5707 if (control->aux_data) { 5708 sctp_m_free(control->aux_data); 5709 control->aux_data = NULL; 5710 } 5711 #ifdef INVARIANTS 5712 if (control->on_strm_q) { 5713 panic("About to free ctl:%p so:%p and its in %d", 5714 control, so, control->on_strm_q); 5715 } 5716 #endif 5717 sctp_free_remote_addr(control->whoFrom); 5718 sctp_free_a_readq(stcb, control); 5719 if (hold_rlock) { 5720 hold_rlock = 0; 5721 SCTP_INP_READ_UNLOCK(inp); 5722 } 5723 goto restart; 5724 } 5725 if (control->length == 0) { 5726 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5727 (filling_sinfo)) { 5728 /* find a more suitable one then this */ 5729 ctl = TAILQ_NEXT(control, next); 5730 while (ctl) { 5731 if ((ctl->stcb != control->stcb) && (ctl->length) && 5732 (ctl->some_taken || 5733 (ctl->spec_flags & M_NOTIFICATION) || 5734 ((ctl->do_not_ref_stcb == 0) && 5735 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5736 ) { 5737 /*- 5738 * If we have a different TCB next, and there is data 5739 * present. If we have already taken some (pdapi), OR we can 5740 * ref the tcb and no delivery as started on this stream, we 5741 * take it. Note we allow a notification on a different 5742 * assoc to be delivered.. 5743 */ 5744 control = ctl; 5745 goto found_one; 5746 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5747 (ctl->length) && 5748 ((ctl->some_taken) || 5749 ((ctl->do_not_ref_stcb == 0) && 5750 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5751 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5752 /*- 5753 * If we have the same tcb, and there is data present, and we 5754 * have the strm interleave feature present. Then if we have 5755 * taken some (pdapi) or we can refer to tht tcb AND we have 5756 * not started a delivery for this stream, we can take it. 5757 * Note we do NOT allow a notificaiton on the same assoc to 5758 * be delivered. 5759 */ 5760 control = ctl; 5761 goto found_one; 5762 } 5763 ctl = TAILQ_NEXT(ctl, next); 5764 } 5765 } 5766 /* 5767 * if we reach here, not suitable replacement is available 5768 * <or> fragment interleave is NOT on. So stuff the sb_cc 5769 * into the our held count, and its time to sleep again. 
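Recording sb_cc here keeps the restart path from busy-looping: it will sbwait() until the socket buffer grows beyond what was present when we decided to sleep.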
5770 */ 5771 held_length = so->so_rcv.sb_cc; 5772 control->held_length = so->so_rcv.sb_cc; 5773 goto restart; 5774 } 5775 /* Clear the held length since there is something to read */ 5776 control->held_length = 0; 5777 found_one: 5778 /* 5779 * If we reach here, control has a some data for us to read off. 5780 * Note that stcb COULD be NULL. 5781 */ 5782 if (hold_rlock == 0) { 5783 hold_rlock = 1; 5784 SCTP_INP_READ_LOCK(inp); 5785 } 5786 control->some_taken++; 5787 stcb = control->stcb; 5788 if (stcb) { 5789 if ((control->do_not_ref_stcb == 0) && 5790 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5791 if (freecnt_applied == 0) 5792 stcb = NULL; 5793 } else if (control->do_not_ref_stcb == 0) { 5794 /* you can't free it on me please */ 5795 /* 5796 * The lock on the socket buffer protects us so the 5797 * free code will stop. But since we used the 5798 * socketbuf lock and the sender uses the tcb_lock 5799 * to increment, we need to use the atomic add to 5800 * the refcnt 5801 */ 5802 if (freecnt_applied) { 5803 #ifdef INVARIANTS 5804 panic("refcnt already incremented"); 5805 #else 5806 SCTP_PRINTF("refcnt already incremented?\n"); 5807 #endif 5808 } else { 5809 atomic_add_int(&stcb->asoc.refcnt, 1); 5810 freecnt_applied = 1; 5811 } 5812 /* 5813 * Setup to remember how much we have not yet told 5814 * the peer our rwnd has opened up. Note we grab the 5815 * value from the tcb from last time. Note too that 5816 * sack sending clears this when a sack is sent, 5817 * which is fine. Once we hit the rwnd_req, we then 5818 * will go to the sctp_user_rcvd() that will not 5819 * lock until it KNOWs it MUST send a WUP-SACK. 5820 */ 5821 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5822 stcb->freed_by_sorcv_sincelast = 0; 5823 } 5824 } 5825 if (stcb && 5826 ((control->spec_flags & M_NOTIFICATION) == 0) && 5827 control->do_not_ref_stcb == 0) { 5828 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5829 } 5830 5831 /* First lets get off the sinfo and sockaddr info */ 5832 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5833 sinfo->sinfo_stream = control->sinfo_stream; 5834 sinfo->sinfo_ssn = (uint16_t)control->mid; 5835 sinfo->sinfo_flags = control->sinfo_flags; 5836 sinfo->sinfo_ppid = control->sinfo_ppid; 5837 sinfo->sinfo_context = control->sinfo_context; 5838 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5839 sinfo->sinfo_tsn = control->sinfo_tsn; 5840 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5841 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5842 nxt = TAILQ_NEXT(control, next); 5843 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5844 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5845 struct sctp_extrcvinfo *s_extra; 5846 5847 s_extra = (struct sctp_extrcvinfo *)sinfo; 5848 if ((nxt) && 5849 (nxt->length)) { 5850 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5851 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5852 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5853 } 5854 if (nxt->spec_flags & M_NOTIFICATION) { 5855 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5856 } 5857 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5858 s_extra->serinfo_next_length = nxt->length; 5859 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5860 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5861 if (nxt->tail_mbuf != NULL) { 5862 if (nxt->end_added) { 5863 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5864 } 5865 } 5866 } else { 5867 /* 5868 * we explicitly 0 this, since the memcpy 5869 * got some other things 
beyond the older 5870 * sinfo_ that is on the control's structure 5871 * :-D 5872 */ 5873 nxt = NULL; 5874 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5875 s_extra->serinfo_next_aid = 0; 5876 s_extra->serinfo_next_length = 0; 5877 s_extra->serinfo_next_ppid = 0; 5878 s_extra->serinfo_next_stream = 0; 5879 } 5880 } 5881 /* 5882 * update off the real current cum-ack, if we have an stcb. 5883 */ 5884 if ((control->do_not_ref_stcb == 0) && stcb) 5885 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5886 /* 5887 * mask off the high bits, we keep the actual chunk bits in 5888 * there. 5889 */ 5890 sinfo->sinfo_flags &= 0x00ff; 5891 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5892 sinfo->sinfo_flags |= SCTP_UNORDERED; 5893 } 5894 } 5895 #ifdef SCTP_ASOCLOG_OF_TSNS 5896 { 5897 int index, newindex; 5898 struct sctp_pcbtsn_rlog *entry; 5899 5900 do { 5901 index = inp->readlog_index; 5902 newindex = index + 1; 5903 if (newindex >= SCTP_READ_LOG_SIZE) { 5904 newindex = 0; 5905 } 5906 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5907 entry = &inp->readlog[index]; 5908 entry->vtag = control->sinfo_assoc_id; 5909 entry->strm = control->sinfo_stream; 5910 entry->seq = (uint16_t)control->mid; 5911 entry->sz = control->length; 5912 entry->flgs = control->sinfo_flags; 5913 } 5914 #endif 5915 if ((fromlen > 0) && (from != NULL)) { 5916 union sctp_sockstore store; 5917 size_t len; 5918 5919 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5920 #ifdef INET6 5921 case AF_INET6: 5922 len = sizeof(struct sockaddr_in6); 5923 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5924 store.sin6.sin6_port = control->port_from; 5925 break; 5926 #endif 5927 #ifdef INET 5928 case AF_INET: 5929 #ifdef INET6 5930 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5931 len = sizeof(struct sockaddr_in6); 5932 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5933 &store.sin6); 5934 store.sin6.sin6_port = control->port_from; 5935 } else { 5936 len = sizeof(struct sockaddr_in); 5937 store.sin = control->whoFrom->ro._l_addr.sin; 5938 store.sin.sin_port = control->port_from; 5939 } 5940 #else 5941 len = sizeof(struct sockaddr_in); 5942 store.sin = control->whoFrom->ro._l_addr.sin; 5943 store.sin.sin_port = control->port_from; 5944 #endif 5945 break; 5946 #endif 5947 default: 5948 len = 0; 5949 break; 5950 } 5951 memcpy(from, &store, min((size_t)fromlen, len)); 5952 #ifdef INET6 5953 { 5954 struct sockaddr_in6 lsa6, *from6; 5955 5956 from6 = (struct sockaddr_in6 *)from; 5957 sctp_recover_scope_mac(from6, (&lsa6)); 5958 } 5959 #endif 5960 } 5961 if (hold_rlock) { 5962 SCTP_INP_READ_UNLOCK(inp); 5963 hold_rlock = 0; 5964 } 5965 if (hold_sblock) { 5966 SOCKBUF_UNLOCK(&so->so_rcv); 5967 hold_sblock = 0; 5968 } 5969 /* now copy out what data we can */ 5970 if (mp == NULL) { 5971 /* copy out each mbuf in the chain up to length */ 5972 get_more_data: 5973 m = control->data; 5974 while (m) { 5975 /* Move out all we can */ 5976 cp_len = uio->uio_resid; 5977 my_len = SCTP_BUF_LEN(m); 5978 if (cp_len > my_len) { 5979 /* not enough in this buf */ 5980 cp_len = my_len; 5981 } 5982 if (hold_rlock) { 5983 SCTP_INP_READ_UNLOCK(inp); 5984 hold_rlock = 0; 5985 } 5986 if (cp_len > 0) 5987 error = uiomove(mtod(m, char *), (int)cp_len, uio); 5988 /* re-read */ 5989 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5990 goto release; 5991 } 5992 5993 if ((control->do_not_ref_stcb == 0) && stcb && 5994 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5995 no_rcv_needed = 1; 5996 } 5997 if 
(error) { 5998 /* error we are out of here */ 5999 goto release; 6000 } 6001 SCTP_INP_READ_LOCK(inp); 6002 hold_rlock = 1; 6003 if (cp_len == SCTP_BUF_LEN(m)) { 6004 if ((SCTP_BUF_NEXT(m) == NULL) && 6005 (control->end_added)) { 6006 out_flags |= MSG_EOR; 6007 if ((control->do_not_ref_stcb == 0) && 6008 (control->stcb != NULL) && 6009 ((control->spec_flags & M_NOTIFICATION) == 0)) 6010 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6011 } 6012 if (control->spec_flags & M_NOTIFICATION) { 6013 out_flags |= MSG_NOTIFICATION; 6014 } 6015 /* we ate up the mbuf */ 6016 if (in_flags & MSG_PEEK) { 6017 /* just looking */ 6018 m = SCTP_BUF_NEXT(m); 6019 copied_so_far += cp_len; 6020 } else { 6021 /* dispose of the mbuf */ 6022 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6023 sctp_sblog(&so->so_rcv, 6024 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6025 } 6026 sctp_sbfree(control, stcb, &so->so_rcv, m); 6027 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6028 sctp_sblog(&so->so_rcv, 6029 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6030 } 6031 copied_so_far += cp_len; 6032 freed_so_far += (uint32_t)cp_len; 6033 freed_so_far += MSIZE; 6034 atomic_subtract_int(&control->length, cp_len); 6035 control->data = sctp_m_free(m); 6036 m = control->data; 6037 /* 6038 * been through it all, must hold sb 6039 * lock ok to null tail 6040 */ 6041 if (control->data == NULL) { 6042 #ifdef INVARIANTS 6043 if ((control->end_added == 0) || 6044 (TAILQ_NEXT(control, next) == NULL)) { 6045 /* 6046 * If the end is not 6047 * added, OR the 6048 * next is NOT null 6049 * we MUST have the 6050 * lock. 6051 */ 6052 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6053 panic("Hmm we don't own the lock?"); 6054 } 6055 } 6056 #endif 6057 control->tail_mbuf = NULL; 6058 #ifdef INVARIANTS 6059 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6060 panic("end_added, nothing left and no MSG_EOR"); 6061 } 6062 #endif 6063 } 6064 } 6065 } else { 6066 /* Do we need to trim the mbuf? */ 6067 if (control->spec_flags & M_NOTIFICATION) { 6068 out_flags |= MSG_NOTIFICATION; 6069 } 6070 if ((in_flags & MSG_PEEK) == 0) { 6071 SCTP_BUF_RESV_UF(m, cp_len); 6072 SCTP_BUF_LEN(m) -= (int)cp_len; 6073 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6074 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6075 } 6076 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 6077 if ((control->do_not_ref_stcb == 0) && 6078 stcb) { 6079 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 6080 } 6081 copied_so_far += cp_len; 6082 freed_so_far += (uint32_t)cp_len; 6083 freed_so_far += MSIZE; 6084 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6085 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6086 SCTP_LOG_SBRESULT, 0); 6087 } 6088 atomic_subtract_int(&control->length, cp_len); 6089 } else { 6090 copied_so_far += cp_len; 6091 } 6092 } 6093 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6094 break; 6095 } 6096 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6097 (control->do_not_ref_stcb == 0) && 6098 (freed_so_far >= rwnd_req)) { 6099 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6100 } 6101 } /* end while(m) */ 6102 /* 6103 * At this point we have looked at it all and we either have 6104 * a MSG_EOR/or read all the user wants... <OR> 6105 * control->length == 0. 
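If the whole message was consumed, the control is pulled off the read queue and freed below; if data remains, MSG_EOR is turned off again; otherwise we either return to the caller or wait for more data to arrive.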
6106 */ 6107 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6108 /* we are done with this control */ 6109 if (control->length == 0) { 6110 if (control->data) { 6111 #ifdef INVARIANTS 6112 panic("control->data not null at read eor?"); 6113 #else 6114 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n"); 6115 sctp_m_freem(control->data); 6116 control->data = NULL; 6117 #endif 6118 } 6119 done_with_control: 6120 if (hold_rlock == 0) { 6121 SCTP_INP_READ_LOCK(inp); 6122 hold_rlock = 1; 6123 } 6124 TAILQ_REMOVE(&inp->read_queue, control, next); 6125 /* Add back any hiddend data */ 6126 if (control->held_length) { 6127 held_length = 0; 6128 control->held_length = 0; 6129 wakeup_read_socket = 1; 6130 } 6131 if (control->aux_data) { 6132 sctp_m_free(control->aux_data); 6133 control->aux_data = NULL; 6134 } 6135 no_rcv_needed = control->do_not_ref_stcb; 6136 sctp_free_remote_addr(control->whoFrom); 6137 control->data = NULL; 6138 #ifdef INVARIANTS 6139 if (control->on_strm_q) { 6140 panic("About to free ctl:%p so:%p and its in %d", 6141 control, so, control->on_strm_q); 6142 } 6143 #endif 6144 sctp_free_a_readq(stcb, control); 6145 control = NULL; 6146 if ((freed_so_far >= rwnd_req) && 6147 (no_rcv_needed == 0)) 6148 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6149 6150 } else { 6151 /* 6152 * The user did not read all of this 6153 * message, turn off the returned MSG_EOR 6154 * since we are leaving more behind on the 6155 * control to read. 6156 */ 6157 #ifdef INVARIANTS 6158 if (control->end_added && 6159 (control->data == NULL) && 6160 (control->tail_mbuf == NULL)) { 6161 panic("Gak, control->length is corrupt?"); 6162 } 6163 #endif 6164 no_rcv_needed = control->do_not_ref_stcb; 6165 out_flags &= ~MSG_EOR; 6166 } 6167 } 6168 if (out_flags & MSG_EOR) { 6169 goto release; 6170 } 6171 if ((uio->uio_resid == 0) || 6172 ((in_eeor_mode) && 6173 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6174 goto release; 6175 } 6176 /* 6177 * If I hit here the receiver wants more and this message is 6178 * NOT done (pd-api). So two questions. Can we block? if not 6179 * we are done. Did the user NOT set MSG_WAITALL? 6180 */ 6181 if (block_allowed == 0) { 6182 goto release; 6183 } 6184 /* 6185 * We need to wait for more data a few things: - We don't 6186 * sbunlock() so we don't get someone else reading. - We 6187 * must be sure to account for the case where what is added 6188 * is NOT to our control when we wakeup. 6189 */ 6190 6191 /* 6192 * Do we need to tell the transport a rwnd update might be 6193 * needed before we go to sleep? 
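Only when this is not a peek, the stcb is still referenced (do_not_ref_stcb is clear), at least rwnd_req bytes have been freed, and an update is still wanted (no_rcv_needed is clear).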
6194 */ 6195 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6196 ((freed_so_far >= rwnd_req) && 6197 (control->do_not_ref_stcb == 0) && 6198 (no_rcv_needed == 0))) { 6199 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6200 } 6201 wait_some_more: 6202 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6203 goto release; 6204 } 6205 6206 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6207 goto release; 6208 6209 if (hold_rlock == 1) { 6210 SCTP_INP_READ_UNLOCK(inp); 6211 hold_rlock = 0; 6212 } 6213 if (hold_sblock == 0) { 6214 SOCKBUF_LOCK(&so->so_rcv); 6215 hold_sblock = 1; 6216 } 6217 if ((copied_so_far) && (control->length == 0) && 6218 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6219 goto release; 6220 } 6221 if (so->so_rcv.sb_cc <= control->held_length) { 6222 error = sbwait(&so->so_rcv); 6223 if (error) { 6224 goto release; 6225 } 6226 control->held_length = 0; 6227 } 6228 if (hold_sblock) { 6229 SOCKBUF_UNLOCK(&so->so_rcv); 6230 hold_sblock = 0; 6231 } 6232 if (control->length == 0) { 6233 /* still nothing here */ 6234 if (control->end_added == 1) { 6235 /* he aborted, or is done i.e.did a shutdown */ 6236 out_flags |= MSG_EOR; 6237 if (control->pdapi_aborted) { 6238 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6239 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6240 6241 out_flags |= MSG_TRUNC; 6242 } else { 6243 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6244 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6245 } 6246 goto done_with_control; 6247 } 6248 if (so->so_rcv.sb_cc > held_length) { 6249 control->held_length = so->so_rcv.sb_cc; 6250 held_length = 0; 6251 } 6252 goto wait_some_more; 6253 } else if (control->data == NULL) { 6254 /* 6255 * we must re-sync since data is probably being 6256 * added 6257 */ 6258 SCTP_INP_READ_LOCK(inp); 6259 if ((control->length > 0) && (control->data == NULL)) { 6260 /* 6261 * big trouble.. we have the lock and its 6262 * corrupt? 6263 */ 6264 #ifdef INVARIANTS 6265 panic("Impossible data==NULL length !=0"); 6266 #endif 6267 out_flags |= MSG_EOR; 6268 out_flags |= MSG_TRUNC; 6269 control->length = 0; 6270 SCTP_INP_READ_UNLOCK(inp); 6271 goto done_with_control; 6272 } 6273 SCTP_INP_READ_UNLOCK(inp); 6274 /* We will fall around to get more data */ 6275 } 6276 goto get_more_data; 6277 } else { 6278 /*- 6279 * Give caller back the mbuf chain, 6280 * store in uio_resid the length 6281 */ 6282 wakeup_read_socket = 0; 6283 if ((control->end_added == 0) || 6284 (TAILQ_NEXT(control, next) == NULL)) { 6285 /* Need to get rlock */ 6286 if (hold_rlock == 0) { 6287 SCTP_INP_READ_LOCK(inp); 6288 hold_rlock = 1; 6289 } 6290 } 6291 if (control->end_added) { 6292 out_flags |= MSG_EOR; 6293 if ((control->do_not_ref_stcb == 0) && 6294 (control->stcb != NULL) && 6295 ((control->spec_flags & M_NOTIFICATION) == 0)) 6296 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6297 } 6298 if (control->spec_flags & M_NOTIFICATION) { 6299 out_flags |= MSG_NOTIFICATION; 6300 } 6301 uio->uio_resid = control->length; 6302 *mp = control->data; 6303 m = control->data; 6304 while (m) { 6305 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6306 sctp_sblog(&so->so_rcv, 6307 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6308 } 6309 sctp_sbfree(control, stcb, &so->so_rcv, m); 6310 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6311 freed_so_far += MSIZE; 6312 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6313 sctp_sblog(&so->so_rcv, 6314 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6315 } 6316 m = SCTP_BUF_NEXT(m); 6317 } 6318 control->data = control->tail_mbuf = NULL; 6319 control->length = 0; 6320 if (out_flags & MSG_EOR) { 6321 /* Done with this control */ 6322 goto done_with_control; 6323 } 6324 } 6325 release: 6326 if (hold_rlock == 1) { 6327 SCTP_INP_READ_UNLOCK(inp); 6328 hold_rlock = 0; 6329 } 6330 if (hold_sblock == 1) { 6331 SOCKBUF_UNLOCK(&so->so_rcv); 6332 hold_sblock = 0; 6333 } 6334 6335 sbunlock(&so->so_rcv); 6336 sockbuf_lock = 0; 6337 6338 release_unlocked: 6339 if (hold_sblock) { 6340 SOCKBUF_UNLOCK(&so->so_rcv); 6341 hold_sblock = 0; 6342 } 6343 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6344 if ((freed_so_far >= rwnd_req) && 6345 (control && (control->do_not_ref_stcb == 0)) && 6346 (no_rcv_needed == 0)) 6347 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6348 } 6349 out: 6350 if (msg_flags) { 6351 *msg_flags = out_flags; 6352 } 6353 if (((out_flags & MSG_EOR) == 0) && 6354 ((in_flags & MSG_PEEK) == 0) && 6355 (sinfo) && 6356 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6357 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6358 struct sctp_extrcvinfo *s_extra; 6359 6360 s_extra = (struct sctp_extrcvinfo *)sinfo; 6361 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6362 } 6363 if (hold_rlock == 1) { 6364 SCTP_INP_READ_UNLOCK(inp); 6365 } 6366 if (hold_sblock) { 6367 SOCKBUF_UNLOCK(&so->so_rcv); 6368 } 6369 if (sockbuf_lock) { 6370 sbunlock(&so->so_rcv); 6371 } 6372 6373 if (freecnt_applied) { 6374 /* 6375 * The lock on the socket buffer protects us so the free 6376 * code will stop. But since we used the socketbuf lock and 6377 * the sender uses the tcb_lock to increment, we need to use 6378 * the atomic add to the refcnt. 6379 */ 6380 if (stcb == NULL) { 6381 #ifdef INVARIANTS 6382 panic("stcb for refcnt has gone NULL?"); 6383 goto stage_left; 6384 #else 6385 goto stage_left; 6386 #endif 6387 } 6388 /* Save the value back for next time */ 6389 stcb->freed_by_sorcv_sincelast = freed_so_far; 6390 atomic_add_int(&stcb->asoc.refcnt, -1); 6391 } 6392 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6393 if (stcb) { 6394 sctp_misc_ints(SCTP_SORECV_DONE, 6395 freed_so_far, 6396 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6397 stcb->asoc.my_rwnd, 6398 so->so_rcv.sb_cc); 6399 } else { 6400 sctp_misc_ints(SCTP_SORECV_DONE, 6401 freed_so_far, 6402 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6403 0, 6404 so->so_rcv.sb_cc); 6405 } 6406 } 6407 stage_left: 6408 if (wakeup_read_socket) { 6409 sctp_sorwakeup(inp, so); 6410 } 6411 return (error); 6412 } 6413 6414 #ifdef SCTP_MBUF_LOGGING 6415 struct mbuf * 6416 sctp_m_free(struct mbuf *m) 6417 { 6418 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6419 sctp_log_mb(m, SCTP_MBUF_IFREE); 6420 } 6421 return (m_free(m)); 6422 } 6423 6424 void 6425 sctp_m_freem(struct mbuf *mb) 6426 { 6427 while (mb != NULL) 6428 mb = sctp_m_free(mb); 6429 } 6430 6431 #endif 6432 6433 int 6434 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6435 { 6436 /* 6437 * Given a local address. For all associations that holds the 6438 * address, request a peer-set-primary. 
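This is done asynchronously: the address is wrapped in a work-queue entry and handed to the ADDR_WQ timer/iterator instead of walking the associations here.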
6439 */ 6440 struct sctp_ifa *ifa; 6441 struct sctp_laddr *wi; 6442 6443 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6444 if (ifa == NULL) { 6445 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6446 return (EADDRNOTAVAIL); 6447 } 6448 /* 6449 * Now that we have the ifa we must awaken the iterator with this 6450 * message. 6451 */ 6452 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6453 if (wi == NULL) { 6454 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6455 return (ENOMEM); 6456 } 6457 /* Now incr the count and int wi structure */ 6458 SCTP_INCR_LADDR_COUNT(); 6459 memset(wi, 0, sizeof(*wi)); 6460 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6461 wi->ifa = ifa; 6462 wi->action = SCTP_SET_PRIM_ADDR; 6463 atomic_add_int(&ifa->refcount, 1); 6464 6465 /* Now add it to the work queue */ 6466 SCTP_WQ_ADDR_LOCK(); 6467 /* 6468 * Should this really be a tailq? As it is we will process the 6469 * newest first :-0 6470 */ 6471 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6472 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6473 (struct sctp_inpcb *)NULL, 6474 (struct sctp_tcb *)NULL, 6475 (struct sctp_nets *)NULL); 6476 SCTP_WQ_ADDR_UNLOCK(); 6477 return (0); 6478 } 6479 6480 int 6481 sctp_soreceive(struct socket *so, 6482 struct sockaddr **psa, 6483 struct uio *uio, 6484 struct mbuf **mp0, 6485 struct mbuf **controlp, 6486 int *flagsp) 6487 { 6488 int error, fromlen; 6489 uint8_t sockbuf[256]; 6490 struct sockaddr *from; 6491 struct sctp_extrcvinfo sinfo; 6492 int filling_sinfo = 1; 6493 int flags; 6494 struct sctp_inpcb *inp; 6495 6496 inp = (struct sctp_inpcb *)so->so_pcb; 6497 /* pickup the assoc we are reading from */ 6498 if (inp == NULL) { 6499 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6500 return (EINVAL); 6501 } 6502 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6503 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6504 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6505 (controlp == NULL)) { 6506 /* user does not want the sndrcv ctl */ 6507 filling_sinfo = 0; 6508 } 6509 if (psa) { 6510 from = (struct sockaddr *)sockbuf; 6511 fromlen = sizeof(sockbuf); 6512 from->sa_len = 0; 6513 } else { 6514 from = NULL; 6515 fromlen = 0; 6516 } 6517 6518 if (filling_sinfo) { 6519 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6520 } 6521 if (flagsp != NULL) { 6522 flags = *flagsp; 6523 } else { 6524 flags = 0; 6525 } 6526 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6527 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6528 if (flagsp != NULL) { 6529 *flagsp = flags; 6530 } 6531 if (controlp != NULL) { 6532 /* copy back the sinfo in a CMSG format */ 6533 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6534 *controlp = sctp_build_ctl_nchunk(inp, 6535 (struct sctp_sndrcvinfo *)&sinfo); 6536 } else { 6537 *controlp = NULL; 6538 } 6539 } 6540 if (psa) { 6541 /* copy back the address info */ 6542 if (from && from->sa_len) { 6543 *psa = sodupsockaddr(from, M_NOWAIT); 6544 } else { 6545 *psa = NULL; 6546 } 6547 } 6548 return (error); 6549 } 6550 6551 int 6552 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6553 int totaddr, int *error) 6554 { 6555 int added = 0; 6556 int i; 6557 struct sctp_inpcb *inp; 6558 struct sockaddr *sa; 6559 size_t incr = 0; 6560 #ifdef INET 6561 struct sockaddr_in *sin; 6562 #endif 6563 #ifdef INET6 6564 struct sockaddr_in6 *sin6; 6565 #endif 6566 6567 sa = addr; 6568 inp = 
stcb->sctp_ep; 6569 *error = 0; 6570 for (i = 0; i < totaddr; i++) { 6571 switch (sa->sa_family) { 6572 #ifdef INET 6573 case AF_INET: 6574 incr = sizeof(struct sockaddr_in); 6575 sin = (struct sockaddr_in *)sa; 6576 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6577 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6578 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6579 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6580 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6581 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6582 *error = EINVAL; 6583 goto out_now; 6584 } 6585 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6586 SCTP_DONOT_SETSCOPE, 6587 SCTP_ADDR_IS_CONFIRMED)) { 6588 /* assoc gone no un-lock */ 6589 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6590 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6591 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6592 *error = ENOBUFS; 6593 goto out_now; 6594 } 6595 added++; 6596 break; 6597 #endif 6598 #ifdef INET6 6599 case AF_INET6: 6600 incr = sizeof(struct sockaddr_in6); 6601 sin6 = (struct sockaddr_in6 *)sa; 6602 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6603 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6604 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6605 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6606 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6607 *error = EINVAL; 6608 goto out_now; 6609 } 6610 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6611 SCTP_DONOT_SETSCOPE, 6612 SCTP_ADDR_IS_CONFIRMED)) { 6613 /* assoc gone no un-lock */ 6614 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6615 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6616 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6617 *error = ENOBUFS; 6618 goto out_now; 6619 } 6620 added++; 6621 break; 6622 #endif 6623 default: 6624 break; 6625 } 6626 sa = (struct sockaddr *)((caddr_t)sa + incr); 6627 } 6628 out_now: 6629 return (added); 6630 } 6631 6632 int 6633 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6634 unsigned int totaddr, 6635 unsigned int *num_v4, unsigned int *num_v6, 6636 unsigned int limit) 6637 { 6638 struct sockaddr *sa; 6639 struct sctp_tcb *stcb; 6640 unsigned int incr, at, i; 6641 6642 at = 0; 6643 sa = addr; 6644 *num_v6 = *num_v4 = 0; 6645 /* account and validate addresses */ 6646 if (totaddr == 0) { 6647 return (EINVAL); 6648 } 6649 for (i = 0; i < totaddr; i++) { 6650 if (at + sizeof(struct sockaddr) > limit) { 6651 return (EINVAL); 6652 } 6653 switch (sa->sa_family) { 6654 #ifdef INET 6655 case AF_INET: 6656 incr = (unsigned int)sizeof(struct sockaddr_in); 6657 if (sa->sa_len != incr) { 6658 return (EINVAL); 6659 } 6660 (*num_v4) += 1; 6661 break; 6662 #endif 6663 #ifdef INET6 6664 case AF_INET6: 6665 { 6666 struct sockaddr_in6 *sin6; 6667 6668 sin6 = (struct sockaddr_in6 *)sa; 6669 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6670 /* Must be non-mapped for connectx */ 6671 return (EINVAL); 6672 } 6673 incr = (unsigned int)sizeof(struct sockaddr_in6); 6674 if (sa->sa_len != incr) { 6675 return (EINVAL); 6676 } 6677 (*num_v6) += 1; 6678 break; 6679 } 6680 #endif 6681 default: 6682 return (EINVAL); 6683 } 6684 if ((at + incr) > limit) { 6685 return (EINVAL); 6686 } 6687 SCTP_INP_INCR_REF(inp); 6688 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6689 if (stcb != NULL) { 6690 SCTP_TCB_UNLOCK(stcb); 6691 return (EALREADY); 6692 } else { 6693 SCTP_INP_DECR_REF(inp); 6694 } 6695 at += incr; 6696 sa = (struct sockaddr *)((caddr_t)sa + 
incr); 6697 } 6698 return (0); 6699 } 6700 6701 /* 6702 * sctp_bindx(ADD) for one address. 6703 * assumes all arguments are valid/checked by caller. 6704 */ 6705 void 6706 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6707 struct sockaddr *sa, uint32_t vrf_id, int *error, 6708 void *p) 6709 { 6710 #if defined(INET) && defined(INET6) 6711 struct sockaddr_in sin; 6712 #endif 6713 #ifdef INET6 6714 struct sockaddr_in6 *sin6; 6715 #endif 6716 #ifdef INET 6717 struct sockaddr_in *sinp; 6718 #endif 6719 struct sockaddr *addr_to_use; 6720 struct sctp_inpcb *lep; 6721 uint16_t port; 6722 6723 /* see if we're bound all already! */ 6724 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6725 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6726 *error = EINVAL; 6727 return; 6728 } 6729 switch (sa->sa_family) { 6730 #ifdef INET6 6731 case AF_INET6: 6732 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6733 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6734 *error = EINVAL; 6735 return; 6736 } 6737 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6738 /* can only bind v6 on PF_INET6 sockets */ 6739 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6740 *error = EINVAL; 6741 return; 6742 } 6743 sin6 = (struct sockaddr_in6 *)sa; 6744 port = sin6->sin6_port; 6745 #ifdef INET 6746 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6747 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6748 SCTP_IPV6_V6ONLY(inp)) { 6749 /* can't bind v4-mapped on PF_INET sockets */ 6750 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6751 *error = EINVAL; 6752 return; 6753 } 6754 in6_sin6_2_sin(&sin, sin6); 6755 addr_to_use = (struct sockaddr *)&sin; 6756 } else { 6757 addr_to_use = sa; 6758 } 6759 #else 6760 addr_to_use = sa; 6761 #endif 6762 break; 6763 #endif 6764 #ifdef INET 6765 case AF_INET: 6766 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6767 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6768 *error = EINVAL; 6769 return; 6770 } 6771 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6772 SCTP_IPV6_V6ONLY(inp)) { 6773 /* can't bind v4 on PF_INET sockets */ 6774 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6775 *error = EINVAL; 6776 return; 6777 } 6778 sinp = (struct sockaddr_in *)sa; 6779 port = sinp->sin_port; 6780 addr_to_use = sa; 6781 break; 6782 #endif 6783 default: 6784 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6785 *error = EINVAL; 6786 return; 6787 } 6788 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6789 if (p == NULL) { 6790 /* Can't get proc for Net/Open BSD */ 6791 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6792 *error = EINVAL; 6793 return; 6794 } 6795 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6796 return; 6797 } 6798 /* Validate the incoming port. */ 6799 if ((port != 0) && (port != inp->sctp_lport)) { 6800 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6801 *error = EINVAL; 6802 return; 6803 } 6804 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6805 if (lep == NULL) { 6806 /* add the address */ 6807 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6808 SCTP_ADD_IP_ADDRESS, vrf_id); 6809 } else { 6810 if (lep != inp) { 6811 *error = EADDRINUSE; 6812 } 6813 SCTP_INP_DECR_REF(lep); 6814 } 6815 } 6816 6817 /* 6818 * sctp_bindx(DELETE) for one address. 6819 * assumes all arguments are valid/checked by caller. 
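A v4-mapped IPv6 address is converted to a plain IPv4 address before the delete request is handed to sctp_addr_mgmt_ep_sa().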
6820 */ 6821 void 6822 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6823 struct sockaddr *sa, uint32_t vrf_id, int *error) 6824 { 6825 struct sockaddr *addr_to_use; 6826 #if defined(INET) && defined(INET6) 6827 struct sockaddr_in6 *sin6; 6828 struct sockaddr_in sin; 6829 #endif 6830 6831 /* see if we're bound all already! */ 6832 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6833 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6834 *error = EINVAL; 6835 return; 6836 } 6837 switch (sa->sa_family) { 6838 #ifdef INET6 6839 case AF_INET6: 6840 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6841 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6842 *error = EINVAL; 6843 return; 6844 } 6845 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6846 /* can only bind v6 on PF_INET6 sockets */ 6847 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6848 *error = EINVAL; 6849 return; 6850 } 6851 #ifdef INET 6852 sin6 = (struct sockaddr_in6 *)sa; 6853 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6854 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6855 SCTP_IPV6_V6ONLY(inp)) { 6856 /* can't bind mapped-v4 on PF_INET sockets */ 6857 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6858 *error = EINVAL; 6859 return; 6860 } 6861 in6_sin6_2_sin(&sin, sin6); 6862 addr_to_use = (struct sockaddr *)&sin; 6863 } else { 6864 addr_to_use = sa; 6865 } 6866 #else 6867 addr_to_use = sa; 6868 #endif 6869 break; 6870 #endif 6871 #ifdef INET 6872 case AF_INET: 6873 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6874 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6875 *error = EINVAL; 6876 return; 6877 } 6878 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6879 SCTP_IPV6_V6ONLY(inp)) { 6880 /* can't bind v4 on PF_INET sockets */ 6881 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6882 *error = EINVAL; 6883 return; 6884 } 6885 addr_to_use = sa; 6886 break; 6887 #endif 6888 default: 6889 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6890 *error = EINVAL; 6891 return; 6892 } 6893 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6894 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6895 vrf_id); 6896 } 6897 6898 /* 6899 * returns the valid local address count for an assoc, taking into account 6900 * all scoping rules 6901 */ 6902 int 6903 sctp_local_addr_count(struct sctp_tcb *stcb) 6904 { 6905 int loopback_scope; 6906 #if defined(INET) 6907 int ipv4_local_scope, ipv4_addr_legal; 6908 #endif 6909 #if defined(INET6) 6910 int local_scope, site_scope, ipv6_addr_legal; 6911 #endif 6912 struct sctp_vrf *vrf; 6913 struct sctp_ifn *sctp_ifn; 6914 struct sctp_ifa *sctp_ifa; 6915 int count = 0; 6916 6917 /* Turn on all the appropriate scopes */ 6918 loopback_scope = stcb->asoc.scope.loopback_scope; 6919 #if defined(INET) 6920 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6921 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6922 #endif 6923 #if defined(INET6) 6924 local_scope = stcb->asoc.scope.local_scope; 6925 site_scope = stcb->asoc.scope.site_scope; 6926 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6927 #endif 6928 SCTP_IPI_ADDR_RLOCK(); 6929 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6930 if (vrf == NULL) { 6931 /* no vrf, no addresses */ 6932 SCTP_IPI_ADDR_RUNLOCK(); 6933 return (0); 6934 } 6935 6936 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6937 /* 6938 * bound all case: go through all ifns on the vrf 6939 */ 6940 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6941 if ((loopback_scope == 0) && 6942 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6943 continue; 6944 } 6945 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6946 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6947 continue; 6948 switch (sctp_ifa->address.sa.sa_family) { 6949 #ifdef INET 6950 case AF_INET: 6951 if (ipv4_addr_legal) { 6952 struct sockaddr_in *sin; 6953 6954 sin = &sctp_ifa->address.sin; 6955 if (sin->sin_addr.s_addr == 0) { 6956 /* 6957 * skip unspecified 6958 * addrs 6959 */ 6960 continue; 6961 } 6962 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 6963 &sin->sin_addr) != 0) { 6964 continue; 6965 } 6966 if ((ipv4_local_scope == 0) && 6967 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 6968 continue; 6969 } 6970 /* count this one */ 6971 count++; 6972 } else { 6973 continue; 6974 } 6975 break; 6976 #endif 6977 #ifdef INET6 6978 case AF_INET6: 6979 if (ipv6_addr_legal) { 6980 struct sockaddr_in6 *sin6; 6981 6982 sin6 = &sctp_ifa->address.sin6; 6983 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 6984 continue; 6985 } 6986 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 6987 &sin6->sin6_addr) != 0) { 6988 continue; 6989 } 6990 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 6991 if (local_scope == 0) 6992 continue; 6993 if (sin6->sin6_scope_id == 0) { 6994 if (sa6_recoverscope(sin6) != 0) 6995 /* 6996 * 6997 * bad 6998 * link 6999 * 7000 * local 7001 * 7002 * address 7003 */ 7004 continue; 7005 } 7006 } 7007 if ((site_scope == 0) && 7008 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7009 continue; 7010 } 7011 /* count this one */ 7012 count++; 7013 } 7014 break; 7015 #endif 7016 default: 7017 /* TSNH */ 7018 break; 7019 } 7020 } 7021 } 7022 } else { 7023 /* 7024 * subset bound case 7025 */ 7026 struct sctp_laddr *laddr; 7027 7028 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7029 sctp_nxt_addr) { 7030 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7031 continue; 7032 } 7033 /* count this one */ 7034 count++; 7035 } 7036 } 7037 SCTP_IPI_ADDR_RUNLOCK(); 7038 return (count); 7039 } 7040 7041 #if defined(SCTP_LOCAL_TRACE_BUF) 7042 7043 void 7044 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7045 { 7046 uint32_t saveindex, newindex; 7047 7048 do { 7049 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7050 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7051 newindex = 1; 7052 } else { 7053 newindex = saveindex + 1; 7054 } 7055 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7056 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7057 saveindex = 0; 7058 } 7059 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7060 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7061 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7062 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7063 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7064 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7065 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7066 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7067 } 7068 7069 #endif 7070 static void 7071 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7072 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7073 { 7074 struct ip *iph; 7075 #ifdef INET6 7076 struct ip6_hdr *ip6; 7077 #endif 7078 struct mbuf *sp, *last; 7079 struct udphdr *uhdr; 7080 uint16_t port; 7081 7082 if ((m->m_flags & M_PKTHDR) == 0) { 7083 /* Can't handle one that is not a pkt hdr */ 7084 goto out; 7085 } 7086 /* Pull the src port */ 7087 iph = mtod(m, struct ip *); 7088 uhdr = (struct udphdr *)((caddr_t)iph + off); 7089 port = uhdr->uh_sport; 7090 /* 7091 * Split out the mbuf chain. Leave the IP header in m, place the 7092 * rest in the sp. 7093 */ 7094 sp = m_split(m, off, M_NOWAIT); 7095 if (sp == NULL) { 7096 /* Gak, drop packet, we can't do a split */ 7097 goto out; 7098 } 7099 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7100 /* Gak, packet can't have an SCTP header in it - too small */ 7101 m_freem(sp); 7102 goto out; 7103 } 7104 /* Now pull up the UDP header and SCTP header together */ 7105 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7106 if (sp == NULL) { 7107 /* Gak pullup failed */ 7108 goto out; 7109 } 7110 /* Trim out the UDP header */ 7111 m_adj(sp, sizeof(struct udphdr)); 7112 7113 /* Now reconstruct the mbuf chain */ 7114 for (last = m; last->m_next; last = last->m_next); 7115 last->m_next = sp; 7116 m->m_pkthdr.len += sp->m_pkthdr.len; 7117 /* 7118 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7119 * checksum and it was valid. Since CSUM_DATA_VALID == 7120 * CSUM_SCTP_VALID this would imply that the HW also verified the 7121 * SCTP checksum. Therefore, clear the bit. 
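Clearing it forces the SCTP input path to verify the CRC32c itself rather than trusting a flag that only covered the outer UDP checksum.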
7122 */ 7123 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7124 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7125 m->m_pkthdr.len, 7126 if_name(m->m_pkthdr.rcvif), 7127 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7128 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7129 iph = mtod(m, struct ip *); 7130 switch (iph->ip_v) { 7131 #ifdef INET 7132 case IPVERSION: 7133 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7134 sctp_input_with_port(m, off, port); 7135 break; 7136 #endif 7137 #ifdef INET6 7138 case IPV6_VERSION >> 4: 7139 ip6 = mtod(m, struct ip6_hdr *); 7140 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7141 sctp6_input_with_port(&m, &off, port); 7142 break; 7143 #endif 7144 default: 7145 goto out; 7146 break; 7147 } 7148 return; 7149 out: 7150 m_freem(m); 7151 } 7152 7153 #ifdef INET 7154 static void 7155 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7156 { 7157 struct ip *outer_ip, *inner_ip; 7158 struct sctphdr *sh; 7159 struct icmp *icmp; 7160 struct udphdr *udp; 7161 struct sctp_inpcb *inp; 7162 struct sctp_tcb *stcb; 7163 struct sctp_nets *net; 7164 struct sctp_init_chunk *ch; 7165 struct sockaddr_in src, dst; 7166 uint8_t type, code; 7167 7168 inner_ip = (struct ip *)vip; 7169 icmp = (struct icmp *)((caddr_t)inner_ip - 7170 (sizeof(struct icmp) - sizeof(struct ip))); 7171 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7172 if (ntohs(outer_ip->ip_len) < 7173 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7174 return; 7175 } 7176 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7177 sh = (struct sctphdr *)(udp + 1); 7178 memset(&src, 0, sizeof(struct sockaddr_in)); 7179 src.sin_family = AF_INET; 7180 src.sin_len = sizeof(struct sockaddr_in); 7181 src.sin_port = sh->src_port; 7182 src.sin_addr = inner_ip->ip_src; 7183 memset(&dst, 0, sizeof(struct sockaddr_in)); 7184 dst.sin_family = AF_INET; 7185 dst.sin_len = sizeof(struct sockaddr_in); 7186 dst.sin_port = sh->dest_port; 7187 dst.sin_addr = inner_ip->ip_dst; 7188 /* 7189 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7190 * holds our local endpoint address. Thus we reverse the dst and the 7191 * src in the lookup. 7192 */ 7193 inp = NULL; 7194 net = NULL; 7195 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7196 (struct sockaddr *)&src, 7197 &inp, &net, 1, 7198 SCTP_DEFAULT_VRFID); 7199 if ((stcb != NULL) && 7200 (net != NULL) && 7201 (inp != NULL)) { 7202 /* Check the UDP port numbers */ 7203 if ((udp->uh_dport != net->port) || 7204 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7205 SCTP_TCB_UNLOCK(stcb); 7206 return; 7207 } 7208 /* Check the verification tag */ 7209 if (ntohl(sh->v_tag) != 0) { 7210 /* 7211 * This must be the verification tag used for 7212 * sending out packets. We don't consider packets 7213 * reflecting the verification tag. 7214 */ 7215 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7216 SCTP_TCB_UNLOCK(stcb); 7217 return; 7218 } 7219 } else { 7220 if (ntohs(outer_ip->ip_len) >= 7221 sizeof(struct ip) + 7222 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7223 /* 7224 * In this case we can check if we got an 7225 * INIT chunk and if the initiate tag 7226 * matches. 
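(An INIT is sent with a zero verification tag, so the initiate tag inside the chunk is the only value that can be matched against our own vtag.)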
7227 */ 7228 ch = (struct sctp_init_chunk *)(sh + 1); 7229 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7230 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7231 SCTP_TCB_UNLOCK(stcb); 7232 return; 7233 } 7234 } else { 7235 SCTP_TCB_UNLOCK(stcb); 7236 return; 7237 } 7238 } 7239 type = icmp->icmp_type; 7240 code = icmp->icmp_code; 7241 if ((type == ICMP_UNREACH) && 7242 (code == ICMP_UNREACH_PORT)) { 7243 code = ICMP_UNREACH_PROTOCOL; 7244 } 7245 sctp_notify(inp, stcb, net, type, code, 7246 ntohs(inner_ip->ip_len), 7247 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7248 } else { 7249 if ((stcb == NULL) && (inp != NULL)) { 7250 /* reduce ref-count */ 7251 SCTP_INP_WLOCK(inp); 7252 SCTP_INP_DECR_REF(inp); 7253 SCTP_INP_WUNLOCK(inp); 7254 } 7255 if (stcb) { 7256 SCTP_TCB_UNLOCK(stcb); 7257 } 7258 } 7259 return; 7260 } 7261 #endif 7262 7263 #ifdef INET6 7264 static void 7265 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7266 { 7267 struct ip6ctlparam *ip6cp; 7268 struct sctp_inpcb *inp; 7269 struct sctp_tcb *stcb; 7270 struct sctp_nets *net; 7271 struct sctphdr sh; 7272 struct udphdr udp; 7273 struct sockaddr_in6 src, dst; 7274 uint8_t type, code; 7275 7276 ip6cp = (struct ip6ctlparam *)d; 7277 /* 7278 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7279 */ 7280 if (ip6cp->ip6c_m == NULL) { 7281 return; 7282 } 7283 /* 7284 * Check if we can safely examine the ports and the verification tag 7285 * of the SCTP common header. 7286 */ 7287 if (ip6cp->ip6c_m->m_pkthdr.len < 7288 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7289 return; 7290 } 7291 /* Copy out the UDP header. */ 7292 memset(&udp, 0, sizeof(struct udphdr)); 7293 m_copydata(ip6cp->ip6c_m, 7294 ip6cp->ip6c_off, 7295 sizeof(struct udphdr), 7296 (caddr_t)&udp); 7297 /* Copy out the port numbers and the verification tag. */ 7298 memset(&sh, 0, sizeof(struct sctphdr)); 7299 m_copydata(ip6cp->ip6c_m, 7300 ip6cp->ip6c_off + sizeof(struct udphdr), 7301 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7302 (caddr_t)&sh); 7303 memset(&src, 0, sizeof(struct sockaddr_in6)); 7304 src.sin6_family = AF_INET6; 7305 src.sin6_len = sizeof(struct sockaddr_in6); 7306 src.sin6_port = sh.src_port; 7307 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7308 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7309 return; 7310 } 7311 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7312 dst.sin6_family = AF_INET6; 7313 dst.sin6_len = sizeof(struct sockaddr_in6); 7314 dst.sin6_port = sh.dest_port; 7315 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7316 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7317 return; 7318 } 7319 inp = NULL; 7320 net = NULL; 7321 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7322 (struct sockaddr *)&src, 7323 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7324 if ((stcb != NULL) && 7325 (net != NULL) && 7326 (inp != NULL)) { 7327 /* Check the UDP port numbers */ 7328 if ((udp.uh_dport != net->port) || 7329 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7330 SCTP_TCB_UNLOCK(stcb); 7331 return; 7332 } 7333 /* Check the verification tag */ 7334 if (ntohl(sh.v_tag) != 0) { 7335 /* 7336 * This must be the verification tag used for 7337 * sending out packets. We don't consider packets 7338 * reflecting the verification tag. 
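In other words, the reflected tag must equal the peer's vtag that we place in outgoing packets; anything else is ignored.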
7339 */ 7340 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7341 SCTP_TCB_UNLOCK(stcb); 7342 return; 7343 } 7344 } else { 7345 if (ip6cp->ip6c_m->m_pkthdr.len >= 7346 ip6cp->ip6c_off + sizeof(struct udphdr) + 7347 sizeof(struct sctphdr) + 7348 sizeof(struct sctp_chunkhdr) + 7349 offsetof(struct sctp_init, a_rwnd)) { 7350 /* 7351 * In this case we can check if we got an 7352 * INIT chunk and if the initiate tag 7353 * matches. 7354 */ 7355 uint32_t initiate_tag; 7356 uint8_t chunk_type; 7357 7358 m_copydata(ip6cp->ip6c_m, 7359 ip6cp->ip6c_off + 7360 sizeof(struct udphdr) + 7361 sizeof(struct sctphdr), 7362 sizeof(uint8_t), 7363 (caddr_t)&chunk_type); 7364 m_copydata(ip6cp->ip6c_m, 7365 ip6cp->ip6c_off + 7366 sizeof(struct udphdr) + 7367 sizeof(struct sctphdr) + 7368 sizeof(struct sctp_chunkhdr), 7369 sizeof(uint32_t), 7370 (caddr_t)&initiate_tag); 7371 if ((chunk_type != SCTP_INITIATION) || 7372 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7373 SCTP_TCB_UNLOCK(stcb); 7374 return; 7375 } 7376 } else { 7377 SCTP_TCB_UNLOCK(stcb); 7378 return; 7379 } 7380 } 7381 type = ip6cp->ip6c_icmp6->icmp6_type; 7382 code = ip6cp->ip6c_icmp6->icmp6_code; 7383 if ((type == ICMP6_DST_UNREACH) && 7384 (code == ICMP6_DST_UNREACH_NOPORT)) { 7385 type = ICMP6_PARAM_PROB; 7386 code = ICMP6_PARAMPROB_NEXTHEADER; 7387 } 7388 sctp6_notify(inp, stcb, net, type, code, 7389 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7390 } else { 7391 if ((stcb == NULL) && (inp != NULL)) { 7392 /* reduce inp's ref-count */ 7393 SCTP_INP_WLOCK(inp); 7394 SCTP_INP_DECR_REF(inp); 7395 SCTP_INP_WUNLOCK(inp); 7396 } 7397 if (stcb) { 7398 SCTP_TCB_UNLOCK(stcb); 7399 } 7400 } 7401 } 7402 #endif 7403 7404 void 7405 sctp_over_udp_stop(void) 7406 { 7407 /* 7408 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7409 * for writting! 7410 */ 7411 #ifdef INET 7412 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7413 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7414 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7415 } 7416 #endif 7417 #ifdef INET6 7418 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7419 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7420 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7421 } 7422 #endif 7423 } 7424 7425 int 7426 sctp_over_udp_start(void) 7427 { 7428 uint16_t port; 7429 int ret; 7430 #ifdef INET 7431 struct sockaddr_in sin; 7432 #endif 7433 #ifdef INET6 7434 struct sockaddr_in6 sin6; 7435 #endif 7436 /* 7437 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7438 * for writting! 7439 */ 7440 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7441 if (ntohs(port) == 0) { 7442 /* Must have a port set */ 7443 return (EINVAL); 7444 } 7445 #ifdef INET 7446 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7447 /* Already running -- must stop first */ 7448 return (EALREADY); 7449 } 7450 #endif 7451 #ifdef INET6 7452 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7453 /* Already running -- must stop first */ 7454 return (EALREADY); 7455 } 7456 #endif 7457 #ifdef INET 7458 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7459 SOCK_DGRAM, IPPROTO_UDP, 7460 curthread->td_ucred, curthread))) { 7461 sctp_over_udp_stop(); 7462 return (ret); 7463 } 7464 /* Call the special UDP hook. */ 7465 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7466 sctp_recv_udp_tunneled_packet, 7467 sctp_recv_icmp_tunneled_packet, 7468 NULL))) { 7469 sctp_over_udp_stop(); 7470 return (ret); 7471 } 7472 /* Ok, we have a socket, bind it to the port. 

int
sctp_over_udp_start(void)
{
    uint16_t port;
    int ret;
#ifdef INET
    struct sockaddr_in sin;
#endif
#ifdef INET6
    struct sockaddr_in6 sin6;
#endif
    /*
     * This function assumes that the sysctl caller holds
     * sctp_sysctl_info_lock() for writing!
     */
    port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
    if (ntohs(port) == 0) {
        /* Must have a port set */
        return (EINVAL);
    }
#ifdef INET
    if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
        /* Already running -- must stop first */
        return (EALREADY);
    }
#endif
#ifdef INET6
    if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
        /* Already running -- must stop first */
        return (EALREADY);
    }
#endif
#ifdef INET
    if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
        SOCK_DGRAM, IPPROTO_UDP,
        curthread->td_ucred, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Call the special UDP hook. */
    if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
        sctp_recv_udp_tunneled_packet,
        sctp_recv_icmp_tunneled_packet,
        NULL))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Ok, we have a socket, bind it to the port. */
    memset(&sin, 0, sizeof(struct sockaddr_in));
    sin.sin_len = sizeof(struct sockaddr_in);
    sin.sin_family = AF_INET;
    sin.sin_port = htons(port);
    if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
        (struct sockaddr *)&sin, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
#endif
#ifdef INET6
    if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
        SOCK_DGRAM, IPPROTO_UDP,
        curthread->td_ucred, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Call the special UDP hook. */
    if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
        sctp_recv_udp_tunneled_packet,
        sctp_recv_icmp6_tunneled_packet,
        NULL))) {
        sctp_over_udp_stop();
        return (ret);
    }
    /* Ok, we have a socket, bind it to the port. */
    memset(&sin6, 0, sizeof(struct sockaddr_in6));
    sin6.sin6_len = sizeof(struct sockaddr_in6);
    sin6.sin6_family = AF_INET6;
    sin6.sin6_port = htons(port);
    if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
        (struct sockaddr *)&sin6, curthread))) {
        sctp_over_udp_stop();
        return (ret);
    }
#endif
    return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
    if (mtu1 > 0) {
        if (mtu2 > 0) {
            if (mtu3 > 0) {
                return (min(mtu1, min(mtu2, mtu3)));
            } else {
                return (min(mtu1, mtu2));
            }
        } else {
            if (mtu3 > 0) {
                return (min(mtu1, mtu3));
            } else {
                return (mtu1);
            }
        }
    } else {
        if (mtu2 > 0) {
            if (mtu3 > 0) {
                return (min(mtu2, mtu3));
            } else {
                return (mtu2);
            }
        } else {
            return (mtu3);
        }
    }
}

void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
    struct in_conninfo inc;

    memset(&inc, 0, sizeof(struct in_conninfo));
    inc.inc_fibnum = fibnum;
    switch (addr->sa.sa_family) {
#ifdef INET
    case AF_INET:
        inc.inc_faddr = addr->sin.sin_addr;
        break;
#endif
#ifdef INET6
    case AF_INET6:
        inc.inc_flags |= INC_ISIPV6;
        inc.inc6_faddr = addr->sin6.sin6_addr;
        break;
#endif
    default:
        return;
    }
    tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
    struct in_conninfo inc;

    memset(&inc, 0, sizeof(struct in_conninfo));
    inc.inc_fibnum = fibnum;
    switch (addr->sa.sa_family) {
#ifdef INET
    case AF_INET:
        inc.inc_faddr = addr->sin.sin_addr;
        break;
#endif
#ifdef INET6
    case AF_INET6:
        inc.inc_flags |= INC_ISIPV6;
        inc.inc6_faddr = addr->sin6.sin6_addr;
        break;
#endif
    default:
        return (0);
    }
    return ((uint32_t)tcp_hc_getmtu(&inc));
}
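
/*
 * Worked example for the MTU helpers above (illustrative values only):
 *
 *	sctp_min_mtu(1500, 0, 1280) returns 1280, since zero arguments
 *	are ignored; sctp_min_mtu(0, 0, 0) returns 0.
 *
 * sctp_hc_set_mtu() and sctp_hc_get_mtu() translate an SCTP peer address
 * into a struct in_conninfo and defer to TCP's host cache via
 * tcp_hc_updatemtu()/tcp_hc_getmtu(), so path MTU information is shared
 * with TCP for the same destination address and FIB.
 */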

void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
    int old_state = stcb->asoc.state;
#endif

    KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
        ("sctp_set_state: Can't set substate (new_state = %x)",
        new_state));
    stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
    if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
        (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
        (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
        SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
    }
#if defined(KDTRACE_HOOKS)
    if (((old_state & SCTP_STATE_MASK) != new_state) &&
        !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
        (new_state == SCTP_STATE_INUSE))) {
        SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
    }
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
    int old_state = stcb->asoc.state;
#endif

    KASSERT((substate & SCTP_STATE_MASK) == 0,
        ("sctp_add_substate: Can't set state (substate = %x)",
        substate));
    stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
    if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
        ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
        ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
        ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
        SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
    }
#endif
}
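
/*
 * Usage sketch for the two state helpers above (illustrative only; the
 * real callers live elsewhere in the stack): sctp_set_state() replaces
 * the bits covered by SCTP_STATE_MASK and must not be passed substate
 * flags, while sctp_add_substate() ors in flags outside that mask.  For
 * example, a locally initiated shutdown with data still outstanding
 * might first mark the association and later move it:
 *
 *	sctp_add_substate(stcb, SCTP_STATE_SHUTDOWN_PENDING);
 *	...
 *	sctp_set_state(stcb, SCTP_STATE_SHUTDOWN_SENT);
 *
 * Note that entering one of the SHUTDOWN states clears the
 * SHUTDOWN_PENDING substate automatically.
 */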