/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}

void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code the time to be positive and
 * know that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
	uint64_t temp;
	uint32_t ticks;

	if (hz == 1000) {
		ticks = msecs;
	} else {
		temp = (((uint64_t)msecs * hz) + 999) / 1000;
		if (temp > UINT32_MAX) {
			ticks = UINT32_MAX;
		} else {
			ticks = (uint32_t)temp;
		}
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t msecs;

	if (hz == 1000) {
		msecs = ticks;
	} else {
		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
		if (temp > UINT32_MAX) {
			msecs = UINT32_MAX;
		} else {
			msecs = (uint32_t)temp;
		}
	}
	return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
	uint64_t temp;
	uint32_t ticks;

	temp = (uint64_t)secs * hz;
	if (temp > UINT32_MAX) {
		ticks = UINT32_MAX;
	} else {
		ticks = (uint32_t)temp;
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t secs;

	temp = ((uint64_t)ticks + (hz - 1)) / hz;
	if (temp > UINT32_MAX) {
		secs = UINT32_MAX;
	} else {
		secs = (uint32_t)temp;
	}
	return (secs);
}
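
/*
 * Worked example of the round-up conversions above, assuming a kernel
 * running with hz = 100 (the value of hz is only assumed for the example):
 * sctp_msecs_to_ticks(250) = (250 * 100 + 999) / 1000 = 25 ticks, and
 * sctp_msecs_to_ticks(5) = (5 * 100 + 999) / 1000 = 1 tick, so any positive
 * number of milliseconds maps to at least one tick. The reverse direction
 * also rounds up: sctp_ticks_to_msecs(25) = (25 * 1000 + 99) / 100 = 250.
 */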

/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};

/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4 smaller
 * or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	val &= 0xfffffffc;
	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}
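
/*
 * Worked example against the table above: for a typical Ethernet MTU of
 * 1500, sctp_get_prev_mtu(1500) returns 1492 and sctp_get_next_mtu(1500)
 * returns 1536; a value below the smallest table entry, e.g. 60, is simply
 * returned rounded down to a multiple of 4.
 */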

void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use a random selection process to
	 * get the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}

int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&inp->sctp_ep);
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimistic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	/* This will need to be adjusted */
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = inp->sctp_frag_point;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}
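
/*
 * Worked example: if, say, 100 additional TSNs have to be covered, the
 * arrays above grow by (100 + 7) / 8 = 13 bytes plus the
 * SCTP_MAPPING_ARRAY_INCR slack, the old contents are copied over and the
 * old arrays are freed.
 */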

static void
sctp_iterator_work(struct sctp_iterator *it)
{
	struct epoch_tracker et;
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

	NET_EPOCH_ENTER(et);
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		NET_EPOCH_EXIT(et);
		return;
	}
select_a_new_ep:
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		    ("%s: stcb %p does not belong to inp %p, but inp %p",
		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}

static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL, /* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Freeing if we are stopping or put back on the
			 * addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				sctp_asconf_iterator_end(asc, 0);
			} else {
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */

void
sctp_timeout_handler(void *t)
{
	struct epoch_tracker et;
	struct timeval tv;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
	int type;
	int i, secret;
	bool did_output, released_asoc_reference;

	/*
	 * If inp, stcb or net are not NULL, then references to these were
	 * added when the timer was started, and must be released before
	 * this function returns.
	 */
	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	NET_EPOCH_ENTER(et);
	did_output = 1;
	released_asoc_reference = false;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	KASSERT(tmr->self == NULL || tmr->self == tmr,
	    ("sctp_timeout_handler: tmr->self corrupted"));
	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
	    ("sctp_timeout_handler: invalid timer type %d", tmr->type));
	type = tmr->type;
	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
	    type, stcb, stcb->sctp_ep));
	tmr->stopped_from = 0xa001;
	if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to CLOSED association.\n",
		    type);
		goto out_decr;
	}
	tmr->stopped_from = 0xa002;
	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to not being active.\n",
		    type);
		goto out_decr;
	}

	tmr->stopped_from = 0xa003;
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/*
		 * Release reference so that association can be freed if
		 * necessary below. This is safe now that we have acquired
		 * the lock.
		 */
		atomic_add_int(&stcb->asoc.refcnt, -1);
		released_asoc_reference = true;
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d handler exiting due to CLOSED association.\n",
			    type);
			goto out;
		}
	} else if (inp != NULL) {
		SCTP_INP_WLOCK(inp);
	} else {
		SCTP_WQ_ADDR_LOCK();
	}

	/* Record in stopped_from which timeout occurred. */
	tmr->stopped_from = type;
	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_SEND:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		did_output = true;
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * Safeguard. If there are chunks on the sent queue
			 * somewhere but no timers running, something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
1835 */ 1836 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1837 if (chk->whoTo != NULL) { 1838 break; 1839 } 1840 } 1841 if (chk != NULL) { 1842 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1843 } 1844 } 1845 break; 1846 case SCTP_TIMER_TYPE_INIT: 1847 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1848 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1849 type, inp, stcb, net)); 1850 SCTP_STAT_INCR(sctps_timoinit); 1851 stcb->asoc.timoinit++; 1852 if (sctp_t1init_timer(inp, stcb, net)) { 1853 /* no need to unlock on tcb its gone */ 1854 goto out_decr; 1855 } 1856 did_output = false; 1857 break; 1858 case SCTP_TIMER_TYPE_RECV: 1859 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1860 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1861 type, inp, stcb, net)); 1862 SCTP_STAT_INCR(sctps_timosack); 1863 stcb->asoc.timosack++; 1864 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1865 #ifdef SCTP_AUDITING_ENABLED 1866 sctp_auditing(4, inp, stcb, NULL); 1867 #endif 1868 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1869 did_output = true; 1870 break; 1871 case SCTP_TIMER_TYPE_SHUTDOWN: 1872 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1873 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1874 type, inp, stcb, net)); 1875 SCTP_STAT_INCR(sctps_timoshutdown); 1876 stcb->asoc.timoshutdown++; 1877 if (sctp_shutdown_timer(inp, stcb, net)) { 1878 /* no need to unlock on tcb its gone */ 1879 goto out_decr; 1880 } 1881 #ifdef SCTP_AUDITING_ENABLED 1882 sctp_auditing(4, inp, stcb, net); 1883 #endif 1884 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1885 did_output = true; 1886 break; 1887 case SCTP_TIMER_TYPE_HEARTBEAT: 1888 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1889 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1890 type, inp, stcb, net)); 1891 SCTP_STAT_INCR(sctps_timoheartbeat); 1892 stcb->asoc.timoheartbeat++; 1893 if (sctp_heartbeat_timer(inp, stcb, net)) { 1894 /* no need to unlock on tcb its gone */ 1895 goto out_decr; 1896 } 1897 #ifdef SCTP_AUDITING_ENABLED 1898 sctp_auditing(4, inp, stcb, net); 1899 #endif 1900 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1901 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1902 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1903 did_output = true; 1904 } else { 1905 did_output = false; 1906 } 1907 break; 1908 case SCTP_TIMER_TYPE_COOKIE: 1909 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1910 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1911 type, inp, stcb, net)); 1912 SCTP_STAT_INCR(sctps_timocookie); 1913 stcb->asoc.timocookie++; 1914 if (sctp_cookie_timer(inp, stcb, net)) { 1915 /* no need to unlock on tcb its gone */ 1916 goto out_decr; 1917 } 1918 #ifdef SCTP_AUDITING_ENABLED 1919 sctp_auditing(4, inp, stcb, net); 1920 #endif 1921 /* 1922 * We consider T3 and Cookie timer pretty much the same with 1923 * respect to where from in chunk_output. 
1924 */ 1925 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1926 did_output = true; 1927 break; 1928 case SCTP_TIMER_TYPE_NEWCOOKIE: 1929 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1930 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1931 type, inp, stcb, net)); 1932 SCTP_STAT_INCR(sctps_timosecret); 1933 (void)SCTP_GETTIME_TIMEVAL(&tv); 1934 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1935 inp->sctp_ep.last_secret_number = 1936 inp->sctp_ep.current_secret_number; 1937 inp->sctp_ep.current_secret_number++; 1938 if (inp->sctp_ep.current_secret_number >= 1939 SCTP_HOW_MANY_SECRETS) { 1940 inp->sctp_ep.current_secret_number = 0; 1941 } 1942 secret = (int)inp->sctp_ep.current_secret_number; 1943 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1944 inp->sctp_ep.secret_key[secret][i] = 1945 sctp_select_initial_TSN(&inp->sctp_ep); 1946 } 1947 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1948 did_output = false; 1949 break; 1950 case SCTP_TIMER_TYPE_PATHMTURAISE: 1951 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1952 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1953 type, inp, stcb, net)); 1954 SCTP_STAT_INCR(sctps_timopathmtu); 1955 sctp_pathmtu_timer(inp, stcb, net); 1956 did_output = false; 1957 break; 1958 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1959 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1960 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1961 type, inp, stcb, net)); 1962 if (sctp_shutdownack_timer(inp, stcb, net)) { 1963 /* no need to unlock on tcb its gone */ 1964 goto out_decr; 1965 } 1966 SCTP_STAT_INCR(sctps_timoshutdownack); 1967 stcb->asoc.timoshutdownack++; 1968 #ifdef SCTP_AUDITING_ENABLED 1969 sctp_auditing(4, inp, stcb, net); 1970 #endif 1971 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1972 did_output = true; 1973 break; 1974 case SCTP_TIMER_TYPE_ASCONF: 1975 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1976 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1977 type, inp, stcb, net)); 1978 SCTP_STAT_INCR(sctps_timoasconf); 1979 if (sctp_asconf_timer(inp, stcb, net)) { 1980 /* no need to unlock on tcb its gone */ 1981 goto out_decr; 1982 } 1983 #ifdef SCTP_AUDITING_ENABLED 1984 sctp_auditing(4, inp, stcb, net); 1985 #endif 1986 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 1987 did_output = true; 1988 break; 1989 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1990 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1991 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1992 type, inp, stcb, net)); 1993 SCTP_STAT_INCR(sctps_timoshutdownguard); 1994 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 1995 "Shutdown guard timer expired"); 1996 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 1997 did_output = true; 1998 /* no need to unlock on tcb its gone */ 1999 goto out_decr; 2000 case SCTP_TIMER_TYPE_AUTOCLOSE: 2001 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2002 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2003 type, inp, stcb, net)); 2004 SCTP_STAT_INCR(sctps_timoautoclose); 2005 sctp_autoclose_timer(inp, stcb); 2006 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2007 did_output = true; 2008 break; 2009 case SCTP_TIMER_TYPE_STRRESET: 2010 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2011 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2012 type, inp, stcb, net)); 2013 SCTP_STAT_INCR(sctps_timostrmrst); 2014 if 
(sctp_strreset_timer(inp, stcb)) { 2015 /* no need to unlock on tcb its gone */ 2016 goto out_decr; 2017 } 2018 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2019 did_output = true; 2020 break; 2021 case SCTP_TIMER_TYPE_INPKILL: 2022 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2023 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2024 type, inp, stcb, net)); 2025 SCTP_STAT_INCR(sctps_timoinpkill); 2026 /* 2027 * special case, take away our increment since WE are the 2028 * killer 2029 */ 2030 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2031 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2032 SCTP_INP_DECR_REF(inp); 2033 SCTP_INP_WUNLOCK(inp); 2034 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2035 SCTP_CALLED_FROM_INPKILL_TIMER); 2036 inp = NULL; 2037 goto out_no_decr; 2038 case SCTP_TIMER_TYPE_ASOCKILL: 2039 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2040 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2041 type, inp, stcb, net)); 2042 SCTP_STAT_INCR(sctps_timoassockill); 2043 /* Can we free it yet? */ 2044 SCTP_INP_DECR_REF(inp); 2045 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2046 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2047 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2048 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2049 /* 2050 * free asoc, always unlocks (or destroy's) so prevent 2051 * duplicate unlock or unlock of a free mtx :-0 2052 */ 2053 stcb = NULL; 2054 goto out_no_decr; 2055 case SCTP_TIMER_TYPE_ADDR_WQ: 2056 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2057 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2058 type, inp, stcb, net)); 2059 sctp_handle_addr_wq(); 2060 did_output = true; 2061 break; 2062 case SCTP_TIMER_TYPE_PRIM_DELETED: 2063 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2064 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2065 type, inp, stcb, net)); 2066 SCTP_STAT_INCR(sctps_timodelprim); 2067 sctp_delete_prim_timer(inp, stcb); 2068 did_output = false; 2069 break; 2070 default: 2071 #ifdef INVARIANTS 2072 panic("Unknown timer type %d", type); 2073 #else 2074 did_output = false; 2075 goto out; 2076 #endif 2077 } 2078 #ifdef SCTP_AUDITING_ENABLED 2079 sctp_audit_log(0xF1, (uint8_t)type); 2080 if (inp != NULL) 2081 sctp_auditing(5, inp, stcb, net); 2082 #endif 2083 if (did_output && (stcb != NULL)) { 2084 /* 2085 * Now we need to clean up the control chunk chain if an 2086 * ECNE is on it. It must be marked as UNSENT again so next 2087 * call will continue to send it until such time that we get 2088 * a CWR, to remove it. It is, however, less likely that we 2089 * will find a ecn echo on the chain though. 2090 */ 2091 sctp_fix_ecn_echo(&stcb->asoc); 2092 } 2093 out: 2094 if (stcb != NULL) { 2095 SCTP_TCB_UNLOCK(stcb); 2096 } else if (inp != NULL) { 2097 SCTP_INP_WUNLOCK(inp); 2098 } else { 2099 SCTP_WQ_ADDR_UNLOCK(); 2100 } 2101 2102 out_decr: 2103 /* These reference counts were incremented in sctp_timer_start(). */ 2104 if (inp != NULL) { 2105 SCTP_INP_DECR_REF(inp); 2106 } 2107 if ((stcb != NULL) && !released_asoc_reference) { 2108 atomic_add_int(&stcb->asoc.refcnt, -1); 2109 } 2110 if (net != NULL) { 2111 sctp_free_remote_addr(net); 2112 } 2113 out_no_decr: 2114 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2115 CURVNET_RESTORE(); 2116 NET_EPOCH_EXIT(et); 2117 } 2118 2119 /*- 2120 * The following table shows which parameters must be provided 2121 * when calling sctp_timer_start(). 
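 * For example (an illustrative sketch of typical call sites, not a definitive list): sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); starts the retransmission timer toward a destination, while the address work-queue timer takes no pointers at all: sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, NULL, NULL, NULL);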
For parameters not being 2122 * provided, NULL must be used. 2123 * 2124 * |Name |inp |stcb|net | 2125 * |-----------------------------|----|----|----| 2126 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2127 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2128 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2129 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2130 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2131 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2132 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2133 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2134 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2135 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2136 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2137 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2138 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2139 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2140 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2141 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2142 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2143 * 2144 */ 2145 2146 void 2147 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2148 struct sctp_nets *net) 2149 { 2150 struct sctp_timer *tmr; 2151 uint32_t to_ticks; 2152 uint32_t rndval, jitter; 2153 2154 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2155 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2156 t_type, stcb, stcb->sctp_ep)); 2157 tmr = NULL; 2158 to_ticks = 0; 2159 if (stcb != NULL) { 2160 SCTP_TCB_LOCK_ASSERT(stcb); 2161 } else if (inp != NULL) { 2162 SCTP_INP_WLOCK_ASSERT(inp); 2163 } else { 2164 SCTP_WQ_ADDR_LOCK_ASSERT(); 2165 } 2166 if (stcb != NULL) { 2167 /* 2168 * Don't restart timer on association that's about to be 2169 * killed. 2170 */ 2171 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2172 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2173 SCTPDBG(SCTP_DEBUG_TIMER2, 2174 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2175 t_type, inp, stcb, net); 2176 return; 2177 } 2178 /* Don't restart timer on net that's been removed. */ 2179 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2180 SCTPDBG(SCTP_DEBUG_TIMER2, 2181 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2182 t_type, inp, stcb, net); 2183 return; 2184 } 2185 } 2186 switch (t_type) { 2187 case SCTP_TIMER_TYPE_SEND: 2188 /* Here we use the RTO timer. */ 2189 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2190 #ifdef INVARIANTS 2191 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2192 t_type, inp, stcb, net); 2193 #else 2194 return; 2195 #endif 2196 } 2197 tmr = &net->rxt_timer; 2198 if (net->RTO == 0) { 2199 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2200 } else { 2201 to_ticks = sctp_msecs_to_ticks(net->RTO); 2202 } 2203 break; 2204 case SCTP_TIMER_TYPE_INIT: 2205 /* 2206 * Here we use the INIT timer default, usually about 1 2207 * second. 2208 */ 2209 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2210 #ifdef INVARIANTS 2211 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2212 t_type, inp, stcb, net); 2213 #else 2214 return; 2215 #endif 2216 } 2217 tmr = &net->rxt_timer; 2218 if (net->RTO == 0) { 2219 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2220 } else { 2221 to_ticks = sctp_msecs_to_ticks(net->RTO); 2222 } 2223 break; 2224 case SCTP_TIMER_TYPE_RECV: 2225 /* 2226 * Here we use the Delayed-Ack timer value from the inp, 2227 * usually about 200 ms.
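 * (As a rough worked example, assuming the stack default of 200 ms for the delayed_sack_time sysctl and hz = 1000, the sctp_msecs_to_ticks() conversion below comes out to roughly 200 ticks.)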
2228 */ 2229 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2230 #ifdef INVARIANTS 2231 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2232 t_type, inp, stcb, net); 2233 #else 2234 return; 2235 #endif 2236 } 2237 tmr = &stcb->asoc.dack_timer; 2238 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2239 break; 2240 case SCTP_TIMER_TYPE_SHUTDOWN: 2241 /* Here we use the RTO of the destination. */ 2242 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2243 #ifdef INVARIANTS 2244 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2245 t_type, inp, stcb, net); 2246 #else 2247 return; 2248 #endif 2249 } 2250 tmr = &net->rxt_timer; 2251 if (net->RTO == 0) { 2252 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2253 } else { 2254 to_ticks = sctp_msecs_to_ticks(net->RTO); 2255 } 2256 break; 2257 case SCTP_TIMER_TYPE_HEARTBEAT: 2258 /* 2259 * The net is used here so that we can add in the RTO, even 2260 * though we use a different timer. We also add the HB delay 2261 * PLUS a random jitter, which spreads the result over roughly +/- 50% of the base value. 2262 */ 2263 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2264 #ifdef INVARIANTS 2265 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2266 t_type, inp, stcb, net); 2267 #else 2268 return; 2269 #endif 2270 } 2271 if ((net->dest_state & SCTP_ADDR_NOHB) && 2272 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 2273 SCTPDBG(SCTP_DEBUG_TIMER2, 2274 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2275 t_type, inp, stcb, net); 2276 return; 2277 } 2278 tmr = &net->hb_timer; 2279 if (net->RTO == 0) { 2280 to_ticks = stcb->asoc.initial_rto; 2281 } else { 2282 to_ticks = net->RTO; 2283 } 2284 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2285 jitter = rndval % to_ticks; 2286 if (jitter >= (to_ticks >> 1)) { 2287 to_ticks = to_ticks + (jitter - (to_ticks >> 1)); 2288 } else { 2289 to_ticks = to_ticks - jitter; 2290 } 2291 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2292 !(net->dest_state & SCTP_ADDR_PF)) { 2293 to_ticks += net->heart_beat_delay; 2294 } 2295 /* 2296 * Now we must convert to_ticks, which is currently in ms, to 2297 * ticks. 2298 */ 2299 to_ticks = sctp_msecs_to_ticks(to_ticks); 2300 break; 2301 case SCTP_TIMER_TYPE_COOKIE: 2302 /* 2303 * Here we can use the RTO timer from the network since one 2304 * RTT was complete. If a retransmission happened then we 2305 * will be using the RTO initial value. 2306 */ 2307 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2308 #ifdef INVARIANTS 2309 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2310 t_type, inp, stcb, net); 2311 #else 2312 return; 2313 #endif 2314 } 2315 tmr = &net->rxt_timer; 2316 if (net->RTO == 0) { 2317 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2318 } else { 2319 to_ticks = sctp_msecs_to_ticks(net->RTO); 2320 } 2321 break; 2322 case SCTP_TIMER_TYPE_NEWCOOKIE: 2323 /* 2324 * Nothing needed but the endpoint here, usually about 60 2325 * minutes. 2326 */ 2327 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2328 #ifdef INVARIANTS 2329 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2330 t_type, inp, stcb, net); 2331 #else 2332 return; 2333 #endif 2334 } 2335 tmr = &inp->sctp_ep.signature_change; 2336 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2337 break; 2338 case SCTP_TIMER_TYPE_PATHMTURAISE: 2339 /* 2340 * Here we use the value found in the EP for PMTUD, 2341 * usually about 10 minutes.
2342 */ 2343 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2344 #ifdef INVARIANTS 2345 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2346 t_type, inp, stcb, net); 2347 #else 2348 return; 2349 #endif 2350 } 2351 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2352 SCTPDBG(SCTP_DEBUG_TIMER2, 2353 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2354 t_type, inp, stcb, net); 2355 return; 2356 } 2357 tmr = &net->pmtu_timer; 2358 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2359 break; 2360 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2361 /* Here we use the RTO of the destination. */ 2362 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2363 #ifdef INVARIANTS 2364 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2365 t_type, inp, stcb, net); 2366 #else 2367 return; 2368 #endif 2369 } 2370 tmr = &net->rxt_timer; 2371 if (net->RTO == 0) { 2372 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2373 } else { 2374 to_ticks = sctp_msecs_to_ticks(net->RTO); 2375 } 2376 break; 2377 case SCTP_TIMER_TYPE_ASCONF: 2378 /* 2379 * Here the timer comes from the stcb but its value is from 2380 * the net's RTO. 2381 */ 2382 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2383 #ifdef INVARIANTS 2384 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2385 t_type, inp, stcb, net); 2386 #else 2387 return; 2388 #endif 2389 } 2390 tmr = &stcb->asoc.asconf_timer; 2391 if (net->RTO == 0) { 2392 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2393 } else { 2394 to_ticks = sctp_msecs_to_ticks(net->RTO); 2395 } 2396 break; 2397 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2398 /* 2399 * Here we use the endpoint's shutdown guard timer, usually 2400 * about 3 minutes. 2401 */ 2402 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2403 #ifdef INVARIANTS 2404 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2405 t_type, inp, stcb, net); 2406 #else 2407 return; 2408 #endif 2409 } 2410 tmr = &stcb->asoc.shut_guard_timer; 2411 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2412 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2413 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2414 } else { 2415 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2416 } 2417 } else { 2418 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2419 } 2420 break; 2421 case SCTP_TIMER_TYPE_AUTOCLOSE: 2422 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2423 #ifdef INVARIANTS 2424 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2425 t_type, inp, stcb, net); 2426 #else 2427 return; 2428 #endif 2429 } 2430 tmr = &stcb->asoc.autoclose_timer; 2431 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2432 break; 2433 case SCTP_TIMER_TYPE_STRRESET: 2434 /* 2435 * Here the timer comes from the stcb but its value is from 2436 * the net's RTO. 2437 */ 2438 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2439 #ifdef INVARIANTS 2440 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2441 t_type, inp, stcb, net); 2442 #else 2443 return; 2444 #endif 2445 } 2446 tmr = &stcb->asoc.strreset_timer; 2447 if (net->RTO == 0) { 2448 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2449 } else { 2450 to_ticks = sctp_msecs_to_ticks(net->RTO); 2451 } 2452 break; 2453 case SCTP_TIMER_TYPE_INPKILL: 2454 /* 2455 * The inp is set up to die. We re-use the signature_change 2456 * timer since that has stopped and we are in the GONE 2457 * state.
2458 */ 2459 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2460 #ifdef INVARIANTS 2461 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2462 t_type, inp, stcb, net); 2463 #else 2464 return; 2465 #endif 2466 } 2467 tmr = &inp->sctp_ep.signature_change; 2468 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2469 break; 2470 case SCTP_TIMER_TYPE_ASOCKILL: 2471 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2472 #ifdef INVARIANTS 2473 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2474 t_type, inp, stcb, net); 2475 #else 2476 return; 2477 #endif 2478 } 2479 tmr = &stcb->asoc.strreset_timer; 2480 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2481 break; 2482 case SCTP_TIMER_TYPE_ADDR_WQ: 2483 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2484 #ifdef INVARIANTS 2485 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2486 t_type, inp, stcb, net); 2487 #else 2488 return; 2489 #endif 2490 } 2491 /* Only 1 tick away :-) */ 2492 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2493 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2494 break; 2495 case SCTP_TIMER_TYPE_PRIM_DELETED: 2496 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2497 #ifdef INVARIANTS 2498 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2499 t_type, inp, stcb, net); 2500 #else 2501 return; 2502 #endif 2503 } 2504 tmr = &stcb->asoc.delete_prim_timer; 2505 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2506 break; 2507 default: 2508 #ifdef INVARIANTS 2509 panic("Unknown timer type %d", t_type); 2510 #else 2511 return; 2512 #endif 2513 } 2514 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2515 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2516 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2517 /* 2518 * We do NOT allow you to have it already running. If it is, 2519 * we leave the current one up unchanged. 2520 */ 2521 SCTPDBG(SCTP_DEBUG_TIMER2, 2522 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2523 t_type, inp, stcb, net); 2524 return; 2525 } 2526 /* At this point we can proceed. */ 2527 if (t_type == SCTP_TIMER_TYPE_SEND) { 2528 stcb->asoc.num_send_timers_up++; 2529 } 2530 tmr->stopped_from = 0; 2531 tmr->type = t_type; 2532 tmr->ep = (void *)inp; 2533 tmr->tcb = (void *)stcb; 2534 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2535 tmr->net = NULL; 2536 } else { 2537 tmr->net = (void *)net; 2538 } 2539 tmr->self = (void *)tmr; 2540 tmr->vnet = (void *)curvnet; 2541 tmr->ticks = sctp_get_tick_count(); 2542 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2543 SCTPDBG(SCTP_DEBUG_TIMER2, 2544 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2545 t_type, to_ticks, inp, stcb, net); 2546 /* 2547 * If this is a newly scheduled callout, as opposed to a 2548 * rescheduled one, increment relevant reference counts. 2549 */ 2550 if (tmr->ep != NULL) { 2551 SCTP_INP_INCR_REF(inp); 2552 } 2553 if (tmr->tcb != NULL) { 2554 atomic_add_int(&stcb->asoc.refcnt, 1); 2555 } 2556 if (tmr->net != NULL) { 2557 atomic_add_int(&net->ref_count, 1); 2558 } 2559 } else { 2560 /* 2561 * This should not happen, since we checked for pending 2562 * above. 2563 */ 2564 SCTPDBG(SCTP_DEBUG_TIMER2, 2565 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2566 t_type, to_ticks, inp, stcb, net); 2567 } 2568 return; 2569 } 2570 2571 /*- 2572 * The following table shows which parameters must be provided 2573 * when calling sctp_timer_stop(). 
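 * For example, the ASOCKILL handling in sctp_timeout_handler() above stops this timer with sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1); passing NULL for the net it does not use.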
For parameters not being 2574 * provided, NULL must be used. 2575 * 2576 * |Name |inp |stcb|net | 2577 * |-----------------------------|----|----|----| 2578 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2579 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2580 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2581 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2582 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2583 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2584 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2585 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2586 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2587 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2588 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2589 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2590 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2591 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2592 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2593 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2594 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2595 * 2596 */ 2597 2598 void 2599 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2600 struct sctp_nets *net, uint32_t from) 2601 { 2602 struct sctp_timer *tmr; 2603 2604 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2605 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2606 t_type, stcb, stcb->sctp_ep)); 2607 if (stcb != NULL) { 2608 SCTP_TCB_LOCK_ASSERT(stcb); 2609 } else if (inp != NULL) { 2610 SCTP_INP_WLOCK_ASSERT(inp); 2611 } else { 2612 SCTP_WQ_ADDR_LOCK_ASSERT(); 2613 } 2614 tmr = NULL; 2615 switch (t_type) { 2616 case SCTP_TIMER_TYPE_SEND: 2617 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2618 #ifdef INVARIANTS 2619 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2620 t_type, inp, stcb, net); 2621 #else 2622 return; 2623 #endif 2624 } 2625 tmr = &net->rxt_timer; 2626 break; 2627 case SCTP_TIMER_TYPE_INIT: 2628 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2629 #ifdef INVARIANTS 2630 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2631 t_type, inp, stcb, net); 2632 #else 2633 return; 2634 #endif 2635 } 2636 tmr = &net->rxt_timer; 2637 break; 2638 case SCTP_TIMER_TYPE_RECV: 2639 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2640 #ifdef INVARIANTS 2641 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2642 t_type, inp, stcb, net); 2643 #else 2644 return; 2645 #endif 2646 } 2647 tmr = &stcb->asoc.dack_timer; 2648 break; 2649 case SCTP_TIMER_TYPE_SHUTDOWN: 2650 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2651 #ifdef INVARIANTS 2652 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2653 t_type, inp, stcb, net); 2654 #else 2655 return; 2656 #endif 2657 } 2658 tmr = &net->rxt_timer; 2659 break; 2660 case SCTP_TIMER_TYPE_HEARTBEAT: 2661 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2662 #ifdef INVARIANTS 2663 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2664 t_type, inp, stcb, net); 2665 #else 2666 return; 2667 #endif 2668 } 2669 tmr = &net->hb_timer; 2670 break; 2671 case SCTP_TIMER_TYPE_COOKIE: 2672 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2673 #ifdef INVARIANTS 2674 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2675 t_type, inp, stcb, net); 2676 #else 2677 return; 2678 #endif 2679 } 2680 tmr = &net->rxt_timer; 2681 break; 2682 case SCTP_TIMER_TYPE_NEWCOOKIE: 2683 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2684 #ifdef INVARIANTS 2685 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, 
net = %p", 2686 t_type, inp, stcb, net); 2687 #else 2688 return; 2689 #endif 2690 } 2691 tmr = &inp->sctp_ep.signature_change; 2692 break; 2693 case SCTP_TIMER_TYPE_PATHMTURAISE: 2694 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2695 #ifdef INVARIANTS 2696 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2697 t_type, inp, stcb, net); 2698 #else 2699 return; 2700 #endif 2701 } 2702 tmr = &net->pmtu_timer; 2703 break; 2704 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2705 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2706 #ifdef INVARIANTS 2707 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2708 t_type, inp, stcb, net); 2709 #else 2710 return; 2711 #endif 2712 } 2713 tmr = &net->rxt_timer; 2714 break; 2715 case SCTP_TIMER_TYPE_ASCONF: 2716 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2717 #ifdef INVARIANTS 2718 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2719 t_type, inp, stcb, net); 2720 #else 2721 return; 2722 #endif 2723 } 2724 tmr = &stcb->asoc.asconf_timer; 2725 break; 2726 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2727 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2728 #ifdef INVARIANTS 2729 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2730 t_type, inp, stcb, net); 2731 #else 2732 return; 2733 #endif 2734 } 2735 tmr = &stcb->asoc.shut_guard_timer; 2736 break; 2737 case SCTP_TIMER_TYPE_AUTOCLOSE: 2738 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2739 #ifdef INVARIANTS 2740 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2741 t_type, inp, stcb, net); 2742 #else 2743 return; 2744 #endif 2745 } 2746 tmr = &stcb->asoc.autoclose_timer; 2747 break; 2748 case SCTP_TIMER_TYPE_STRRESET: 2749 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2750 #ifdef INVARIANTS 2751 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2752 t_type, inp, stcb, net); 2753 #else 2754 return; 2755 #endif 2756 } 2757 tmr = &stcb->asoc.strreset_timer; 2758 break; 2759 case SCTP_TIMER_TYPE_INPKILL: 2760 /* 2761 * The inp is setup to die. We re-use the signature_chage 2762 * timer since that has stopped and we are in the GONE 2763 * state. 
2764 */ 2765 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2766 #ifdef INVARIANTS 2767 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2768 t_type, inp, stcb, net); 2769 #else 2770 return; 2771 #endif 2772 } 2773 tmr = &inp->sctp_ep.signature_change; 2774 break; 2775 case SCTP_TIMER_TYPE_ASOCKILL: 2776 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2777 #ifdef INVARIANTS 2778 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2779 t_type, inp, stcb, net); 2780 #else 2781 return; 2782 #endif 2783 } 2784 tmr = &stcb->asoc.strreset_timer; 2785 break; 2786 case SCTP_TIMER_TYPE_ADDR_WQ: 2787 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2788 #ifdef INVARIANTS 2789 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2790 t_type, inp, stcb, net); 2791 #else 2792 return; 2793 #endif 2794 } 2795 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2796 break; 2797 case SCTP_TIMER_TYPE_PRIM_DELETED: 2798 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2799 #ifdef INVARIANTS 2800 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2801 t_type, inp, stcb, net); 2802 #else 2803 return; 2804 #endif 2805 } 2806 tmr = &stcb->asoc.delete_prim_timer; 2807 break; 2808 default: 2809 #ifdef INVARIANTS 2810 panic("Unknown timer type %d", t_type); 2811 #else 2812 return; 2813 #endif 2814 } 2815 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2816 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2817 (tmr->type != t_type)) { 2818 /* 2819 * Ok we have a timer that is under joint use. Cookie timer 2820 * per chance with the SEND timer. We therefore are NOT 2821 * running the timer that the caller wants stopped. So just 2822 * return. 2823 */ 2824 SCTPDBG(SCTP_DEBUG_TIMER2, 2825 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2826 t_type, inp, stcb, net); 2827 return; 2828 } 2829 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2830 stcb->asoc.num_send_timers_up--; 2831 if (stcb->asoc.num_send_timers_up < 0) { 2832 stcb->asoc.num_send_timers_up = 0; 2833 } 2834 } 2835 tmr->self = NULL; 2836 tmr->stopped_from = from; 2837 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2838 KASSERT(tmr->ep == inp, 2839 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2840 t_type, inp, tmr->ep)); 2841 KASSERT(tmr->tcb == stcb, 2842 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2843 t_type, stcb, tmr->tcb)); 2844 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2845 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2846 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2847 t_type, net, tmr->net)); 2848 SCTPDBG(SCTP_DEBUG_TIMER2, 2849 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2850 t_type, inp, stcb, net); 2851 /* 2852 * If the timer was actually stopped, decrement reference 2853 * counts that were incremented in sctp_timer_start(). 2854 */ 2855 if (tmr->ep != NULL) { 2856 SCTP_INP_DECR_REF(inp); 2857 tmr->ep = NULL; 2858 } 2859 if (tmr->tcb != NULL) { 2860 atomic_add_int(&stcb->asoc.refcnt, -1); 2861 tmr->tcb = NULL; 2862 } 2863 if (tmr->net != NULL) { 2864 /* 2865 * Can't use net, since it doesn't work for 2866 * SCTP_TIMER_TYPE_ASCONF. 
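 * The ASCONF timer is stopped with net == NULL (see the table above), but a reference to the net whose RTO seeded the timer is still held in tmr->net, so that is the pointer which must be released here.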
2867 */ 2868 sctp_free_remote_addr((struct sctp_nets *)tmr->net); 2869 tmr->net = NULL; 2870 } 2871 } else { 2872 SCTPDBG(SCTP_DEBUG_TIMER2, 2873 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2874 t_type, inp, stcb, net); 2875 } 2876 return; 2877 } 2878 2879 uint32_t 2880 sctp_calculate_len(struct mbuf *m) 2881 { 2882 uint32_t tlen = 0; 2883 struct mbuf *at; 2884 2885 at = m; 2886 while (at) { 2887 tlen += SCTP_BUF_LEN(at); 2888 at = SCTP_BUF_NEXT(at); 2889 } 2890 return (tlen); 2891 } 2892 2893 void 2894 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2895 struct sctp_association *asoc, uint32_t mtu) 2896 { 2897 /* 2898 * Reset the P-MTU size on this association, this involves changing 2899 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2900 * allow the DF flag to be cleared. 2901 */ 2902 struct sctp_tmit_chunk *chk; 2903 unsigned int eff_mtu, ovh; 2904 2905 asoc->smallest_mtu = mtu; 2906 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2907 ovh = SCTP_MIN_OVERHEAD; 2908 } else { 2909 ovh = SCTP_MIN_V4_OVERHEAD; 2910 } 2911 eff_mtu = mtu - ovh; 2912 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2913 if (chk->send_size > eff_mtu) { 2914 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2915 } 2916 } 2917 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2918 if (chk->send_size > eff_mtu) { 2919 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2920 } 2921 } 2922 } 2923 2924 /* 2925 * Given an association and starting time of the current RTT period, update 2926 * RTO in number of msecs. net should point to the current network. 2927 * Return 1, if an RTO update was performed, return 0 if no update was 2928 * performed due to invalid starting point. 2929 */ 2930 2931 int 2932 sctp_calculate_rto(struct sctp_tcb *stcb, 2933 struct sctp_association *asoc, 2934 struct sctp_nets *net, 2935 struct timeval *old, 2936 int rtt_from_sack) 2937 { 2938 struct timeval now; 2939 uint64_t rtt_us; /* RTT in us */ 2940 int32_t rtt; /* RTT in ms */ 2941 uint32_t new_rto; 2942 int first_measure = 0; 2943 2944 /************************/ 2945 /* 1. calculate new RTT */ 2946 /************************/ 2947 /* get the current time */ 2948 if (stcb->asoc.use_precise_time) { 2949 (void)SCTP_GETPTIME_TIMEVAL(&now); 2950 } else { 2951 (void)SCTP_GETTIME_TIMEVAL(&now); 2952 } 2953 if ((old->tv_sec > now.tv_sec) || 2954 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2955 /* The starting point is in the future. */ 2956 return (0); 2957 } 2958 timevalsub(&now, old); 2959 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2960 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2961 /* The RTT is larger than a sane value. */ 2962 return (0); 2963 } 2964 /* store the current RTT in us */ 2965 net->rtt = rtt_us; 2966 /* compute rtt in ms */ 2967 rtt = (int32_t)(net->rtt / 1000); 2968 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2969 /* 2970 * Tell the CC module that a new update has just occurred 2971 * from a sack 2972 */ 2973 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2974 } 2975 /* 2976 * Do we need to determine the lan? We do this only on sacks i.e. 2977 * RTT being determined from data not non-data (HB/INIT->INITACK). 2978 */ 2979 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2980 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2981 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2982 net->lan_type = SCTP_LAN_INTERNET; 2983 } else { 2984 net->lan_type = SCTP_LAN_LOCAL; 2985 } 2986 } 2987 2988 /***************************/ 2989 /* 2. 
update RTTVAR & SRTT */ 2990 /***************************/ 2991 /*- 2992 * Compute the scaled average lastsa and the 2993 * scaled variance lastsv as described in Van Jacobson's 2994 * paper "Congestion Avoidance and Control", Annex A. 2995 * 2996 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2997 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2998 * so new_rto = (lastsa >> SCTP_RTT_SHIFT) + lastsv below corresponds to the RTO = SRTT + 4 * RTTVAR rule of RFC 4960, Section 6.3.1 (lastsv holds the variance already scaled by 1 << SCTP_RTT_VAR_SHIFT). */ 2999 if (net->RTO_measured) { 3000 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 3001 net->lastsa += rtt; 3002 if (rtt < 0) { 3003 rtt = -rtt; 3004 } 3005 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 3006 net->lastsv += rtt; 3007 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3008 rto_logging(net, SCTP_LOG_RTTVAR); 3009 } 3010 } else { 3011 /* First RTO measurement */ 3012 net->RTO_measured = 1; 3013 first_measure = 1; 3014 net->lastsa = rtt << SCTP_RTT_SHIFT; 3015 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3016 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3017 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3018 } 3019 } 3020 if (net->lastsv == 0) { 3021 net->lastsv = SCTP_CLOCK_GRANULARITY; 3022 } 3023 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3024 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3025 (stcb->asoc.sat_network_lockout == 0)) { 3026 stcb->asoc.sat_network = 1; 3027 } else if ((!first_measure) && stcb->asoc.sat_network) { 3028 stcb->asoc.sat_network = 0; 3029 stcb->asoc.sat_network_lockout = 1; 3030 } 3031 /* bound it, per rules C6/C7 in RFC 4960, Section 6.3.1 */ 3032 if (new_rto < stcb->asoc.minrto) { 3033 new_rto = stcb->asoc.minrto; 3034 } 3035 if (new_rto > stcb->asoc.maxrto) { 3036 new_rto = stcb->asoc.maxrto; 3037 } 3038 net->RTO = new_rto; 3039 return (1); 3040 } 3041 3042 /* 3043 * Return a pointer to a contiguous piece of data from the given mbuf chain, 3044 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3045 * one mbuf, a copy is made into 'in_ptr'. The caller must ensure that the buffer 3046 * is at least 'len' bytes in size. Returns NULL if there aren't 'len' bytes in the chain. 3047 */ 3048 caddr_t 3049 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3050 { 3051 uint32_t count; 3052 uint8_t *ptr; 3053 3054 ptr = in_ptr; 3055 if ((off < 0) || (len <= 0)) 3056 return (NULL); 3057 3058 /* find the desired start location */ 3059 while ((m != NULL) && (off > 0)) { 3060 if (off < SCTP_BUF_LEN(m)) 3061 break; 3062 off -= SCTP_BUF_LEN(m); 3063 m = SCTP_BUF_NEXT(m); 3064 } 3065 if (m == NULL) 3066 return (NULL); 3067 3068 /* is the current mbuf large enough (e.g. contiguous)? */ 3069 if ((SCTP_BUF_LEN(m) - off) >= len) { 3070 return (mtod(m, caddr_t)+off); 3071 } else { 3072 /* else, it spans more than one mbuf, so save a temp copy...
*/ 3073 while ((m != NULL) && (len > 0)) { 3074 count = min(SCTP_BUF_LEN(m) - off, len); 3075 memcpy(ptr, mtod(m, caddr_t)+off, count); 3076 len -= count; 3077 ptr += count; 3078 off = 0; 3079 m = SCTP_BUF_NEXT(m); 3080 } 3081 if ((m == NULL) && (len > 0)) 3082 return (NULL); 3083 else 3084 return ((caddr_t)in_ptr); 3085 } 3086 } 3087 3088 struct sctp_paramhdr * 3089 sctp_get_next_param(struct mbuf *m, 3090 int offset, 3091 struct sctp_paramhdr *pull, 3092 int pull_limit) 3093 { 3094 /* This just provides a typed signature to Peter's Pull routine */ 3095 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3096 (uint8_t *)pull)); 3097 } 3098 3099 struct mbuf * 3100 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3101 { 3102 struct mbuf *m_last; 3103 caddr_t dp; 3104 3105 if (padlen > 3) { 3106 return (NULL); 3107 } 3108 if (padlen <= M_TRAILINGSPACE(m)) { 3109 /* 3110 * The easy way. We hope the majority of the time we hit 3111 * here :) 3112 */ 3113 m_last = m; 3114 } else { 3115 /* Hard way we must grow the mbuf chain */ 3116 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3117 if (m_last == NULL) { 3118 return (NULL); 3119 } 3120 SCTP_BUF_LEN(m_last) = 0; 3121 SCTP_BUF_NEXT(m_last) = NULL; 3122 SCTP_BUF_NEXT(m) = m_last; 3123 } 3124 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3125 SCTP_BUF_LEN(m_last) += padlen; 3126 memset(dp, 0, padlen); 3127 return (m_last); 3128 } 3129 3130 struct mbuf * 3131 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3132 { 3133 /* find the last mbuf in chain and pad it */ 3134 struct mbuf *m_at; 3135 3136 if (last_mbuf != NULL) { 3137 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3138 } else { 3139 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3140 if (SCTP_BUF_NEXT(m_at) == NULL) { 3141 return (sctp_add_pad_tombuf(m_at, padval)); 3142 } 3143 } 3144 } 3145 return (NULL); 3146 } 3147 3148 static void 3149 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3150 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked) 3151 { 3152 struct mbuf *m_notify; 3153 struct sctp_assoc_change *sac; 3154 struct sctp_queued_to_read *control; 3155 unsigned int notif_len; 3156 uint16_t abort_len; 3157 unsigned int i; 3158 3159 if (stcb == NULL) { 3160 return; 3161 } 3162 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3163 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3164 if (abort != NULL) { 3165 abort_len = ntohs(abort->ch.chunk_length); 3166 /* 3167 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3168 * contiguous. 3169 */ 3170 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3171 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3172 } 3173 } else { 3174 abort_len = 0; 3175 } 3176 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3177 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3178 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3179 notif_len += abort_len; 3180 } 3181 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3182 if (m_notify == NULL) { 3183 /* Retry with smaller value. 
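That is, fall back to delivering just the base struct sctp_assoc_change without the optional sac_info payload.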
*/ 3184 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3185 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3186 if (m_notify == NULL) { 3187 goto set_error; 3188 } 3189 } 3190 SCTP_BUF_NEXT(m_notify) = NULL; 3191 sac = mtod(m_notify, struct sctp_assoc_change *); 3192 memset(sac, 0, notif_len); 3193 sac->sac_type = SCTP_ASSOC_CHANGE; 3194 sac->sac_flags = 0; 3195 sac->sac_length = sizeof(struct sctp_assoc_change); 3196 sac->sac_state = state; 3197 sac->sac_error = error; 3198 /* XXX verify these stream counts */ 3199 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3200 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3201 sac->sac_assoc_id = sctp_get_associd(stcb); 3202 if (notif_len > sizeof(struct sctp_assoc_change)) { 3203 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3204 i = 0; 3205 if (stcb->asoc.prsctp_supported == 1) { 3206 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3207 } 3208 if (stcb->asoc.auth_supported == 1) { 3209 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3210 } 3211 if (stcb->asoc.asconf_supported == 1) { 3212 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3213 } 3214 if (stcb->asoc.idata_supported == 1) { 3215 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3216 } 3217 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3218 if (stcb->asoc.reconfig_supported == 1) { 3219 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3220 } 3221 sac->sac_length += i; 3222 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3223 memcpy(sac->sac_info, abort, abort_len); 3224 sac->sac_length += abort_len; 3225 } 3226 } 3227 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3228 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3229 0, 0, stcb->asoc.context, 0, 0, 0, 3230 m_notify); 3231 if (control != NULL) { 3232 control->length = SCTP_BUF_LEN(m_notify); 3233 control->spec_flags = M_NOTIFICATION; 3234 /* not that we need this */ 3235 control->tail_mbuf = m_notify; 3236 sctp_add_to_readq(stcb->sctp_ep, stcb, 3237 control, 3238 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3239 so_locked); 3240 } else { 3241 sctp_m_freem(m_notify); 3242 } 3243 } 3244 /* 3245 * For 1-to-1 style sockets, we send up an error when an ABORT 3246 * comes in.
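 * The errno chosen below reflects who tore the association down and how far the setup had progressed: ECONNREFUSED/ECONNRESET when the peer aborted, ETIMEDOUT/ECONNABORTED for locally detected failures.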
3247 */ 3248 set_error: 3249 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3250 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3251 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3252 SOCK_LOCK(stcb->sctp_socket); 3253 if (from_peer) { 3254 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3255 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3256 stcb->sctp_socket->so_error = ECONNREFUSED; 3257 } else { 3258 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3259 stcb->sctp_socket->so_error = ECONNRESET; 3260 } 3261 } else { 3262 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 3263 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 3264 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3265 stcb->sctp_socket->so_error = ETIMEDOUT; 3266 } else { 3267 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3268 stcb->sctp_socket->so_error = ECONNABORTED; 3269 } 3270 } 3271 SOCK_UNLOCK(stcb->sctp_socket); 3272 } 3273 /* Wake ANY sleepers */ 3274 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3275 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3276 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3277 socantrcvmore(stcb->sctp_socket); 3278 } 3279 sorwakeup(stcb->sctp_socket); 3280 sowwakeup(stcb->sctp_socket); 3281 } 3282 3283 static void 3284 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3285 struct sockaddr *sa, uint32_t error, int so_locked) 3286 { 3287 struct mbuf *m_notify; 3288 struct sctp_paddr_change *spc; 3289 struct sctp_queued_to_read *control; 3290 3291 if ((stcb == NULL) || 3292 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3293 /* event not enabled */ 3294 return; 3295 } 3296 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3297 if (m_notify == NULL) 3298 return; 3299 SCTP_BUF_LEN(m_notify) = 0; 3300 spc = mtod(m_notify, struct sctp_paddr_change *); 3301 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3302 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3303 spc->spc_flags = 0; 3304 spc->spc_length = sizeof(struct sctp_paddr_change); 3305 switch (sa->sa_family) { 3306 #ifdef INET 3307 case AF_INET: 3308 #ifdef INET6 3309 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3310 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3311 (struct sockaddr_in6 *)&spc->spc_aaddr); 3312 } else { 3313 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3314 } 3315 #else 3316 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3317 #endif 3318 break; 3319 #endif 3320 #ifdef INET6 3321 case AF_INET6: 3322 { 3323 struct sockaddr_in6 *sin6; 3324 3325 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3326 3327 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3328 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3329 if (sin6->sin6_scope_id == 0) { 3330 /* recover scope_id for user */ 3331 (void)sa6_recoverscope(sin6); 3332 } else { 3333 /* clear embedded scope_id for user */ 3334 in6_clearscope(&sin6->sin6_addr); 3335 } 3336 } 3337 break; 3338 } 3339 #endif 3340 default: 3341 /* TSNH */ 3342 break; 3343 } 3344 spc->spc_state = state; 3345 spc->spc_error = error; 3346 spc->spc_assoc_id = sctp_get_associd(stcb); 3347 3348 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3349 SCTP_BUF_NEXT(m_notify) = NULL; 3350 3351 /* append to socket */ 3352 control = sctp_build_readq_entry(stcb, 
stcb->asoc.primary_destination, 3353 0, 0, stcb->asoc.context, 0, 0, 0, 3354 m_notify); 3355 if (control == NULL) { 3356 /* no memory */ 3357 sctp_m_freem(m_notify); 3358 return; 3359 } 3360 control->length = SCTP_BUF_LEN(m_notify); 3361 control->spec_flags = M_NOTIFICATION; 3362 /* not that we need this */ 3363 control->tail_mbuf = m_notify; 3364 sctp_add_to_readq(stcb->sctp_ep, stcb, 3365 control, 3366 &stcb->sctp_socket->so_rcv, 1, 3367 SCTP_READ_LOCK_NOT_HELD, 3368 so_locked); 3369 } 3370 3371 static void 3372 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3373 struct sctp_tmit_chunk *chk, int so_locked) 3374 { 3375 struct mbuf *m_notify; 3376 struct sctp_send_failed *ssf; 3377 struct sctp_send_failed_event *ssfe; 3378 struct sctp_queued_to_read *control; 3379 struct sctp_chunkhdr *chkhdr; 3380 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3381 3382 if ((stcb == NULL) || 3383 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3384 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3385 /* event not enabled */ 3386 return; 3387 } 3388 3389 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3390 notifhdr_len = sizeof(struct sctp_send_failed_event); 3391 } else { 3392 notifhdr_len = sizeof(struct sctp_send_failed); 3393 } 3394 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3395 if (m_notify == NULL) 3396 /* no space left */ 3397 return; 3398 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3399 if (stcb->asoc.idata_supported) { 3400 chkhdr_len = sizeof(struct sctp_idata_chunk); 3401 } else { 3402 chkhdr_len = sizeof(struct sctp_data_chunk); 3403 } 3404 /* Use some defaults in case we can't access the chunk header */ 3405 if (chk->send_size >= chkhdr_len) { 3406 payload_len = chk->send_size - chkhdr_len; 3407 } else { 3408 payload_len = 0; 3409 } 3410 padding_len = 0; 3411 if (chk->data != NULL) { 3412 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3413 if (chkhdr != NULL) { 3414 chk_len = ntohs(chkhdr->chunk_length); 3415 if ((chk_len >= chkhdr_len) && 3416 (chk->send_size >= chk_len) && 3417 (chk->send_size - chk_len < 4)) { 3418 padding_len = chk->send_size - chk_len; 3419 payload_len = chk->send_size - chkhdr_len - padding_len; 3420 } 3421 } 3422 } 3423 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3424 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3425 memset(ssfe, 0, notifhdr_len); 3426 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3427 if (sent) { 3428 ssfe->ssfe_flags = SCTP_DATA_SENT; 3429 } else { 3430 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3431 } 3432 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3433 ssfe->ssfe_error = error; 3434 /* not exactly what the user sent in, but should be close :) */ 3435 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3436 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3437 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3438 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3439 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3440 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3441 } else { 3442 ssf = mtod(m_notify, struct sctp_send_failed *); 3443 memset(ssf, 0, notifhdr_len); 3444 ssf->ssf_type = SCTP_SEND_FAILED; 3445 if (sent) { 3446 ssf->ssf_flags = SCTP_DATA_SENT; 3447 } else { 3448 ssf->ssf_flags = SCTP_DATA_UNSENT; 3449 } 3450 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3451 ssf->ssf_error = 
error; 3452 /* not exactly what the user sent in, but should be close :) */ 3453 ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3454 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3455 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3456 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3457 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3458 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3459 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3460 } 3461 if (chk->data != NULL) { 3462 /* Trim off the sctp chunk header (it should be there) */ 3463 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3464 m_adj(chk->data, chkhdr_len); 3465 m_adj(chk->data, -padding_len); 3466 sctp_mbuf_crush(chk->data); 3467 chk->send_size -= (chkhdr_len + padding_len); 3468 } 3469 } 3470 SCTP_BUF_NEXT(m_notify) = chk->data; 3471 /* Steal off the mbuf */ 3472 chk->data = NULL; 3473 /* 3474 * For this case, we check the actual socket buffer, since the assoc 3475 * is going away we don't want to overfill the socket buffer for a 3476 * non-reader 3477 */ 3478 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3479 sctp_m_freem(m_notify); 3480 return; 3481 } 3482 /* append to socket */ 3483 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3484 0, 0, stcb->asoc.context, 0, 0, 0, 3485 m_notify); 3486 if (control == NULL) { 3487 /* no memory */ 3488 sctp_m_freem(m_notify); 3489 return; 3490 } 3491 control->length = SCTP_BUF_LEN(m_notify); 3492 control->spec_flags = M_NOTIFICATION; 3493 /* not that we need this */ 3494 control->tail_mbuf = m_notify; 3495 sctp_add_to_readq(stcb->sctp_ep, stcb, 3496 control, 3497 &stcb->sctp_socket->so_rcv, 1, 3498 SCTP_READ_LOCK_NOT_HELD, 3499 so_locked); 3500 } 3501 3502 static void 3503 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3504 struct sctp_stream_queue_pending *sp, int so_locked) 3505 { 3506 struct mbuf *m_notify; 3507 struct sctp_send_failed *ssf; 3508 struct sctp_send_failed_event *ssfe; 3509 struct sctp_queued_to_read *control; 3510 int notifhdr_len; 3511 3512 if ((stcb == NULL) || 3513 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3514 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3515 /* event not enabled */ 3516 return; 3517 } 3518 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3519 notifhdr_len = sizeof(struct sctp_send_failed_event); 3520 } else { 3521 notifhdr_len = sizeof(struct sctp_send_failed); 3522 } 3523 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3524 if (m_notify == NULL) { 3525 /* no space left */ 3526 return; 3527 } 3528 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3529 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3530 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3531 memset(ssfe, 0, notifhdr_len); 3532 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3533 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3534 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3535 ssfe->ssfe_error = error; 3536 /* not exactly what the user sent in, but should be close :) */ 3537 ssfe->ssfe_info.snd_sid = sp->sid; 3538 if (sp->some_taken) { 3539 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3540 } else { 3541 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3542 } 3543 ssfe->ssfe_info.snd_ppid = sp->ppid; 3544 ssfe->ssfe_info.snd_context = sp->context; 3545 ssfe->ssfe_info.snd_assoc_id = 
sctp_get_associd(stcb); 3546 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3547 } else { 3548 ssf = mtod(m_notify, struct sctp_send_failed *); 3549 memset(ssf, 0, notifhdr_len); 3550 ssf->ssf_type = SCTP_SEND_FAILED; 3551 ssf->ssf_flags = SCTP_DATA_UNSENT; 3552 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3553 ssf->ssf_error = error; 3554 /* not exactly what the user sent in, but should be close :) */ 3555 ssf->ssf_info.sinfo_stream = sp->sid; 3556 ssf->ssf_info.sinfo_ssn = 0; 3557 if (sp->some_taken) { 3558 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3559 } else { 3560 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3561 } 3562 ssf->ssf_info.sinfo_ppid = sp->ppid; 3563 ssf->ssf_info.sinfo_context = sp->context; 3564 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3565 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3566 } 3567 SCTP_BUF_NEXT(m_notify) = sp->data; 3568 3569 /* Steal off the mbuf */ 3570 sp->data = NULL; 3571 /* 3572 * For this case, we check the actual socket buffer, since the assoc 3573 * is going away we don't want to overfill the socket buffer for a 3574 * non-reader 3575 */ 3576 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3577 sctp_m_freem(m_notify); 3578 return; 3579 } 3580 /* append to socket */ 3581 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3582 0, 0, stcb->asoc.context, 0, 0, 0, 3583 m_notify); 3584 if (control == NULL) { 3585 /* no memory */ 3586 sctp_m_freem(m_notify); 3587 return; 3588 } 3589 control->length = SCTP_BUF_LEN(m_notify); 3590 control->spec_flags = M_NOTIFICATION; 3591 /* not that we need this */ 3592 control->tail_mbuf = m_notify; 3593 sctp_add_to_readq(stcb->sctp_ep, stcb, 3594 control, 3595 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3596 } 3597 3598 static void 3599 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3600 { 3601 struct mbuf *m_notify; 3602 struct sctp_adaptation_event *sai; 3603 struct sctp_queued_to_read *control; 3604 3605 if ((stcb == NULL) || 3606 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3607 /* event not enabled */ 3608 return; 3609 } 3610 3611 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3612 if (m_notify == NULL) 3613 /* no space left */ 3614 return; 3615 SCTP_BUF_LEN(m_notify) = 0; 3616 sai = mtod(m_notify, struct sctp_adaptation_event *); 3617 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3618 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3619 sai->sai_flags = 0; 3620 sai->sai_length = sizeof(struct sctp_adaptation_event); 3621 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3622 sai->sai_assoc_id = sctp_get_associd(stcb); 3623 3624 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3625 SCTP_BUF_NEXT(m_notify) = NULL; 3626 3627 /* append to socket */ 3628 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3629 0, 0, stcb->asoc.context, 0, 0, 0, 3630 m_notify); 3631 if (control == NULL) { 3632 /* no memory */ 3633 sctp_m_freem(m_notify); 3634 return; 3635 } 3636 control->length = SCTP_BUF_LEN(m_notify); 3637 control->spec_flags = M_NOTIFICATION; 3638 /* not that we need this */ 3639 control->tail_mbuf = m_notify; 3640 sctp_add_to_readq(stcb->sctp_ep, stcb, 3641 control, 3642 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3643 } 3644 3645 /* This always must be called with the read-queue LOCKED in the INP */ 3646 static void 3647 
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3648 uint32_t val, int so_locked) 3649 { 3650 struct mbuf *m_notify; 3651 struct sctp_pdapi_event *pdapi; 3652 struct sctp_queued_to_read *control; 3653 struct sockbuf *sb; 3654 3655 if ((stcb == NULL) || 3656 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3657 /* event not enabled */ 3658 return; 3659 } 3660 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3661 return; 3662 } 3663 3664 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3665 if (m_notify == NULL) 3666 /* no space left */ 3667 return; 3668 SCTP_BUF_LEN(m_notify) = 0; 3669 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3670 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3671 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3672 pdapi->pdapi_flags = 0; 3673 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3674 pdapi->pdapi_indication = error; 3675 pdapi->pdapi_stream = (val >> 16); 3676 pdapi->pdapi_seq = (val & 0x0000ffff); 3677 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3678 3679 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3680 SCTP_BUF_NEXT(m_notify) = NULL; 3681 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3682 0, 0, stcb->asoc.context, 0, 0, 0, 3683 m_notify); 3684 if (control == NULL) { 3685 /* no memory */ 3686 sctp_m_freem(m_notify); 3687 return; 3688 } 3689 control->length = SCTP_BUF_LEN(m_notify); 3690 control->spec_flags = M_NOTIFICATION; 3691 /* not that we need this */ 3692 control->tail_mbuf = m_notify; 3693 sb = &stcb->sctp_socket->so_rcv; 3694 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3695 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3696 } 3697 sctp_sballoc(stcb, sb, m_notify); 3698 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3699 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3700 } 3701 control->end_added = 1; 3702 if (stcb->asoc.control_pdapi) 3703 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3704 else { 3705 /* we really should not see this case */ 3706 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3707 } 3708 if (stcb->sctp_ep && stcb->sctp_socket) { 3709 /* This should always be the case */ 3710 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3711 } 3712 } 3713 3714 static void 3715 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3716 { 3717 struct mbuf *m_notify; 3718 struct sctp_shutdown_event *sse; 3719 struct sctp_queued_to_read *control; 3720 3721 /* 3722 * For TCP model AND UDP connected sockets we will send an error up 3723 * when an SHUTDOWN completes 3724 */ 3725 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3726 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3727 /* mark socket closed for read/write and wakeup! 
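	 * socantsendmore() below disallows further sends; if the
	 * SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT feature is enabled, a
	 * SHUTDOWN_EVENT notification is queued further down, which
	 * wakes any blocked reader.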
*/ 3728 socantsendmore(stcb->sctp_socket); 3729 } 3730 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3731 /* event not enabled */ 3732 return; 3733 } 3734 3735 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3736 if (m_notify == NULL) 3737 /* no space left */ 3738 return; 3739 sse = mtod(m_notify, struct sctp_shutdown_event *); 3740 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3741 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3742 sse->sse_flags = 0; 3743 sse->sse_length = sizeof(struct sctp_shutdown_event); 3744 sse->sse_assoc_id = sctp_get_associd(stcb); 3745 3746 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3747 SCTP_BUF_NEXT(m_notify) = NULL; 3748 3749 /* append to socket */ 3750 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3751 0, 0, stcb->asoc.context, 0, 0, 0, 3752 m_notify); 3753 if (control == NULL) { 3754 /* no memory */ 3755 sctp_m_freem(m_notify); 3756 return; 3757 } 3758 control->length = SCTP_BUF_LEN(m_notify); 3759 control->spec_flags = M_NOTIFICATION; 3760 /* not that we need this */ 3761 control->tail_mbuf = m_notify; 3762 sctp_add_to_readq(stcb->sctp_ep, stcb, 3763 control, 3764 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3765 } 3766 3767 static void 3768 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3769 int so_locked) 3770 { 3771 struct mbuf *m_notify; 3772 struct sctp_sender_dry_event *event; 3773 struct sctp_queued_to_read *control; 3774 3775 if ((stcb == NULL) || 3776 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3777 /* event not enabled */ 3778 return; 3779 } 3780 3781 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3782 if (m_notify == NULL) { 3783 /* no space left */ 3784 return; 3785 } 3786 SCTP_BUF_LEN(m_notify) = 0; 3787 event = mtod(m_notify, struct sctp_sender_dry_event *); 3788 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3789 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3790 event->sender_dry_flags = 0; 3791 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3792 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3793 3794 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3795 SCTP_BUF_NEXT(m_notify) = NULL; 3796 3797 /* append to socket */ 3798 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3799 0, 0, stcb->asoc.context, 0, 0, 0, 3800 m_notify); 3801 if (control == NULL) { 3802 /* no memory */ 3803 sctp_m_freem(m_notify); 3804 return; 3805 } 3806 control->length = SCTP_BUF_LEN(m_notify); 3807 control->spec_flags = M_NOTIFICATION; 3808 /* not that we need this */ 3809 control->tail_mbuf = m_notify; 3810 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3811 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3812 } 3813 3814 void 3815 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3816 { 3817 struct mbuf *m_notify; 3818 struct sctp_queued_to_read *control; 3819 struct sctp_stream_change_event *stradd; 3820 3821 if ((stcb == NULL) || 3822 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3823 /* event not enabled */ 3824 return; 3825 } 3826 if ((stcb->asoc.peer_req_out) && flag) { 3827 /* Peer made the request, don't tell the local user */ 3828 stcb->asoc.peer_req_out = 0; 3829 return; 3830 } 3831 stcb->asoc.peer_req_out = 0; 3832 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3833 if (m_notify == NULL) 3834 /* no space left */ 3835 return; 3836 SCTP_BUF_LEN(m_notify) = 0; 3837 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3838 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3839 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3840 stradd->strchange_flags = flag; 3841 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3842 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3843 stradd->strchange_instrms = numberin; 3844 stradd->strchange_outstrms = numberout; 3845 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3846 SCTP_BUF_NEXT(m_notify) = NULL; 3847 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3848 /* no space */ 3849 sctp_m_freem(m_notify); 3850 return; 3851 } 3852 /* append to socket */ 3853 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3854 0, 0, stcb->asoc.context, 0, 0, 0, 3855 m_notify); 3856 if (control == NULL) { 3857 /* no memory */ 3858 sctp_m_freem(m_notify); 3859 return; 3860 } 3861 control->length = SCTP_BUF_LEN(m_notify); 3862 control->spec_flags = M_NOTIFICATION; 3863 /* not that we need this */ 3864 control->tail_mbuf = m_notify; 3865 sctp_add_to_readq(stcb->sctp_ep, stcb, 3866 control, 3867 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3868 } 3869 3870 void 3871 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3872 { 3873 struct mbuf *m_notify; 3874 struct sctp_queued_to_read *control; 3875 struct sctp_assoc_reset_event *strasoc; 3876 3877 if ((stcb == NULL) || 3878 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3879 /* event not enabled */ 3880 return; 3881 } 3882 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3883 if (m_notify == NULL) 3884 /* no space left */ 3885 return; 3886 SCTP_BUF_LEN(m_notify) = 0; 3887 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3888 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3889 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3890 strasoc->assocreset_flags = flag; 3891 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3892 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3893 strasoc->assocreset_local_tsn = sending_tsn; 3894 strasoc->assocreset_remote_tsn = recv_tsn; 3895 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3896 SCTP_BUF_NEXT(m_notify) = NULL; 3897 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3898 /* no space */ 3899 sctp_m_freem(m_notify); 3900 return; 3901 } 3902 /* append to socket */ 3903 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3904 0, 0, stcb->asoc.context, 0, 0, 0, 3905 m_notify); 3906 if (control == NULL) { 3907 /* no memory */ 3908 sctp_m_freem(m_notify); 3909 return; 3910 } 3911 control->length = SCTP_BUF_LEN(m_notify); 3912 control->spec_flags = M_NOTIFICATION; 3913 /* not that we need this */ 3914 control->tail_mbuf = m_notify; 3915 sctp_add_to_readq(stcb->sctp_ep, stcb, 3916 control, 3917 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3918 } 3919 3920 static void 3921 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3922 int number_entries, uint16_t *list, int flag) 3923 { 3924 struct mbuf *m_notify; 3925 struct sctp_queued_to_read 
*control; 3926 struct sctp_stream_reset_event *strreset; 3927 int len; 3928 3929 if ((stcb == NULL) || 3930 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3931 /* event not enabled */ 3932 return; 3933 } 3934 3935 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3936 if (m_notify == NULL) 3937 /* no space left */ 3938 return; 3939 SCTP_BUF_LEN(m_notify) = 0; 3940 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3941 if (len > M_TRAILINGSPACE(m_notify)) { 3942 /* never enough room */ 3943 sctp_m_freem(m_notify); 3944 return; 3945 } 3946 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3947 memset(strreset, 0, len); 3948 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3949 strreset->strreset_flags = flag; 3950 strreset->strreset_length = len; 3951 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3952 if (number_entries) { 3953 int i; 3954 3955 for (i = 0; i < number_entries; i++) { 3956 strreset->strreset_stream_list[i] = ntohs(list[i]); 3957 } 3958 } 3959 SCTP_BUF_LEN(m_notify) = len; 3960 SCTP_BUF_NEXT(m_notify) = NULL; 3961 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3962 /* no space */ 3963 sctp_m_freem(m_notify); 3964 return; 3965 } 3966 /* append to socket */ 3967 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3968 0, 0, stcb->asoc.context, 0, 0, 0, 3969 m_notify); 3970 if (control == NULL) { 3971 /* no memory */ 3972 sctp_m_freem(m_notify); 3973 return; 3974 } 3975 control->length = SCTP_BUF_LEN(m_notify); 3976 control->spec_flags = M_NOTIFICATION; 3977 /* not that we need this */ 3978 control->tail_mbuf = m_notify; 3979 sctp_add_to_readq(stcb->sctp_ep, stcb, 3980 control, 3981 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3982 } 3983 3984 static void 3985 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3986 { 3987 struct mbuf *m_notify; 3988 struct sctp_remote_error *sre; 3989 struct sctp_queued_to_read *control; 3990 unsigned int notif_len; 3991 uint16_t chunk_len; 3992 3993 if ((stcb == NULL) || 3994 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3995 return; 3996 } 3997 if (chunk != NULL) { 3998 chunk_len = ntohs(chunk->ch.chunk_length); 3999 /* 4000 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 4001 * contiguous. 4002 */ 4003 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 4004 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4005 } 4006 } else { 4007 chunk_len = 0; 4008 } 4009 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4010 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4011 if (m_notify == NULL) { 4012 /* Retry with smaller value. 
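		 * If the notification plus the error chunk does not fit,
		 * fall back to a notification that carries only the
		 * sctp_remote_error header and no chunk data.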
*/ 4013 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4014 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4015 if (m_notify == NULL) { 4016 return; 4017 } 4018 } 4019 SCTP_BUF_NEXT(m_notify) = NULL; 4020 sre = mtod(m_notify, struct sctp_remote_error *); 4021 memset(sre, 0, notif_len); 4022 sre->sre_type = SCTP_REMOTE_ERROR; 4023 sre->sre_flags = 0; 4024 sre->sre_length = sizeof(struct sctp_remote_error); 4025 sre->sre_error = error; 4026 sre->sre_assoc_id = sctp_get_associd(stcb); 4027 if (notif_len > sizeof(struct sctp_remote_error)) { 4028 memcpy(sre->sre_data, chunk, chunk_len); 4029 sre->sre_length += chunk_len; 4030 } 4031 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4032 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4033 0, 0, stcb->asoc.context, 0, 0, 0, 4034 m_notify); 4035 if (control != NULL) { 4036 control->length = SCTP_BUF_LEN(m_notify); 4037 control->spec_flags = M_NOTIFICATION; 4038 /* not that we need this */ 4039 control->tail_mbuf = m_notify; 4040 sctp_add_to_readq(stcb->sctp_ep, stcb, 4041 control, 4042 &stcb->sctp_socket->so_rcv, 1, 4043 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4044 } else { 4045 sctp_m_freem(m_notify); 4046 } 4047 } 4048 4049 void 4050 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4051 uint32_t error, void *data, int so_locked) 4052 { 4053 if ((stcb == NULL) || 4054 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4055 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4056 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4057 /* If the socket is gone we are out of here */ 4058 return; 4059 } 4060 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4061 return; 4062 } 4063 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4064 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4065 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4066 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4067 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4068 /* Don't report these in front states */ 4069 return; 4070 } 4071 } 4072 switch (notification) { 4073 case SCTP_NOTIFY_ASSOC_UP: 4074 if (stcb->asoc.assoc_up_sent == 0) { 4075 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 4076 stcb->asoc.assoc_up_sent = 1; 4077 } 4078 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4079 sctp_notify_adaptation_layer(stcb); 4080 } 4081 if (stcb->asoc.auth_supported == 0) { 4082 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4083 NULL, so_locked); 4084 } 4085 break; 4086 case SCTP_NOTIFY_ASSOC_DOWN: 4087 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 4088 break; 4089 case SCTP_NOTIFY_INTERFACE_DOWN: 4090 { 4091 struct sctp_nets *net; 4092 4093 net = (struct sctp_nets *)data; 4094 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4095 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4096 break; 4097 } 4098 case SCTP_NOTIFY_INTERFACE_UP: 4099 { 4100 struct sctp_nets *net; 4101 4102 net = (struct sctp_nets *)data; 4103 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4104 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4105 break; 4106 } 4107 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4108 { 4109 struct sctp_nets *net; 4110 4111 net = (struct sctp_nets *)data; 4112 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4113 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4114 break; 4115 } 4116 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4117 
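		/*
		 * An unsent stream-queue entry is being dropped; data points
		 * to the sctp_stream_queue_pending being abandoned.
		 */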
sctp_notify_send_failed2(stcb, error, 4118 (struct sctp_stream_queue_pending *)data, so_locked); 4119 break; 4120 case SCTP_NOTIFY_SENT_DG_FAIL: 4121 sctp_notify_send_failed(stcb, 1, error, 4122 (struct sctp_tmit_chunk *)data, so_locked); 4123 break; 4124 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4125 sctp_notify_send_failed(stcb, 0, error, 4126 (struct sctp_tmit_chunk *)data, so_locked); 4127 break; 4128 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4129 { 4130 uint32_t val; 4131 4132 val = *((uint32_t *)data); 4133 4134 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4135 break; 4136 } 4137 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4138 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4139 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4140 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked); 4141 } else { 4142 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked); 4143 } 4144 break; 4145 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4146 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4147 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4148 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked); 4149 } else { 4150 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked); 4151 } 4152 break; 4153 case SCTP_NOTIFY_ASSOC_RESTART: 4154 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked); 4155 if (stcb->asoc.auth_supported == 0) { 4156 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4157 NULL, so_locked); 4158 } 4159 break; 4160 case SCTP_NOTIFY_STR_RESET_SEND: 4161 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4162 break; 4163 case SCTP_NOTIFY_STR_RESET_RECV: 4164 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4165 break; 4166 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4167 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4168 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4169 break; 4170 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4171 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4172 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4173 break; 4174 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4175 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4176 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4177 break; 4178 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4179 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4180 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4181 break; 4182 case SCTP_NOTIFY_ASCONF_ADD_IP: 4183 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4184 error, so_locked); 4185 break; 4186 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4187 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4188 error, so_locked); 4189 break; 4190 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4191 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4192 error, so_locked); 4193 break; 4194 case SCTP_NOTIFY_PEER_SHUTDOWN: 4195 sctp_notify_shutdown_event(stcb); 4196 break; 4197 case SCTP_NOTIFY_AUTH_NEW_KEY: 4198 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4199 (uint16_t)(uintptr_t)data, 4200 so_locked); 4201 break; 4202 case SCTP_NOTIFY_AUTH_FREE_KEY: 4203 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4204 (uint16_t)(uintptr_t)data, 4205 so_locked); 4206 break; 4207 case SCTP_NOTIFY_NO_PEER_AUTH: 4208 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4209 
(uint16_t)(uintptr_t)data, 4210 so_locked); 4211 break; 4212 case SCTP_NOTIFY_SENDER_DRY: 4213 sctp_notify_sender_dry_event(stcb, so_locked); 4214 break; 4215 case SCTP_NOTIFY_REMOTE_ERROR: 4216 sctp_notify_remote_error(stcb, error, data); 4217 break; 4218 default: 4219 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4220 __func__, notification, notification); 4221 break; 4222 } /* end switch */ 4223 } 4224 4225 void 4226 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4227 { 4228 struct sctp_association *asoc; 4229 struct sctp_stream_out *outs; 4230 struct sctp_tmit_chunk *chk, *nchk; 4231 struct sctp_stream_queue_pending *sp, *nsp; 4232 int i; 4233 4234 if (stcb == NULL) { 4235 return; 4236 } 4237 asoc = &stcb->asoc; 4238 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4239 /* already being freed */ 4240 return; 4241 } 4242 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4243 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4244 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4245 return; 4246 } 4247 /* now through all the gunk freeing chunks */ 4248 /* sent queue SHOULD be empty */ 4249 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4250 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4251 asoc->sent_queue_cnt--; 4252 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4253 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4254 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4255 #ifdef INVARIANTS 4256 } else { 4257 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4258 #endif 4259 } 4260 } 4261 if (chk->data != NULL) { 4262 sctp_free_bufspace(stcb, asoc, chk, 1); 4263 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4264 error, chk, so_locked); 4265 if (chk->data) { 4266 sctp_m_freem(chk->data); 4267 chk->data = NULL; 4268 } 4269 } 4270 sctp_free_a_chunk(stcb, chk, so_locked); 4271 /* sa_ignore FREED_MEMORY */ 4272 } 4273 /* pending send queue SHOULD be empty */ 4274 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4275 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4276 asoc->send_queue_cnt--; 4277 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4278 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4279 #ifdef INVARIANTS 4280 } else { 4281 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4282 #endif 4283 } 4284 if (chk->data != NULL) { 4285 sctp_free_bufspace(stcb, asoc, chk, 1); 4286 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4287 error, chk, so_locked); 4288 if (chk->data) { 4289 sctp_m_freem(chk->data); 4290 chk->data = NULL; 4291 } 4292 } 4293 sctp_free_a_chunk(stcb, chk, so_locked); 4294 /* sa_ignore FREED_MEMORY */ 4295 } 4296 for (i = 0; i < asoc->streamoutcnt; i++) { 4297 /* For each stream */ 4298 outs = &asoc->strmout[i]; 4299 /* clean up any sends there */ 4300 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4301 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4302 TAILQ_REMOVE(&outs->outqueue, sp, next); 4303 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1); 4304 sctp_free_spbufspace(stcb, asoc, sp); 4305 if (sp->data) { 4306 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4307 error, (void *)sp, so_locked); 4308 if (sp->data) { 4309 sctp_m_freem(sp->data); 4310 sp->data = NULL; 4311 sp->tail_mbuf = NULL; 4312 sp->length = 0; 4313 } 4314 } 4315 if (sp->net) { 4316 sctp_free_remote_addr(sp->net); 4317 sp->net = NULL; 4318 } 4319 /* Free the chunk */ 4320 
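			/*
			 * (sp->data was released above; this frees the
			 * sctp_stream_queue_pending structure itself.)
			 */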
sctp_free_a_strmoq(stcb, sp, so_locked); 4321 /* sa_ignore FREED_MEMORY */ 4322 } 4323 } 4324 } 4325 4326 void 4327 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error, 4328 struct sctp_abort_chunk *abort, int so_locked) 4329 { 4330 if (stcb == NULL) { 4331 return; 4332 } 4333 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4334 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4335 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4336 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 4337 } 4338 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4339 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4340 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4341 return; 4342 } 4343 SCTP_TCB_SEND_LOCK(stcb); 4344 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4345 /* Tell them we lost the asoc */ 4346 sctp_report_all_outbound(stcb, error, so_locked); 4347 SCTP_TCB_SEND_UNLOCK(stcb); 4348 if (from_peer) { 4349 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4350 } else { 4351 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4352 } 4353 } 4354 4355 void 4356 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4357 struct mbuf *m, int iphlen, 4358 struct sockaddr *src, struct sockaddr *dst, 4359 struct sctphdr *sh, struct mbuf *op_err, 4360 uint8_t mflowtype, uint32_t mflowid, 4361 uint32_t vrf_id, uint16_t port) 4362 { 4363 uint32_t vtag; 4364 4365 vtag = 0; 4366 if (stcb != NULL) { 4367 vtag = stcb->asoc.peer_vtag; 4368 vrf_id = stcb->asoc.vrf_id; 4369 } 4370 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4371 mflowtype, mflowid, inp->fibnum, 4372 vrf_id, port); 4373 if (stcb != NULL) { 4374 /* We have a TCB to abort, send notification too */ 4375 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 4376 /* Ok, now lets free it */ 4377 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4378 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4379 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4380 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4381 } 4382 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4383 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4384 } 4385 } 4386 #ifdef SCTP_ASOCLOG_OF_TSNS 4387 void 4388 sctp_print_out_track_log(struct sctp_tcb *stcb) 4389 { 4390 #ifdef NOSIY_PRINTS 4391 int i; 4392 4393 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4394 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4395 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4396 SCTP_PRINTF("None rcvd\n"); 4397 goto none_in; 4398 } 4399 if (stcb->asoc.tsn_in_wrapped) { 4400 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4401 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4402 stcb->asoc.in_tsnlog[i].tsn, 4403 stcb->asoc.in_tsnlog[i].strm, 4404 stcb->asoc.in_tsnlog[i].seq, 4405 stcb->asoc.in_tsnlog[i].flgs, 4406 stcb->asoc.in_tsnlog[i].sz); 4407 } 4408 } 4409 if (stcb->asoc.tsn_in_at) { 4410 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4411 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4412 stcb->asoc.in_tsnlog[i].tsn, 4413 stcb->asoc.in_tsnlog[i].strm, 4414 stcb->asoc.in_tsnlog[i].seq, 4415 stcb->asoc.in_tsnlog[i].flgs, 4416 stcb->asoc.in_tsnlog[i].sz); 4417 } 4418 } 4419 none_in: 4420 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4421 if ((stcb->asoc.tsn_out_at == 0) && 4422 (stcb->asoc.tsn_out_wrapped == 0)) { 4423 SCTP_PRINTF("None sent\n"); 4424 } 4425 if 
(stcb->asoc.tsn_out_wrapped) { 4426 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4427 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4428 stcb->asoc.out_tsnlog[i].tsn, 4429 stcb->asoc.out_tsnlog[i].strm, 4430 stcb->asoc.out_tsnlog[i].seq, 4431 stcb->asoc.out_tsnlog[i].flgs, 4432 stcb->asoc.out_tsnlog[i].sz); 4433 } 4434 } 4435 if (stcb->asoc.tsn_out_at) { 4436 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4437 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4438 stcb->asoc.out_tsnlog[i].tsn, 4439 stcb->asoc.out_tsnlog[i].strm, 4440 stcb->asoc.out_tsnlog[i].seq, 4441 stcb->asoc.out_tsnlog[i].flgs, 4442 stcb->asoc.out_tsnlog[i].sz); 4443 } 4444 } 4445 #endif 4446 } 4447 #endif 4448 4449 void 4450 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4451 struct mbuf *op_err, 4452 int so_locked) 4453 { 4454 4455 if (stcb == NULL) { 4456 /* Got to have a TCB */ 4457 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4458 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4459 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4460 SCTP_CALLED_DIRECTLY_NOCMPSET); 4461 } 4462 } 4463 return; 4464 } 4465 /* notify the peer */ 4466 sctp_send_abort_tcb(stcb, op_err, so_locked); 4467 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4468 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4469 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4470 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4471 } 4472 /* notify the ulp */ 4473 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4474 sctp_abort_notification(stcb, 0, 0, NULL, so_locked); 4475 } 4476 /* now free the asoc */ 4477 #ifdef SCTP_ASOCLOG_OF_TSNS 4478 sctp_print_out_track_log(stcb); 4479 #endif 4480 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4481 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4482 } 4483 4484 void 4485 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4486 struct sockaddr *src, struct sockaddr *dst, 4487 struct sctphdr *sh, struct sctp_inpcb *inp, 4488 struct mbuf *cause, 4489 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4490 uint32_t vrf_id, uint16_t port) 4491 { 4492 struct sctp_chunkhdr *ch, chunk_buf; 4493 unsigned int chk_length; 4494 int contains_init_chunk; 4495 4496 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4497 /* Generate a TO address for future reference */ 4498 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4499 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4500 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4501 SCTP_CALLED_DIRECTLY_NOCMPSET); 4502 } 4503 } 4504 contains_init_chunk = 0; 4505 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4506 sizeof(*ch), (uint8_t *)&chunk_buf); 4507 while (ch != NULL) { 4508 chk_length = ntohs(ch->chunk_length); 4509 if (chk_length < sizeof(*ch)) { 4510 /* break to abort land */ 4511 break; 4512 } 4513 switch (ch->chunk_type) { 4514 case SCTP_INIT: 4515 contains_init_chunk = 1; 4516 break; 4517 case SCTP_PACKET_DROPPED: 4518 /* we don't respond to pkt-dropped */ 4519 return; 4520 case SCTP_ABORT_ASSOCIATION: 4521 /* we don't respond with an ABORT to an ABORT */ 4522 return; 4523 case SCTP_SHUTDOWN_COMPLETE: 4524 /* 4525 * we ignore it since we are not waiting for it and 4526 * peer is gone 4527 */ 4528 return; 4529 case SCTP_SHUTDOWN_ACK: 4530 sctp_send_shutdown_complete2(src, dst, sh, 4531 mflowtype, mflowid, fibnum, 4532 vrf_id, port); 4533 return; 4534 default: 4535 break; 4536 } 4537 offset += SCTP_SIZE32(chk_length); 4538 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4539 sizeof(*ch), (uint8_t 
*)&chunk_buf); 4540 } 4541 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4542 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4543 (contains_init_chunk == 0))) { 4544 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4545 mflowtype, mflowid, fibnum, 4546 vrf_id, port); 4547 } 4548 } 4549 4550 /* 4551 * check the inbound datagram to make sure there is not an abort inside it, 4552 * if there is return 1, else return 0. 4553 */ 4554 int 4555 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill) 4556 { 4557 struct sctp_chunkhdr *ch; 4558 struct sctp_init_chunk *init_chk, chunk_buf; 4559 int offset; 4560 unsigned int chk_length; 4561 4562 offset = iphlen + sizeof(struct sctphdr); 4563 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4564 (uint8_t *)&chunk_buf); 4565 while (ch != NULL) { 4566 chk_length = ntohs(ch->chunk_length); 4567 if (chk_length < sizeof(*ch)) { 4568 /* packet is probably corrupt */ 4569 break; 4570 } 4571 /* we seem to be ok, is it an abort? */ 4572 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4573 /* yep, tell them */ 4574 return (1); 4575 } 4576 if (ch->chunk_type == SCTP_INITIATION) { 4577 /* need to update the Vtag */ 4578 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4579 offset, sizeof(*init_chk), (uint8_t *)&chunk_buf); 4580 if (init_chk != NULL) { 4581 *vtagfill = ntohl(init_chk->init.initiate_tag); 4582 } 4583 } 4584 /* Nope, move to the next chunk */ 4585 offset += SCTP_SIZE32(chk_length); 4586 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4587 sizeof(*ch), (uint8_t *)&chunk_buf); 4588 } 4589 return (0); 4590 } 4591 4592 /* 4593 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4594 * set (i.e. it's 0) so, create this function to compare link local scopes 4595 */ 4596 #ifdef INET6 4597 uint32_t 4598 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4599 { 4600 struct sockaddr_in6 a, b; 4601 4602 /* save copies */ 4603 a = *addr1; 4604 b = *addr2; 4605 4606 if (a.sin6_scope_id == 0) 4607 if (sa6_recoverscope(&a)) { 4608 /* can't get scope, so can't match */ 4609 return (0); 4610 } 4611 if (b.sin6_scope_id == 0) 4612 if (sa6_recoverscope(&b)) { 4613 /* can't get scope, so can't match */ 4614 return (0); 4615 } 4616 if (a.sin6_scope_id != b.sin6_scope_id) 4617 return (0); 4618 4619 return (1); 4620 } 4621 4622 /* 4623 * returns a sockaddr_in6 with embedded scope recovered and removed 4624 */ 4625 struct sockaddr_in6 * 4626 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4627 { 4628 /* check and strip embedded scope junk */ 4629 if (addr->sin6_family == AF_INET6) { 4630 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4631 if (addr->sin6_scope_id == 0) { 4632 *store = *addr; 4633 if (!sa6_recoverscope(store)) { 4634 /* use the recovered scope */ 4635 addr = store; 4636 } 4637 } else { 4638 /* else, return the original "to" addr */ 4639 in6_clearscope(&addr->sin6_addr); 4640 } 4641 } 4642 } 4643 return (addr); 4644 } 4645 #endif 4646 4647 /* 4648 * are the two addresses the same? 
 * currently a "scopeless" check; returns 1 if same, 0 if not
 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{

	/* must be valid */
	if (sa1 == NULL || sa2 == NULL)
		return (0);

	/* must be the same family */
	if (sa1->sa_family != sa2->sa_family)
		return (0);

	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			/* IPv6 addresses */
			struct sockaddr_in6 *sin6_1, *sin6_2;

			sin6_1 = (struct sockaddr_in6 *)sa1;
			sin6_2 = (struct sockaddr_in6 *)sa2;
			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
			    sin6_2));
		}
#endif
#ifdef INET
	case AF_INET:
		{
			/* IPv4 addresses */
			struct sockaddr_in *sin_1, *sin_2;

			sin_1 = (struct sockaddr_in *)sa1;
			sin_2 = (struct sockaddr_in *)sa2;
			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
		}
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}

void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
			break;
		}
#endif
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;
			unsigned char *p;

			sin = (struct sockaddr_in *)sa;
			p = (unsigned char *)&sin->sin_addr;
			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
			break;
		}
#endif
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}

void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, we can't get the sblock; data will be left
		 * stranded, and we don't dare look at it since the
		 * other thread may be reading something. Oh well, it's a
		 * screwed-up app that does a peeloff OR an accept while
		 * reading from the main socket... actually it's only the
		 * peeloff() case, since read should fail on a
		 * listening socket.
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all entries for our target stcb */
		if (control->stcb == stcb) {
			/* remove it; we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ?
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4777 } 4778 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4779 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4780 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4781 } 4782 m = SCTP_BUF_NEXT(m); 4783 } 4784 } 4785 } 4786 SCTP_INP_READ_UNLOCK(old_inp); 4787 /* Remove the sb-lock on the old socket */ 4788 4789 sbunlock(&old_so->so_rcv); 4790 /* Now we move them over to the new socket buffer */ 4791 SCTP_INP_READ_LOCK(new_inp); 4792 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4793 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4794 m = control->data; 4795 while (m) { 4796 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4797 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4798 } 4799 sctp_sballoc(stcb, &new_so->so_rcv, m); 4800 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4801 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4802 } 4803 m = SCTP_BUF_NEXT(m); 4804 } 4805 } 4806 SCTP_INP_READ_UNLOCK(new_inp); 4807 } 4808 4809 void 4810 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4811 struct sctp_tcb *stcb, 4812 int so_locked 4813 SCTP_UNUSED 4814 ) 4815 { 4816 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4817 sctp_sorwakeup(inp, inp->sctp_socket); 4818 } 4819 } 4820 4821 void 4822 sctp_add_to_readq(struct sctp_inpcb *inp, 4823 struct sctp_tcb *stcb, 4824 struct sctp_queued_to_read *control, 4825 struct sockbuf *sb, 4826 int end, 4827 int inp_read_lock_held, 4828 int so_locked) 4829 { 4830 /* 4831 * Here we must place the control on the end of the socket read 4832 * queue AND increment sb_cc so that select will work properly on 4833 * read. 4834 */ 4835 struct mbuf *m, *prev = NULL; 4836 4837 if (inp == NULL) { 4838 /* Gak, TSNH!! */ 4839 #ifdef INVARIANTS 4840 panic("Gak, inp NULL on add_to_readq"); 4841 #endif 4842 return; 4843 } 4844 if (inp_read_lock_held == 0) 4845 SCTP_INP_READ_LOCK(inp); 4846 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4847 if (!control->on_strm_q) { 4848 sctp_free_remote_addr(control->whoFrom); 4849 if (control->data) { 4850 sctp_m_freem(control->data); 4851 control->data = NULL; 4852 } 4853 sctp_free_a_readq(stcb, control); 4854 } 4855 if (inp_read_lock_held == 0) 4856 SCTP_INP_READ_UNLOCK(inp); 4857 return; 4858 } 4859 if (!(control->spec_flags & M_NOTIFICATION)) { 4860 atomic_add_int(&inp->total_recvs, 1); 4861 if (!control->do_not_ref_stcb) { 4862 atomic_add_int(&stcb->total_recvs, 1); 4863 } 4864 } 4865 m = control->data; 4866 control->held_length = 0; 4867 control->length = 0; 4868 while (m) { 4869 if (SCTP_BUF_LEN(m) == 0) { 4870 /* Skip mbufs with NO length */ 4871 if (prev == NULL) { 4872 /* First one */ 4873 control->data = sctp_m_free(m); 4874 m = control->data; 4875 } else { 4876 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4877 m = SCTP_BUF_NEXT(prev); 4878 } 4879 if (m == NULL) { 4880 control->tail_mbuf = prev; 4881 } 4882 continue; 4883 } 4884 prev = m; 4885 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4886 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4887 } 4888 sctp_sballoc(stcb, sb, m); 4889 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4890 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 4891 } 4892 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4893 m = SCTP_BUF_NEXT(m); 4894 } 4895 if (prev != NULL) { 4896 control->tail_mbuf = prev; 4897 } else { 4898 /* Everything got collapsed out?? */ 4899 if (!control->on_strm_q) { 4900 sctp_free_remote_addr(control->whoFrom); 4901 sctp_free_a_readq(stcb, control); 4902 } 4903 if (inp_read_lock_held == 0) 4904 SCTP_INP_READ_UNLOCK(inp); 4905 return; 4906 } 4907 if (end) { 4908 control->end_added = 1; 4909 } 4910 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4911 control->on_read_q = 1; 4912 if (inp_read_lock_held == 0) 4913 SCTP_INP_READ_UNLOCK(inp); 4914 if (inp && inp->sctp_socket) { 4915 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4916 } 4917 } 4918 4919 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4920 *************ALTERNATE ROUTING CODE 4921 */ 4922 4923 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4924 *************ALTERNATE ROUTING CODE 4925 */ 4926 4927 struct mbuf * 4928 sctp_generate_cause(uint16_t code, char *info) 4929 { 4930 struct mbuf *m; 4931 struct sctp_gen_error_cause *cause; 4932 size_t info_len; 4933 uint16_t len; 4934 4935 if ((code == 0) || (info == NULL)) { 4936 return (NULL); 4937 } 4938 info_len = strlen(info); 4939 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4940 return (NULL); 4941 } 4942 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4943 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4944 if (m != NULL) { 4945 SCTP_BUF_LEN(m) = len; 4946 cause = mtod(m, struct sctp_gen_error_cause *); 4947 cause->code = htons(code); 4948 cause->length = htons(len); 4949 memcpy(cause->info, info, info_len); 4950 } 4951 return (m); 4952 } 4953 4954 struct mbuf * 4955 sctp_generate_no_user_data_cause(uint32_t tsn) 4956 { 4957 struct mbuf *m; 4958 struct sctp_error_no_user_data *no_user_data_cause; 4959 uint16_t len; 4960 4961 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 4962 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4963 if (m != NULL) { 4964 SCTP_BUF_LEN(m) = len; 4965 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 4966 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 4967 no_user_data_cause->cause.length = htons(len); 4968 no_user_data_cause->tsn = htonl(tsn); 4969 } 4970 return (m); 4971 } 4972 4973 #ifdef SCTP_MBCNT_LOGGING 4974 void 4975 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4976 struct sctp_tmit_chunk *tp1, int chk_cnt) 4977 { 4978 if (tp1->data == NULL) { 4979 return; 4980 } 4981 asoc->chunks_on_out_queue -= chk_cnt; 4982 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 4983 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4984 asoc->total_output_queue_size, 4985 tp1->book_size, 4986 0, 4987 tp1->mbcnt); 4988 } 4989 if (asoc->total_output_queue_size >= tp1->book_size) { 4990 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4991 } else { 4992 asoc->total_output_queue_size = 0; 4993 } 4994 4995 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4996 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4997 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 4998 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 4999 } else { 5000 stcb->sctp_socket->so_snd.sb_cc = 0; 5001 } 5002 } 5003 } 5004 5005 #endif 5006 5007 int 5008 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5009 uint8_t sent, int so_locked) 
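/*
 * Abandon a PR-SCTP message: every fragment is marked
 * SCTP_FORWARD_TSN_SKIP, the ULP is notified of the failed send, and the
 * number of bytes released is returned.
 */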
5010 { 5011 struct sctp_stream_out *strq; 5012 struct sctp_tmit_chunk *chk = NULL, *tp2; 5013 struct sctp_stream_queue_pending *sp; 5014 uint32_t mid; 5015 uint16_t sid; 5016 uint8_t foundeom = 0; 5017 int ret_sz = 0; 5018 int notdone; 5019 int do_wakeup_routine = 0; 5020 5021 sid = tp1->rec.data.sid; 5022 mid = tp1->rec.data.mid; 5023 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5024 stcb->asoc.abandoned_sent[0]++; 5025 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5026 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5027 #if defined(SCTP_DETAILED_STR_STATS) 5028 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5029 #endif 5030 } else { 5031 stcb->asoc.abandoned_unsent[0]++; 5032 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5033 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5034 #if defined(SCTP_DETAILED_STR_STATS) 5035 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5036 #endif 5037 } 5038 do { 5039 ret_sz += tp1->book_size; 5040 if (tp1->data != NULL) { 5041 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5042 sctp_flight_size_decrease(tp1); 5043 sctp_total_flight_decrease(stcb, tp1); 5044 } 5045 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5046 stcb->asoc.peers_rwnd += tp1->send_size; 5047 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5048 if (sent) { 5049 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5050 } else { 5051 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5052 } 5053 if (tp1->data) { 5054 sctp_m_freem(tp1->data); 5055 tp1->data = NULL; 5056 } 5057 do_wakeup_routine = 1; 5058 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5059 stcb->asoc.sent_queue_cnt_removeable--; 5060 } 5061 } 5062 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5063 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5064 SCTP_DATA_NOT_FRAG) { 5065 /* not frag'ed we ae done */ 5066 notdone = 0; 5067 foundeom = 1; 5068 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5069 /* end of frag, we are done */ 5070 notdone = 0; 5071 foundeom = 1; 5072 } else { 5073 /* 5074 * Its a begin or middle piece, we must mark all of 5075 * it 5076 */ 5077 notdone = 1; 5078 tp1 = TAILQ_NEXT(tp1, sctp_next); 5079 } 5080 } while (tp1 && notdone); 5081 if (foundeom == 0) { 5082 /* 5083 * The multi-part message was scattered across the send and 5084 * sent queue. 5085 */ 5086 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5087 if ((tp1->rec.data.sid != sid) || 5088 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5089 break; 5090 } 5091 /* 5092 * save to chk in case we have some on stream out 5093 * queue. If so and we have an un-transmitted one we 5094 * don't have to fudge the TSN. 5095 */ 5096 chk = tp1; 5097 ret_sz += tp1->book_size; 5098 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5099 if (sent) { 5100 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5101 } else { 5102 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5103 } 5104 if (tp1->data) { 5105 sctp_m_freem(tp1->data); 5106 tp1->data = NULL; 5107 } 5108 /* No flight involved here book the size to 0 */ 5109 tp1->book_size = 0; 5110 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5111 foundeom = 1; 5112 } 5113 do_wakeup_routine = 1; 5114 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5115 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5116 /* 5117 * on to the sent queue so we can wait for it to be 5118 * passed by. 
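			 * It is marked SCTP_FORWARD_TSN_SKIP, so a
			 * FORWARD-TSN can advance the peer's cumulative ack
			 * past it.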
5119 */ 5120 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5121 sctp_next); 5122 stcb->asoc.send_queue_cnt--; 5123 stcb->asoc.sent_queue_cnt++; 5124 } 5125 } 5126 if (foundeom == 0) { 5127 /* 5128 * Still no eom found. That means there is stuff left on the 5129 * stream out queue.. yuck. 5130 */ 5131 SCTP_TCB_SEND_LOCK(stcb); 5132 strq = &stcb->asoc.strmout[sid]; 5133 sp = TAILQ_FIRST(&strq->outqueue); 5134 if (sp != NULL) { 5135 sp->discard_rest = 1; 5136 /* 5137 * We may need to put a chunk on the queue that 5138 * holds the TSN that would have been sent with the 5139 * LAST bit. 5140 */ 5141 if (chk == NULL) { 5142 /* Yep, we have to */ 5143 sctp_alloc_a_chunk(stcb, chk); 5144 if (chk == NULL) { 5145 /* 5146 * we are hosed. All we can do is 5147 * nothing.. which will cause an 5148 * abort if the peer is paying 5149 * attention. 5150 */ 5151 goto oh_well; 5152 } 5153 memset(chk, 0, sizeof(*chk)); 5154 chk->rec.data.rcv_flags = 0; 5155 chk->sent = SCTP_FORWARD_TSN_SKIP; 5156 chk->asoc = &stcb->asoc; 5157 if (stcb->asoc.idata_supported == 0) { 5158 if (sp->sinfo_flags & SCTP_UNORDERED) { 5159 chk->rec.data.mid = 0; 5160 } else { 5161 chk->rec.data.mid = strq->next_mid_ordered; 5162 } 5163 } else { 5164 if (sp->sinfo_flags & SCTP_UNORDERED) { 5165 chk->rec.data.mid = strq->next_mid_unordered; 5166 } else { 5167 chk->rec.data.mid = strq->next_mid_ordered; 5168 } 5169 } 5170 chk->rec.data.sid = sp->sid; 5171 chk->rec.data.ppid = sp->ppid; 5172 chk->rec.data.context = sp->context; 5173 chk->flags = sp->act_flags; 5174 chk->whoTo = NULL; 5175 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5176 strq->chunks_on_queues++; 5177 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5178 stcb->asoc.sent_queue_cnt++; 5179 stcb->asoc.pr_sctp_cnt++; 5180 } 5181 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5182 if (sp->sinfo_flags & SCTP_UNORDERED) { 5183 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5184 } 5185 if (stcb->asoc.idata_supported == 0) { 5186 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5187 strq->next_mid_ordered++; 5188 } 5189 } else { 5190 if (sp->sinfo_flags & SCTP_UNORDERED) { 5191 strq->next_mid_unordered++; 5192 } else { 5193 strq->next_mid_ordered++; 5194 } 5195 } 5196 oh_well: 5197 if (sp->data) { 5198 /* 5199 * Pull any data to free up the SB and allow 5200 * sender to "add more" while we will throw 5201 * away :-) 5202 */ 5203 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5204 ret_sz += sp->length; 5205 do_wakeup_routine = 1; 5206 sp->some_taken = 1; 5207 sctp_m_freem(sp->data); 5208 sp->data = NULL; 5209 sp->tail_mbuf = NULL; 5210 sp->length = 0; 5211 } 5212 } 5213 SCTP_TCB_SEND_UNLOCK(stcb); 5214 } 5215 if (do_wakeup_routine) { 5216 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5217 } 5218 return (ret_sz); 5219 } 5220 5221 /* 5222 * checks to see if the given address, sa, is one that is currently known by 5223 * the kernel note: can't distinguish the same address on multiple interfaces 5224 * and doesn't handle multiple addresses with different zone/scope id's note: 5225 * ifa_ifwithaddr() compares the entire sockaddr struct 5226 */ 5227 struct sctp_ifa * 5228 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5229 int holds_lock) 5230 { 5231 struct sctp_laddr *laddr; 5232 5233 if (holds_lock == 0) { 5234 SCTP_INP_RLOCK(inp); 5235 } 5236 5237 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5238 if (laddr->ifa == NULL) 5239 continue; 5240 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5241 continue; 5242 
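		/* Same address family; compare the actual address below. */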
#ifdef INET 5243 if (addr->sa_family == AF_INET) { 5244 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5245 laddr->ifa->address.sin.sin_addr.s_addr) { 5246 /* found him. */ 5247 break; 5248 } 5249 } 5250 #endif 5251 #ifdef INET6 5252 if (addr->sa_family == AF_INET6) { 5253 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5254 &laddr->ifa->address.sin6)) { 5255 /* found him. */ 5256 break; 5257 } 5258 } 5259 #endif 5260 } 5261 if (holds_lock == 0) { 5262 SCTP_INP_RUNLOCK(inp); 5263 } 5264 if (laddr != NULL) { 5265 return (laddr->ifa); 5266 } else { 5267 return (NULL); 5268 } 5269 } 5270 5271 uint32_t 5272 sctp_get_ifa_hash_val(struct sockaddr *addr) 5273 { 5274 switch (addr->sa_family) { 5275 #ifdef INET 5276 case AF_INET: 5277 { 5278 struct sockaddr_in *sin; 5279 5280 sin = (struct sockaddr_in *)addr; 5281 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5282 } 5283 #endif 5284 #ifdef INET6 5285 case AF_INET6: 5286 { 5287 struct sockaddr_in6 *sin6; 5288 uint32_t hash_of_addr; 5289 5290 sin6 = (struct sockaddr_in6 *)addr; 5291 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5292 sin6->sin6_addr.s6_addr32[1] + 5293 sin6->sin6_addr.s6_addr32[2] + 5294 sin6->sin6_addr.s6_addr32[3]); 5295 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5296 return (hash_of_addr); 5297 } 5298 #endif 5299 default: 5300 break; 5301 } 5302 return (0); 5303 } 5304 5305 struct sctp_ifa * 5306 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5307 { 5308 struct sctp_ifa *sctp_ifap; 5309 struct sctp_vrf *vrf; 5310 struct sctp_ifalist *hash_head; 5311 uint32_t hash_of_addr; 5312 5313 if (holds_lock == 0) { 5314 SCTP_IPI_ADDR_RLOCK(); 5315 } else { 5316 SCTP_IPI_ADDR_LOCK_ASSERT(); 5317 } 5318 5319 vrf = sctp_find_vrf(vrf_id); 5320 if (vrf == NULL) { 5321 if (holds_lock == 0) 5322 SCTP_IPI_ADDR_RUNLOCK(); 5323 return (NULL); 5324 } 5325 5326 hash_of_addr = sctp_get_ifa_hash_val(addr); 5327 5328 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5329 if (hash_head == NULL) { 5330 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5331 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5332 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5333 sctp_print_address(addr); 5334 SCTP_PRINTF("No such bucket for address\n"); 5335 if (holds_lock == 0) 5336 SCTP_IPI_ADDR_RUNLOCK(); 5337 5338 return (NULL); 5339 } 5340 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5341 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5342 continue; 5343 #ifdef INET 5344 if (addr->sa_family == AF_INET) { 5345 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5346 sctp_ifap->address.sin.sin_addr.s_addr) { 5347 /* found him. */ 5348 break; 5349 } 5350 } 5351 #endif 5352 #ifdef INET6 5353 if (addr->sa_family == AF_INET6) { 5354 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5355 &sctp_ifap->address.sin6)) { 5356 /* found him. */ 5357 break; 5358 } 5359 } 5360 #endif 5361 } 5362 if (holds_lock == 0) 5363 SCTP_IPI_ADDR_RUNLOCK(); 5364 return (sctp_ifap); 5365 } 5366 5367 static void 5368 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5369 uint32_t rwnd_req) 5370 { 5371 /* User pulled some data, do we need a rwnd update? 
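	 * A window-update SACK is sent only once at least rwnd_req bytes of
	 * receive-buffer space have been freed since the last report.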
	 */
	struct epoch_tracker et;
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	atomic_add_int(&stcb->asoc.refcnt, 1);

	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
		/* Pre-check: if we are being freed, no update is needed */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have we freed enough to make a look worthwhile? */
	*freed_so_far = 0;
	/* Yep, it's worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we possibly let the update
			 * in. There is a race where the association is
			 * about to be freed but has not yet reached the
			 * gate; in that case, skip the update.
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		NET_EPOCH_ENTER(et);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		NET_EPOCH_EXIT(et);
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}

	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}

int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look, don't touch :-D (only valid with OUT mbuf copy,
	 * mp=NULL, thus uio is the copy method to userland). MSG_WAITALL - ??
5467 * On the way out we may send out any combination of: 5468 * MSG_NOTIFICATION MSG_EOR 5469 * 5470 */ 5471 struct sctp_inpcb *inp = NULL; 5472 ssize_t my_len = 0; 5473 ssize_t cp_len = 0; 5474 int error = 0; 5475 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5476 struct mbuf *m = NULL; 5477 struct sctp_tcb *stcb = NULL; 5478 int wakeup_read_socket = 0; 5479 int freecnt_applied = 0; 5480 int out_flags = 0, in_flags = 0; 5481 int block_allowed = 1; 5482 uint32_t freed_so_far = 0; 5483 ssize_t copied_so_far = 0; 5484 int in_eeor_mode = 0; 5485 int no_rcv_needed = 0; 5486 uint32_t rwnd_req = 0; 5487 int hold_sblock = 0; 5488 int hold_rlock = 0; 5489 ssize_t slen = 0; 5490 uint32_t held_length = 0; 5491 int sockbuf_lock = 0; 5492 5493 if (uio == NULL) { 5494 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5495 return (EINVAL); 5496 } 5497 5498 if (msg_flags) { 5499 in_flags = *msg_flags; 5500 if (in_flags & MSG_PEEK) 5501 SCTP_STAT_INCR(sctps_read_peeks); 5502 } else { 5503 in_flags = 0; 5504 } 5505 slen = uio->uio_resid; 5506 5507 /* Pull in and set up our int flags */ 5508 if (in_flags & MSG_OOB) { 5509 /* Out of band's NOT supported */ 5510 return (EOPNOTSUPP); 5511 } 5512 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5513 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5514 return (EINVAL); 5515 } 5516 if ((in_flags & (MSG_DONTWAIT 5517 | MSG_NBIO 5518 )) || 5519 SCTP_SO_IS_NBIO(so)) { 5520 block_allowed = 0; 5521 } 5522 /* setup the endpoint */ 5523 inp = (struct sctp_inpcb *)so->so_pcb; 5524 if (inp == NULL) { 5525 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5526 return (EFAULT); 5527 } 5528 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5529 /* Must be at least a MTU's worth */ 5530 if (rwnd_req < SCTP_MIN_RWND) 5531 rwnd_req = SCTP_MIN_RWND; 5532 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5533 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5534 sctp_misc_ints(SCTP_SORECV_ENTER, 5535 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5536 } 5537 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5538 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5539 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5540 } 5541 5542 error = sblock(&so->so_rcv, (block_allowed ? 
SBL_WAIT : 0)); 5543 if (error) { 5544 goto release_unlocked; 5545 } 5546 sockbuf_lock = 1; 5547 restart: 5548 5549 restart_nosblocks: 5550 if (hold_sblock == 0) { 5551 SOCKBUF_LOCK(&so->so_rcv); 5552 hold_sblock = 1; 5553 } 5554 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5555 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5556 goto out; 5557 } 5558 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5559 if (so->so_error) { 5560 error = so->so_error; 5561 if ((in_flags & MSG_PEEK) == 0) 5562 so->so_error = 0; 5563 goto out; 5564 } else { 5565 if (so->so_rcv.sb_cc == 0) { 5566 /* indicate EOF */ 5567 error = 0; 5568 goto out; 5569 } 5570 } 5571 } 5572 if (so->so_rcv.sb_cc <= held_length) { 5573 if (so->so_error) { 5574 error = so->so_error; 5575 if ((in_flags & MSG_PEEK) == 0) { 5576 so->so_error = 0; 5577 } 5578 goto out; 5579 } 5580 if ((so->so_rcv.sb_cc == 0) && 5581 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5582 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5583 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5584 /* 5585 * For active open side clear flags for 5586 * re-use passive open is blocked by 5587 * connect. 5588 */ 5589 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5590 /* 5591 * You were aborted, passive side 5592 * always hits here 5593 */ 5594 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5595 error = ECONNRESET; 5596 } 5597 so->so_state &= ~(SS_ISCONNECTING | 5598 SS_ISDISCONNECTING | 5599 SS_ISCONFIRMING | 5600 SS_ISCONNECTED); 5601 if (error == 0) { 5602 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5603 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5604 error = ENOTCONN; 5605 } 5606 } 5607 goto out; 5608 } 5609 } 5610 if (block_allowed) { 5611 error = sbwait(&so->so_rcv); 5612 if (error) { 5613 goto out; 5614 } 5615 held_length = 0; 5616 goto restart_nosblocks; 5617 } else { 5618 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5619 error = EWOULDBLOCK; 5620 goto out; 5621 } 5622 } 5623 if (hold_sblock == 1) { 5624 SOCKBUF_UNLOCK(&so->so_rcv); 5625 hold_sblock = 0; 5626 } 5627 /* we possibly have data we can read */ 5628 /* sa_ignore FREED_MEMORY */ 5629 control = TAILQ_FIRST(&inp->read_queue); 5630 if (control == NULL) { 5631 /* 5632 * This could be happening since the appender did the 5633 * increment but as not yet did the tailq insert onto the 5634 * read_queue 5635 */ 5636 if (hold_rlock == 0) { 5637 SCTP_INP_READ_LOCK(inp); 5638 } 5639 control = TAILQ_FIRST(&inp->read_queue); 5640 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5641 #ifdef INVARIANTS 5642 panic("Huh, its non zero and nothing on control?"); 5643 #endif 5644 so->so_rcv.sb_cc = 0; 5645 } 5646 SCTP_INP_READ_UNLOCK(inp); 5647 hold_rlock = 0; 5648 goto restart; 5649 } 5650 5651 if ((control->length == 0) && 5652 (control->do_not_ref_stcb)) { 5653 /* 5654 * Clean up code for freeing assoc that left behind a 5655 * pdapi.. maybe a peer in EEOR that just closed after 5656 * sending and never indicated a EOR. 5657 */ 5658 if (hold_rlock == 0) { 5659 hold_rlock = 1; 5660 SCTP_INP_READ_LOCK(inp); 5661 } 5662 control->held_length = 0; 5663 if (control->data) { 5664 /* Hmm there is data here .. 
fix */ 5665 struct mbuf *m_tmp; 5666 int cnt = 0; 5667 5668 m_tmp = control->data; 5669 while (m_tmp) { 5670 cnt += SCTP_BUF_LEN(m_tmp); 5671 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5672 control->tail_mbuf = m_tmp; 5673 control->end_added = 1; 5674 } 5675 m_tmp = SCTP_BUF_NEXT(m_tmp); 5676 } 5677 control->length = cnt; 5678 } else { 5679 /* remove it */ 5680 TAILQ_REMOVE(&inp->read_queue, control, next); 5681 /* Add back any hiddend data */ 5682 sctp_free_remote_addr(control->whoFrom); 5683 sctp_free_a_readq(stcb, control); 5684 } 5685 if (hold_rlock) { 5686 hold_rlock = 0; 5687 SCTP_INP_READ_UNLOCK(inp); 5688 } 5689 goto restart; 5690 } 5691 if ((control->length == 0) && 5692 (control->end_added == 1)) { 5693 /* 5694 * Do we also need to check for (control->pdapi_aborted == 5695 * 1)? 5696 */ 5697 if (hold_rlock == 0) { 5698 hold_rlock = 1; 5699 SCTP_INP_READ_LOCK(inp); 5700 } 5701 TAILQ_REMOVE(&inp->read_queue, control, next); 5702 if (control->data) { 5703 #ifdef INVARIANTS 5704 panic("control->data not null but control->length == 0"); 5705 #else 5706 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n"); 5707 sctp_m_freem(control->data); 5708 control->data = NULL; 5709 #endif 5710 } 5711 if (control->aux_data) { 5712 sctp_m_free(control->aux_data); 5713 control->aux_data = NULL; 5714 } 5715 #ifdef INVARIANTS 5716 if (control->on_strm_q) { 5717 panic("About to free ctl:%p so:%p and its in %d", 5718 control, so, control->on_strm_q); 5719 } 5720 #endif 5721 sctp_free_remote_addr(control->whoFrom); 5722 sctp_free_a_readq(stcb, control); 5723 if (hold_rlock) { 5724 hold_rlock = 0; 5725 SCTP_INP_READ_UNLOCK(inp); 5726 } 5727 goto restart; 5728 } 5729 if (control->length == 0) { 5730 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5731 (filling_sinfo)) { 5732 /* find a more suitable one then this */ 5733 ctl = TAILQ_NEXT(control, next); 5734 while (ctl) { 5735 if ((ctl->stcb != control->stcb) && (ctl->length) && 5736 (ctl->some_taken || 5737 (ctl->spec_flags & M_NOTIFICATION) || 5738 ((ctl->do_not_ref_stcb == 0) && 5739 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5740 ) { 5741 /*- 5742 * If we have a different TCB next, and there is data 5743 * present. If we have already taken some (pdapi), OR we can 5744 * ref the tcb and no delivery as started on this stream, we 5745 * take it. Note we allow a notification on a different 5746 * assoc to be delivered.. 5747 */ 5748 control = ctl; 5749 goto found_one; 5750 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5751 (ctl->length) && 5752 ((ctl->some_taken) || 5753 ((ctl->do_not_ref_stcb == 0) && 5754 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5755 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5756 /*- 5757 * If we have the same tcb, and there is data present, and we 5758 * have the strm interleave feature present. Then if we have 5759 * taken some (pdapi) or we can refer to tht tcb AND we have 5760 * not started a delivery for this stream, we can take it. 5761 * Note we do NOT allow a notificaiton on the same assoc to 5762 * be delivered. 5763 */ 5764 control = ctl; 5765 goto found_one; 5766 } 5767 ctl = TAILQ_NEXT(ctl, next); 5768 } 5769 } 5770 /* 5771 * if we reach here, not suitable replacement is available 5772 * <or> fragment interleave is NOT on. So stuff the sb_cc 5773 * into the our held count, and its time to sleep again. 
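 * Stashing sb_cc in held_length means the wait logic above only considers
 * us runnable again once the socket buffer holds more data than we have
 * already seen here.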
5774 */ 5775 held_length = so->so_rcv.sb_cc; 5776 control->held_length = so->so_rcv.sb_cc; 5777 goto restart; 5778 } 5779 /* Clear the held length since there is something to read */ 5780 control->held_length = 0; 5781 found_one: 5782 /* 5783 * If we reach here, control has a some data for us to read off. 5784 * Note that stcb COULD be NULL. 5785 */ 5786 if (hold_rlock == 0) { 5787 hold_rlock = 1; 5788 SCTP_INP_READ_LOCK(inp); 5789 } 5790 control->some_taken++; 5791 stcb = control->stcb; 5792 if (stcb) { 5793 if ((control->do_not_ref_stcb == 0) && 5794 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5795 if (freecnt_applied == 0) 5796 stcb = NULL; 5797 } else if (control->do_not_ref_stcb == 0) { 5798 /* you can't free it on me please */ 5799 /* 5800 * The lock on the socket buffer protects us so the 5801 * free code will stop. But since we used the 5802 * socketbuf lock and the sender uses the tcb_lock 5803 * to increment, we need to use the atomic add to 5804 * the refcnt 5805 */ 5806 if (freecnt_applied) { 5807 #ifdef INVARIANTS 5808 panic("refcnt already incremented"); 5809 #else 5810 SCTP_PRINTF("refcnt already incremented?\n"); 5811 #endif 5812 } else { 5813 atomic_add_int(&stcb->asoc.refcnt, 1); 5814 freecnt_applied = 1; 5815 } 5816 /* 5817 * Setup to remember how much we have not yet told 5818 * the peer our rwnd has opened up. Note we grab the 5819 * value from the tcb from last time. Note too that 5820 * sack sending clears this when a sack is sent, 5821 * which is fine. Once we hit the rwnd_req, we then 5822 * will go to the sctp_user_rcvd() that will not 5823 * lock until it KNOWs it MUST send a WUP-SACK. 5824 */ 5825 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5826 stcb->freed_by_sorcv_sincelast = 0; 5827 } 5828 } 5829 if (stcb && 5830 ((control->spec_flags & M_NOTIFICATION) == 0) && 5831 control->do_not_ref_stcb == 0) { 5832 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5833 } 5834 5835 /* First lets get off the sinfo and sockaddr info */ 5836 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5837 sinfo->sinfo_stream = control->sinfo_stream; 5838 sinfo->sinfo_ssn = (uint16_t)control->mid; 5839 sinfo->sinfo_flags = control->sinfo_flags; 5840 sinfo->sinfo_ppid = control->sinfo_ppid; 5841 sinfo->sinfo_context = control->sinfo_context; 5842 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5843 sinfo->sinfo_tsn = control->sinfo_tsn; 5844 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5845 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5846 nxt = TAILQ_NEXT(control, next); 5847 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5848 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5849 struct sctp_extrcvinfo *s_extra; 5850 5851 s_extra = (struct sctp_extrcvinfo *)sinfo; 5852 if ((nxt) && 5853 (nxt->length)) { 5854 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5855 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5856 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5857 } 5858 if (nxt->spec_flags & M_NOTIFICATION) { 5859 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5860 } 5861 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5862 s_extra->serinfo_next_length = nxt->length; 5863 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5864 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5865 if (nxt->tail_mbuf != NULL) { 5866 if (nxt->end_added) { 5867 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5868 } 5869 } 5870 } else { 5871 /* 5872 * we explicitly 0 this, since the memcpy 5873 * got some other things 
beyond the older 5874 * sinfo_ that is on the control's structure 5875 * :-D 5876 */ 5877 nxt = NULL; 5878 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5879 s_extra->serinfo_next_aid = 0; 5880 s_extra->serinfo_next_length = 0; 5881 s_extra->serinfo_next_ppid = 0; 5882 s_extra->serinfo_next_stream = 0; 5883 } 5884 } 5885 /* 5886 * update off the real current cum-ack, if we have an stcb. 5887 */ 5888 if ((control->do_not_ref_stcb == 0) && stcb) 5889 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5890 /* 5891 * mask off the high bits, we keep the actual chunk bits in 5892 * there. 5893 */ 5894 sinfo->sinfo_flags &= 0x00ff; 5895 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5896 sinfo->sinfo_flags |= SCTP_UNORDERED; 5897 } 5898 } 5899 #ifdef SCTP_ASOCLOG_OF_TSNS 5900 { 5901 int index, newindex; 5902 struct sctp_pcbtsn_rlog *entry; 5903 5904 do { 5905 index = inp->readlog_index; 5906 newindex = index + 1; 5907 if (newindex >= SCTP_READ_LOG_SIZE) { 5908 newindex = 0; 5909 } 5910 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5911 entry = &inp->readlog[index]; 5912 entry->vtag = control->sinfo_assoc_id; 5913 entry->strm = control->sinfo_stream; 5914 entry->seq = (uint16_t)control->mid; 5915 entry->sz = control->length; 5916 entry->flgs = control->sinfo_flags; 5917 } 5918 #endif 5919 if ((fromlen > 0) && (from != NULL)) { 5920 union sctp_sockstore store; 5921 size_t len; 5922 5923 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5924 #ifdef INET6 5925 case AF_INET6: 5926 len = sizeof(struct sockaddr_in6); 5927 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5928 store.sin6.sin6_port = control->port_from; 5929 break; 5930 #endif 5931 #ifdef INET 5932 case AF_INET: 5933 #ifdef INET6 5934 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5935 len = sizeof(struct sockaddr_in6); 5936 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5937 &store.sin6); 5938 store.sin6.sin6_port = control->port_from; 5939 } else { 5940 len = sizeof(struct sockaddr_in); 5941 store.sin = control->whoFrom->ro._l_addr.sin; 5942 store.sin.sin_port = control->port_from; 5943 } 5944 #else 5945 len = sizeof(struct sockaddr_in); 5946 store.sin = control->whoFrom->ro._l_addr.sin; 5947 store.sin.sin_port = control->port_from; 5948 #endif 5949 break; 5950 #endif 5951 default: 5952 len = 0; 5953 break; 5954 } 5955 memcpy(from, &store, min((size_t)fromlen, len)); 5956 #ifdef INET6 5957 { 5958 struct sockaddr_in6 lsa6, *from6; 5959 5960 from6 = (struct sockaddr_in6 *)from; 5961 sctp_recover_scope_mac(from6, (&lsa6)); 5962 } 5963 #endif 5964 } 5965 if (hold_rlock) { 5966 SCTP_INP_READ_UNLOCK(inp); 5967 hold_rlock = 0; 5968 } 5969 if (hold_sblock) { 5970 SOCKBUF_UNLOCK(&so->so_rcv); 5971 hold_sblock = 0; 5972 } 5973 /* now copy out what data we can */ 5974 if (mp == NULL) { 5975 /* copy out each mbuf in the chain up to length */ 5976 get_more_data: 5977 m = control->data; 5978 while (m) { 5979 /* Move out all we can */ 5980 cp_len = uio->uio_resid; 5981 my_len = SCTP_BUF_LEN(m); 5982 if (cp_len > my_len) { 5983 /* not enough in this buf */ 5984 cp_len = my_len; 5985 } 5986 if (hold_rlock) { 5987 SCTP_INP_READ_UNLOCK(inp); 5988 hold_rlock = 0; 5989 } 5990 if (cp_len > 0) 5991 error = uiomove(mtod(m, char *), (int)cp_len, uio); 5992 /* re-read */ 5993 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5994 goto release; 5995 } 5996 5997 if ((control->do_not_ref_stcb == 0) && stcb && 5998 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5999 no_rcv_needed = 1; 6000 } 6001 if 
(error) { 6002 /* error we are out of here */ 6003 goto release; 6004 } 6005 SCTP_INP_READ_LOCK(inp); 6006 hold_rlock = 1; 6007 if (cp_len == SCTP_BUF_LEN(m)) { 6008 if ((SCTP_BUF_NEXT(m) == NULL) && 6009 (control->end_added)) { 6010 out_flags |= MSG_EOR; 6011 if ((control->do_not_ref_stcb == 0) && 6012 (control->stcb != NULL) && 6013 ((control->spec_flags & M_NOTIFICATION) == 0)) 6014 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6015 } 6016 if (control->spec_flags & M_NOTIFICATION) { 6017 out_flags |= MSG_NOTIFICATION; 6018 } 6019 /* we ate up the mbuf */ 6020 if (in_flags & MSG_PEEK) { 6021 /* just looking */ 6022 m = SCTP_BUF_NEXT(m); 6023 copied_so_far += cp_len; 6024 } else { 6025 /* dispose of the mbuf */ 6026 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6027 sctp_sblog(&so->so_rcv, 6028 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6029 } 6030 sctp_sbfree(control, stcb, &so->so_rcv, m); 6031 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6032 sctp_sblog(&so->so_rcv, 6033 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6034 } 6035 copied_so_far += cp_len; 6036 freed_so_far += (uint32_t)cp_len; 6037 freed_so_far += MSIZE; 6038 atomic_subtract_int(&control->length, cp_len); 6039 control->data = sctp_m_free(m); 6040 m = control->data; 6041 /* 6042 * been through it all, must hold sb 6043 * lock ok to null tail 6044 */ 6045 if (control->data == NULL) { 6046 #ifdef INVARIANTS 6047 if ((control->end_added == 0) || 6048 (TAILQ_NEXT(control, next) == NULL)) { 6049 /* 6050 * If the end is not 6051 * added, OR the 6052 * next is NOT null 6053 * we MUST have the 6054 * lock. 6055 */ 6056 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6057 panic("Hmm we don't own the lock?"); 6058 } 6059 } 6060 #endif 6061 control->tail_mbuf = NULL; 6062 #ifdef INVARIANTS 6063 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6064 panic("end_added, nothing left and no MSG_EOR"); 6065 } 6066 #endif 6067 } 6068 } 6069 } else { 6070 /* Do we need to trim the mbuf? */ 6071 if (control->spec_flags & M_NOTIFICATION) { 6072 out_flags |= MSG_NOTIFICATION; 6073 } 6074 if ((in_flags & MSG_PEEK) == 0) { 6075 SCTP_BUF_RESV_UF(m, cp_len); 6076 SCTP_BUF_LEN(m) -= (int)cp_len; 6077 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6078 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6079 } 6080 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 6081 if ((control->do_not_ref_stcb == 0) && 6082 stcb) { 6083 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 6084 } 6085 copied_so_far += cp_len; 6086 freed_so_far += (uint32_t)cp_len; 6087 freed_so_far += MSIZE; 6088 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6089 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6090 SCTP_LOG_SBRESULT, 0); 6091 } 6092 atomic_subtract_int(&control->length, cp_len); 6093 } else { 6094 copied_so_far += cp_len; 6095 } 6096 } 6097 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6098 break; 6099 } 6100 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6101 (control->do_not_ref_stcb == 0) && 6102 (freed_so_far >= rwnd_req)) { 6103 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6104 } 6105 } /* end while(m) */ 6106 /* 6107 * At this point we have looked at it all and we either have 6108 * a MSG_EOR/or read all the user wants... <OR> 6109 * control->length == 0. 
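 * If MSG_EOR is set and we are not peeking, a fully consumed control is
 * torn down below; otherwise, for an incomplete partial delivery, we may
 * end up blocking in wait_some_more.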
6110 */ 6111 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6112 /* we are done with this control */ 6113 if (control->length == 0) { 6114 if (control->data) { 6115 #ifdef INVARIANTS 6116 panic("control->data not null at read eor?"); 6117 #else 6118 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n"); 6119 sctp_m_freem(control->data); 6120 control->data = NULL; 6121 #endif 6122 } 6123 done_with_control: 6124 if (hold_rlock == 0) { 6125 SCTP_INP_READ_LOCK(inp); 6126 hold_rlock = 1; 6127 } 6128 TAILQ_REMOVE(&inp->read_queue, control, next); 6129 /* Add back any hiddend data */ 6130 if (control->held_length) { 6131 held_length = 0; 6132 control->held_length = 0; 6133 wakeup_read_socket = 1; 6134 } 6135 if (control->aux_data) { 6136 sctp_m_free(control->aux_data); 6137 control->aux_data = NULL; 6138 } 6139 no_rcv_needed = control->do_not_ref_stcb; 6140 sctp_free_remote_addr(control->whoFrom); 6141 control->data = NULL; 6142 #ifdef INVARIANTS 6143 if (control->on_strm_q) { 6144 panic("About to free ctl:%p so:%p and its in %d", 6145 control, so, control->on_strm_q); 6146 } 6147 #endif 6148 sctp_free_a_readq(stcb, control); 6149 control = NULL; 6150 if ((freed_so_far >= rwnd_req) && 6151 (no_rcv_needed == 0)) 6152 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6153 6154 } else { 6155 /* 6156 * The user did not read all of this 6157 * message, turn off the returned MSG_EOR 6158 * since we are leaving more behind on the 6159 * control to read. 6160 */ 6161 #ifdef INVARIANTS 6162 if (control->end_added && 6163 (control->data == NULL) && 6164 (control->tail_mbuf == NULL)) { 6165 panic("Gak, control->length is corrupt?"); 6166 } 6167 #endif 6168 no_rcv_needed = control->do_not_ref_stcb; 6169 out_flags &= ~MSG_EOR; 6170 } 6171 } 6172 if (out_flags & MSG_EOR) { 6173 goto release; 6174 } 6175 if ((uio->uio_resid == 0) || 6176 ((in_eeor_mode) && 6177 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6178 goto release; 6179 } 6180 /* 6181 * If I hit here the receiver wants more and this message is 6182 * NOT done (pd-api). So two questions. Can we block? if not 6183 * we are done. Did the user NOT set MSG_WAITALL? 6184 */ 6185 if (block_allowed == 0) { 6186 goto release; 6187 } 6188 /* 6189 * We need to wait for more data a few things: - We don't 6190 * sbunlock() so we don't get someone else reading. - We 6191 * must be sure to account for the case where what is added 6192 * is NOT to our control when we wakeup. 6193 */ 6194 6195 /* 6196 * Do we need to tell the transport a rwnd update might be 6197 * needed before we go to sleep? 
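 * Only if we are not peeking, still hold a usable stcb reference
 * (do_not_ref_stcb == 0, no_rcv_needed == 0) and have freed at least
 * rwnd_req bytes since the last report.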
6198 */ 6199 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6200 ((freed_so_far >= rwnd_req) && 6201 (control->do_not_ref_stcb == 0) && 6202 (no_rcv_needed == 0))) { 6203 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6204 } 6205 wait_some_more: 6206 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6207 goto release; 6208 } 6209 6210 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6211 goto release; 6212 6213 if (hold_rlock == 1) { 6214 SCTP_INP_READ_UNLOCK(inp); 6215 hold_rlock = 0; 6216 } 6217 if (hold_sblock == 0) { 6218 SOCKBUF_LOCK(&so->so_rcv); 6219 hold_sblock = 1; 6220 } 6221 if ((copied_so_far) && (control->length == 0) && 6222 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6223 goto release; 6224 } 6225 if (so->so_rcv.sb_cc <= control->held_length) { 6226 error = sbwait(&so->so_rcv); 6227 if (error) { 6228 goto release; 6229 } 6230 control->held_length = 0; 6231 } 6232 if (hold_sblock) { 6233 SOCKBUF_UNLOCK(&so->so_rcv); 6234 hold_sblock = 0; 6235 } 6236 if (control->length == 0) { 6237 /* still nothing here */ 6238 if (control->end_added == 1) { 6239 /* he aborted, or is done i.e.did a shutdown */ 6240 out_flags |= MSG_EOR; 6241 if (control->pdapi_aborted) { 6242 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6243 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6244 6245 out_flags |= MSG_TRUNC; 6246 } else { 6247 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6248 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6249 } 6250 goto done_with_control; 6251 } 6252 if (so->so_rcv.sb_cc > held_length) { 6253 control->held_length = so->so_rcv.sb_cc; 6254 held_length = 0; 6255 } 6256 goto wait_some_more; 6257 } else if (control->data == NULL) { 6258 /* 6259 * we must re-sync since data is probably being 6260 * added 6261 */ 6262 SCTP_INP_READ_LOCK(inp); 6263 if ((control->length > 0) && (control->data == NULL)) { 6264 /* 6265 * big trouble.. we have the lock and its 6266 * corrupt? 6267 */ 6268 #ifdef INVARIANTS 6269 panic("Impossible data==NULL length !=0"); 6270 #endif 6271 out_flags |= MSG_EOR; 6272 out_flags |= MSG_TRUNC; 6273 control->length = 0; 6274 SCTP_INP_READ_UNLOCK(inp); 6275 goto done_with_control; 6276 } 6277 SCTP_INP_READ_UNLOCK(inp); 6278 /* We will fall around to get more data */ 6279 } 6280 goto get_more_data; 6281 } else { 6282 /*- 6283 * Give caller back the mbuf chain, 6284 * store in uio_resid the length 6285 */ 6286 wakeup_read_socket = 0; 6287 if ((control->end_added == 0) || 6288 (TAILQ_NEXT(control, next) == NULL)) { 6289 /* Need to get rlock */ 6290 if (hold_rlock == 0) { 6291 SCTP_INP_READ_LOCK(inp); 6292 hold_rlock = 1; 6293 } 6294 } 6295 if (control->end_added) { 6296 out_flags |= MSG_EOR; 6297 if ((control->do_not_ref_stcb == 0) && 6298 (control->stcb != NULL) && 6299 ((control->spec_flags & M_NOTIFICATION) == 0)) 6300 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6301 } 6302 if (control->spec_flags & M_NOTIFICATION) { 6303 out_flags |= MSG_NOTIFICATION; 6304 } 6305 uio->uio_resid = control->length; 6306 *mp = control->data; 6307 m = control->data; 6308 while (m) { 6309 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6310 sctp_sblog(&so->so_rcv, 6311 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6312 } 6313 sctp_sbfree(control, stcb, &so->so_rcv, m); 6314 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6315 freed_so_far += MSIZE; 6316 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6317 sctp_sblog(&so->so_rcv, 6318 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6319 } 6320 m = SCTP_BUF_NEXT(m); 6321 } 6322 control->data = control->tail_mbuf = NULL; 6323 control->length = 0; 6324 if (out_flags & MSG_EOR) { 6325 /* Done with this control */ 6326 goto done_with_control; 6327 } 6328 } 6329 release: 6330 if (hold_rlock == 1) { 6331 SCTP_INP_READ_UNLOCK(inp); 6332 hold_rlock = 0; 6333 } 6334 if (hold_sblock == 1) { 6335 SOCKBUF_UNLOCK(&so->so_rcv); 6336 hold_sblock = 0; 6337 } 6338 6339 sbunlock(&so->so_rcv); 6340 sockbuf_lock = 0; 6341 6342 release_unlocked: 6343 if (hold_sblock) { 6344 SOCKBUF_UNLOCK(&so->so_rcv); 6345 hold_sblock = 0; 6346 } 6347 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6348 if ((freed_so_far >= rwnd_req) && 6349 (control && (control->do_not_ref_stcb == 0)) && 6350 (no_rcv_needed == 0)) 6351 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6352 } 6353 out: 6354 if (msg_flags) { 6355 *msg_flags = out_flags; 6356 } 6357 if (((out_flags & MSG_EOR) == 0) && 6358 ((in_flags & MSG_PEEK) == 0) && 6359 (sinfo) && 6360 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6361 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6362 struct sctp_extrcvinfo *s_extra; 6363 6364 s_extra = (struct sctp_extrcvinfo *)sinfo; 6365 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6366 } 6367 if (hold_rlock == 1) { 6368 SCTP_INP_READ_UNLOCK(inp); 6369 } 6370 if (hold_sblock) { 6371 SOCKBUF_UNLOCK(&so->so_rcv); 6372 } 6373 if (sockbuf_lock) { 6374 sbunlock(&so->so_rcv); 6375 } 6376 6377 if (freecnt_applied) { 6378 /* 6379 * The lock on the socket buffer protects us so the free 6380 * code will stop. But since we used the socketbuf lock and 6381 * the sender uses the tcb_lock to increment, we need to use 6382 * the atomic add to the refcnt. 6383 */ 6384 if (stcb == NULL) { 6385 #ifdef INVARIANTS 6386 panic("stcb for refcnt has gone NULL?"); 6387 goto stage_left; 6388 #else 6389 goto stage_left; 6390 #endif 6391 } 6392 /* Save the value back for next time */ 6393 stcb->freed_by_sorcv_sincelast = freed_so_far; 6394 atomic_add_int(&stcb->asoc.refcnt, -1); 6395 } 6396 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6397 if (stcb) { 6398 sctp_misc_ints(SCTP_SORECV_DONE, 6399 freed_so_far, 6400 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6401 stcb->asoc.my_rwnd, 6402 so->so_rcv.sb_cc); 6403 } else { 6404 sctp_misc_ints(SCTP_SORECV_DONE, 6405 freed_so_far, 6406 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6407 0, 6408 so->so_rcv.sb_cc); 6409 } 6410 } 6411 stage_left: 6412 if (wakeup_read_socket) { 6413 sctp_sorwakeup(inp, so); 6414 } 6415 return (error); 6416 } 6417 6418 #ifdef SCTP_MBUF_LOGGING 6419 struct mbuf * 6420 sctp_m_free(struct mbuf *m) 6421 { 6422 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6423 sctp_log_mb(m, SCTP_MBUF_IFREE); 6424 } 6425 return (m_free(m)); 6426 } 6427 6428 void 6429 sctp_m_freem(struct mbuf *mb) 6430 { 6431 while (mb != NULL) 6432 mb = sctp_m_free(mb); 6433 } 6434 6435 #endif 6436 6437 int 6438 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6439 { 6440 /* 6441 * Given a local address. For all associations that holds the 6442 * address, request a peer-set-primary. 
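 * The request is not performed inline: a work item is queued on the
 * address work queue and the ADDR_WQ timer is started to process it.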
6443 */ 6444 struct sctp_ifa *ifa; 6445 struct sctp_laddr *wi; 6446 6447 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6448 if (ifa == NULL) { 6449 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6450 return (EADDRNOTAVAIL); 6451 } 6452 /* 6453 * Now that we have the ifa we must awaken the iterator with this 6454 * message. 6455 */ 6456 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6457 if (wi == NULL) { 6458 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6459 return (ENOMEM); 6460 } 6461 /* Now incr the count and int wi structure */ 6462 SCTP_INCR_LADDR_COUNT(); 6463 memset(wi, 0, sizeof(*wi)); 6464 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6465 wi->ifa = ifa; 6466 wi->action = SCTP_SET_PRIM_ADDR; 6467 atomic_add_int(&ifa->refcount, 1); 6468 6469 /* Now add it to the work queue */ 6470 SCTP_WQ_ADDR_LOCK(); 6471 /* 6472 * Should this really be a tailq? As it is we will process the 6473 * newest first :-0 6474 */ 6475 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6476 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6477 (struct sctp_inpcb *)NULL, 6478 (struct sctp_tcb *)NULL, 6479 (struct sctp_nets *)NULL); 6480 SCTP_WQ_ADDR_UNLOCK(); 6481 return (0); 6482 } 6483 6484 int 6485 sctp_soreceive(struct socket *so, 6486 struct sockaddr **psa, 6487 struct uio *uio, 6488 struct mbuf **mp0, 6489 struct mbuf **controlp, 6490 int *flagsp) 6491 { 6492 int error, fromlen; 6493 uint8_t sockbuf[256]; 6494 struct sockaddr *from; 6495 struct sctp_extrcvinfo sinfo; 6496 int filling_sinfo = 1; 6497 int flags; 6498 struct sctp_inpcb *inp; 6499 6500 inp = (struct sctp_inpcb *)so->so_pcb; 6501 /* pickup the assoc we are reading from */ 6502 if (inp == NULL) { 6503 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6504 return (EINVAL); 6505 } 6506 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6507 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6508 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6509 (controlp == NULL)) { 6510 /* user does not want the sndrcv ctl */ 6511 filling_sinfo = 0; 6512 } 6513 if (psa) { 6514 from = (struct sockaddr *)sockbuf; 6515 fromlen = sizeof(sockbuf); 6516 from->sa_len = 0; 6517 } else { 6518 from = NULL; 6519 fromlen = 0; 6520 } 6521 6522 if (filling_sinfo) { 6523 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6524 } 6525 if (flagsp != NULL) { 6526 flags = *flagsp; 6527 } else { 6528 flags = 0; 6529 } 6530 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6531 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6532 if (flagsp != NULL) { 6533 *flagsp = flags; 6534 } 6535 if (controlp != NULL) { 6536 /* copy back the sinfo in a CMSG format */ 6537 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6538 *controlp = sctp_build_ctl_nchunk(inp, 6539 (struct sctp_sndrcvinfo *)&sinfo); 6540 } else { 6541 *controlp = NULL; 6542 } 6543 } 6544 if (psa) { 6545 /* copy back the address info */ 6546 if (from && from->sa_len) { 6547 *psa = sodupsockaddr(from, M_NOWAIT); 6548 } else { 6549 *psa = NULL; 6550 } 6551 } 6552 return (error); 6553 } 6554 6555 int 6556 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6557 int totaddr, int *error) 6558 { 6559 int added = 0; 6560 int i; 6561 struct sctp_inpcb *inp; 6562 struct sockaddr *sa; 6563 size_t incr = 0; 6564 #ifdef INET 6565 struct sockaddr_in *sin; 6566 #endif 6567 #ifdef INET6 6568 struct sockaddr_in6 *sin6; 6569 #endif 6570 6571 sa = addr; 6572 inp = 
stcb->sctp_ep; 6573 *error = 0; 6574 for (i = 0; i < totaddr; i++) { 6575 switch (sa->sa_family) { 6576 #ifdef INET 6577 case AF_INET: 6578 incr = sizeof(struct sockaddr_in); 6579 sin = (struct sockaddr_in *)sa; 6580 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6581 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6582 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6583 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6584 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6585 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6586 *error = EINVAL; 6587 goto out_now; 6588 } 6589 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6590 SCTP_DONOT_SETSCOPE, 6591 SCTP_ADDR_IS_CONFIRMED)) { 6592 /* assoc gone no un-lock */ 6593 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6594 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6595 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6596 *error = ENOBUFS; 6597 goto out_now; 6598 } 6599 added++; 6600 break; 6601 #endif 6602 #ifdef INET6 6603 case AF_INET6: 6604 incr = sizeof(struct sockaddr_in6); 6605 sin6 = (struct sockaddr_in6 *)sa; 6606 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6607 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6608 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6609 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6610 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6611 *error = EINVAL; 6612 goto out_now; 6613 } 6614 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6615 SCTP_DONOT_SETSCOPE, 6616 SCTP_ADDR_IS_CONFIRMED)) { 6617 /* assoc gone no un-lock */ 6618 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6619 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6620 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6621 *error = ENOBUFS; 6622 goto out_now; 6623 } 6624 added++; 6625 break; 6626 #endif 6627 default: 6628 break; 6629 } 6630 sa = (struct sockaddr *)((caddr_t)sa + incr); 6631 } 6632 out_now: 6633 return (added); 6634 } 6635 6636 int 6637 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6638 unsigned int totaddr, 6639 unsigned int *num_v4, unsigned int *num_v6, 6640 unsigned int limit) 6641 { 6642 struct sockaddr *sa; 6643 struct sctp_tcb *stcb; 6644 unsigned int incr, at, i; 6645 6646 at = 0; 6647 sa = addr; 6648 *num_v6 = *num_v4 = 0; 6649 /* account and validate addresses */ 6650 if (totaddr == 0) { 6651 return (EINVAL); 6652 } 6653 for (i = 0; i < totaddr; i++) { 6654 if (at + sizeof(struct sockaddr) > limit) { 6655 return (EINVAL); 6656 } 6657 switch (sa->sa_family) { 6658 #ifdef INET 6659 case AF_INET: 6660 incr = (unsigned int)sizeof(struct sockaddr_in); 6661 if (sa->sa_len != incr) { 6662 return (EINVAL); 6663 } 6664 (*num_v4) += 1; 6665 break; 6666 #endif 6667 #ifdef INET6 6668 case AF_INET6: 6669 { 6670 struct sockaddr_in6 *sin6; 6671 6672 sin6 = (struct sockaddr_in6 *)sa; 6673 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6674 /* Must be non-mapped for connectx */ 6675 return (EINVAL); 6676 } 6677 incr = (unsigned int)sizeof(struct sockaddr_in6); 6678 if (sa->sa_len != incr) { 6679 return (EINVAL); 6680 } 6681 (*num_v6) += 1; 6682 break; 6683 } 6684 #endif 6685 default: 6686 return (EINVAL); 6687 } 6688 if ((at + incr) > limit) { 6689 return (EINVAL); 6690 } 6691 SCTP_INP_INCR_REF(inp); 6692 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6693 if (stcb != NULL) { 6694 SCTP_TCB_UNLOCK(stcb); 6695 return (EALREADY); 6696 } else { 6697 SCTP_INP_DECR_REF(inp); 6698 } 6699 at += incr; 6700 sa = (struct sockaddr *)((caddr_t)sa + 
incr); 6701 } 6702 return (0); 6703 } 6704 6705 /* 6706 * sctp_bindx(ADD) for one address. 6707 * assumes all arguments are valid/checked by caller. 6708 */ 6709 void 6710 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6711 struct sockaddr *sa, uint32_t vrf_id, int *error, 6712 void *p) 6713 { 6714 #if defined(INET) && defined(INET6) 6715 struct sockaddr_in sin; 6716 #endif 6717 #ifdef INET6 6718 struct sockaddr_in6 *sin6; 6719 #endif 6720 #ifdef INET 6721 struct sockaddr_in *sinp; 6722 #endif 6723 struct sockaddr *addr_to_use; 6724 struct sctp_inpcb *lep; 6725 uint16_t port; 6726 6727 /* see if we're bound all already! */ 6728 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6729 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6730 *error = EINVAL; 6731 return; 6732 } 6733 switch (sa->sa_family) { 6734 #ifdef INET6 6735 case AF_INET6: 6736 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6737 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6738 *error = EINVAL; 6739 return; 6740 } 6741 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6742 /* can only bind v6 on PF_INET6 sockets */ 6743 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6744 *error = EINVAL; 6745 return; 6746 } 6747 sin6 = (struct sockaddr_in6 *)sa; 6748 port = sin6->sin6_port; 6749 #ifdef INET 6750 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6751 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6752 SCTP_IPV6_V6ONLY(inp)) { 6753 /* can't bind v4-mapped on PF_INET sockets */ 6754 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6755 *error = EINVAL; 6756 return; 6757 } 6758 in6_sin6_2_sin(&sin, sin6); 6759 addr_to_use = (struct sockaddr *)&sin; 6760 } else { 6761 addr_to_use = sa; 6762 } 6763 #else 6764 addr_to_use = sa; 6765 #endif 6766 break; 6767 #endif 6768 #ifdef INET 6769 case AF_INET: 6770 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6771 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6772 *error = EINVAL; 6773 return; 6774 } 6775 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6776 SCTP_IPV6_V6ONLY(inp)) { 6777 /* can't bind v4 on PF_INET sockets */ 6778 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6779 *error = EINVAL; 6780 return; 6781 } 6782 sinp = (struct sockaddr_in *)sa; 6783 port = sinp->sin_port; 6784 addr_to_use = sa; 6785 break; 6786 #endif 6787 default: 6788 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6789 *error = EINVAL; 6790 return; 6791 } 6792 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6793 if (p == NULL) { 6794 /* Can't get proc for Net/Open BSD */ 6795 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6796 *error = EINVAL; 6797 return; 6798 } 6799 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6800 return; 6801 } 6802 /* Validate the incoming port. */ 6803 if ((port != 0) && (port != inp->sctp_lport)) { 6804 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6805 *error = EINVAL; 6806 return; 6807 } 6808 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6809 if (lep == NULL) { 6810 /* add the address */ 6811 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6812 SCTP_ADD_IP_ADDRESS, vrf_id); 6813 } else { 6814 if (lep != inp) { 6815 *error = EADDRINUSE; 6816 } 6817 SCTP_INP_DECR_REF(lep); 6818 } 6819 } 6820 6821 /* 6822 * sctp_bindx(DELETE) for one address. 6823 * assumes all arguments are valid/checked by caller. 
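 * Only address family and socket-type sanity checks are done here; the
 * removal itself is handed to sctp_addr_mgmt_ep_sa() with
 * SCTP_DEL_IP_ADDRESS.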
6824 */ 6825 void 6826 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6827 struct sockaddr *sa, uint32_t vrf_id, int *error) 6828 { 6829 struct sockaddr *addr_to_use; 6830 #if defined(INET) && defined(INET6) 6831 struct sockaddr_in6 *sin6; 6832 struct sockaddr_in sin; 6833 #endif 6834 6835 /* see if we're bound all already! */ 6836 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6837 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6838 *error = EINVAL; 6839 return; 6840 } 6841 switch (sa->sa_family) { 6842 #ifdef INET6 6843 case AF_INET6: 6844 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6845 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6846 *error = EINVAL; 6847 return; 6848 } 6849 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6850 /* can only bind v6 on PF_INET6 sockets */ 6851 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6852 *error = EINVAL; 6853 return; 6854 } 6855 #ifdef INET 6856 sin6 = (struct sockaddr_in6 *)sa; 6857 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6858 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6859 SCTP_IPV6_V6ONLY(inp)) { 6860 /* can't bind mapped-v4 on PF_INET sockets */ 6861 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6862 *error = EINVAL; 6863 return; 6864 } 6865 in6_sin6_2_sin(&sin, sin6); 6866 addr_to_use = (struct sockaddr *)&sin; 6867 } else { 6868 addr_to_use = sa; 6869 } 6870 #else 6871 addr_to_use = sa; 6872 #endif 6873 break; 6874 #endif 6875 #ifdef INET 6876 case AF_INET: 6877 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6878 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6879 *error = EINVAL; 6880 return; 6881 } 6882 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6883 SCTP_IPV6_V6ONLY(inp)) { 6884 /* can't bind v4 on PF_INET sockets */ 6885 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6886 *error = EINVAL; 6887 return; 6888 } 6889 addr_to_use = sa; 6890 break; 6891 #endif 6892 default: 6893 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6894 *error = EINVAL; 6895 return; 6896 } 6897 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6898 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6899 vrf_id); 6900 } 6901 6902 /* 6903 * returns the valid local address count for an assoc, taking into account 6904 * all scoping rules 6905 */ 6906 int 6907 sctp_local_addr_count(struct sctp_tcb *stcb) 6908 { 6909 int loopback_scope; 6910 #if defined(INET) 6911 int ipv4_local_scope, ipv4_addr_legal; 6912 #endif 6913 #if defined(INET6) 6914 int local_scope, site_scope, ipv6_addr_legal; 6915 #endif 6916 struct sctp_vrf *vrf; 6917 struct sctp_ifn *sctp_ifn; 6918 struct sctp_ifa *sctp_ifa; 6919 int count = 0; 6920 6921 /* Turn on all the appropriate scopes */ 6922 loopback_scope = stcb->asoc.scope.loopback_scope; 6923 #if defined(INET) 6924 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6925 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6926 #endif 6927 #if defined(INET6) 6928 local_scope = stcb->asoc.scope.local_scope; 6929 site_scope = stcb->asoc.scope.site_scope; 6930 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6931 #endif 6932 SCTP_IPI_ADDR_RLOCK(); 6933 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6934 if (vrf == NULL) { 6935 /* no vrf, no addresses */ 6936 SCTP_IPI_ADDR_RUNLOCK(); 6937 return (0); 6938 } 6939 6940 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6941 /* 6942 * bound all case: go through all ifns on the vrf 6943 */ 6944 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6945 if ((loopback_scope == 0) && 6946 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6947 continue; 6948 } 6949 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6950 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6951 continue; 6952 switch (sctp_ifa->address.sa.sa_family) { 6953 #ifdef INET 6954 case AF_INET: 6955 if (ipv4_addr_legal) { 6956 struct sockaddr_in *sin; 6957 6958 sin = &sctp_ifa->address.sin; 6959 if (sin->sin_addr.s_addr == 0) { 6960 /* 6961 * skip unspecified 6962 * addrs 6963 */ 6964 continue; 6965 } 6966 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 6967 &sin->sin_addr) != 0) { 6968 continue; 6969 } 6970 if ((ipv4_local_scope == 0) && 6971 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 6972 continue; 6973 } 6974 /* count this one */ 6975 count++; 6976 } else { 6977 continue; 6978 } 6979 break; 6980 #endif 6981 #ifdef INET6 6982 case AF_INET6: 6983 if (ipv6_addr_legal) { 6984 struct sockaddr_in6 *sin6; 6985 6986 sin6 = &sctp_ifa->address.sin6; 6987 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 6988 continue; 6989 } 6990 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 6991 &sin6->sin6_addr) != 0) { 6992 continue; 6993 } 6994 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 6995 if (local_scope == 0) 6996 continue; 6997 if (sin6->sin6_scope_id == 0) { 6998 if (sa6_recoverscope(sin6) != 0) 6999 /* 7000 * 7001 * bad 7002 * link 7003 * 7004 * local 7005 * 7006 * address 7007 */ 7008 continue; 7009 } 7010 } 7011 if ((site_scope == 0) && 7012 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7013 continue; 7014 } 7015 /* count this one */ 7016 count++; 7017 } 7018 break; 7019 #endif 7020 default: 7021 /* TSNH */ 7022 break; 7023 } 7024 } 7025 } 7026 } else { 7027 /* 7028 * subset bound case 7029 */ 7030 struct sctp_laddr *laddr; 7031 7032 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7033 sctp_nxt_addr) { 7034 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7035 continue; 7036 } 7037 /* count this one */ 7038 count++; 7039 } 7040 } 7041 SCTP_IPI_ADDR_RUNLOCK(); 7042 return (count); 7043 } 7044 7045 #if defined(SCTP_LOCAL_TRACE_BUF) 7046 7047 void 7048 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7049 { 7050 uint32_t saveindex, newindex; 7051 7052 do { 7053 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7054 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7055 newindex = 1; 7056 } else { 7057 newindex = saveindex + 1; 7058 } 7059 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7060 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7061 saveindex = 0; 7062 } 7063 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7064 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7065 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7066 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7067 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7068 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7069 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7070 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7071 } 7072 7073 #endif 7074 static void 7075 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7076 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7077 { 7078 struct ip *iph; 7079 #ifdef INET6 7080 struct ip6_hdr *ip6; 7081 #endif 7082 struct mbuf *sp, *last; 7083 struct udphdr *uhdr; 7084 uint16_t port; 7085 7086 if ((m->m_flags & M_PKTHDR) == 0) { 7087 /* Can't handle one that is not a pkt hdr */ 7088 goto out; 7089 } 7090 /* Pull the src port */ 7091 iph = mtod(m, struct ip *); 7092 uhdr = (struct udphdr *)((caddr_t)iph + off); 7093 port = uhdr->uh_sport; 7094 /* 7095 * Split out the mbuf chain. Leave the IP header in m, place the 7096 * rest in the sp. 7097 */ 7098 sp = m_split(m, off, M_NOWAIT); 7099 if (sp == NULL) { 7100 /* Gak, drop packet, we can't do a split */ 7101 goto out; 7102 } 7103 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7104 /* Gak, packet can't have an SCTP header in it - too small */ 7105 m_freem(sp); 7106 goto out; 7107 } 7108 /* Now pull up the UDP header and SCTP header together */ 7109 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7110 if (sp == NULL) { 7111 /* Gak pullup failed */ 7112 goto out; 7113 } 7114 /* Trim out the UDP header */ 7115 m_adj(sp, sizeof(struct udphdr)); 7116 7117 /* Now reconstruct the mbuf chain */ 7118 for (last = m; last->m_next; last = last->m_next); 7119 last->m_next = sp; 7120 m->m_pkthdr.len += sp->m_pkthdr.len; 7121 /* 7122 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7123 * checksum and it was valid. Since CSUM_DATA_VALID == 7124 * CSUM_SCTP_VALID this would imply that the HW also verified the 7125 * SCTP checksum. Therefore, clear the bit. 
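 * With the flag cleared, the SCTP input path falls back to checking the
 * CRC32c itself.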
7126 */ 7127 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7128 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7129 m->m_pkthdr.len, 7130 if_name(m->m_pkthdr.rcvif), 7131 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7132 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7133 iph = mtod(m, struct ip *); 7134 switch (iph->ip_v) { 7135 #ifdef INET 7136 case IPVERSION: 7137 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7138 sctp_input_with_port(m, off, port); 7139 break; 7140 #endif 7141 #ifdef INET6 7142 case IPV6_VERSION >> 4: 7143 ip6 = mtod(m, struct ip6_hdr *); 7144 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7145 sctp6_input_with_port(&m, &off, port); 7146 break; 7147 #endif 7148 default: 7149 goto out; 7150 break; 7151 } 7152 return; 7153 out: 7154 m_freem(m); 7155 } 7156 7157 #ifdef INET 7158 static void 7159 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7160 { 7161 struct ip *outer_ip, *inner_ip; 7162 struct sctphdr *sh; 7163 struct icmp *icmp; 7164 struct udphdr *udp; 7165 struct sctp_inpcb *inp; 7166 struct sctp_tcb *stcb; 7167 struct sctp_nets *net; 7168 struct sctp_init_chunk *ch; 7169 struct sockaddr_in src, dst; 7170 uint8_t type, code; 7171 7172 inner_ip = (struct ip *)vip; 7173 icmp = (struct icmp *)((caddr_t)inner_ip - 7174 (sizeof(struct icmp) - sizeof(struct ip))); 7175 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7176 if (ntohs(outer_ip->ip_len) < 7177 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7178 return; 7179 } 7180 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7181 sh = (struct sctphdr *)(udp + 1); 7182 memset(&src, 0, sizeof(struct sockaddr_in)); 7183 src.sin_family = AF_INET; 7184 src.sin_len = sizeof(struct sockaddr_in); 7185 src.sin_port = sh->src_port; 7186 src.sin_addr = inner_ip->ip_src; 7187 memset(&dst, 0, sizeof(struct sockaddr_in)); 7188 dst.sin_family = AF_INET; 7189 dst.sin_len = sizeof(struct sockaddr_in); 7190 dst.sin_port = sh->dest_port; 7191 dst.sin_addr = inner_ip->ip_dst; 7192 /* 7193 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7194 * holds our local endpoint address. Thus we reverse the dst and the 7195 * src in the lookup. 7196 */ 7197 inp = NULL; 7198 net = NULL; 7199 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7200 (struct sockaddr *)&src, 7201 &inp, &net, 1, 7202 SCTP_DEFAULT_VRFID); 7203 if ((stcb != NULL) && 7204 (net != NULL) && 7205 (inp != NULL)) { 7206 /* Check the UDP port numbers */ 7207 if ((udp->uh_dport != net->port) || 7208 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7209 SCTP_TCB_UNLOCK(stcb); 7210 return; 7211 } 7212 /* Check the verification tag */ 7213 if (ntohl(sh->v_tag) != 0) { 7214 /* 7215 * This must be the verification tag used for 7216 * sending out packets. We don't consider packets 7217 * reflecting the verification tag. 7218 */ 7219 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7220 SCTP_TCB_UNLOCK(stcb); 7221 return; 7222 } 7223 } else { 7224 if (ntohs(outer_ip->ip_len) >= 7225 sizeof(struct ip) + 7226 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7227 /* 7228 * In this case we can check if we got an 7229 * INIT chunk and if the initiate tag 7230 * matches. 
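 * An INIT is sent with a zero verification tag in the common header, so
 * the association can only be matched via the initiate_tag carried inside
 * the chunk itself.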
7231 */ 7232 ch = (struct sctp_init_chunk *)(sh + 1); 7233 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7234 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7235 SCTP_TCB_UNLOCK(stcb); 7236 return; 7237 } 7238 } else { 7239 SCTP_TCB_UNLOCK(stcb); 7240 return; 7241 } 7242 } 7243 type = icmp->icmp_type; 7244 code = icmp->icmp_code; 7245 if ((type == ICMP_UNREACH) && 7246 (code == ICMP_UNREACH_PORT)) { 7247 code = ICMP_UNREACH_PROTOCOL; 7248 } 7249 sctp_notify(inp, stcb, net, type, code, 7250 ntohs(inner_ip->ip_len), 7251 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7252 } else { 7253 if ((stcb == NULL) && (inp != NULL)) { 7254 /* reduce ref-count */ 7255 SCTP_INP_WLOCK(inp); 7256 SCTP_INP_DECR_REF(inp); 7257 SCTP_INP_WUNLOCK(inp); 7258 } 7259 if (stcb) { 7260 SCTP_TCB_UNLOCK(stcb); 7261 } 7262 } 7263 return; 7264 } 7265 #endif 7266 7267 #ifdef INET6 7268 static void 7269 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7270 { 7271 struct ip6ctlparam *ip6cp; 7272 struct sctp_inpcb *inp; 7273 struct sctp_tcb *stcb; 7274 struct sctp_nets *net; 7275 struct sctphdr sh; 7276 struct udphdr udp; 7277 struct sockaddr_in6 src, dst; 7278 uint8_t type, code; 7279 7280 ip6cp = (struct ip6ctlparam *)d; 7281 /* 7282 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7283 */ 7284 if (ip6cp->ip6c_m == NULL) { 7285 return; 7286 } 7287 /* 7288 * Check if we can safely examine the ports and the verification tag 7289 * of the SCTP common header. 7290 */ 7291 if (ip6cp->ip6c_m->m_pkthdr.len < 7292 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7293 return; 7294 } 7295 /* Copy out the UDP header. */ 7296 memset(&udp, 0, sizeof(struct udphdr)); 7297 m_copydata(ip6cp->ip6c_m, 7298 ip6cp->ip6c_off, 7299 sizeof(struct udphdr), 7300 (caddr_t)&udp); 7301 /* Copy out the port numbers and the verification tag. */ 7302 memset(&sh, 0, sizeof(struct sctphdr)); 7303 m_copydata(ip6cp->ip6c_m, 7304 ip6cp->ip6c_off + sizeof(struct udphdr), 7305 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7306 (caddr_t)&sh); 7307 memset(&src, 0, sizeof(struct sockaddr_in6)); 7308 src.sin6_family = AF_INET6; 7309 src.sin6_len = sizeof(struct sockaddr_in6); 7310 src.sin6_port = sh.src_port; 7311 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7312 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7313 return; 7314 } 7315 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7316 dst.sin6_family = AF_INET6; 7317 dst.sin6_len = sizeof(struct sockaddr_in6); 7318 dst.sin6_port = sh.dest_port; 7319 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7320 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7321 return; 7322 } 7323 inp = NULL; 7324 net = NULL; 7325 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7326 (struct sockaddr *)&src, 7327 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7328 if ((stcb != NULL) && 7329 (net != NULL) && 7330 (inp != NULL)) { 7331 /* Check the UDP port numbers */ 7332 if ((udp.uh_dport != net->port) || 7333 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7334 SCTP_TCB_UNLOCK(stcb); 7335 return; 7336 } 7337 /* Check the verification tag */ 7338 if (ntohl(sh.v_tag) != 0) { 7339 /* 7340 * This must be the verification tag used for 7341 * sending out packets. We don't consider packets 7342 * reflecting the verification tag. 
7343 */ 7344 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7345 SCTP_TCB_UNLOCK(stcb); 7346 return; 7347 } 7348 } else { 7349 if (ip6cp->ip6c_m->m_pkthdr.len >= 7350 ip6cp->ip6c_off + sizeof(struct udphdr) + 7351 sizeof(struct sctphdr) + 7352 sizeof(struct sctp_chunkhdr) + 7353 offsetof(struct sctp_init, a_rwnd)) { 7354 /* 7355 * In this case we can check if we got an 7356 * INIT chunk and if the initiate tag 7357 * matches. 7358 */ 7359 uint32_t initiate_tag; 7360 uint8_t chunk_type; 7361 7362 m_copydata(ip6cp->ip6c_m, 7363 ip6cp->ip6c_off + 7364 sizeof(struct udphdr) + 7365 sizeof(struct sctphdr), 7366 sizeof(uint8_t), 7367 (caddr_t)&chunk_type); 7368 m_copydata(ip6cp->ip6c_m, 7369 ip6cp->ip6c_off + 7370 sizeof(struct udphdr) + 7371 sizeof(struct sctphdr) + 7372 sizeof(struct sctp_chunkhdr), 7373 sizeof(uint32_t), 7374 (caddr_t)&initiate_tag); 7375 if ((chunk_type != SCTP_INITIATION) || 7376 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7377 SCTP_TCB_UNLOCK(stcb); 7378 return; 7379 } 7380 } else { 7381 SCTP_TCB_UNLOCK(stcb); 7382 return; 7383 } 7384 } 7385 type = ip6cp->ip6c_icmp6->icmp6_type; 7386 code = ip6cp->ip6c_icmp6->icmp6_code; 7387 if ((type == ICMP6_DST_UNREACH) && 7388 (code == ICMP6_DST_UNREACH_NOPORT)) { 7389 type = ICMP6_PARAM_PROB; 7390 code = ICMP6_PARAMPROB_NEXTHEADER; 7391 } 7392 sctp6_notify(inp, stcb, net, type, code, 7393 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7394 } else { 7395 if ((stcb == NULL) && (inp != NULL)) { 7396 /* reduce inp's ref-count */ 7397 SCTP_INP_WLOCK(inp); 7398 SCTP_INP_DECR_REF(inp); 7399 SCTP_INP_WUNLOCK(inp); 7400 } 7401 if (stcb) { 7402 SCTP_TCB_UNLOCK(stcb); 7403 } 7404 } 7405 } 7406 #endif 7407 7408 void 7409 sctp_over_udp_stop(void) 7410 { 7411 /* 7412 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7413 * for writting! 7414 */ 7415 #ifdef INET 7416 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7417 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7418 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7419 } 7420 #endif 7421 #ifdef INET6 7422 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7423 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7424 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7425 } 7426 #endif 7427 } 7428 7429 int 7430 sctp_over_udp_start(void) 7431 { 7432 uint16_t port; 7433 int ret; 7434 #ifdef INET 7435 struct sockaddr_in sin; 7436 #endif 7437 #ifdef INET6 7438 struct sockaddr_in6 sin6; 7439 #endif 7440 /* 7441 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7442 * for writting! 7443 */ 7444 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7445 if (ntohs(port) == 0) { 7446 /* Must have a port set */ 7447 return (EINVAL); 7448 } 7449 #ifdef INET 7450 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7451 /* Already running -- must stop first */ 7452 return (EALREADY); 7453 } 7454 #endif 7455 #ifdef INET6 7456 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7457 /* Already running -- must stop first */ 7458 return (EALREADY); 7459 } 7460 #endif 7461 #ifdef INET 7462 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7463 SOCK_DGRAM, IPPROTO_UDP, 7464 curthread->td_ucred, curthread))) { 7465 sctp_over_udp_stop(); 7466 return (ret); 7467 } 7468 /* Call the special UDP hook. */ 7469 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7470 sctp_recv_udp_tunneled_packet, 7471 sctp_recv_icmp_tunneled_packet, 7472 NULL))) { 7473 sctp_over_udp_stop(); 7474 return (ret); 7475 } 7476 /* Ok, we have a socket, bind it to the port. 

int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}

void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}
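
/*
 * sctp_min_mtu() treats a zero argument as "unknown", so callers can
 * combine candidate MTUs from different sources and let unknown ones
 * drop out; sctp_hc_get_mtu() above fits that pattern by returning
 * zero when it cannot produce a value.  A minimal illustrative sketch
 * (the lowercase identifiers are placeholders, not names from this
 * file):
 *
 *	uint32_t mtu;
 *
 *	mtu = sctp_min_mtu(interface_mtu, route_mtu,
 *	    sctp_hc_get_mtu(addr, fibnum));
 *	if (mtu == 0) {
 *		mtu = default_mtu;
 *	}
 */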

void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	      (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
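
/*
 * sctp_set_state() replaces only the bits covered by SCTP_STATE_MASK
 * (and clears SCTP_STATE_SHUTDOWN_PENDING when one of the SHUTDOWN
 * states is entered), while sctp_add_substate() ORs in flag bits
 * outside that mask; both can fire the state__change DTrace probe
 * when KDTRACE_HOOKS is enabled.  A minimal illustrative sketch, not
 * an excerpt from the state machine code: first flag that a graceful
 * shutdown has been requested,
 *
 *	sctp_add_substate(stcb, SCTP_STATE_SHUTDOWN_PENDING);
 *
 * and later, once the SHUTDOWN chunk has been sent, move the base
 * state, which also clears the pending flag:
 *
 *	sctp_set_state(stcb, SCTP_STATE_SHUTDOWN_SENT);
 */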