/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}

void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet,
				    lnet->flight_size, tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code the time to be positive and
 * know that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
	uint64_t temp;
	uint32_t ticks;

	if (hz == 1000) {
		ticks = msecs;
	} else {
		temp = (((uint64_t)msecs * hz) + 999) / 1000;
		if (temp > UINT32_MAX) {
			ticks = UINT32_MAX;
		} else {
			ticks = (uint32_t)temp;
		}
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t msecs;

	if (hz == 1000) {
		msecs = ticks;
	} else {
		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
		if (temp > UINT32_MAX) {
			msecs = UINT32_MAX;
		} else {
			msecs = (uint32_t)temp;
		}
	}
	return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
	uint64_t temp;
	uint32_t ticks;

	temp = (uint64_t)secs * hz;
	if (temp > UINT32_MAX) {
		ticks = UINT32_MAX;
	} else {
		ticks = (uint32_t)temp;
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t secs;

	temp = ((uint64_t)ticks + (hz - 1)) / hz;
	if (temp > UINT32_MAX) {
		secs = UINT32_MAX;
	} else {
		secs = (uint32_t)temp;
	}
	return (secs);
}
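
/*
 * Worked example of the rounding above (hz = 250 is just an example value):
 * sctp_msecs_to_ticks(1) returns (1 * 250 + 999) / 1000 = 1 tick, and
 * sctp_ticks_to_msecs(1) returns (1 * 1000 + 249) / 250 = 4 msecs, so a
 * positive duration never collapses to zero in either direction.
 */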

/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};
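
/*
 * Example: with the table above, sctp_get_prev_mtu(1500) below returns 1492
 * and sctp_get_next_mtu(1500) returns 1536; values at or below 68 are simply
 * rounded down to a multiple of 4 by sctp_get_prev_mtu().
 */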

/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4 smaller
 * or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	val &= 0xfffffffc;
	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}

int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero.
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer.
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&inp->sctp_ep);
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimistic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	/* This will need to be adjusted */
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = inp->sctp_frag_point;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}

static void
sctp_iterator_work(struct sctp_iterator *it)
{
	struct epoch_tracker et;
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

	NET_EPOCH_ENTER(et);
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		NET_EPOCH_EXIT(et);
		return;
	}
select_a_new_ep:
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		    ("%s: stcb %p does not belong to inp %p, but inp %p",
		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}

static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL,	/* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Freeing if we are stopping or put back on the
			 * addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				sctp_asconf_iterator_end(asc, 0);
			} else {
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */

void
sctp_timeout_handler(void *t)
{
	struct epoch_tracker et;
	struct timeval tv;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
	int type;
	int i, secret;
	bool did_output, released_asoc_reference;

	/*
	 * If inp, stcb or net are not NULL, then references to these were
	 * added when the timer was started, and must be released before
	 * this function returns.
	 */
	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	NET_EPOCH_ENTER(et);
	did_output = 1;
	released_asoc_reference = false;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	KASSERT(tmr->self == NULL || tmr->self == tmr,
	    ("sctp_timeout_handler: tmr->self corrupted"));
	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
	    ("sctp_timeout_handler: invalid timer type %d", tmr->type));
	type = tmr->type;
	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
	    type, stcb, stcb->sctp_ep));
	tmr->stopped_from = 0xa001;
	if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to CLOSED association.\n",
		    type);
		goto out_decr;
	}
	tmr->stopped_from = 0xa002;
	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to not being active.\n",
		    type);
		goto out_decr;
	}

	tmr->stopped_from = 0xa003;
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/*
		 * Release reference so that association can be freed if
		 * necessary below. This is safe now that we have acquired
		 * the lock.
		 */
		atomic_add_int(&stcb->asoc.refcnt, -1);
		released_asoc_reference = true;
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == SCTP_STATE_EMPTY) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d handler exiting due to CLOSED association.\n",
			    type);
			goto out;
		}
	} else if (inp != NULL) {
		SCTP_INP_WLOCK(inp);
	} else {
		SCTP_WQ_ADDR_LOCK();
	}

	/* Record in stopped_from which timeout occurred. */
	tmr->stopped_from = type;
	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_SEND:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		did_output = true;
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * Safeguard. If there are chunks on the sent queue
			 * somewhere but no timers running, something is
			 * wrong... so we start a timer on the first chunk
			 * on the sent queue on whatever net it is sent to.
1838 */ 1839 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1840 if (chk->whoTo != NULL) { 1841 break; 1842 } 1843 } 1844 if (chk != NULL) { 1845 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1846 } 1847 } 1848 break; 1849 case SCTP_TIMER_TYPE_INIT: 1850 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1851 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1852 type, inp, stcb, net)); 1853 SCTP_STAT_INCR(sctps_timoinit); 1854 stcb->asoc.timoinit++; 1855 if (sctp_t1init_timer(inp, stcb, net)) { 1856 /* no need to unlock on tcb its gone */ 1857 goto out_decr; 1858 } 1859 did_output = false; 1860 break; 1861 case SCTP_TIMER_TYPE_RECV: 1862 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1863 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1864 type, inp, stcb, net)); 1865 SCTP_STAT_INCR(sctps_timosack); 1866 stcb->asoc.timosack++; 1867 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1868 #ifdef SCTP_AUDITING_ENABLED 1869 sctp_auditing(4, inp, stcb, NULL); 1870 #endif 1871 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1872 did_output = true; 1873 break; 1874 case SCTP_TIMER_TYPE_SHUTDOWN: 1875 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1876 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1877 type, inp, stcb, net)); 1878 SCTP_STAT_INCR(sctps_timoshutdown); 1879 stcb->asoc.timoshutdown++; 1880 if (sctp_shutdown_timer(inp, stcb, net)) { 1881 /* no need to unlock on tcb its gone */ 1882 goto out_decr; 1883 } 1884 #ifdef SCTP_AUDITING_ENABLED 1885 sctp_auditing(4, inp, stcb, net); 1886 #endif 1887 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1888 did_output = true; 1889 break; 1890 case SCTP_TIMER_TYPE_HEARTBEAT: 1891 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1892 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1893 type, inp, stcb, net)); 1894 SCTP_STAT_INCR(sctps_timoheartbeat); 1895 stcb->asoc.timoheartbeat++; 1896 if (sctp_heartbeat_timer(inp, stcb, net)) { 1897 /* no need to unlock on tcb its gone */ 1898 goto out_decr; 1899 } 1900 #ifdef SCTP_AUDITING_ENABLED 1901 sctp_auditing(4, inp, stcb, net); 1902 #endif 1903 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1904 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1905 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1906 did_output = true; 1907 } else { 1908 did_output = false; 1909 } 1910 break; 1911 case SCTP_TIMER_TYPE_COOKIE: 1912 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1913 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1914 type, inp, stcb, net)); 1915 SCTP_STAT_INCR(sctps_timocookie); 1916 stcb->asoc.timocookie++; 1917 if (sctp_cookie_timer(inp, stcb, net)) { 1918 /* no need to unlock on tcb its gone */ 1919 goto out_decr; 1920 } 1921 #ifdef SCTP_AUDITING_ENABLED 1922 sctp_auditing(4, inp, stcb, net); 1923 #endif 1924 /* 1925 * We consider T3 and Cookie timer pretty much the same with 1926 * respect to where from in chunk_output. 
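* That is why SCTP_OUTPUT_FROM_T3 is passed to sctp_chunk_output() below for the cookie retransmission as well.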
1927 */ 1928 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1929 did_output = true; 1930 break; 1931 case SCTP_TIMER_TYPE_NEWCOOKIE: 1932 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1933 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1934 type, inp, stcb, net)); 1935 SCTP_STAT_INCR(sctps_timosecret); 1936 (void)SCTP_GETTIME_TIMEVAL(&tv); 1937 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1938 inp->sctp_ep.last_secret_number = 1939 inp->sctp_ep.current_secret_number; 1940 inp->sctp_ep.current_secret_number++; 1941 if (inp->sctp_ep.current_secret_number >= 1942 SCTP_HOW_MANY_SECRETS) { 1943 inp->sctp_ep.current_secret_number = 0; 1944 } 1945 secret = (int)inp->sctp_ep.current_secret_number; 1946 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1947 inp->sctp_ep.secret_key[secret][i] = 1948 sctp_select_initial_TSN(&inp->sctp_ep); 1949 } 1950 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1951 did_output = false; 1952 break; 1953 case SCTP_TIMER_TYPE_PATHMTURAISE: 1954 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1955 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1956 type, inp, stcb, net)); 1957 SCTP_STAT_INCR(sctps_timopathmtu); 1958 sctp_pathmtu_timer(inp, stcb, net); 1959 did_output = false; 1960 break; 1961 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1962 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1963 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1964 type, inp, stcb, net)); 1965 if (sctp_shutdownack_timer(inp, stcb, net)) { 1966 /* no need to unlock on tcb its gone */ 1967 goto out_decr; 1968 } 1969 SCTP_STAT_INCR(sctps_timoshutdownack); 1970 stcb->asoc.timoshutdownack++; 1971 #ifdef SCTP_AUDITING_ENABLED 1972 sctp_auditing(4, inp, stcb, net); 1973 #endif 1974 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1975 did_output = true; 1976 break; 1977 case SCTP_TIMER_TYPE_ASCONF: 1978 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1979 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1980 type, inp, stcb, net)); 1981 SCTP_STAT_INCR(sctps_timoasconf); 1982 if (sctp_asconf_timer(inp, stcb, net)) { 1983 /* no need to unlock on tcb its gone */ 1984 goto out_decr; 1985 } 1986 #ifdef SCTP_AUDITING_ENABLED 1987 sctp_auditing(4, inp, stcb, net); 1988 #endif 1989 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 1990 did_output = true; 1991 break; 1992 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1993 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1994 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1995 type, inp, stcb, net)); 1996 SCTP_STAT_INCR(sctps_timoshutdownguard); 1997 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 1998 "Shutdown guard timer expired"); 1999 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2000 did_output = true; 2001 /* no need to unlock on tcb its gone */ 2002 goto out_decr; 2003 case SCTP_TIMER_TYPE_AUTOCLOSE: 2004 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2005 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2006 type, inp, stcb, net)); 2007 SCTP_STAT_INCR(sctps_timoautoclose); 2008 sctp_autoclose_timer(inp, stcb); 2009 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2010 did_output = true; 2011 break; 2012 case SCTP_TIMER_TYPE_STRRESET: 2013 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2014 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2015 type, inp, stcb, net)); 2016 SCTP_STAT_INCR(sctps_timostrmrst); 2017 if 
(sctp_strreset_timer(inp, stcb)) { 2018 /* no need to unlock on tcb its gone */ 2019 goto out_decr; 2020 } 2021 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2022 did_output = true; 2023 break; 2024 case SCTP_TIMER_TYPE_INPKILL: 2025 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2026 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2027 type, inp, stcb, net)); 2028 SCTP_STAT_INCR(sctps_timoinpkill); 2029 /* 2030 * special case, take away our increment since WE are the 2031 * killer 2032 */ 2033 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2034 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2035 SCTP_INP_DECR_REF(inp); 2036 SCTP_INP_WUNLOCK(inp); 2037 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2038 SCTP_CALLED_FROM_INPKILL_TIMER); 2039 inp = NULL; 2040 goto out_no_decr; 2041 case SCTP_TIMER_TYPE_ASOCKILL: 2042 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2043 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2044 type, inp, stcb, net)); 2045 SCTP_STAT_INCR(sctps_timoassockill); 2046 /* Can we free it yet? */ 2047 SCTP_INP_DECR_REF(inp); 2048 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2049 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2050 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2051 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2052 /* 2053 * free asoc, always unlocks (or destroy's) so prevent 2054 * duplicate unlock or unlock of a free mtx :-0 2055 */ 2056 stcb = NULL; 2057 goto out_no_decr; 2058 case SCTP_TIMER_TYPE_ADDR_WQ: 2059 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2060 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2061 type, inp, stcb, net)); 2062 sctp_handle_addr_wq(); 2063 did_output = true; 2064 break; 2065 case SCTP_TIMER_TYPE_PRIM_DELETED: 2066 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2067 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2068 type, inp, stcb, net)); 2069 SCTP_STAT_INCR(sctps_timodelprim); 2070 sctp_delete_prim_timer(inp, stcb); 2071 did_output = false; 2072 break; 2073 default: 2074 #ifdef INVARIANTS 2075 panic("Unknown timer type %d", type); 2076 #else 2077 did_output = false; 2078 goto out; 2079 #endif 2080 } 2081 #ifdef SCTP_AUDITING_ENABLED 2082 sctp_audit_log(0xF1, (uint8_t)type); 2083 if (inp != NULL) 2084 sctp_auditing(5, inp, stcb, net); 2085 #endif 2086 if (did_output && (stcb != NULL)) { 2087 /* 2088 * Now we need to clean up the control chunk chain if an 2089 * ECNE is on it. It must be marked as UNSENT again so next 2090 * call will continue to send it until such time that we get 2091 * a CWR, to remove it. It is, however, less likely that we 2092 * will find a ecn echo on the chain though. 2093 */ 2094 sctp_fix_ecn_echo(&stcb->asoc); 2095 } 2096 out: 2097 if (stcb != NULL) { 2098 SCTP_TCB_UNLOCK(stcb); 2099 } else if (inp != NULL) { 2100 SCTP_INP_WUNLOCK(inp); 2101 } else { 2102 SCTP_WQ_ADDR_UNLOCK(); 2103 } 2104 2105 out_decr: 2106 /* These reference counts were incremented in sctp_timer_start(). */ 2107 if (inp != NULL) { 2108 SCTP_INP_DECR_REF(inp); 2109 } 2110 if ((stcb != NULL) && !released_asoc_reference) { 2111 atomic_add_int(&stcb->asoc.refcnt, -1); 2112 } 2113 if (net != NULL) { 2114 sctp_free_remote_addr(net); 2115 } 2116 out_no_decr: 2117 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2118 CURVNET_RESTORE(); 2119 NET_EPOCH_EXIT(et); 2120 } 2121 2122 /*- 2123 * The following table shows which parameters must be provided 2124 * when calling sctp_timer_start(). 
For parameters not being 2125 * provided, NULL must be used. 2126 * 2127 * |Name |inp |stcb|net | 2128 * |-----------------------------|----|----|----| 2129 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2130 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2131 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2132 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2133 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2134 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2135 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2136 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2137 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2138 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2139 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2140 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2141 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2142 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2143 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2144 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2145 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2146 * 2147 */ 2148 2149 void 2150 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2151 struct sctp_nets *net) 2152 { 2153 struct sctp_timer *tmr; 2154 uint32_t to_ticks; 2155 uint32_t rndval, jitter; 2156 2157 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2158 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2159 t_type, stcb, stcb->sctp_ep)); 2160 tmr = NULL; 2161 to_ticks = 0; 2162 if (stcb != NULL) { 2163 SCTP_TCB_LOCK_ASSERT(stcb); 2164 } else if (inp != NULL) { 2165 SCTP_INP_WLOCK_ASSERT(inp); 2166 } else { 2167 SCTP_WQ_ADDR_LOCK_ASSERT(); 2168 } 2169 if (stcb != NULL) { 2170 /* 2171 * Don't restart timer on association that's about to be 2172 * killed. 2173 */ 2174 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2175 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2176 SCTPDBG(SCTP_DEBUG_TIMER2, 2177 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2178 t_type, inp, stcb, net); 2179 return; 2180 } 2181 /* Don't restart timer on net that's been removed. */ 2182 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2183 SCTPDBG(SCTP_DEBUG_TIMER2, 2184 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2185 t_type, inp, stcb, net); 2186 return; 2187 } 2188 } 2189 switch (t_type) { 2190 case SCTP_TIMER_TYPE_SEND: 2191 /* Here we use the RTO timer. */ 2192 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2193 #ifdef INVARIANTS 2194 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2195 t_type, inp, stcb, net); 2196 #else 2197 return; 2198 #endif 2199 } 2200 tmr = &net->rxt_timer; 2201 if (net->RTO == 0) { 2202 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2203 } else { 2204 to_ticks = sctp_msecs_to_ticks(net->RTO); 2205 } 2206 break; 2207 case SCTP_TIMER_TYPE_INIT: 2208 /* 2209 * Here we use the INIT timer default usually about 1 2210 * second. 2211 */ 2212 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2213 #ifdef INVARIANTS 2214 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2215 t_type, inp, stcb, net); 2216 #else 2217 return; 2218 #endif 2219 } 2220 tmr = &net->rxt_timer; 2221 if (net->RTO == 0) { 2222 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2223 } else { 2224 to_ticks = sctp_msecs_to_ticks(net->RTO); 2225 } 2226 break; 2227 case SCTP_TIMER_TYPE_RECV: 2228 /* 2229 * Here we use the Delayed-Ack timer value from the inp, 2230 * ususually about 200ms. 
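* The value stcb->asoc.delayed_ack is kept in milliseconds and is converted to ticks via sctp_msecs_to_ticks() below; it normally reflects the endpoint's delayed SACK configuration.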
2231 */ 2232 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2233 #ifdef INVARIANTS 2234 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2235 t_type, inp, stcb, net); 2236 #else 2237 return; 2238 #endif 2239 } 2240 tmr = &stcb->asoc.dack_timer; 2241 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2242 break; 2243 case SCTP_TIMER_TYPE_SHUTDOWN: 2244 /* Here we use the RTO of the destination. */ 2245 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2246 #ifdef INVARIANTS 2247 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2248 t_type, inp, stcb, net); 2249 #else 2250 return; 2251 #endif 2252 } 2253 tmr = &net->rxt_timer; 2254 if (net->RTO == 0) { 2255 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2256 } else { 2257 to_ticks = sctp_msecs_to_ticks(net->RTO); 2258 } 2259 break; 2260 case SCTP_TIMER_TYPE_HEARTBEAT: 2261 /* 2262 * The net is used here so that we can add in the RTO. Even 2263 * though we use a different timer. We also add the HB timer 2264 * PLUS a random jitter. 2265 */ 2266 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2267 #ifdef INVARIANTS 2268 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2269 t_type, inp, stcb, net); 2270 #else 2271 return; 2272 #endif 2273 } 2274 if ((net->dest_state & SCTP_ADDR_NOHB) && 2275 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 2276 SCTPDBG(SCTP_DEBUG_TIMER2, 2277 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2278 t_type, inp, stcb, net); 2279 return; 2280 } 2281 tmr = &net->hb_timer; 2282 if (net->RTO == 0) { 2283 to_ticks = stcb->asoc.initial_rto; 2284 } else { 2285 to_ticks = net->RTO; 2286 } 2287 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2288 jitter = rndval % to_ticks; 2289 if (jitter >= (to_ticks >> 1)) { 2290 to_ticks = to_ticks + (jitter - (to_ticks >> 1)); 2291 } else { 2292 to_ticks = to_ticks - jitter; 2293 } 2294 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2295 !(net->dest_state & SCTP_ADDR_PF)) { 2296 to_ticks += net->heart_beat_delay; 2297 } 2298 /* 2299 * Now we must convert the to_ticks that are now in ms to 2300 * ticks. 2301 */ 2302 to_ticks = sctp_msecs_to_ticks(to_ticks); 2303 break; 2304 case SCTP_TIMER_TYPE_COOKIE: 2305 /* 2306 * Here we can use the RTO timer from the network since one 2307 * RTT was complete. If a retransmission happened then we 2308 * will be using the RTO initial value. 2309 */ 2310 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2311 #ifdef INVARIANTS 2312 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2313 t_type, inp, stcb, net); 2314 #else 2315 return; 2316 #endif 2317 } 2318 tmr = &net->rxt_timer; 2319 if (net->RTO == 0) { 2320 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2321 } else { 2322 to_ticks = sctp_msecs_to_ticks(net->RTO); 2323 } 2324 break; 2325 case SCTP_TIMER_TYPE_NEWCOOKIE: 2326 /* 2327 * Nothing needed but the endpoint here ususually about 60 2328 * minutes. 2329 */ 2330 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2331 #ifdef INVARIANTS 2332 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2333 t_type, inp, stcb, net); 2334 #else 2335 return; 2336 #endif 2337 } 2338 tmr = &inp->sctp_ep.signature_change; 2339 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2340 break; 2341 case SCTP_TIMER_TYPE_PATHMTURAISE: 2342 /* 2343 * Here we use the value found in the EP for PMTUD, 2344 * ususually about 10 minutes. 
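* The interval is taken from inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU] below; the timer is not started at all when the path has SCTP_ADDR_NO_PMTUD set.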
2345 */ 2346 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2347 #ifdef INVARIANTS 2348 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2349 t_type, inp, stcb, net); 2350 #else 2351 return; 2352 #endif 2353 } 2354 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2355 SCTPDBG(SCTP_DEBUG_TIMER2, 2356 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2357 t_type, inp, stcb, net); 2358 return; 2359 } 2360 tmr = &net->pmtu_timer; 2361 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2362 break; 2363 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2364 /* Here we use the RTO of the destination. */ 2365 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2366 #ifdef INVARIANTS 2367 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2368 t_type, inp, stcb, net); 2369 #else 2370 return; 2371 #endif 2372 } 2373 tmr = &net->rxt_timer; 2374 if (net->RTO == 0) { 2375 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2376 } else { 2377 to_ticks = sctp_msecs_to_ticks(net->RTO); 2378 } 2379 break; 2380 case SCTP_TIMER_TYPE_ASCONF: 2381 /* 2382 * Here the timer comes from the stcb but its value is from 2383 * the net's RTO. 2384 */ 2385 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2386 #ifdef INVARIANTS 2387 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2388 t_type, inp, stcb, net); 2389 #else 2390 return; 2391 #endif 2392 } 2393 tmr = &stcb->asoc.asconf_timer; 2394 if (net->RTO == 0) { 2395 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2396 } else { 2397 to_ticks = sctp_msecs_to_ticks(net->RTO); 2398 } 2399 break; 2400 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2401 /* 2402 * Here we use the endpoints shutdown guard timer usually 2403 * about 3 minutes. 2404 */ 2405 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2406 #ifdef INVARIANTS 2407 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2408 t_type, inp, stcb, net); 2409 #else 2410 return; 2411 #endif 2412 } 2413 tmr = &stcb->asoc.shut_guard_timer; 2414 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2415 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2416 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2417 } else { 2418 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2419 } 2420 } else { 2421 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2422 } 2423 break; 2424 case SCTP_TIMER_TYPE_AUTOCLOSE: 2425 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2426 #ifdef INVARIANTS 2427 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2428 t_type, inp, stcb, net); 2429 #else 2430 return; 2431 #endif 2432 } 2433 tmr = &stcb->asoc.autoclose_timer; 2434 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2435 break; 2436 case SCTP_TIMER_TYPE_STRRESET: 2437 /* 2438 * Here the timer comes from the stcb but its value is from 2439 * the net's RTO. 2440 */ 2441 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2442 #ifdef INVARIANTS 2443 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2444 t_type, inp, stcb, net); 2445 #else 2446 return; 2447 #endif 2448 } 2449 tmr = &stcb->asoc.strreset_timer; 2450 if (net->RTO == 0) { 2451 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2452 } else { 2453 to_ticks = sctp_msecs_to_ticks(net->RTO); 2454 } 2455 break; 2456 case SCTP_TIMER_TYPE_INPKILL: 2457 /* 2458 * The inp is setup to die. We re-use the signature_chage 2459 * timer since that has stopped and we are in the GONE 2460 * state. 
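* (The timer reused below is inp->sctp_ep.signature_change; presumably it is free to take over because no new cookie secrets are generated once the endpoint is marked GONE.)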
2461 */ 2462 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2463 #ifdef INVARIANTS 2464 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2465 t_type, inp, stcb, net); 2466 #else 2467 return; 2468 #endif 2469 } 2470 tmr = &inp->sctp_ep.signature_change; 2471 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2472 break; 2473 case SCTP_TIMER_TYPE_ASOCKILL: 2474 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2475 #ifdef INVARIANTS 2476 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2477 t_type, inp, stcb, net); 2478 #else 2479 return; 2480 #endif 2481 } 2482 tmr = &stcb->asoc.strreset_timer; 2483 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2484 break; 2485 case SCTP_TIMER_TYPE_ADDR_WQ: 2486 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2487 #ifdef INVARIANTS 2488 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2489 t_type, inp, stcb, net); 2490 #else 2491 return; 2492 #endif 2493 } 2494 /* Only 1 tick away :-) */ 2495 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2496 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2497 break; 2498 case SCTP_TIMER_TYPE_PRIM_DELETED: 2499 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2500 #ifdef INVARIANTS 2501 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2502 t_type, inp, stcb, net); 2503 #else 2504 return; 2505 #endif 2506 } 2507 tmr = &stcb->asoc.delete_prim_timer; 2508 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2509 break; 2510 default: 2511 #ifdef INVARIANTS 2512 panic("Unknown timer type %d", t_type); 2513 #else 2514 return; 2515 #endif 2516 } 2517 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2518 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2519 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2520 /* 2521 * We do NOT allow you to have it already running. If it is, 2522 * we leave the current one up unchanged. 2523 */ 2524 SCTPDBG(SCTP_DEBUG_TIMER2, 2525 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2526 t_type, inp, stcb, net); 2527 return; 2528 } 2529 /* At this point we can proceed. */ 2530 if (t_type == SCTP_TIMER_TYPE_SEND) { 2531 stcb->asoc.num_send_timers_up++; 2532 } 2533 tmr->stopped_from = 0; 2534 tmr->type = t_type; 2535 tmr->ep = (void *)inp; 2536 tmr->tcb = (void *)stcb; 2537 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2538 tmr->net = NULL; 2539 } else { 2540 tmr->net = (void *)net; 2541 } 2542 tmr->self = (void *)tmr; 2543 tmr->vnet = (void *)curvnet; 2544 tmr->ticks = sctp_get_tick_count(); 2545 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2546 SCTPDBG(SCTP_DEBUG_TIMER2, 2547 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2548 t_type, to_ticks, inp, stcb, net); 2549 /* 2550 * If this is a newly scheduled callout, as opposed to a 2551 * rescheduled one, increment relevant reference counts. 2552 */ 2553 if (tmr->ep != NULL) { 2554 SCTP_INP_INCR_REF(inp); 2555 } 2556 if (tmr->tcb != NULL) { 2557 atomic_add_int(&stcb->asoc.refcnt, 1); 2558 } 2559 if (tmr->net != NULL) { 2560 atomic_add_int(&net->ref_count, 1); 2561 } 2562 } else { 2563 /* 2564 * This should not happen, since we checked for pending 2565 * above. 2566 */ 2567 SCTPDBG(SCTP_DEBUG_TIMER2, 2568 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2569 t_type, to_ticks, inp, stcb, net); 2570 } 2571 return; 2572 } 2573 2574 /*- 2575 * The following table shows which parameters must be provided 2576 * when calling sctp_timer_stop(). 
For parameters not being 2577 * provided, NULL must be used. 2578 * 2579 * |Name |inp |stcb|net | 2580 * |-----------------------------|----|----|----| 2581 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2582 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2583 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2584 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2585 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2586 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2587 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2588 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2589 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2590 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2591 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2592 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2593 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2594 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2595 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2596 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2597 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2598 * 2599 */ 2600 2601 void 2602 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2603 struct sctp_nets *net, uint32_t from) 2604 { 2605 struct sctp_timer *tmr; 2606 2607 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2608 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2609 t_type, stcb, stcb->sctp_ep)); 2610 if (stcb != NULL) { 2611 SCTP_TCB_LOCK_ASSERT(stcb); 2612 } else if (inp != NULL) { 2613 SCTP_INP_WLOCK_ASSERT(inp); 2614 } else { 2615 SCTP_WQ_ADDR_LOCK_ASSERT(); 2616 } 2617 tmr = NULL; 2618 switch (t_type) { 2619 case SCTP_TIMER_TYPE_SEND: 2620 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2621 #ifdef INVARIANTS 2622 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2623 t_type, inp, stcb, net); 2624 #else 2625 return; 2626 #endif 2627 } 2628 tmr = &net->rxt_timer; 2629 break; 2630 case SCTP_TIMER_TYPE_INIT: 2631 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2632 #ifdef INVARIANTS 2633 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2634 t_type, inp, stcb, net); 2635 #else 2636 return; 2637 #endif 2638 } 2639 tmr = &net->rxt_timer; 2640 break; 2641 case SCTP_TIMER_TYPE_RECV: 2642 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2643 #ifdef INVARIANTS 2644 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2645 t_type, inp, stcb, net); 2646 #else 2647 return; 2648 #endif 2649 } 2650 tmr = &stcb->asoc.dack_timer; 2651 break; 2652 case SCTP_TIMER_TYPE_SHUTDOWN: 2653 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2654 #ifdef INVARIANTS 2655 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2656 t_type, inp, stcb, net); 2657 #else 2658 return; 2659 #endif 2660 } 2661 tmr = &net->rxt_timer; 2662 break; 2663 case SCTP_TIMER_TYPE_HEARTBEAT: 2664 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2665 #ifdef INVARIANTS 2666 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2667 t_type, inp, stcb, net); 2668 #else 2669 return; 2670 #endif 2671 } 2672 tmr = &net->hb_timer; 2673 break; 2674 case SCTP_TIMER_TYPE_COOKIE: 2675 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2676 #ifdef INVARIANTS 2677 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2678 t_type, inp, stcb, net); 2679 #else 2680 return; 2681 #endif 2682 } 2683 tmr = &net->rxt_timer; 2684 break; 2685 case SCTP_TIMER_TYPE_NEWCOOKIE: 2686 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2687 #ifdef INVARIANTS 2688 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, 
net = %p", 2689 t_type, inp, stcb, net); 2690 #else 2691 return; 2692 #endif 2693 } 2694 tmr = &inp->sctp_ep.signature_change; 2695 break; 2696 case SCTP_TIMER_TYPE_PATHMTURAISE: 2697 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2698 #ifdef INVARIANTS 2699 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2700 t_type, inp, stcb, net); 2701 #else 2702 return; 2703 #endif 2704 } 2705 tmr = &net->pmtu_timer; 2706 break; 2707 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2708 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2709 #ifdef INVARIANTS 2710 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2711 t_type, inp, stcb, net); 2712 #else 2713 return; 2714 #endif 2715 } 2716 tmr = &net->rxt_timer; 2717 break; 2718 case SCTP_TIMER_TYPE_ASCONF: 2719 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2720 #ifdef INVARIANTS 2721 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2722 t_type, inp, stcb, net); 2723 #else 2724 return; 2725 #endif 2726 } 2727 tmr = &stcb->asoc.asconf_timer; 2728 break; 2729 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2730 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2731 #ifdef INVARIANTS 2732 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2733 t_type, inp, stcb, net); 2734 #else 2735 return; 2736 #endif 2737 } 2738 tmr = &stcb->asoc.shut_guard_timer; 2739 break; 2740 case SCTP_TIMER_TYPE_AUTOCLOSE: 2741 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2742 #ifdef INVARIANTS 2743 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2744 t_type, inp, stcb, net); 2745 #else 2746 return; 2747 #endif 2748 } 2749 tmr = &stcb->asoc.autoclose_timer; 2750 break; 2751 case SCTP_TIMER_TYPE_STRRESET: 2752 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2753 #ifdef INVARIANTS 2754 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2755 t_type, inp, stcb, net); 2756 #else 2757 return; 2758 #endif 2759 } 2760 tmr = &stcb->asoc.strreset_timer; 2761 break; 2762 case SCTP_TIMER_TYPE_INPKILL: 2763 /* 2764 * The inp is setup to die. We re-use the signature_chage 2765 * timer since that has stopped and we are in the GONE 2766 * state. 
2767 */ 2768 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2769 #ifdef INVARIANTS 2770 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2771 t_type, inp, stcb, net); 2772 #else 2773 return; 2774 #endif 2775 } 2776 tmr = &inp->sctp_ep.signature_change; 2777 break; 2778 case SCTP_TIMER_TYPE_ASOCKILL: 2779 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2780 #ifdef INVARIANTS 2781 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2782 t_type, inp, stcb, net); 2783 #else 2784 return; 2785 #endif 2786 } 2787 tmr = &stcb->asoc.strreset_timer; 2788 break; 2789 case SCTP_TIMER_TYPE_ADDR_WQ: 2790 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2791 #ifdef INVARIANTS 2792 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2793 t_type, inp, stcb, net); 2794 #else 2795 return; 2796 #endif 2797 } 2798 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2799 break; 2800 case SCTP_TIMER_TYPE_PRIM_DELETED: 2801 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2802 #ifdef INVARIANTS 2803 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2804 t_type, inp, stcb, net); 2805 #else 2806 return; 2807 #endif 2808 } 2809 tmr = &stcb->asoc.delete_prim_timer; 2810 break; 2811 default: 2812 #ifdef INVARIANTS 2813 panic("Unknown timer type %d", t_type); 2814 #else 2815 return; 2816 #endif 2817 } 2818 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2819 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2820 (tmr->type != t_type)) { 2821 /* 2822 * Ok we have a timer that is under joint use. Cookie timer 2823 * per chance with the SEND timer. We therefore are NOT 2824 * running the timer that the caller wants stopped. So just 2825 * return. 2826 */ 2827 SCTPDBG(SCTP_DEBUG_TIMER2, 2828 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2829 t_type, inp, stcb, net); 2830 return; 2831 } 2832 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2833 stcb->asoc.num_send_timers_up--; 2834 if (stcb->asoc.num_send_timers_up < 0) { 2835 stcb->asoc.num_send_timers_up = 0; 2836 } 2837 } 2838 tmr->self = NULL; 2839 tmr->stopped_from = from; 2840 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2841 KASSERT(tmr->ep == inp, 2842 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2843 t_type, inp, tmr->ep)); 2844 KASSERT(tmr->tcb == stcb, 2845 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2846 t_type, stcb, tmr->tcb)); 2847 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2848 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2849 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2850 t_type, net, tmr->net)); 2851 SCTPDBG(SCTP_DEBUG_TIMER2, 2852 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2853 t_type, inp, stcb, net); 2854 /* 2855 * If the timer was actually stopped, decrement reference 2856 * counts that were incremented in sctp_timer_start(). 2857 */ 2858 if (tmr->ep != NULL) { 2859 SCTP_INP_DECR_REF(inp); 2860 tmr->ep = NULL; 2861 } 2862 if (tmr->tcb != NULL) { 2863 atomic_add_int(&stcb->asoc.refcnt, -1); 2864 tmr->tcb = NULL; 2865 } 2866 if (tmr->net != NULL) { 2867 /* 2868 * Can't use net, since it doesn't work for 2869 * SCTP_TIMER_TYPE_ASCONF. 
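* The ASCONF timer is stopped with net == NULL while the timer itself still holds the net that was supplied to sctp_timer_start(), so the reference has to be released through tmr->net.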
2870 */ 2871 sctp_free_remote_addr((struct sctp_nets *)tmr->net); 2872 tmr->net = NULL; 2873 } 2874 } else { 2875 SCTPDBG(SCTP_DEBUG_TIMER2, 2876 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2877 t_type, inp, stcb, net); 2878 } 2879 return; 2880 } 2881 2882 uint32_t 2883 sctp_calculate_len(struct mbuf *m) 2884 { 2885 uint32_t tlen = 0; 2886 struct mbuf *at; 2887 2888 at = m; 2889 while (at) { 2890 tlen += SCTP_BUF_LEN(at); 2891 at = SCTP_BUF_NEXT(at); 2892 } 2893 return (tlen); 2894 } 2895 2896 void 2897 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2898 struct sctp_association *asoc, uint32_t mtu) 2899 { 2900 /* 2901 * Reset the P-MTU size on this association, this involves changing 2902 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2903 * allow the DF flag to be cleared. 2904 */ 2905 struct sctp_tmit_chunk *chk; 2906 unsigned int eff_mtu, ovh; 2907 2908 asoc->smallest_mtu = mtu; 2909 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2910 ovh = SCTP_MIN_OVERHEAD; 2911 } else { 2912 ovh = SCTP_MIN_V4_OVERHEAD; 2913 } 2914 eff_mtu = mtu - ovh; 2915 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2916 if (chk->send_size > eff_mtu) { 2917 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2918 } 2919 } 2920 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2921 if (chk->send_size > eff_mtu) { 2922 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2923 } 2924 } 2925 } 2926 2927 2928 /* 2929 * Given an association and starting time of the current RTT period, update 2930 * RTO in number of msecs. net should point to the current network. 2931 * Return 1, if an RTO update was performed, return 0 if no update was 2932 * performed due to invalid starting point. 2933 */ 2934 2935 int 2936 sctp_calculate_rto(struct sctp_tcb *stcb, 2937 struct sctp_association *asoc, 2938 struct sctp_nets *net, 2939 struct timeval *old, 2940 int rtt_from_sack) 2941 { 2942 struct timeval now; 2943 uint64_t rtt_us; /* RTT in us */ 2944 int32_t rtt; /* RTT in ms */ 2945 uint32_t new_rto; 2946 int first_measure = 0; 2947 2948 /************************/ 2949 /* 1. calculate new RTT */ 2950 /************************/ 2951 /* get the current time */ 2952 if (stcb->asoc.use_precise_time) { 2953 (void)SCTP_GETPTIME_TIMEVAL(&now); 2954 } else { 2955 (void)SCTP_GETTIME_TIMEVAL(&now); 2956 } 2957 if ((old->tv_sec > now.tv_sec) || 2958 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2959 /* The starting point is in the future. */ 2960 return (0); 2961 } 2962 timevalsub(&now, old); 2963 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2964 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2965 /* The RTT is larger than a sane value. */ 2966 return (0); 2967 } 2968 /* store the current RTT in us */ 2969 net->rtt = rtt_us; 2970 /* compute rtt in ms */ 2971 rtt = (int32_t)(net->rtt / 1000); 2972 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2973 /* 2974 * Tell the CC module that a new update has just occurred 2975 * from a sack 2976 */ 2977 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2978 } 2979 /* 2980 * Do we need to determine the lan? We do this only on sacks i.e. 2981 * RTT being determined from data not non-data (HB/INIT->INITACK). 
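* Note that net->rtt was stored in microseconds above, so the comparison against SCTP_LOCAL_LAN_RTT below is a microsecond threshold. The RTTVAR/SRTT update that follows is the usual scaled form; in unscaled terms it amounts to SRTT += (R - SRTT) / 2^SCTP_RTT_SHIFT, RTTVAR += (|R - SRTT| - RTTVAR) / 2^SCTP_RTT_VAR_SHIFT and RTO = SRTT + 2^SCTP_RTT_VAR_SHIFT * RTTVAR, bounded by minrto/maxrto.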
2982 */ 2983 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2984 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2985 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2986 net->lan_type = SCTP_LAN_INTERNET; 2987 } else { 2988 net->lan_type = SCTP_LAN_LOCAL; 2989 } 2990 } 2991 2992 /***************************/ 2993 /* 2. update RTTVAR & SRTT */ 2994 /***************************/ 2995 /*- 2996 * Compute the scaled average lastsa and the 2997 * scaled variance lastsv as described in van Jacobson 2998 * Paper "Congestion Avoidance and Control", Annex A. 2999 * 3000 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 3001 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 3002 */ 3003 if (net->RTO_measured) { 3004 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 3005 net->lastsa += rtt; 3006 if (rtt < 0) { 3007 rtt = -rtt; 3008 } 3009 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 3010 net->lastsv += rtt; 3011 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3012 rto_logging(net, SCTP_LOG_RTTVAR); 3013 } 3014 } else { 3015 /* First RTO measurment */ 3016 net->RTO_measured = 1; 3017 first_measure = 1; 3018 net->lastsa = rtt << SCTP_RTT_SHIFT; 3019 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 3020 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 3021 rto_logging(net, SCTP_LOG_INITIAL_RTT); 3022 } 3023 } 3024 if (net->lastsv == 0) { 3025 net->lastsv = SCTP_CLOCK_GRANULARITY; 3026 } 3027 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 3028 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 3029 (stcb->asoc.sat_network_lockout == 0)) { 3030 stcb->asoc.sat_network = 1; 3031 } else if ((!first_measure) && stcb->asoc.sat_network) { 3032 stcb->asoc.sat_network = 0; 3033 stcb->asoc.sat_network_lockout = 1; 3034 } 3035 /* bound it, per C6/C7 in Section 5.3.1 */ 3036 if (new_rto < stcb->asoc.minrto) { 3037 new_rto = stcb->asoc.minrto; 3038 } 3039 if (new_rto > stcb->asoc.maxrto) { 3040 new_rto = stcb->asoc.maxrto; 3041 } 3042 net->RTO = new_rto; 3043 return (1); 3044 } 3045 3046 /* 3047 * return a pointer to a contiguous piece of data from the given mbuf chain 3048 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3049 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 3050 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 3051 */ 3052 caddr_t 3053 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3054 { 3055 uint32_t count; 3056 uint8_t *ptr; 3057 3058 ptr = in_ptr; 3059 if ((off < 0) || (len <= 0)) 3060 return (NULL); 3061 3062 /* find the desired start location */ 3063 while ((m != NULL) && (off > 0)) { 3064 if (off < SCTP_BUF_LEN(m)) 3065 break; 3066 off -= SCTP_BUF_LEN(m); 3067 m = SCTP_BUF_NEXT(m); 3068 } 3069 if (m == NULL) 3070 return (NULL); 3071 3072 /* is the current mbuf large enough (eg. contiguous)? */ 3073 if ((SCTP_BUF_LEN(m) - off) >= len) { 3074 return (mtod(m, caddr_t)+off); 3075 } else { 3076 /* else, it spans more than one mbuf, so save a temp copy... 
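by walking the remaining mbufs and copying up to 'len' bytes into the caller-supplied buffer 'in_ptr', which is returned on success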
*/ 3077 while ((m != NULL) && (len > 0)) { 3078 count = min(SCTP_BUF_LEN(m) - off, len); 3079 memcpy(ptr, mtod(m, caddr_t)+off, count); 3080 len -= count; 3081 ptr += count; 3082 off = 0; 3083 m = SCTP_BUF_NEXT(m); 3084 } 3085 if ((m == NULL) && (len > 0)) 3086 return (NULL); 3087 else 3088 return ((caddr_t)in_ptr); 3089 } 3090 } 3091 3092 3093 3094 struct sctp_paramhdr * 3095 sctp_get_next_param(struct mbuf *m, 3096 int offset, 3097 struct sctp_paramhdr *pull, 3098 int pull_limit) 3099 { 3100 /* This just provides a typed signature to Peter's Pull routine */ 3101 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3102 (uint8_t *)pull)); 3103 } 3104 3105 3106 struct mbuf * 3107 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3108 { 3109 struct mbuf *m_last; 3110 caddr_t dp; 3111 3112 if (padlen > 3) { 3113 return (NULL); 3114 } 3115 if (padlen <= M_TRAILINGSPACE(m)) { 3116 /* 3117 * The easy way. We hope the majority of the time we hit 3118 * here :) 3119 */ 3120 m_last = m; 3121 } else { 3122 /* Hard way we must grow the mbuf chain */ 3123 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3124 if (m_last == NULL) { 3125 return (NULL); 3126 } 3127 SCTP_BUF_LEN(m_last) = 0; 3128 SCTP_BUF_NEXT(m_last) = NULL; 3129 SCTP_BUF_NEXT(m) = m_last; 3130 } 3131 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3132 SCTP_BUF_LEN(m_last) += padlen; 3133 memset(dp, 0, padlen); 3134 return (m_last); 3135 } 3136 3137 struct mbuf * 3138 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3139 { 3140 /* find the last mbuf in chain and pad it */ 3141 struct mbuf *m_at; 3142 3143 if (last_mbuf != NULL) { 3144 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3145 } else { 3146 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3147 if (SCTP_BUF_NEXT(m_at) == NULL) { 3148 return (sctp_add_pad_tombuf(m_at, padval)); 3149 } 3150 } 3151 } 3152 return (NULL); 3153 } 3154 3155 static void 3156 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3157 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked) 3158 { 3159 struct mbuf *m_notify; 3160 struct sctp_assoc_change *sac; 3161 struct sctp_queued_to_read *control; 3162 unsigned int notif_len; 3163 uint16_t abort_len; 3164 unsigned int i; 3165 3166 if (stcb == NULL) { 3167 return; 3168 } 3169 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3170 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3171 if (abort != NULL) { 3172 abort_len = ntohs(abort->ch.chunk_length); 3173 /* 3174 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3175 * contiguous. 3176 */ 3177 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3178 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3179 } 3180 } else { 3181 abort_len = 0; 3182 } 3183 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3184 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3185 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3186 notif_len += abort_len; 3187 } 3188 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3189 if (m_notify == NULL) { 3190 /* Retry with smaller value. 
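That is, fall back to just the fixed-size notification and drop the optional sac_info payload.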
*/ 3191 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3192 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3193 if (m_notify == NULL) { 3194 goto set_error; 3195 } 3196 } 3197 SCTP_BUF_NEXT(m_notify) = NULL; 3198 sac = mtod(m_notify, struct sctp_assoc_change *); 3199 memset(sac, 0, notif_len); 3200 sac->sac_type = SCTP_ASSOC_CHANGE; 3201 sac->sac_flags = 0; 3202 sac->sac_length = sizeof(struct sctp_assoc_change); 3203 sac->sac_state = state; 3204 sac->sac_error = error; 3205 /* XXX verify these stream counts */ 3206 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3207 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3208 sac->sac_assoc_id = sctp_get_associd(stcb); 3209 if (notif_len > sizeof(struct sctp_assoc_change)) { 3210 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3211 i = 0; 3212 if (stcb->asoc.prsctp_supported == 1) { 3213 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3214 } 3215 if (stcb->asoc.auth_supported == 1) { 3216 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3217 } 3218 if (stcb->asoc.asconf_supported == 1) { 3219 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3220 } 3221 if (stcb->asoc.idata_supported == 1) { 3222 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3223 } 3224 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3225 if (stcb->asoc.reconfig_supported == 1) { 3226 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3227 } 3228 sac->sac_length += i; 3229 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3230 memcpy(sac->sac_info, abort, abort_len); 3231 sac->sac_length += abort_len; 3232 } 3233 } 3234 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3235 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3236 0, 0, stcb->asoc.context, 0, 0, 0, 3237 m_notify); 3238 if (control != NULL) { 3239 control->length = SCTP_BUF_LEN(m_notify); 3240 control->spec_flags = M_NOTIFICATION; 3241 /* not that we need this */ 3242 control->tail_mbuf = m_notify; 3243 sctp_add_to_readq(stcb->sctp_ep, stcb, 3244 control, 3245 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3246 so_locked); 3247 } else { 3248 sctp_m_freem(m_notify); 3249 } 3250 } 3251 /* 3252 * For 1-to-1 style sockets, we send up an error when an ABORT 3253 * comes in.
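* The errno chosen below depends on how far the association got: ECONNREFUSED or ECONNRESET when the peer aborted it, ETIMEDOUT or ECONNABORTED when the abort originated locally.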
3254 */ 3255 set_error: 3256 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3257 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3258 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3259 SOCK_LOCK(stcb->sctp_socket); 3260 if (from_peer) { 3261 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3262 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3263 stcb->sctp_socket->so_error = ECONNREFUSED; 3264 } else { 3265 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3266 stcb->sctp_socket->so_error = ECONNRESET; 3267 } 3268 } else { 3269 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 3270 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 3271 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3272 stcb->sctp_socket->so_error = ETIMEDOUT; 3273 } else { 3274 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3275 stcb->sctp_socket->so_error = ECONNABORTED; 3276 } 3277 } 3278 SOCK_UNLOCK(stcb->sctp_socket); 3279 } 3280 /* Wake ANY sleepers */ 3281 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3282 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3283 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3284 socantrcvmore(stcb->sctp_socket); 3285 } 3286 sorwakeup(stcb->sctp_socket); 3287 sowwakeup(stcb->sctp_socket); 3288 } 3289 3290 static void 3291 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3292 struct sockaddr *sa, uint32_t error, int so_locked) 3293 { 3294 struct mbuf *m_notify; 3295 struct sctp_paddr_change *spc; 3296 struct sctp_queued_to_read *control; 3297 3298 if ((stcb == NULL) || 3299 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3300 /* event not enabled */ 3301 return; 3302 } 3303 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3304 if (m_notify == NULL) 3305 return; 3306 SCTP_BUF_LEN(m_notify) = 0; 3307 spc = mtod(m_notify, struct sctp_paddr_change *); 3308 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3309 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3310 spc->spc_flags = 0; 3311 spc->spc_length = sizeof(struct sctp_paddr_change); 3312 switch (sa->sa_family) { 3313 #ifdef INET 3314 case AF_INET: 3315 #ifdef INET6 3316 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3317 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3318 (struct sockaddr_in6 *)&spc->spc_aaddr); 3319 } else { 3320 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3321 } 3322 #else 3323 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3324 #endif 3325 break; 3326 #endif 3327 #ifdef INET6 3328 case AF_INET6: 3329 { 3330 struct sockaddr_in6 *sin6; 3331 3332 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3333 3334 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3335 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3336 if (sin6->sin6_scope_id == 0) { 3337 /* recover scope_id for user */ 3338 (void)sa6_recoverscope(sin6); 3339 } else { 3340 /* clear embedded scope_id for user */ 3341 in6_clearscope(&sin6->sin6_addr); 3342 } 3343 } 3344 break; 3345 } 3346 #endif 3347 default: 3348 /* TSNH */ 3349 break; 3350 } 3351 spc->spc_state = state; 3352 spc->spc_error = error; 3353 spc->spc_assoc_id = sctp_get_associd(stcb); 3354 3355 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3356 SCTP_BUF_NEXT(m_notify) = NULL; 3357 3358 /* append to socket */ 3359 control = sctp_build_readq_entry(stcb, 
stcb->asoc.primary_destination, 3360 0, 0, stcb->asoc.context, 0, 0, 0, 3361 m_notify); 3362 if (control == NULL) { 3363 /* no memory */ 3364 sctp_m_freem(m_notify); 3365 return; 3366 } 3367 control->length = SCTP_BUF_LEN(m_notify); 3368 control->spec_flags = M_NOTIFICATION; 3369 /* not that we need this */ 3370 control->tail_mbuf = m_notify; 3371 sctp_add_to_readq(stcb->sctp_ep, stcb, 3372 control, 3373 &stcb->sctp_socket->so_rcv, 1, 3374 SCTP_READ_LOCK_NOT_HELD, 3375 so_locked); 3376 } 3377 3378 3379 static void 3380 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3381 struct sctp_tmit_chunk *chk, int so_locked) 3382 { 3383 struct mbuf *m_notify; 3384 struct sctp_send_failed *ssf; 3385 struct sctp_send_failed_event *ssfe; 3386 struct sctp_queued_to_read *control; 3387 struct sctp_chunkhdr *chkhdr; 3388 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3389 3390 if ((stcb == NULL) || 3391 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3392 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3393 /* event not enabled */ 3394 return; 3395 } 3396 3397 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3398 notifhdr_len = sizeof(struct sctp_send_failed_event); 3399 } else { 3400 notifhdr_len = sizeof(struct sctp_send_failed); 3401 } 3402 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3403 if (m_notify == NULL) 3404 /* no space left */ 3405 return; 3406 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3407 if (stcb->asoc.idata_supported) { 3408 chkhdr_len = sizeof(struct sctp_idata_chunk); 3409 } else { 3410 chkhdr_len = sizeof(struct sctp_data_chunk); 3411 } 3412 /* Use some defaults in case we can't access the chunk header */ 3413 if (chk->send_size >= chkhdr_len) { 3414 payload_len = chk->send_size - chkhdr_len; 3415 } else { 3416 payload_len = 0; 3417 } 3418 padding_len = 0; 3419 if (chk->data != NULL) { 3420 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3421 if (chkhdr != NULL) { 3422 chk_len = ntohs(chkhdr->chunk_length); 3423 if ((chk_len >= chkhdr_len) && 3424 (chk->send_size >= chk_len) && 3425 (chk->send_size - chk_len < 4)) { 3426 padding_len = chk->send_size - chk_len; 3427 payload_len = chk->send_size - chkhdr_len - padding_len; 3428 } 3429 } 3430 } 3431 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3432 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3433 memset(ssfe, 0, notifhdr_len); 3434 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3435 if (sent) { 3436 ssfe->ssfe_flags = SCTP_DATA_SENT; 3437 } else { 3438 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3439 } 3440 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3441 ssfe->ssfe_error = error; 3442 /* not exactly what the user sent in, but should be close :) */ 3443 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3444 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3445 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3446 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3447 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3448 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3449 } else { 3450 ssf = mtod(m_notify, struct sctp_send_failed *); 3451 memset(ssf, 0, notifhdr_len); 3452 ssf->ssf_type = SCTP_SEND_FAILED; 3453 if (sent) { 3454 ssf->ssf_flags = SCTP_DATA_SENT; 3455 } else { 3456 ssf->ssf_flags = SCTP_DATA_UNSENT; 3457 } 3458 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3459 ssf->ssf_error 
= error; 3460 /* not exactly what the user sent in, but should be close :) */ 3461 ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3462 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3463 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3464 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3465 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3466 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3467 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3468 } 3469 if (chk->data != NULL) { 3470 /* Trim off the sctp chunk header (it should be there) */ 3471 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3472 m_adj(chk->data, chkhdr_len); 3473 m_adj(chk->data, -padding_len); 3474 sctp_mbuf_crush(chk->data); 3475 chk->send_size -= (chkhdr_len + padding_len); 3476 } 3477 } 3478 SCTP_BUF_NEXT(m_notify) = chk->data; 3479 /* Steal off the mbuf */ 3480 chk->data = NULL; 3481 /* 3482 * For this case, we check the actual socket buffer, since the assoc 3483 * is going away we don't want to overfill the socket buffer for a 3484 * non-reader 3485 */ 3486 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3487 sctp_m_freem(m_notify); 3488 return; 3489 } 3490 /* append to socket */ 3491 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3492 0, 0, stcb->asoc.context, 0, 0, 0, 3493 m_notify); 3494 if (control == NULL) { 3495 /* no memory */ 3496 sctp_m_freem(m_notify); 3497 return; 3498 } 3499 control->length = SCTP_BUF_LEN(m_notify); 3500 control->spec_flags = M_NOTIFICATION; 3501 /* not that we need this */ 3502 control->tail_mbuf = m_notify; 3503 sctp_add_to_readq(stcb->sctp_ep, stcb, 3504 control, 3505 &stcb->sctp_socket->so_rcv, 1, 3506 SCTP_READ_LOCK_NOT_HELD, 3507 so_locked); 3508 } 3509 3510 3511 static void 3512 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3513 struct sctp_stream_queue_pending *sp, int so_locked) 3514 { 3515 struct mbuf *m_notify; 3516 struct sctp_send_failed *ssf; 3517 struct sctp_send_failed_event *ssfe; 3518 struct sctp_queued_to_read *control; 3519 int notifhdr_len; 3520 3521 if ((stcb == NULL) || 3522 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3523 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3524 /* event not enabled */ 3525 return; 3526 } 3527 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3528 notifhdr_len = sizeof(struct sctp_send_failed_event); 3529 } else { 3530 notifhdr_len = sizeof(struct sctp_send_failed); 3531 } 3532 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3533 if (m_notify == NULL) { 3534 /* no space left */ 3535 return; 3536 } 3537 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3538 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3539 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3540 memset(ssfe, 0, notifhdr_len); 3541 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3542 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3543 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3544 ssfe->ssfe_error = error; 3545 /* not exactly what the user sent in, but should be close :) */ 3546 ssfe->ssfe_info.snd_sid = sp->sid; 3547 if (sp->some_taken) { 3548 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3549 } else { 3550 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3551 } 3552 ssfe->ssfe_info.snd_ppid = sp->ppid; 3553 ssfe->ssfe_info.snd_context = sp->context; 3554 ssfe->ssfe_info.snd_assoc_id = 
sctp_get_associd(stcb); 3555 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3556 } else { 3557 ssf = mtod(m_notify, struct sctp_send_failed *); 3558 memset(ssf, 0, notifhdr_len); 3559 ssf->ssf_type = SCTP_SEND_FAILED; 3560 ssf->ssf_flags = SCTP_DATA_UNSENT; 3561 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3562 ssf->ssf_error = error; 3563 /* not exactly what the user sent in, but should be close :) */ 3564 ssf->ssf_info.sinfo_stream = sp->sid; 3565 ssf->ssf_info.sinfo_ssn = 0; 3566 if (sp->some_taken) { 3567 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3568 } else { 3569 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3570 } 3571 ssf->ssf_info.sinfo_ppid = sp->ppid; 3572 ssf->ssf_info.sinfo_context = sp->context; 3573 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3574 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3575 } 3576 SCTP_BUF_NEXT(m_notify) = sp->data; 3577 3578 /* Steal off the mbuf */ 3579 sp->data = NULL; 3580 /* 3581 * For this case, we check the actual socket buffer, since the assoc 3582 * is going away we don't want to overfill the socket buffer for a 3583 * non-reader 3584 */ 3585 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3586 sctp_m_freem(m_notify); 3587 return; 3588 } 3589 /* append to socket */ 3590 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3591 0, 0, stcb->asoc.context, 0, 0, 0, 3592 m_notify); 3593 if (control == NULL) { 3594 /* no memory */ 3595 sctp_m_freem(m_notify); 3596 return; 3597 } 3598 control->length = SCTP_BUF_LEN(m_notify); 3599 control->spec_flags = M_NOTIFICATION; 3600 /* not that we need this */ 3601 control->tail_mbuf = m_notify; 3602 sctp_add_to_readq(stcb->sctp_ep, stcb, 3603 control, 3604 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3605 } 3606 3607 3608 3609 static void 3610 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3611 { 3612 struct mbuf *m_notify; 3613 struct sctp_adaptation_event *sai; 3614 struct sctp_queued_to_read *control; 3615 3616 if ((stcb == NULL) || 3617 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3618 /* event not enabled */ 3619 return; 3620 } 3621 3622 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3623 if (m_notify == NULL) 3624 /* no space left */ 3625 return; 3626 SCTP_BUF_LEN(m_notify) = 0; 3627 sai = mtod(m_notify, struct sctp_adaptation_event *); 3628 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3629 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3630 sai->sai_flags = 0; 3631 sai->sai_length = sizeof(struct sctp_adaptation_event); 3632 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3633 sai->sai_assoc_id = sctp_get_associd(stcb); 3634 3635 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3636 SCTP_BUF_NEXT(m_notify) = NULL; 3637 3638 /* append to socket */ 3639 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3640 0, 0, stcb->asoc.context, 0, 0, 0, 3641 m_notify); 3642 if (control == NULL) { 3643 /* no memory */ 3644 sctp_m_freem(m_notify); 3645 return; 3646 } 3647 control->length = SCTP_BUF_LEN(m_notify); 3648 control->spec_flags = M_NOTIFICATION; 3649 /* not that we need this */ 3650 control->tail_mbuf = m_notify; 3651 sctp_add_to_readq(stcb->sctp_ep, stcb, 3652 control, 3653 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3654 } 3655 3656 /* This always must be called with the read-queue LOCKED in the INP */ 3657 static void 3658 
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3659 uint32_t val, int so_locked) 3660 { 3661 struct mbuf *m_notify; 3662 struct sctp_pdapi_event *pdapi; 3663 struct sctp_queued_to_read *control; 3664 struct sockbuf *sb; 3665 3666 if ((stcb == NULL) || 3667 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3668 /* event not enabled */ 3669 return; 3670 } 3671 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3672 return; 3673 } 3674 3675 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3676 if (m_notify == NULL) 3677 /* no space left */ 3678 return; 3679 SCTP_BUF_LEN(m_notify) = 0; 3680 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3681 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3682 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3683 pdapi->pdapi_flags = 0; 3684 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3685 pdapi->pdapi_indication = error; 3686 pdapi->pdapi_stream = (val >> 16); 3687 pdapi->pdapi_seq = (val & 0x0000ffff); 3688 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3689 3690 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3691 SCTP_BUF_NEXT(m_notify) = NULL; 3692 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3693 0, 0, stcb->asoc.context, 0, 0, 0, 3694 m_notify); 3695 if (control == NULL) { 3696 /* no memory */ 3697 sctp_m_freem(m_notify); 3698 return; 3699 } 3700 control->length = SCTP_BUF_LEN(m_notify); 3701 control->spec_flags = M_NOTIFICATION; 3702 /* not that we need this */ 3703 control->tail_mbuf = m_notify; 3704 sb = &stcb->sctp_socket->so_rcv; 3705 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3706 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3707 } 3708 sctp_sballoc(stcb, sb, m_notify); 3709 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3710 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3711 } 3712 control->end_added = 1; 3713 if (stcb->asoc.control_pdapi) 3714 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3715 else { 3716 /* we really should not see this case */ 3717 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3718 } 3719 if (stcb->sctp_ep && stcb->sctp_socket) { 3720 /* This should always be the case */ 3721 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3722 } 3723 } 3724 3725 static void 3726 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3727 { 3728 struct mbuf *m_notify; 3729 struct sctp_shutdown_event *sse; 3730 struct sctp_queued_to_read *control; 3731 3732 /* 3733 * For TCP model AND UDP connected sockets we will send an error up 3734 * when an SHUTDOWN completes 3735 */ 3736 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3737 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3738 /* mark socket closed for read/write and wakeup! 
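 * Only the send side is actually marked here (socantsendmore() below);
 * the receive side is left readable so the SCTP_SHUTDOWN_EVENT queued
 * further down and any data still on the read queue can be picked up.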
*/ 3739 socantsendmore(stcb->sctp_socket); 3740 } 3741 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3742 /* event not enabled */ 3743 return; 3744 } 3745 3746 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3747 if (m_notify == NULL) 3748 /* no space left */ 3749 return; 3750 sse = mtod(m_notify, struct sctp_shutdown_event *); 3751 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3752 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3753 sse->sse_flags = 0; 3754 sse->sse_length = sizeof(struct sctp_shutdown_event); 3755 sse->sse_assoc_id = sctp_get_associd(stcb); 3756 3757 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3758 SCTP_BUF_NEXT(m_notify) = NULL; 3759 3760 /* append to socket */ 3761 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3762 0, 0, stcb->asoc.context, 0, 0, 0, 3763 m_notify); 3764 if (control == NULL) { 3765 /* no memory */ 3766 sctp_m_freem(m_notify); 3767 return; 3768 } 3769 control->length = SCTP_BUF_LEN(m_notify); 3770 control->spec_flags = M_NOTIFICATION; 3771 /* not that we need this */ 3772 control->tail_mbuf = m_notify; 3773 sctp_add_to_readq(stcb->sctp_ep, stcb, 3774 control, 3775 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3776 } 3777 3778 static void 3779 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3780 int so_locked) 3781 { 3782 struct mbuf *m_notify; 3783 struct sctp_sender_dry_event *event; 3784 struct sctp_queued_to_read *control; 3785 3786 if ((stcb == NULL) || 3787 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3788 /* event not enabled */ 3789 return; 3790 } 3791 3792 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3793 if (m_notify == NULL) { 3794 /* no space left */ 3795 return; 3796 } 3797 SCTP_BUF_LEN(m_notify) = 0; 3798 event = mtod(m_notify, struct sctp_sender_dry_event *); 3799 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3800 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3801 event->sender_dry_flags = 0; 3802 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3803 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3804 3805 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3806 SCTP_BUF_NEXT(m_notify) = NULL; 3807 3808 /* append to socket */ 3809 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3810 0, 0, stcb->asoc.context, 0, 0, 0, 3811 m_notify); 3812 if (control == NULL) { 3813 /* no memory */ 3814 sctp_m_freem(m_notify); 3815 return; 3816 } 3817 control->length = SCTP_BUF_LEN(m_notify); 3818 control->spec_flags = M_NOTIFICATION; 3819 /* not that we need this */ 3820 control->tail_mbuf = m_notify; 3821 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3822 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3823 } 3824 3825 3826 void 3827 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3828 { 3829 struct mbuf *m_notify; 3830 struct sctp_queued_to_read *control; 3831 struct sctp_stream_change_event *stradd; 3832 3833 if ((stcb == NULL) || 3834 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3835 /* event not enabled */ 3836 return; 3837 } 3838 if ((stcb->asoc.peer_req_out) && flag) { 3839 /* Peer made the request, don't tell the local user */ 3840 stcb->asoc.peer_req_out = 0; 3841 return; 3842 } 3843 stcb->asoc.peer_req_out = 0; 3844 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3845 if (m_notify == NULL) 3846 /* no space left */ 3847 return; 3848 SCTP_BUF_LEN(m_notify) = 0; 3849 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3850 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3851 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3852 stradd->strchange_flags = flag; 3853 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3854 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3855 stradd->strchange_instrms = numberin; 3856 stradd->strchange_outstrms = numberout; 3857 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3858 SCTP_BUF_NEXT(m_notify) = NULL; 3859 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3860 /* no space */ 3861 sctp_m_freem(m_notify); 3862 return; 3863 } 3864 /* append to socket */ 3865 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3866 0, 0, stcb->asoc.context, 0, 0, 0, 3867 m_notify); 3868 if (control == NULL) { 3869 /* no memory */ 3870 sctp_m_freem(m_notify); 3871 return; 3872 } 3873 control->length = SCTP_BUF_LEN(m_notify); 3874 control->spec_flags = M_NOTIFICATION; 3875 /* not that we need this */ 3876 control->tail_mbuf = m_notify; 3877 sctp_add_to_readq(stcb->sctp_ep, stcb, 3878 control, 3879 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3880 } 3881 3882 void 3883 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3884 { 3885 struct mbuf *m_notify; 3886 struct sctp_queued_to_read *control; 3887 struct sctp_assoc_reset_event *strasoc; 3888 3889 if ((stcb == NULL) || 3890 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3891 /* event not enabled */ 3892 return; 3893 } 3894 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3895 if (m_notify == NULL) 3896 /* no space left */ 3897 return; 3898 SCTP_BUF_LEN(m_notify) = 0; 3899 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3900 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3901 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3902 strasoc->assocreset_flags = flag; 3903 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3904 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3905 strasoc->assocreset_local_tsn = sending_tsn; 3906 strasoc->assocreset_remote_tsn = recv_tsn; 3907 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3908 SCTP_BUF_NEXT(m_notify) = NULL; 3909 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3910 /* no space */ 3911 sctp_m_freem(m_notify); 3912 return; 3913 } 3914 /* append to socket */ 3915 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3916 0, 0, stcb->asoc.context, 0, 0, 0, 3917 m_notify); 3918 if (control == NULL) { 3919 /* no memory */ 3920 sctp_m_freem(m_notify); 3921 return; 3922 } 3923 control->length = SCTP_BUF_LEN(m_notify); 3924 control->spec_flags = M_NOTIFICATION; 3925 /* not that we need this */ 3926 control->tail_mbuf = m_notify; 3927 sctp_add_to_readq(stcb->sctp_ep, stcb, 3928 control, 3929 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3930 } 3931 3932 3933 3934 static void 3935 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3936 int number_entries, uint16_t *list, int flag) 3937 { 3938 struct mbuf *m_notify; 3939 struct 
sctp_queued_to_read *control; 3940 struct sctp_stream_reset_event *strreset; 3941 int len; 3942 3943 if ((stcb == NULL) || 3944 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3945 /* event not enabled */ 3946 return; 3947 } 3948 3949 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3950 if (m_notify == NULL) 3951 /* no space left */ 3952 return; 3953 SCTP_BUF_LEN(m_notify) = 0; 3954 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3955 if (len > M_TRAILINGSPACE(m_notify)) { 3956 /* never enough room */ 3957 sctp_m_freem(m_notify); 3958 return; 3959 } 3960 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3961 memset(strreset, 0, len); 3962 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3963 strreset->strreset_flags = flag; 3964 strreset->strreset_length = len; 3965 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3966 if (number_entries) { 3967 int i; 3968 3969 for (i = 0; i < number_entries; i++) { 3970 strreset->strreset_stream_list[i] = ntohs(list[i]); 3971 } 3972 } 3973 SCTP_BUF_LEN(m_notify) = len; 3974 SCTP_BUF_NEXT(m_notify) = NULL; 3975 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3976 /* no space */ 3977 sctp_m_freem(m_notify); 3978 return; 3979 } 3980 /* append to socket */ 3981 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3982 0, 0, stcb->asoc.context, 0, 0, 0, 3983 m_notify); 3984 if (control == NULL) { 3985 /* no memory */ 3986 sctp_m_freem(m_notify); 3987 return; 3988 } 3989 control->length = SCTP_BUF_LEN(m_notify); 3990 control->spec_flags = M_NOTIFICATION; 3991 /* not that we need this */ 3992 control->tail_mbuf = m_notify; 3993 sctp_add_to_readq(stcb->sctp_ep, stcb, 3994 control, 3995 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3996 } 3997 3998 3999 static void 4000 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 4001 { 4002 struct mbuf *m_notify; 4003 struct sctp_remote_error *sre; 4004 struct sctp_queued_to_read *control; 4005 unsigned int notif_len; 4006 uint16_t chunk_len; 4007 4008 if ((stcb == NULL) || 4009 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 4010 return; 4011 } 4012 if (chunk != NULL) { 4013 chunk_len = ntohs(chunk->ch.chunk_length); 4014 /* 4015 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 4016 * contiguous. 4017 */ 4018 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 4019 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 4020 } 4021 } else { 4022 chunk_len = 0; 4023 } 4024 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 4025 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4026 if (m_notify == NULL) { 4027 /* Retry with smaller value. 
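 * If the allocation that would also cover the reported error chunk
 * fails, fall back to a header-only SCTP_REMOTE_ERROR notification; the
 * chunk contents are then simply omitted (see the notif_len check in
 * front of the memcpy() below).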
*/ 4028 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 4029 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 4030 if (m_notify == NULL) { 4031 return; 4032 } 4033 } 4034 SCTP_BUF_NEXT(m_notify) = NULL; 4035 sre = mtod(m_notify, struct sctp_remote_error *); 4036 memset(sre, 0, notif_len); 4037 sre->sre_type = SCTP_REMOTE_ERROR; 4038 sre->sre_flags = 0; 4039 sre->sre_length = sizeof(struct sctp_remote_error); 4040 sre->sre_error = error; 4041 sre->sre_assoc_id = sctp_get_associd(stcb); 4042 if (notif_len > sizeof(struct sctp_remote_error)) { 4043 memcpy(sre->sre_data, chunk, chunk_len); 4044 sre->sre_length += chunk_len; 4045 } 4046 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4047 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4048 0, 0, stcb->asoc.context, 0, 0, 0, 4049 m_notify); 4050 if (control != NULL) { 4051 control->length = SCTP_BUF_LEN(m_notify); 4052 control->spec_flags = M_NOTIFICATION; 4053 /* not that we need this */ 4054 control->tail_mbuf = m_notify; 4055 sctp_add_to_readq(stcb->sctp_ep, stcb, 4056 control, 4057 &stcb->sctp_socket->so_rcv, 1, 4058 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4059 } else { 4060 sctp_m_freem(m_notify); 4061 } 4062 } 4063 4064 4065 void 4066 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4067 uint32_t error, void *data, int so_locked) 4068 { 4069 if ((stcb == NULL) || 4070 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4071 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4072 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4073 /* If the socket is gone we are out of here */ 4074 return; 4075 } 4076 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4077 return; 4078 } 4079 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4080 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4081 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4082 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4083 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4084 /* Don't report these in front states */ 4085 return; 4086 } 4087 } 4088 switch (notification) { 4089 case SCTP_NOTIFY_ASSOC_UP: 4090 if (stcb->asoc.assoc_up_sent == 0) { 4091 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 4092 stcb->asoc.assoc_up_sent = 1; 4093 } 4094 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4095 sctp_notify_adaptation_layer(stcb); 4096 } 4097 if (stcb->asoc.auth_supported == 0) { 4098 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4099 NULL, so_locked); 4100 } 4101 break; 4102 case SCTP_NOTIFY_ASSOC_DOWN: 4103 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 4104 break; 4105 case SCTP_NOTIFY_INTERFACE_DOWN: 4106 { 4107 struct sctp_nets *net; 4108 4109 net = (struct sctp_nets *)data; 4110 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4111 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4112 break; 4113 } 4114 case SCTP_NOTIFY_INTERFACE_UP: 4115 { 4116 struct sctp_nets *net; 4117 4118 net = (struct sctp_nets *)data; 4119 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4120 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4121 break; 4122 } 4123 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4124 { 4125 struct sctp_nets *net; 4126 4127 net = (struct sctp_nets *)data; 4128 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4129 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4130 break; 4131 } 4132 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4133 
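		/*
		 * 'data' is a stream queue entry (struct
		 * sctp_stream_queue_pending) that never became a chunk,
		 * hence the separate send_failed2 path below.
		 */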
sctp_notify_send_failed2(stcb, error, 4134 (struct sctp_stream_queue_pending *)data, so_locked); 4135 break; 4136 case SCTP_NOTIFY_SENT_DG_FAIL: 4137 sctp_notify_send_failed(stcb, 1, error, 4138 (struct sctp_tmit_chunk *)data, so_locked); 4139 break; 4140 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4141 sctp_notify_send_failed(stcb, 0, error, 4142 (struct sctp_tmit_chunk *)data, so_locked); 4143 break; 4144 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4145 { 4146 uint32_t val; 4147 4148 val = *((uint32_t *)data); 4149 4150 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4151 break; 4152 } 4153 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4154 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4155 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4156 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked); 4157 } else { 4158 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked); 4159 } 4160 break; 4161 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4162 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4163 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4164 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked); 4165 } else { 4166 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked); 4167 } 4168 break; 4169 case SCTP_NOTIFY_ASSOC_RESTART: 4170 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked); 4171 if (stcb->asoc.auth_supported == 0) { 4172 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4173 NULL, so_locked); 4174 } 4175 break; 4176 case SCTP_NOTIFY_STR_RESET_SEND: 4177 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4178 break; 4179 case SCTP_NOTIFY_STR_RESET_RECV: 4180 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4181 break; 4182 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4183 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4184 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4185 break; 4186 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4187 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4188 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4189 break; 4190 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4191 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4192 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4193 break; 4194 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4195 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4196 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4197 break; 4198 case SCTP_NOTIFY_ASCONF_ADD_IP: 4199 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4200 error, so_locked); 4201 break; 4202 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4203 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4204 error, so_locked); 4205 break; 4206 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4207 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4208 error, so_locked); 4209 break; 4210 case SCTP_NOTIFY_PEER_SHUTDOWN: 4211 sctp_notify_shutdown_event(stcb); 4212 break; 4213 case SCTP_NOTIFY_AUTH_NEW_KEY: 4214 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4215 (uint16_t)(uintptr_t)data, 4216 so_locked); 4217 break; 4218 case SCTP_NOTIFY_AUTH_FREE_KEY: 4219 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4220 (uint16_t)(uintptr_t)data, 4221 so_locked); 4222 break; 4223 case SCTP_NOTIFY_NO_PEER_AUTH: 4224 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4225 
(uint16_t)(uintptr_t)data, 4226 so_locked); 4227 break; 4228 case SCTP_NOTIFY_SENDER_DRY: 4229 sctp_notify_sender_dry_event(stcb, so_locked); 4230 break; 4231 case SCTP_NOTIFY_REMOTE_ERROR: 4232 sctp_notify_remote_error(stcb, error, data); 4233 break; 4234 default: 4235 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4236 __func__, notification, notification); 4237 break; 4238 } /* end switch */ 4239 } 4240 4241 void 4242 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) 4243 { 4244 struct sctp_association *asoc; 4245 struct sctp_stream_out *outs; 4246 struct sctp_tmit_chunk *chk, *nchk; 4247 struct sctp_stream_queue_pending *sp, *nsp; 4248 int i; 4249 4250 if (stcb == NULL) { 4251 return; 4252 } 4253 asoc = &stcb->asoc; 4254 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4255 /* already being freed */ 4256 return; 4257 } 4258 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4259 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4260 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4261 return; 4262 } 4263 /* now through all the gunk freeing chunks */ 4264 /* sent queue SHOULD be empty */ 4265 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4266 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4267 asoc->sent_queue_cnt--; 4268 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4269 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4270 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4271 #ifdef INVARIANTS 4272 } else { 4273 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4274 #endif 4275 } 4276 } 4277 if (chk->data != NULL) { 4278 sctp_free_bufspace(stcb, asoc, chk, 1); 4279 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4280 error, chk, so_locked); 4281 if (chk->data) { 4282 sctp_m_freem(chk->data); 4283 chk->data = NULL; 4284 } 4285 } 4286 sctp_free_a_chunk(stcb, chk, so_locked); 4287 /* sa_ignore FREED_MEMORY */ 4288 } 4289 /* pending send queue SHOULD be empty */ 4290 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4291 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4292 asoc->send_queue_cnt--; 4293 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4294 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4295 #ifdef INVARIANTS 4296 } else { 4297 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4298 #endif 4299 } 4300 if (chk->data != NULL) { 4301 sctp_free_bufspace(stcb, asoc, chk, 1); 4302 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4303 error, chk, so_locked); 4304 if (chk->data) { 4305 sctp_m_freem(chk->data); 4306 chk->data = NULL; 4307 } 4308 } 4309 sctp_free_a_chunk(stcb, chk, so_locked); 4310 /* sa_ignore FREED_MEMORY */ 4311 } 4312 for (i = 0; i < asoc->streamoutcnt; i++) { 4313 /* For each stream */ 4314 outs = &asoc->strmout[i]; 4315 /* clean up any sends there */ 4316 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4317 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4318 TAILQ_REMOVE(&outs->outqueue, sp, next); 4319 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1); 4320 sctp_free_spbufspace(stcb, asoc, sp); 4321 if (sp->data) { 4322 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4323 error, (void *)sp, so_locked); 4324 if (sp->data) { 4325 sctp_m_freem(sp->data); 4326 sp->data = NULL; 4327 sp->tail_mbuf = NULL; 4328 sp->length = 0; 4329 } 4330 } 4331 if (sp->net) { 4332 sctp_free_remote_addr(sp->net); 4333 sp->net = NULL; 4334 } 4335 /* Free the chunk */ 4336 
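			/*
			 * (sp is a stream queue entry, not a tmit chunk;
			 * sctp_free_a_strmoq() hands it back to the
			 * allocator.)
			 */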
sctp_free_a_strmoq(stcb, sp, so_locked); 4337 /* sa_ignore FREED_MEMORY */ 4338 } 4339 } 4340 } 4341 4342 void 4343 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error, 4344 struct sctp_abort_chunk *abort, int so_locked) 4345 { 4346 if (stcb == NULL) { 4347 return; 4348 } 4349 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4350 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4351 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4352 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 4353 } 4354 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4355 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4356 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4357 return; 4358 } 4359 SCTP_TCB_SEND_LOCK(stcb); 4360 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4361 /* Tell them we lost the asoc */ 4362 sctp_report_all_outbound(stcb, error, so_locked); 4363 SCTP_TCB_SEND_UNLOCK(stcb); 4364 if (from_peer) { 4365 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4366 } else { 4367 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4368 } 4369 } 4370 4371 void 4372 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4373 struct mbuf *m, int iphlen, 4374 struct sockaddr *src, struct sockaddr *dst, 4375 struct sctphdr *sh, struct mbuf *op_err, 4376 uint8_t mflowtype, uint32_t mflowid, 4377 uint32_t vrf_id, uint16_t port) 4378 { 4379 uint32_t vtag; 4380 4381 vtag = 0; 4382 if (stcb != NULL) { 4383 vtag = stcb->asoc.peer_vtag; 4384 vrf_id = stcb->asoc.vrf_id; 4385 } 4386 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4387 mflowtype, mflowid, inp->fibnum, 4388 vrf_id, port); 4389 if (stcb != NULL) { 4390 /* We have a TCB to abort, send notification too */ 4391 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 4392 /* Ok, now lets free it */ 4393 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4394 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4395 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4396 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4397 } 4398 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4399 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4400 } 4401 } 4402 #ifdef SCTP_ASOCLOG_OF_TSNS 4403 void 4404 sctp_print_out_track_log(struct sctp_tcb *stcb) 4405 { 4406 #ifdef NOSIY_PRINTS 4407 int i; 4408 4409 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4410 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4411 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4412 SCTP_PRINTF("None rcvd\n"); 4413 goto none_in; 4414 } 4415 if (stcb->asoc.tsn_in_wrapped) { 4416 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4417 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4418 stcb->asoc.in_tsnlog[i].tsn, 4419 stcb->asoc.in_tsnlog[i].strm, 4420 stcb->asoc.in_tsnlog[i].seq, 4421 stcb->asoc.in_tsnlog[i].flgs, 4422 stcb->asoc.in_tsnlog[i].sz); 4423 } 4424 } 4425 if (stcb->asoc.tsn_in_at) { 4426 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4427 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4428 stcb->asoc.in_tsnlog[i].tsn, 4429 stcb->asoc.in_tsnlog[i].strm, 4430 stcb->asoc.in_tsnlog[i].seq, 4431 stcb->asoc.in_tsnlog[i].flgs, 4432 stcb->asoc.in_tsnlog[i].sz); 4433 } 4434 } 4435 none_in: 4436 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4437 if ((stcb->asoc.tsn_out_at == 0) && 4438 (stcb->asoc.tsn_out_wrapped == 0)) { 4439 SCTP_PRINTF("None sent\n"); 4440 } 4441 if 
(stcb->asoc.tsn_out_wrapped) { 4442 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4443 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4444 stcb->asoc.out_tsnlog[i].tsn, 4445 stcb->asoc.out_tsnlog[i].strm, 4446 stcb->asoc.out_tsnlog[i].seq, 4447 stcb->asoc.out_tsnlog[i].flgs, 4448 stcb->asoc.out_tsnlog[i].sz); 4449 } 4450 } 4451 if (stcb->asoc.tsn_out_at) { 4452 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4453 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4454 stcb->asoc.out_tsnlog[i].tsn, 4455 stcb->asoc.out_tsnlog[i].strm, 4456 stcb->asoc.out_tsnlog[i].seq, 4457 stcb->asoc.out_tsnlog[i].flgs, 4458 stcb->asoc.out_tsnlog[i].sz); 4459 } 4460 } 4461 #endif 4462 } 4463 #endif 4464 4465 void 4466 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4467 struct mbuf *op_err, 4468 int so_locked) 4469 { 4470 4471 if (stcb == NULL) { 4472 /* Got to have a TCB */ 4473 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4474 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4475 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4476 SCTP_CALLED_DIRECTLY_NOCMPSET); 4477 } 4478 } 4479 return; 4480 } 4481 /* notify the peer */ 4482 sctp_send_abort_tcb(stcb, op_err, so_locked); 4483 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4484 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4485 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4486 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4487 } 4488 /* notify the ulp */ 4489 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4490 sctp_abort_notification(stcb, 0, 0, NULL, so_locked); 4491 } 4492 /* now free the asoc */ 4493 #ifdef SCTP_ASOCLOG_OF_TSNS 4494 sctp_print_out_track_log(stcb); 4495 #endif 4496 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4497 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4498 } 4499 4500 void 4501 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4502 struct sockaddr *src, struct sockaddr *dst, 4503 struct sctphdr *sh, struct sctp_inpcb *inp, 4504 struct mbuf *cause, 4505 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4506 uint32_t vrf_id, uint16_t port) 4507 { 4508 struct sctp_chunkhdr *ch, chunk_buf; 4509 unsigned int chk_length; 4510 int contains_init_chunk; 4511 4512 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4513 /* Generate a TO address for future reference */ 4514 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4515 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4516 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4517 SCTP_CALLED_DIRECTLY_NOCMPSET); 4518 } 4519 } 4520 contains_init_chunk = 0; 4521 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4522 sizeof(*ch), (uint8_t *)&chunk_buf); 4523 while (ch != NULL) { 4524 chk_length = ntohs(ch->chunk_length); 4525 if (chk_length < sizeof(*ch)) { 4526 /* break to abort land */ 4527 break; 4528 } 4529 switch (ch->chunk_type) { 4530 case SCTP_INIT: 4531 contains_init_chunk = 1; 4532 break; 4533 case SCTP_PACKET_DROPPED: 4534 /* we don't respond to pkt-dropped */ 4535 return; 4536 case SCTP_ABORT_ASSOCIATION: 4537 /* we don't respond with an ABORT to an ABORT */ 4538 return; 4539 case SCTP_SHUTDOWN_COMPLETE: 4540 /* 4541 * we ignore it since we are not waiting for it and 4542 * peer is gone 4543 */ 4544 return; 4545 case SCTP_SHUTDOWN_ACK: 4546 sctp_send_shutdown_complete2(src, dst, sh, 4547 mflowtype, mflowid, fibnum, 4548 vrf_id, port); 4549 return; 4550 default: 4551 break; 4552 } 4553 offset += SCTP_SIZE32(chk_length); 4554 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4555 sizeof(*ch), (uint8_t 
*)&chunk_buf); 4556 } 4557 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4558 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4559 (contains_init_chunk == 0))) { 4560 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4561 mflowtype, mflowid, fibnum, 4562 vrf_id, port); 4563 } 4564 } 4565 4566 /* 4567 * check the inbound datagram to make sure there is not an abort inside it, 4568 * if there is return 1, else return 0. 4569 */ 4570 int 4571 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill) 4572 { 4573 struct sctp_chunkhdr *ch; 4574 struct sctp_init_chunk *init_chk, chunk_buf; 4575 int offset; 4576 unsigned int chk_length; 4577 4578 offset = iphlen + sizeof(struct sctphdr); 4579 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4580 (uint8_t *)&chunk_buf); 4581 while (ch != NULL) { 4582 chk_length = ntohs(ch->chunk_length); 4583 if (chk_length < sizeof(*ch)) { 4584 /* packet is probably corrupt */ 4585 break; 4586 } 4587 /* we seem to be ok, is it an abort? */ 4588 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4589 /* yep, tell them */ 4590 return (1); 4591 } 4592 if (ch->chunk_type == SCTP_INITIATION) { 4593 /* need to update the Vtag */ 4594 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4595 offset, sizeof(*init_chk), (uint8_t *)&chunk_buf); 4596 if (init_chk != NULL) { 4597 *vtagfill = ntohl(init_chk->init.initiate_tag); 4598 } 4599 } 4600 /* Nope, move to the next chunk */ 4601 offset += SCTP_SIZE32(chk_length); 4602 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4603 sizeof(*ch), (uint8_t *)&chunk_buf); 4604 } 4605 return (0); 4606 } 4607 4608 /* 4609 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4610 * set (i.e. it's 0) so, create this function to compare link local scopes 4611 */ 4612 #ifdef INET6 4613 uint32_t 4614 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4615 { 4616 struct sockaddr_in6 a, b; 4617 4618 /* save copies */ 4619 a = *addr1; 4620 b = *addr2; 4621 4622 if (a.sin6_scope_id == 0) 4623 if (sa6_recoverscope(&a)) { 4624 /* can't get scope, so can't match */ 4625 return (0); 4626 } 4627 if (b.sin6_scope_id == 0) 4628 if (sa6_recoverscope(&b)) { 4629 /* can't get scope, so can't match */ 4630 return (0); 4631 } 4632 if (a.sin6_scope_id != b.sin6_scope_id) 4633 return (0); 4634 4635 return (1); 4636 } 4637 4638 /* 4639 * returns a sockaddr_in6 with embedded scope recovered and removed 4640 */ 4641 struct sockaddr_in6 * 4642 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4643 { 4644 /* check and strip embedded scope junk */ 4645 if (addr->sin6_family == AF_INET6) { 4646 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4647 if (addr->sin6_scope_id == 0) { 4648 *store = *addr; 4649 if (!sa6_recoverscope(store)) { 4650 /* use the recovered scope */ 4651 addr = store; 4652 } 4653 } else { 4654 /* else, return the original "to" addr */ 4655 in6_clearscope(&addr->sin6_addr); 4656 } 4657 } 4658 } 4659 return (addr); 4660 } 4661 #endif 4662 4663 /* 4664 * are the two addresses the same? 
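 * (only the raw protocol addresses are compared; port numbers are not
 * considered)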
currently a "scopeless" check returns: 1 4665 * if same, 0 if not 4666 */ 4667 int 4668 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4669 { 4670 4671 /* must be valid */ 4672 if (sa1 == NULL || sa2 == NULL) 4673 return (0); 4674 4675 /* must be the same family */ 4676 if (sa1->sa_family != sa2->sa_family) 4677 return (0); 4678 4679 switch (sa1->sa_family) { 4680 #ifdef INET6 4681 case AF_INET6: 4682 { 4683 /* IPv6 addresses */ 4684 struct sockaddr_in6 *sin6_1, *sin6_2; 4685 4686 sin6_1 = (struct sockaddr_in6 *)sa1; 4687 sin6_2 = (struct sockaddr_in6 *)sa2; 4688 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4689 sin6_2)); 4690 } 4691 #endif 4692 #ifdef INET 4693 case AF_INET: 4694 { 4695 /* IPv4 addresses */ 4696 struct sockaddr_in *sin_1, *sin_2; 4697 4698 sin_1 = (struct sockaddr_in *)sa1; 4699 sin_2 = (struct sockaddr_in *)sa2; 4700 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4701 } 4702 #endif 4703 default: 4704 /* we don't do these... */ 4705 return (0); 4706 } 4707 } 4708 4709 void 4710 sctp_print_address(struct sockaddr *sa) 4711 { 4712 #ifdef INET6 4713 char ip6buf[INET6_ADDRSTRLEN]; 4714 #endif 4715 4716 switch (sa->sa_family) { 4717 #ifdef INET6 4718 case AF_INET6: 4719 { 4720 struct sockaddr_in6 *sin6; 4721 4722 sin6 = (struct sockaddr_in6 *)sa; 4723 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4724 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4725 ntohs(sin6->sin6_port), 4726 sin6->sin6_scope_id); 4727 break; 4728 } 4729 #endif 4730 #ifdef INET 4731 case AF_INET: 4732 { 4733 struct sockaddr_in *sin; 4734 unsigned char *p; 4735 4736 sin = (struct sockaddr_in *)sa; 4737 p = (unsigned char *)&sin->sin_addr; 4738 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4739 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4740 break; 4741 } 4742 #endif 4743 default: 4744 SCTP_PRINTF("?\n"); 4745 break; 4746 } 4747 } 4748 4749 void 4750 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4751 struct sctp_inpcb *new_inp, 4752 struct sctp_tcb *stcb, 4753 int waitflags) 4754 { 4755 /* 4756 * go through our old INP and pull off any control structures that 4757 * belong to stcb and move then to the new inp. 4758 */ 4759 struct socket *old_so, *new_so; 4760 struct sctp_queued_to_read *control, *nctl; 4761 struct sctp_readhead tmp_queue; 4762 struct mbuf *m; 4763 int error = 0; 4764 4765 old_so = old_inp->sctp_socket; 4766 new_so = new_inp->sctp_socket; 4767 TAILQ_INIT(&tmp_queue); 4768 error = sblock(&old_so->so_rcv, waitflags); 4769 if (error) { 4770 /* 4771 * Gak, can't get sblock, we have a problem. data will be 4772 * left stranded.. and we don't dare look at it since the 4773 * other thread may be reading something. Oh well, its a 4774 * screwed up app that does a peeloff OR a accept while 4775 * reading from the main socket... actually its only the 4776 * peeloff() case, since I think read will fail on a 4777 * listening socket.. 4778 */ 4779 return; 4780 } 4781 /* lock the socket buffers */ 4782 SCTP_INP_READ_LOCK(old_inp); 4783 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4784 /* Pull off all for out target stcb */ 4785 if (control->stcb == stcb) { 4786 /* remove it we want it */ 4787 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4788 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4789 m = control->data; 4790 while (m) { 4791 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4792 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4793 } 4794 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4795 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4796 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4797 } 4798 m = SCTP_BUF_NEXT(m); 4799 } 4800 } 4801 } 4802 SCTP_INP_READ_UNLOCK(old_inp); 4803 /* Remove the sb-lock on the old socket */ 4804 4805 sbunlock(&old_so->so_rcv); 4806 /* Now we move them over to the new socket buffer */ 4807 SCTP_INP_READ_LOCK(new_inp); 4808 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4809 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4810 m = control->data; 4811 while (m) { 4812 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4813 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4814 } 4815 sctp_sballoc(stcb, &new_so->so_rcv, m); 4816 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4817 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4818 } 4819 m = SCTP_BUF_NEXT(m); 4820 } 4821 } 4822 SCTP_INP_READ_UNLOCK(new_inp); 4823 } 4824 4825 void 4826 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4827 struct sctp_tcb *stcb, 4828 int so_locked 4829 SCTP_UNUSED 4830 ) 4831 { 4832 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4833 sctp_sorwakeup(inp, inp->sctp_socket); 4834 } 4835 } 4836 4837 void 4838 sctp_add_to_readq(struct sctp_inpcb *inp, 4839 struct sctp_tcb *stcb, 4840 struct sctp_queued_to_read *control, 4841 struct sockbuf *sb, 4842 int end, 4843 int inp_read_lock_held, 4844 int so_locked) 4845 { 4846 /* 4847 * Here we must place the control on the end of the socket read 4848 * queue AND increment sb_cc so that select will work properly on 4849 * read. 4850 */ 4851 struct mbuf *m, *prev = NULL; 4852 4853 if (inp == NULL) { 4854 /* Gak, TSNH!! */ 4855 #ifdef INVARIANTS 4856 panic("Gak, inp NULL on add_to_readq"); 4857 #endif 4858 return; 4859 } 4860 if (inp_read_lock_held == 0) 4861 SCTP_INP_READ_LOCK(inp); 4862 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4863 if (!control->on_strm_q) { 4864 sctp_free_remote_addr(control->whoFrom); 4865 if (control->data) { 4866 sctp_m_freem(control->data); 4867 control->data = NULL; 4868 } 4869 sctp_free_a_readq(stcb, control); 4870 } 4871 if (inp_read_lock_held == 0) 4872 SCTP_INP_READ_UNLOCK(inp); 4873 return; 4874 } 4875 if (!(control->spec_flags & M_NOTIFICATION)) { 4876 atomic_add_int(&inp->total_recvs, 1); 4877 if (!control->do_not_ref_stcb) { 4878 atomic_add_int(&stcb->total_recvs, 1); 4879 } 4880 } 4881 m = control->data; 4882 control->held_length = 0; 4883 control->length = 0; 4884 while (m) { 4885 if (SCTP_BUF_LEN(m) == 0) { 4886 /* Skip mbufs with NO length */ 4887 if (prev == NULL) { 4888 /* First one */ 4889 control->data = sctp_m_free(m); 4890 m = control->data; 4891 } else { 4892 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4893 m = SCTP_BUF_NEXT(prev); 4894 } 4895 if (m == NULL) { 4896 control->tail_mbuf = prev; 4897 } 4898 continue; 4899 } 4900 prev = m; 4901 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4902 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4903 } 4904 sctp_sballoc(stcb, sb, m); 4905 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4906 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 4907 } 4908 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4909 m = SCTP_BUF_NEXT(m); 4910 } 4911 if (prev != NULL) { 4912 control->tail_mbuf = prev; 4913 } else { 4914 /* Everything got collapsed out?? */ 4915 if (!control->on_strm_q) { 4916 sctp_free_remote_addr(control->whoFrom); 4917 sctp_free_a_readq(stcb, control); 4918 } 4919 if (inp_read_lock_held == 0) 4920 SCTP_INP_READ_UNLOCK(inp); 4921 return; 4922 } 4923 if (end) { 4924 control->end_added = 1; 4925 } 4926 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4927 control->on_read_q = 1; 4928 if (inp_read_lock_held == 0) 4929 SCTP_INP_READ_UNLOCK(inp); 4930 if (inp && inp->sctp_socket) { 4931 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4932 } 4933 } 4934 4935 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4936 *************ALTERNATE ROUTING CODE 4937 */ 4938 4939 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4940 *************ALTERNATE ROUTING CODE 4941 */ 4942 4943 struct mbuf * 4944 sctp_generate_cause(uint16_t code, char *info) 4945 { 4946 struct mbuf *m; 4947 struct sctp_gen_error_cause *cause; 4948 size_t info_len; 4949 uint16_t len; 4950 4951 if ((code == 0) || (info == NULL)) { 4952 return (NULL); 4953 } 4954 info_len = strlen(info); 4955 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4956 return (NULL); 4957 } 4958 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4959 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4960 if (m != NULL) { 4961 SCTP_BUF_LEN(m) = len; 4962 cause = mtod(m, struct sctp_gen_error_cause *); 4963 cause->code = htons(code); 4964 cause->length = htons(len); 4965 memcpy(cause->info, info, info_len); 4966 } 4967 return (m); 4968 } 4969 4970 struct mbuf * 4971 sctp_generate_no_user_data_cause(uint32_t tsn) 4972 { 4973 struct mbuf *m; 4974 struct sctp_error_no_user_data *no_user_data_cause; 4975 uint16_t len; 4976 4977 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 4978 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4979 if (m != NULL) { 4980 SCTP_BUF_LEN(m) = len; 4981 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 4982 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 4983 no_user_data_cause->cause.length = htons(len); 4984 no_user_data_cause->tsn = htonl(tsn); 4985 } 4986 return (m); 4987 } 4988 4989 #ifdef SCTP_MBCNT_LOGGING 4990 void 4991 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4992 struct sctp_tmit_chunk *tp1, int chk_cnt) 4993 { 4994 if (tp1->data == NULL) { 4995 return; 4996 } 4997 asoc->chunks_on_out_queue -= chk_cnt; 4998 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 4999 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 5000 asoc->total_output_queue_size, 5001 tp1->book_size, 5002 0, 5003 tp1->mbcnt); 5004 } 5005 if (asoc->total_output_queue_size >= tp1->book_size) { 5006 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 5007 } else { 5008 asoc->total_output_queue_size = 0; 5009 } 5010 5011 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 5012 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 5013 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 5014 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 5015 } else { 5016 stcb->sctp_socket->so_snd.sb_cc = 0; 5017 5018 } 5019 } 5020 } 5021 5022 #endif 5023 5024 int 5025 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 5026 uint8_t sent, int 
so_locked) 5027 { 5028 struct sctp_stream_out *strq; 5029 struct sctp_tmit_chunk *chk = NULL, *tp2; 5030 struct sctp_stream_queue_pending *sp; 5031 uint32_t mid; 5032 uint16_t sid; 5033 uint8_t foundeom = 0; 5034 int ret_sz = 0; 5035 int notdone; 5036 int do_wakeup_routine = 0; 5037 5038 sid = tp1->rec.data.sid; 5039 mid = tp1->rec.data.mid; 5040 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5041 stcb->asoc.abandoned_sent[0]++; 5042 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5043 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5044 #if defined(SCTP_DETAILED_STR_STATS) 5045 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5046 #endif 5047 } else { 5048 stcb->asoc.abandoned_unsent[0]++; 5049 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5050 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5051 #if defined(SCTP_DETAILED_STR_STATS) 5052 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5053 #endif 5054 } 5055 do { 5056 ret_sz += tp1->book_size; 5057 if (tp1->data != NULL) { 5058 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5059 sctp_flight_size_decrease(tp1); 5060 sctp_total_flight_decrease(stcb, tp1); 5061 } 5062 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5063 stcb->asoc.peers_rwnd += tp1->send_size; 5064 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5065 if (sent) { 5066 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5067 } else { 5068 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5069 } 5070 if (tp1->data) { 5071 sctp_m_freem(tp1->data); 5072 tp1->data = NULL; 5073 } 5074 do_wakeup_routine = 1; 5075 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5076 stcb->asoc.sent_queue_cnt_removeable--; 5077 } 5078 } 5079 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5080 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5081 SCTP_DATA_NOT_FRAG) { 5082 /* not frag'ed we ae done */ 5083 notdone = 0; 5084 foundeom = 1; 5085 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5086 /* end of frag, we are done */ 5087 notdone = 0; 5088 foundeom = 1; 5089 } else { 5090 /* 5091 * Its a begin or middle piece, we must mark all of 5092 * it 5093 */ 5094 notdone = 1; 5095 tp1 = TAILQ_NEXT(tp1, sctp_next); 5096 } 5097 } while (tp1 && notdone); 5098 if (foundeom == 0) { 5099 /* 5100 * The multi-part message was scattered across the send and 5101 * sent queue. 5102 */ 5103 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5104 if ((tp1->rec.data.sid != sid) || 5105 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5106 break; 5107 } 5108 /* 5109 * save to chk in case we have some on stream out 5110 * queue. If so and we have an un-transmitted one we 5111 * don't have to fudge the TSN. 5112 */ 5113 chk = tp1; 5114 ret_sz += tp1->book_size; 5115 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5116 if (sent) { 5117 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5118 } else { 5119 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5120 } 5121 if (tp1->data) { 5122 sctp_m_freem(tp1->data); 5123 tp1->data = NULL; 5124 } 5125 /* No flight involved here book the size to 0 */ 5126 tp1->book_size = 0; 5127 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5128 foundeom = 1; 5129 } 5130 do_wakeup_routine = 1; 5131 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5132 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5133 /* 5134 * on to the sent queue so we can wait for it to be 5135 * passed by. 
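 * Each such fragment was marked SCTP_FORWARD_TSN_SKIP above, so a later
 * FORWARD-TSN (or I-FORWARD-TSN when I-DATA is in use) can move the
 * peer's cumulative TSN past the abandoned message.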
5136 */ 5137 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5138 sctp_next); 5139 stcb->asoc.send_queue_cnt--; 5140 stcb->asoc.sent_queue_cnt++; 5141 } 5142 } 5143 if (foundeom == 0) { 5144 /* 5145 * Still no eom found. That means there is stuff left on the 5146 * stream out queue.. yuck. 5147 */ 5148 SCTP_TCB_SEND_LOCK(stcb); 5149 strq = &stcb->asoc.strmout[sid]; 5150 sp = TAILQ_FIRST(&strq->outqueue); 5151 if (sp != NULL) { 5152 sp->discard_rest = 1; 5153 /* 5154 * We may need to put a chunk on the queue that 5155 * holds the TSN that would have been sent with the 5156 * LAST bit. 5157 */ 5158 if (chk == NULL) { 5159 /* Yep, we have to */ 5160 sctp_alloc_a_chunk(stcb, chk); 5161 if (chk == NULL) { 5162 /* 5163 * we are hosed. All we can do is 5164 * nothing.. which will cause an 5165 * abort if the peer is paying 5166 * attention. 5167 */ 5168 goto oh_well; 5169 } 5170 memset(chk, 0, sizeof(*chk)); 5171 chk->rec.data.rcv_flags = 0; 5172 chk->sent = SCTP_FORWARD_TSN_SKIP; 5173 chk->asoc = &stcb->asoc; 5174 if (stcb->asoc.idata_supported == 0) { 5175 if (sp->sinfo_flags & SCTP_UNORDERED) { 5176 chk->rec.data.mid = 0; 5177 } else { 5178 chk->rec.data.mid = strq->next_mid_ordered; 5179 } 5180 } else { 5181 if (sp->sinfo_flags & SCTP_UNORDERED) { 5182 chk->rec.data.mid = strq->next_mid_unordered; 5183 } else { 5184 chk->rec.data.mid = strq->next_mid_ordered; 5185 } 5186 } 5187 chk->rec.data.sid = sp->sid; 5188 chk->rec.data.ppid = sp->ppid; 5189 chk->rec.data.context = sp->context; 5190 chk->flags = sp->act_flags; 5191 chk->whoTo = NULL; 5192 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5193 strq->chunks_on_queues++; 5194 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5195 stcb->asoc.sent_queue_cnt++; 5196 stcb->asoc.pr_sctp_cnt++; 5197 } 5198 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5199 if (sp->sinfo_flags & SCTP_UNORDERED) { 5200 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5201 } 5202 if (stcb->asoc.idata_supported == 0) { 5203 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5204 strq->next_mid_ordered++; 5205 } 5206 } else { 5207 if (sp->sinfo_flags & SCTP_UNORDERED) { 5208 strq->next_mid_unordered++; 5209 } else { 5210 strq->next_mid_ordered++; 5211 } 5212 } 5213 oh_well: 5214 if (sp->data) { 5215 /* 5216 * Pull any data to free up the SB and allow 5217 * sender to "add more" while we will throw 5218 * away :-) 5219 */ 5220 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5221 ret_sz += sp->length; 5222 do_wakeup_routine = 1; 5223 sp->some_taken = 1; 5224 sctp_m_freem(sp->data); 5225 sp->data = NULL; 5226 sp->tail_mbuf = NULL; 5227 sp->length = 0; 5228 } 5229 } 5230 SCTP_TCB_SEND_UNLOCK(stcb); 5231 } 5232 if (do_wakeup_routine) { 5233 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5234 } 5235 return (ret_sz); 5236 } 5237 5238 /* 5239 * checks to see if the given address, sa, is one that is currently known by 5240 * the kernel note: can't distinguish the same address on multiple interfaces 5241 * and doesn't handle multiple addresses with different zone/scope id's note: 5242 * ifa_ifwithaddr() compares the entire sockaddr struct 5243 */ 5244 struct sctp_ifa * 5245 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5246 int holds_lock) 5247 { 5248 struct sctp_laddr *laddr; 5249 5250 if (holds_lock == 0) { 5251 SCTP_INP_RLOCK(inp); 5252 } 5253 5254 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5255 if (laddr->ifa == NULL) 5256 continue; 5257 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5258 continue; 5259 
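		/* Families match; compare the actual addresses per family below. */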
#ifdef INET 5260 if (addr->sa_family == AF_INET) { 5261 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5262 laddr->ifa->address.sin.sin_addr.s_addr) { 5263 /* found him. */ 5264 break; 5265 } 5266 } 5267 #endif 5268 #ifdef INET6 5269 if (addr->sa_family == AF_INET6) { 5270 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5271 &laddr->ifa->address.sin6)) { 5272 /* found him. */ 5273 break; 5274 } 5275 } 5276 #endif 5277 } 5278 if (holds_lock == 0) { 5279 SCTP_INP_RUNLOCK(inp); 5280 } 5281 if (laddr != NULL) { 5282 return (laddr->ifa); 5283 } else { 5284 return (NULL); 5285 } 5286 } 5287 5288 uint32_t 5289 sctp_get_ifa_hash_val(struct sockaddr *addr) 5290 { 5291 switch (addr->sa_family) { 5292 #ifdef INET 5293 case AF_INET: 5294 { 5295 struct sockaddr_in *sin; 5296 5297 sin = (struct sockaddr_in *)addr; 5298 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5299 } 5300 #endif 5301 #ifdef INET6 5302 case AF_INET6: 5303 { 5304 struct sockaddr_in6 *sin6; 5305 uint32_t hash_of_addr; 5306 5307 sin6 = (struct sockaddr_in6 *)addr; 5308 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5309 sin6->sin6_addr.s6_addr32[1] + 5310 sin6->sin6_addr.s6_addr32[2] + 5311 sin6->sin6_addr.s6_addr32[3]); 5312 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5313 return (hash_of_addr); 5314 } 5315 #endif 5316 default: 5317 break; 5318 } 5319 return (0); 5320 } 5321 5322 struct sctp_ifa * 5323 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5324 { 5325 struct sctp_ifa *sctp_ifap; 5326 struct sctp_vrf *vrf; 5327 struct sctp_ifalist *hash_head; 5328 uint32_t hash_of_addr; 5329 5330 if (holds_lock == 0) { 5331 SCTP_IPI_ADDR_RLOCK(); 5332 } else { 5333 SCTP_IPI_ADDR_LOCK_ASSERT(); 5334 } 5335 5336 vrf = sctp_find_vrf(vrf_id); 5337 if (vrf == NULL) { 5338 if (holds_lock == 0) 5339 SCTP_IPI_ADDR_RUNLOCK(); 5340 return (NULL); 5341 } 5342 5343 hash_of_addr = sctp_get_ifa_hash_val(addr); 5344 5345 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5346 if (hash_head == NULL) { 5347 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5348 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5349 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5350 sctp_print_address(addr); 5351 SCTP_PRINTF("No such bucket for address\n"); 5352 if (holds_lock == 0) 5353 SCTP_IPI_ADDR_RUNLOCK(); 5354 5355 return (NULL); 5356 } 5357 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5358 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5359 continue; 5360 #ifdef INET 5361 if (addr->sa_family == AF_INET) { 5362 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5363 sctp_ifap->address.sin.sin_addr.s_addr) { 5364 /* found him. */ 5365 break; 5366 } 5367 } 5368 #endif 5369 #ifdef INET6 5370 if (addr->sa_family == AF_INET6) { 5371 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5372 &sctp_ifap->address.sin6)) { 5373 /* found him. */ 5374 break; 5375 } 5376 } 5377 #endif 5378 } 5379 if (holds_lock == 0) 5380 SCTP_IPI_ADDR_RUNLOCK(); 5381 return (sctp_ifap); 5382 } 5383 5384 static void 5385 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5386 uint32_t rwnd_req) 5387 { 5388 /* User pulled some data, do we need a rwnd update? 
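 * Only worth doing once the reader has freed at least rwnd_req bytes
 * since the last report (rwnd_req is never less than an MTU, see
 * sctp_sorecvmsg()); in that case a window-update SACK is pushed out
 * right away.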
*/ 5389 struct epoch_tracker et; 5390 int r_unlocked = 0; 5391 uint32_t dif, rwnd; 5392 struct socket *so = NULL; 5393 5394 if (stcb == NULL) 5395 return; 5396 5397 atomic_add_int(&stcb->asoc.refcnt, 1); 5398 5399 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5400 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5401 /* Pre-check If we are freeing no update */ 5402 goto no_lock; 5403 } 5404 SCTP_INP_INCR_REF(stcb->sctp_ep); 5405 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5406 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5407 goto out; 5408 } 5409 so = stcb->sctp_socket; 5410 if (so == NULL) { 5411 goto out; 5412 } 5413 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5414 /* Have you have freed enough to look */ 5415 *freed_so_far = 0; 5416 /* Yep, its worth a look and the lock overhead */ 5417 5418 /* Figure out what the rwnd would be */ 5419 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5420 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5421 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5422 } else { 5423 dif = 0; 5424 } 5425 if (dif >= rwnd_req) { 5426 if (hold_rlock) { 5427 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5428 r_unlocked = 1; 5429 } 5430 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5431 /* 5432 * One last check before we allow the guy possibly 5433 * to get in. There is a race, where the guy has not 5434 * reached the gate. In that case 5435 */ 5436 goto out; 5437 } 5438 SCTP_TCB_LOCK(stcb); 5439 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5440 /* No reports here */ 5441 SCTP_TCB_UNLOCK(stcb); 5442 goto out; 5443 } 5444 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5445 NET_EPOCH_ENTER(et); 5446 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5447 5448 sctp_chunk_output(stcb->sctp_ep, stcb, 5449 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5450 /* make sure no timer is running */ 5451 NET_EPOCH_EXIT(et); 5452 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5453 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5454 SCTP_TCB_UNLOCK(stcb); 5455 } else { 5456 /* Update how much we have pending */ 5457 stcb->freed_by_sorcv_sincelast = dif; 5458 } 5459 out: 5460 if (so && r_unlocked && hold_rlock) { 5461 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5462 } 5463 5464 SCTP_INP_DECR_REF(stcb->sctp_ep); 5465 no_lock: 5466 atomic_add_int(&stcb->asoc.refcnt, -1); 5467 return; 5468 } 5469 5470 int 5471 sctp_sorecvmsg(struct socket *so, 5472 struct uio *uio, 5473 struct mbuf **mp, 5474 struct sockaddr *from, 5475 int fromlen, 5476 int *msg_flags, 5477 struct sctp_sndrcvinfo *sinfo, 5478 int filling_sinfo) 5479 { 5480 /* 5481 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5482 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5483 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
5484 * On the way out we may send out any combination of: 5485 * MSG_NOTIFICATION MSG_EOR 5486 * 5487 */ 5488 struct sctp_inpcb *inp = NULL; 5489 ssize_t my_len = 0; 5490 ssize_t cp_len = 0; 5491 int error = 0; 5492 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5493 struct mbuf *m = NULL; 5494 struct sctp_tcb *stcb = NULL; 5495 int wakeup_read_socket = 0; 5496 int freecnt_applied = 0; 5497 int out_flags = 0, in_flags = 0; 5498 int block_allowed = 1; 5499 uint32_t freed_so_far = 0; 5500 ssize_t copied_so_far = 0; 5501 int in_eeor_mode = 0; 5502 int no_rcv_needed = 0; 5503 uint32_t rwnd_req = 0; 5504 int hold_sblock = 0; 5505 int hold_rlock = 0; 5506 ssize_t slen = 0; 5507 uint32_t held_length = 0; 5508 int sockbuf_lock = 0; 5509 5510 if (uio == NULL) { 5511 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5512 return (EINVAL); 5513 } 5514 5515 if (msg_flags) { 5516 in_flags = *msg_flags; 5517 if (in_flags & MSG_PEEK) 5518 SCTP_STAT_INCR(sctps_read_peeks); 5519 } else { 5520 in_flags = 0; 5521 } 5522 slen = uio->uio_resid; 5523 5524 /* Pull in and set up our int flags */ 5525 if (in_flags & MSG_OOB) { 5526 /* Out of band's NOT supported */ 5527 return (EOPNOTSUPP); 5528 } 5529 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5530 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5531 return (EINVAL); 5532 } 5533 if ((in_flags & (MSG_DONTWAIT 5534 | MSG_NBIO 5535 )) || 5536 SCTP_SO_IS_NBIO(so)) { 5537 block_allowed = 0; 5538 } 5539 /* setup the endpoint */ 5540 inp = (struct sctp_inpcb *)so->so_pcb; 5541 if (inp == NULL) { 5542 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5543 return (EFAULT); 5544 } 5545 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5546 /* Must be at least a MTU's worth */ 5547 if (rwnd_req < SCTP_MIN_RWND) 5548 rwnd_req = SCTP_MIN_RWND; 5549 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5550 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5551 sctp_misc_ints(SCTP_SORECV_ENTER, 5552 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5553 } 5554 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5555 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5556 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5557 } 5558 5559 5560 error = sblock(&so->so_rcv, (block_allowed ? 
SBL_WAIT : 0)); 5561 if (error) { 5562 goto release_unlocked; 5563 } 5564 sockbuf_lock = 1; 5565 restart: 5566 5567 restart_nosblocks: 5568 if (hold_sblock == 0) { 5569 SOCKBUF_LOCK(&so->so_rcv); 5570 hold_sblock = 1; 5571 } 5572 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5573 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5574 goto out; 5575 } 5576 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5577 if (so->so_error) { 5578 error = so->so_error; 5579 if ((in_flags & MSG_PEEK) == 0) 5580 so->so_error = 0; 5581 goto out; 5582 } else { 5583 if (so->so_rcv.sb_cc == 0) { 5584 /* indicate EOF */ 5585 error = 0; 5586 goto out; 5587 } 5588 } 5589 } 5590 if (so->so_rcv.sb_cc <= held_length) { 5591 if (so->so_error) { 5592 error = so->so_error; 5593 if ((in_flags & MSG_PEEK) == 0) { 5594 so->so_error = 0; 5595 } 5596 goto out; 5597 } 5598 if ((so->so_rcv.sb_cc == 0) && 5599 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5600 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5601 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5602 /* 5603 * For active open side clear flags for 5604 * re-use passive open is blocked by 5605 * connect. 5606 */ 5607 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5608 /* 5609 * You were aborted, passive side 5610 * always hits here 5611 */ 5612 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5613 error = ECONNRESET; 5614 } 5615 so->so_state &= ~(SS_ISCONNECTING | 5616 SS_ISDISCONNECTING | 5617 SS_ISCONFIRMING | 5618 SS_ISCONNECTED); 5619 if (error == 0) { 5620 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5621 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5622 error = ENOTCONN; 5623 } 5624 } 5625 goto out; 5626 } 5627 } 5628 if (block_allowed) { 5629 error = sbwait(&so->so_rcv); 5630 if (error) { 5631 goto out; 5632 } 5633 held_length = 0; 5634 goto restart_nosblocks; 5635 } else { 5636 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5637 error = EWOULDBLOCK; 5638 goto out; 5639 } 5640 } 5641 if (hold_sblock == 1) { 5642 SOCKBUF_UNLOCK(&so->so_rcv); 5643 hold_sblock = 0; 5644 } 5645 /* we possibly have data we can read */ 5646 /* sa_ignore FREED_MEMORY */ 5647 control = TAILQ_FIRST(&inp->read_queue); 5648 if (control == NULL) { 5649 /* 5650 * This could be happening since the appender did the 5651 * increment but as not yet did the tailq insert onto the 5652 * read_queue 5653 */ 5654 if (hold_rlock == 0) { 5655 SCTP_INP_READ_LOCK(inp); 5656 } 5657 control = TAILQ_FIRST(&inp->read_queue); 5658 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5659 #ifdef INVARIANTS 5660 panic("Huh, its non zero and nothing on control?"); 5661 #endif 5662 so->so_rcv.sb_cc = 0; 5663 } 5664 SCTP_INP_READ_UNLOCK(inp); 5665 hold_rlock = 0; 5666 goto restart; 5667 } 5668 5669 if ((control->length == 0) && 5670 (control->do_not_ref_stcb)) { 5671 /* 5672 * Clean up code for freeing assoc that left behind a 5673 * pdapi.. maybe a peer in EEOR that just closed after 5674 * sending and never indicated a EOR. 5675 */ 5676 if (hold_rlock == 0) { 5677 hold_rlock = 1; 5678 SCTP_INP_READ_LOCK(inp); 5679 } 5680 control->held_length = 0; 5681 if (control->data) { 5682 /* Hmm there is data here .. 
fix */ 5683 struct mbuf *m_tmp; 5684 int cnt = 0; 5685 5686 m_tmp = control->data; 5687 while (m_tmp) { 5688 cnt += SCTP_BUF_LEN(m_tmp); 5689 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5690 control->tail_mbuf = m_tmp; 5691 control->end_added = 1; 5692 } 5693 m_tmp = SCTP_BUF_NEXT(m_tmp); 5694 } 5695 control->length = cnt; 5696 } else { 5697 /* remove it */ 5698 TAILQ_REMOVE(&inp->read_queue, control, next); 5699 /* Add back any hidden data */ 5700 sctp_free_remote_addr(control->whoFrom); 5701 sctp_free_a_readq(stcb, control); 5702 } 5703 if (hold_rlock) { 5704 hold_rlock = 0; 5705 SCTP_INP_READ_UNLOCK(inp); 5706 } 5707 goto restart; 5708 } 5709 if ((control->length == 0) && 5710 (control->end_added == 1)) { 5711 /* 5712 * Do we also need to check for (control->pdapi_aborted == 5713 * 1)? 5714 */ 5715 if (hold_rlock == 0) { 5716 hold_rlock = 1; 5717 SCTP_INP_READ_LOCK(inp); 5718 } 5719 TAILQ_REMOVE(&inp->read_queue, control, next); 5720 if (control->data) { 5721 #ifdef INVARIANTS 5722 panic("control->data not null but control->length == 0"); 5723 #else 5724 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n"); 5725 sctp_m_freem(control->data); 5726 control->data = NULL; 5727 #endif 5728 } 5729 if (control->aux_data) { 5730 sctp_m_free(control->aux_data); 5731 control->aux_data = NULL; 5732 } 5733 #ifdef INVARIANTS 5734 if (control->on_strm_q) { 5735 panic("About to free ctl:%p so:%p and its in %d", 5736 control, so, control->on_strm_q); 5737 } 5738 #endif 5739 sctp_free_remote_addr(control->whoFrom); 5740 sctp_free_a_readq(stcb, control); 5741 if (hold_rlock) { 5742 hold_rlock = 0; 5743 SCTP_INP_READ_UNLOCK(inp); 5744 } 5745 goto restart; 5746 } 5747 if (control->length == 0) { 5748 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5749 (filling_sinfo)) { 5750 /* find a more suitable one than this */ 5751 ctl = TAILQ_NEXT(control, next); 5752 while (ctl) { 5753 if ((ctl->stcb != control->stcb) && (ctl->length) && 5754 (ctl->some_taken || 5755 (ctl->spec_flags & M_NOTIFICATION) || 5756 ((ctl->do_not_ref_stcb == 0) && 5757 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5758 ) { 5759 /*- 5760 * If we have a different TCB next, and there is data 5761 * present. If we have already taken some (pdapi), OR we can 5762 * ref the tcb and no delivery has started on this stream, we 5763 * take it. Note we allow a notification on a different 5764 * assoc to be delivered.. 5765 */ 5766 control = ctl; 5767 goto found_one; 5768 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5769 (ctl->length) && 5770 ((ctl->some_taken) || 5771 ((ctl->do_not_ref_stcb == 0) && 5772 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5773 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5774 /*- 5775 * If we have the same tcb, and there is data present, and we 5776 * have the strm interleave feature present. Then if we have 5777 * taken some (pdapi) or we can refer to that tcb AND we have 5778 * not started a delivery for this stream, we can take it. 5779 * Note we do NOT allow a notification on the same assoc to 5780 * be delivered. 5781 */ 5782 control = ctl; 5783 goto found_one; 5784 } 5785 ctl = TAILQ_NEXT(ctl, next); 5786 } 5787 } 5788 /* 5789 * if we reach here, no suitable replacement is available 5790 * <or> fragment interleave is NOT on. So stuff the sb_cc 5791 * into our held count, and it's time to sleep again.
5792 */ 5793 held_length = so->so_rcv.sb_cc; 5794 control->held_length = so->so_rcv.sb_cc; 5795 goto restart; 5796 } 5797 /* Clear the held length since there is something to read */ 5798 control->held_length = 0; 5799 found_one: 5800 /* 5801 * If we reach here, control has a some data for us to read off. 5802 * Note that stcb COULD be NULL. 5803 */ 5804 if (hold_rlock == 0) { 5805 hold_rlock = 1; 5806 SCTP_INP_READ_LOCK(inp); 5807 } 5808 control->some_taken++; 5809 stcb = control->stcb; 5810 if (stcb) { 5811 if ((control->do_not_ref_stcb == 0) && 5812 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5813 if (freecnt_applied == 0) 5814 stcb = NULL; 5815 } else if (control->do_not_ref_stcb == 0) { 5816 /* you can't free it on me please */ 5817 /* 5818 * The lock on the socket buffer protects us so the 5819 * free code will stop. But since we used the 5820 * socketbuf lock and the sender uses the tcb_lock 5821 * to increment, we need to use the atomic add to 5822 * the refcnt 5823 */ 5824 if (freecnt_applied) { 5825 #ifdef INVARIANTS 5826 panic("refcnt already incremented"); 5827 #else 5828 SCTP_PRINTF("refcnt already incremented?\n"); 5829 #endif 5830 } else { 5831 atomic_add_int(&stcb->asoc.refcnt, 1); 5832 freecnt_applied = 1; 5833 } 5834 /* 5835 * Setup to remember how much we have not yet told 5836 * the peer our rwnd has opened up. Note we grab the 5837 * value from the tcb from last time. Note too that 5838 * sack sending clears this when a sack is sent, 5839 * which is fine. Once we hit the rwnd_req, we then 5840 * will go to the sctp_user_rcvd() that will not 5841 * lock until it KNOWs it MUST send a WUP-SACK. 5842 */ 5843 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5844 stcb->freed_by_sorcv_sincelast = 0; 5845 } 5846 } 5847 if (stcb && 5848 ((control->spec_flags & M_NOTIFICATION) == 0) && 5849 control->do_not_ref_stcb == 0) { 5850 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5851 } 5852 5853 /* First lets get off the sinfo and sockaddr info */ 5854 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5855 sinfo->sinfo_stream = control->sinfo_stream; 5856 sinfo->sinfo_ssn = (uint16_t)control->mid; 5857 sinfo->sinfo_flags = control->sinfo_flags; 5858 sinfo->sinfo_ppid = control->sinfo_ppid; 5859 sinfo->sinfo_context = control->sinfo_context; 5860 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5861 sinfo->sinfo_tsn = control->sinfo_tsn; 5862 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5863 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5864 nxt = TAILQ_NEXT(control, next); 5865 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5866 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5867 struct sctp_extrcvinfo *s_extra; 5868 5869 s_extra = (struct sctp_extrcvinfo *)sinfo; 5870 if ((nxt) && 5871 (nxt->length)) { 5872 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5873 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5874 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5875 } 5876 if (nxt->spec_flags & M_NOTIFICATION) { 5877 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5878 } 5879 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5880 s_extra->serinfo_next_length = nxt->length; 5881 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5882 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5883 if (nxt->tail_mbuf != NULL) { 5884 if (nxt->end_added) { 5885 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5886 } 5887 } 5888 } else { 5889 /* 5890 * we explicitly 0 this, since the memcpy 5891 * got some other things 
beyond the older 5892 * sinfo_ that is on the control's structure 5893 * :-D 5894 */ 5895 nxt = NULL; 5896 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5897 s_extra->serinfo_next_aid = 0; 5898 s_extra->serinfo_next_length = 0; 5899 s_extra->serinfo_next_ppid = 0; 5900 s_extra->serinfo_next_stream = 0; 5901 } 5902 } 5903 /* 5904 * update off the real current cum-ack, if we have an stcb. 5905 */ 5906 if ((control->do_not_ref_stcb == 0) && stcb) 5907 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5908 /* 5909 * mask off the high bits, we keep the actual chunk bits in 5910 * there. 5911 */ 5912 sinfo->sinfo_flags &= 0x00ff; 5913 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5914 sinfo->sinfo_flags |= SCTP_UNORDERED; 5915 } 5916 } 5917 #ifdef SCTP_ASOCLOG_OF_TSNS 5918 { 5919 int index, newindex; 5920 struct sctp_pcbtsn_rlog *entry; 5921 5922 do { 5923 index = inp->readlog_index; 5924 newindex = index + 1; 5925 if (newindex >= SCTP_READ_LOG_SIZE) { 5926 newindex = 0; 5927 } 5928 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5929 entry = &inp->readlog[index]; 5930 entry->vtag = control->sinfo_assoc_id; 5931 entry->strm = control->sinfo_stream; 5932 entry->seq = (uint16_t)control->mid; 5933 entry->sz = control->length; 5934 entry->flgs = control->sinfo_flags; 5935 } 5936 #endif 5937 if ((fromlen > 0) && (from != NULL)) { 5938 union sctp_sockstore store; 5939 size_t len; 5940 5941 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5942 #ifdef INET6 5943 case AF_INET6: 5944 len = sizeof(struct sockaddr_in6); 5945 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5946 store.sin6.sin6_port = control->port_from; 5947 break; 5948 #endif 5949 #ifdef INET 5950 case AF_INET: 5951 #ifdef INET6 5952 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5953 len = sizeof(struct sockaddr_in6); 5954 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5955 &store.sin6); 5956 store.sin6.sin6_port = control->port_from; 5957 } else { 5958 len = sizeof(struct sockaddr_in); 5959 store.sin = control->whoFrom->ro._l_addr.sin; 5960 store.sin.sin_port = control->port_from; 5961 } 5962 #else 5963 len = sizeof(struct sockaddr_in); 5964 store.sin = control->whoFrom->ro._l_addr.sin; 5965 store.sin.sin_port = control->port_from; 5966 #endif 5967 break; 5968 #endif 5969 default: 5970 len = 0; 5971 break; 5972 } 5973 memcpy(from, &store, min((size_t)fromlen, len)); 5974 #ifdef INET6 5975 { 5976 struct sockaddr_in6 lsa6, *from6; 5977 5978 from6 = (struct sockaddr_in6 *)from; 5979 sctp_recover_scope_mac(from6, (&lsa6)); 5980 } 5981 #endif 5982 } 5983 if (hold_rlock) { 5984 SCTP_INP_READ_UNLOCK(inp); 5985 hold_rlock = 0; 5986 } 5987 if (hold_sblock) { 5988 SOCKBUF_UNLOCK(&so->so_rcv); 5989 hold_sblock = 0; 5990 } 5991 /* now copy out what data we can */ 5992 if (mp == NULL) { 5993 /* copy out each mbuf in the chain up to length */ 5994 get_more_data: 5995 m = control->data; 5996 while (m) { 5997 /* Move out all we can */ 5998 cp_len = uio->uio_resid; 5999 my_len = SCTP_BUF_LEN(m); 6000 if (cp_len > my_len) { 6001 /* not enough in this buf */ 6002 cp_len = my_len; 6003 } 6004 if (hold_rlock) { 6005 SCTP_INP_READ_UNLOCK(inp); 6006 hold_rlock = 0; 6007 } 6008 if (cp_len > 0) 6009 error = uiomove(mtod(m, char *), (int)cp_len, uio); 6010 /* re-read */ 6011 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 6012 goto release; 6013 } 6014 6015 if ((control->do_not_ref_stcb == 0) && stcb && 6016 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 6017 no_rcv_needed = 1; 6018 } 6019 if 
(error) { 6020 /* error we are out of here */ 6021 goto release; 6022 } 6023 SCTP_INP_READ_LOCK(inp); 6024 hold_rlock = 1; 6025 if (cp_len == SCTP_BUF_LEN(m)) { 6026 if ((SCTP_BUF_NEXT(m) == NULL) && 6027 (control->end_added)) { 6028 out_flags |= MSG_EOR; 6029 if ((control->do_not_ref_stcb == 0) && 6030 (control->stcb != NULL) && 6031 ((control->spec_flags & M_NOTIFICATION) == 0)) 6032 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6033 } 6034 if (control->spec_flags & M_NOTIFICATION) { 6035 out_flags |= MSG_NOTIFICATION; 6036 } 6037 /* we ate up the mbuf */ 6038 if (in_flags & MSG_PEEK) { 6039 /* just looking */ 6040 m = SCTP_BUF_NEXT(m); 6041 copied_so_far += cp_len; 6042 } else { 6043 /* dispose of the mbuf */ 6044 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6045 sctp_sblog(&so->so_rcv, 6046 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6047 } 6048 sctp_sbfree(control, stcb, &so->so_rcv, m); 6049 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6050 sctp_sblog(&so->so_rcv, 6051 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6052 } 6053 copied_so_far += cp_len; 6054 freed_so_far += (uint32_t)cp_len; 6055 freed_so_far += MSIZE; 6056 atomic_subtract_int(&control->length, cp_len); 6057 control->data = sctp_m_free(m); 6058 m = control->data; 6059 /* 6060 * been through it all, must hold sb 6061 * lock ok to null tail 6062 */ 6063 if (control->data == NULL) { 6064 #ifdef INVARIANTS 6065 if ((control->end_added == 0) || 6066 (TAILQ_NEXT(control, next) == NULL)) { 6067 /* 6068 * If the end is not 6069 * added, OR the 6070 * next is NOT null 6071 * we MUST have the 6072 * lock. 6073 */ 6074 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6075 panic("Hmm we don't own the lock?"); 6076 } 6077 } 6078 #endif 6079 control->tail_mbuf = NULL; 6080 #ifdef INVARIANTS 6081 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6082 panic("end_added, nothing left and no MSG_EOR"); 6083 } 6084 #endif 6085 } 6086 } 6087 } else { 6088 /* Do we need to trim the mbuf? */ 6089 if (control->spec_flags & M_NOTIFICATION) { 6090 out_flags |= MSG_NOTIFICATION; 6091 } 6092 if ((in_flags & MSG_PEEK) == 0) { 6093 SCTP_BUF_RESV_UF(m, cp_len); 6094 SCTP_BUF_LEN(m) -= (int)cp_len; 6095 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6096 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6097 } 6098 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 6099 if ((control->do_not_ref_stcb == 0) && 6100 stcb) { 6101 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 6102 } 6103 copied_so_far += cp_len; 6104 freed_so_far += (uint32_t)cp_len; 6105 freed_so_far += MSIZE; 6106 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6107 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6108 SCTP_LOG_SBRESULT, 0); 6109 } 6110 atomic_subtract_int(&control->length, cp_len); 6111 } else { 6112 copied_so_far += cp_len; 6113 } 6114 } 6115 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6116 break; 6117 } 6118 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6119 (control->do_not_ref_stcb == 0) && 6120 (freed_so_far >= rwnd_req)) { 6121 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6122 } 6123 } /* end while(m) */ 6124 /* 6125 * At this point we have looked at it all and we either have 6126 * a MSG_EOR/or read all the user wants... <OR> 6127 * control->length == 0. 
6128 */ 6129 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6130 /* we are done with this control */ 6131 if (control->length == 0) { 6132 if (control->data) { 6133 #ifdef INVARIANTS 6134 panic("control->data not null at read eor?"); 6135 #else 6136 SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n"); 6137 sctp_m_freem(control->data); 6138 control->data = NULL; 6139 #endif 6140 } 6141 done_with_control: 6142 if (hold_rlock == 0) { 6143 SCTP_INP_READ_LOCK(inp); 6144 hold_rlock = 1; 6145 } 6146 TAILQ_REMOVE(&inp->read_queue, control, next); 6147 /* Add back any hidden data */ 6148 if (control->held_length) { 6149 held_length = 0; 6150 control->held_length = 0; 6151 wakeup_read_socket = 1; 6152 } 6153 if (control->aux_data) { 6154 sctp_m_free(control->aux_data); 6155 control->aux_data = NULL; 6156 } 6157 no_rcv_needed = control->do_not_ref_stcb; 6158 sctp_free_remote_addr(control->whoFrom); 6159 control->data = NULL; 6160 #ifdef INVARIANTS 6161 if (control->on_strm_q) { 6162 panic("About to free ctl:%p so:%p and its in %d", 6163 control, so, control->on_strm_q); 6164 } 6165 #endif 6166 sctp_free_a_readq(stcb, control); 6167 control = NULL; 6168 if ((freed_so_far >= rwnd_req) && 6169 (no_rcv_needed == 0)) 6170 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6171 6172 } else { 6173 /* 6174 * The user did not read all of this 6175 * message; turn off the returned MSG_EOR 6176 * since we are leaving more behind on the 6177 * control to read. 6178 */ 6179 #ifdef INVARIANTS 6180 if (control->end_added && 6181 (control->data == NULL) && 6182 (control->tail_mbuf == NULL)) { 6183 panic("Gak, control->length is corrupt?"); 6184 } 6185 #endif 6186 no_rcv_needed = control->do_not_ref_stcb; 6187 out_flags &= ~MSG_EOR; 6188 } 6189 } 6190 if (out_flags & MSG_EOR) { 6191 goto release; 6192 } 6193 if ((uio->uio_resid == 0) || 6194 ((in_eeor_mode) && 6195 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6196 goto release; 6197 } 6198 /* 6199 * If I hit here, the receiver wants more and this message is 6200 * NOT done (pd-api). So two questions: can we block? If not, 6201 * we are done. Did the user NOT set MSG_WAITALL? 6202 */ 6203 if (block_allowed == 0) { 6204 goto release; 6205 } 6206 /* 6207 * We need to wait for more data. A few things: - We don't 6208 * sbunlock() so we don't get someone else reading. - We 6209 * must be sure to account for the case where what is added 6210 * is NOT to our control when we wake up. 6211 */ 6212 6213 /* 6214 * Do we need to tell the transport a rwnd update might be 6215 * needed before we go to sleep?
6216 */ 6217 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6218 ((freed_so_far >= rwnd_req) && 6219 (control->do_not_ref_stcb == 0) && 6220 (no_rcv_needed == 0))) { 6221 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6222 } 6223 wait_some_more: 6224 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6225 goto release; 6226 } 6227 6228 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6229 goto release; 6230 6231 if (hold_rlock == 1) { 6232 SCTP_INP_READ_UNLOCK(inp); 6233 hold_rlock = 0; 6234 } 6235 if (hold_sblock == 0) { 6236 SOCKBUF_LOCK(&so->so_rcv); 6237 hold_sblock = 1; 6238 } 6239 if ((copied_so_far) && (control->length == 0) && 6240 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6241 goto release; 6242 } 6243 if (so->so_rcv.sb_cc <= control->held_length) { 6244 error = sbwait(&so->so_rcv); 6245 if (error) { 6246 goto release; 6247 } 6248 control->held_length = 0; 6249 } 6250 if (hold_sblock) { 6251 SOCKBUF_UNLOCK(&so->so_rcv); 6252 hold_sblock = 0; 6253 } 6254 if (control->length == 0) { 6255 /* still nothing here */ 6256 if (control->end_added == 1) { 6257 /* he aborted, or is done i.e.did a shutdown */ 6258 out_flags |= MSG_EOR; 6259 if (control->pdapi_aborted) { 6260 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6261 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6262 6263 out_flags |= MSG_TRUNC; 6264 } else { 6265 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6266 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6267 } 6268 goto done_with_control; 6269 } 6270 if (so->so_rcv.sb_cc > held_length) { 6271 control->held_length = so->so_rcv.sb_cc; 6272 held_length = 0; 6273 } 6274 goto wait_some_more; 6275 } else if (control->data == NULL) { 6276 /* 6277 * we must re-sync since data is probably being 6278 * added 6279 */ 6280 SCTP_INP_READ_LOCK(inp); 6281 if ((control->length > 0) && (control->data == NULL)) { 6282 /* 6283 * big trouble.. we have the lock and its 6284 * corrupt? 6285 */ 6286 #ifdef INVARIANTS 6287 panic("Impossible data==NULL length !=0"); 6288 #endif 6289 out_flags |= MSG_EOR; 6290 out_flags |= MSG_TRUNC; 6291 control->length = 0; 6292 SCTP_INP_READ_UNLOCK(inp); 6293 goto done_with_control; 6294 } 6295 SCTP_INP_READ_UNLOCK(inp); 6296 /* We will fall around to get more data */ 6297 } 6298 goto get_more_data; 6299 } else { 6300 /*- 6301 * Give caller back the mbuf chain, 6302 * store in uio_resid the length 6303 */ 6304 wakeup_read_socket = 0; 6305 if ((control->end_added == 0) || 6306 (TAILQ_NEXT(control, next) == NULL)) { 6307 /* Need to get rlock */ 6308 if (hold_rlock == 0) { 6309 SCTP_INP_READ_LOCK(inp); 6310 hold_rlock = 1; 6311 } 6312 } 6313 if (control->end_added) { 6314 out_flags |= MSG_EOR; 6315 if ((control->do_not_ref_stcb == 0) && 6316 (control->stcb != NULL) && 6317 ((control->spec_flags & M_NOTIFICATION) == 0)) 6318 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6319 } 6320 if (control->spec_flags & M_NOTIFICATION) { 6321 out_flags |= MSG_NOTIFICATION; 6322 } 6323 uio->uio_resid = control->length; 6324 *mp = control->data; 6325 m = control->data; 6326 while (m) { 6327 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6328 sctp_sblog(&so->so_rcv, 6329 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6330 } 6331 sctp_sbfree(control, stcb, &so->so_rcv, m); 6332 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6333 freed_so_far += MSIZE; 6334 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6335 sctp_sblog(&so->so_rcv, 6336 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6337 } 6338 m = SCTP_BUF_NEXT(m); 6339 } 6340 control->data = control->tail_mbuf = NULL; 6341 control->length = 0; 6342 if (out_flags & MSG_EOR) { 6343 /* Done with this control */ 6344 goto done_with_control; 6345 } 6346 } 6347 release: 6348 if (hold_rlock == 1) { 6349 SCTP_INP_READ_UNLOCK(inp); 6350 hold_rlock = 0; 6351 } 6352 if (hold_sblock == 1) { 6353 SOCKBUF_UNLOCK(&so->so_rcv); 6354 hold_sblock = 0; 6355 } 6356 6357 sbunlock(&so->so_rcv); 6358 sockbuf_lock = 0; 6359 6360 release_unlocked: 6361 if (hold_sblock) { 6362 SOCKBUF_UNLOCK(&so->so_rcv); 6363 hold_sblock = 0; 6364 } 6365 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6366 if ((freed_so_far >= rwnd_req) && 6367 (control && (control->do_not_ref_stcb == 0)) && 6368 (no_rcv_needed == 0)) 6369 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6370 } 6371 out: 6372 if (msg_flags) { 6373 *msg_flags = out_flags; 6374 } 6375 if (((out_flags & MSG_EOR) == 0) && 6376 ((in_flags & MSG_PEEK) == 0) && 6377 (sinfo) && 6378 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6379 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6380 struct sctp_extrcvinfo *s_extra; 6381 6382 s_extra = (struct sctp_extrcvinfo *)sinfo; 6383 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6384 } 6385 if (hold_rlock == 1) { 6386 SCTP_INP_READ_UNLOCK(inp); 6387 } 6388 if (hold_sblock) { 6389 SOCKBUF_UNLOCK(&so->so_rcv); 6390 } 6391 if (sockbuf_lock) { 6392 sbunlock(&so->so_rcv); 6393 } 6394 6395 if (freecnt_applied) { 6396 /* 6397 * The lock on the socket buffer protects us so the free 6398 * code will stop. But since we used the socketbuf lock and 6399 * the sender uses the tcb_lock to increment, we need to use 6400 * the atomic add to the refcnt. 6401 */ 6402 if (stcb == NULL) { 6403 #ifdef INVARIANTS 6404 panic("stcb for refcnt has gone NULL?"); 6405 goto stage_left; 6406 #else 6407 goto stage_left; 6408 #endif 6409 } 6410 /* Save the value back for next time */ 6411 stcb->freed_by_sorcv_sincelast = freed_so_far; 6412 atomic_add_int(&stcb->asoc.refcnt, -1); 6413 } 6414 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6415 if (stcb) { 6416 sctp_misc_ints(SCTP_SORECV_DONE, 6417 freed_so_far, 6418 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6419 stcb->asoc.my_rwnd, 6420 so->so_rcv.sb_cc); 6421 } else { 6422 sctp_misc_ints(SCTP_SORECV_DONE, 6423 freed_so_far, 6424 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6425 0, 6426 so->so_rcv.sb_cc); 6427 } 6428 } 6429 stage_left: 6430 if (wakeup_read_socket) { 6431 sctp_sorwakeup(inp, so); 6432 } 6433 return (error); 6434 } 6435 6436 6437 #ifdef SCTP_MBUF_LOGGING 6438 struct mbuf * 6439 sctp_m_free(struct mbuf *m) 6440 { 6441 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6442 sctp_log_mb(m, SCTP_MBUF_IFREE); 6443 } 6444 return (m_free(m)); 6445 } 6446 6447 void 6448 sctp_m_freem(struct mbuf *mb) 6449 { 6450 while (mb != NULL) 6451 mb = sctp_m_free(mb); 6452 } 6453 6454 #endif 6455 6456 int 6457 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6458 { 6459 /* 6460 * Given a local address. For all associations that holds the 6461 * address, request a peer-set-primary. 
6462 */ 6463 struct sctp_ifa *ifa; 6464 struct sctp_laddr *wi; 6465 6466 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6467 if (ifa == NULL) { 6468 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6469 return (EADDRNOTAVAIL); 6470 } 6471 /* 6472 * Now that we have the ifa we must awaken the iterator with this 6473 * message. 6474 */ 6475 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6476 if (wi == NULL) { 6477 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6478 return (ENOMEM); 6479 } 6480 /* Now incr the count and int wi structure */ 6481 SCTP_INCR_LADDR_COUNT(); 6482 memset(wi, 0, sizeof(*wi)); 6483 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6484 wi->ifa = ifa; 6485 wi->action = SCTP_SET_PRIM_ADDR; 6486 atomic_add_int(&ifa->refcount, 1); 6487 6488 /* Now add it to the work queue */ 6489 SCTP_WQ_ADDR_LOCK(); 6490 /* 6491 * Should this really be a tailq? As it is we will process the 6492 * newest first :-0 6493 */ 6494 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6495 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6496 (struct sctp_inpcb *)NULL, 6497 (struct sctp_tcb *)NULL, 6498 (struct sctp_nets *)NULL); 6499 SCTP_WQ_ADDR_UNLOCK(); 6500 return (0); 6501 } 6502 6503 6504 int 6505 sctp_soreceive(struct socket *so, 6506 struct sockaddr **psa, 6507 struct uio *uio, 6508 struct mbuf **mp0, 6509 struct mbuf **controlp, 6510 int *flagsp) 6511 { 6512 int error, fromlen; 6513 uint8_t sockbuf[256]; 6514 struct sockaddr *from; 6515 struct sctp_extrcvinfo sinfo; 6516 int filling_sinfo = 1; 6517 int flags; 6518 struct sctp_inpcb *inp; 6519 6520 inp = (struct sctp_inpcb *)so->so_pcb; 6521 /* pickup the assoc we are reading from */ 6522 if (inp == NULL) { 6523 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6524 return (EINVAL); 6525 } 6526 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6527 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6528 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6529 (controlp == NULL)) { 6530 /* user does not want the sndrcv ctl */ 6531 filling_sinfo = 0; 6532 } 6533 if (psa) { 6534 from = (struct sockaddr *)sockbuf; 6535 fromlen = sizeof(sockbuf); 6536 from->sa_len = 0; 6537 } else { 6538 from = NULL; 6539 fromlen = 0; 6540 } 6541 6542 if (filling_sinfo) { 6543 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6544 } 6545 if (flagsp != NULL) { 6546 flags = *flagsp; 6547 } else { 6548 flags = 0; 6549 } 6550 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6551 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6552 if (flagsp != NULL) { 6553 *flagsp = flags; 6554 } 6555 if (controlp != NULL) { 6556 /* copy back the sinfo in a CMSG format */ 6557 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6558 *controlp = sctp_build_ctl_nchunk(inp, 6559 (struct sctp_sndrcvinfo *)&sinfo); 6560 } else { 6561 *controlp = NULL; 6562 } 6563 } 6564 if (psa) { 6565 /* copy back the address info */ 6566 if (from && from->sa_len) { 6567 *psa = sodupsockaddr(from, M_NOWAIT); 6568 } else { 6569 *psa = NULL; 6570 } 6571 } 6572 return (error); 6573 } 6574 6575 6576 6577 6578 6579 int 6580 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6581 int totaddr, int *error) 6582 { 6583 int added = 0; 6584 int i; 6585 struct sctp_inpcb *inp; 6586 struct sockaddr *sa; 6587 size_t incr = 0; 6588 #ifdef INET 6589 struct sockaddr_in *sin; 6590 #endif 6591 #ifdef INET6 6592 struct sockaddr_in6 *sin6; 6593 #endif 6594 
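/* Descriptive note on the layout walked below: 'addr' points at 'totaddr' sockaddr structures packed back to back; each pass sets 'incr' from sa_family (sizeof(struct sockaddr_in) or sizeof(struct sockaddr_in6)) and advances 'sa' by that many bytes, mirroring the sa_len checks done in sctp_connectx_helper_find(). */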
6595 sa = addr; 6596 inp = stcb->sctp_ep; 6597 *error = 0; 6598 for (i = 0; i < totaddr; i++) { 6599 switch (sa->sa_family) { 6600 #ifdef INET 6601 case AF_INET: 6602 incr = sizeof(struct sockaddr_in); 6603 sin = (struct sockaddr_in *)sa; 6604 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6605 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6606 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6607 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6608 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6609 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6610 *error = EINVAL; 6611 goto out_now; 6612 } 6613 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6614 SCTP_DONOT_SETSCOPE, 6615 SCTP_ADDR_IS_CONFIRMED)) { 6616 /* assoc gone no un-lock */ 6617 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6618 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6619 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6620 *error = ENOBUFS; 6621 goto out_now; 6622 } 6623 added++; 6624 break; 6625 #endif 6626 #ifdef INET6 6627 case AF_INET6: 6628 incr = sizeof(struct sockaddr_in6); 6629 sin6 = (struct sockaddr_in6 *)sa; 6630 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6631 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6632 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6633 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6634 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6635 *error = EINVAL; 6636 goto out_now; 6637 } 6638 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6639 SCTP_DONOT_SETSCOPE, 6640 SCTP_ADDR_IS_CONFIRMED)) { 6641 /* assoc gone no un-lock */ 6642 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6643 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6644 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6645 *error = ENOBUFS; 6646 goto out_now; 6647 } 6648 added++; 6649 break; 6650 #endif 6651 default: 6652 break; 6653 } 6654 sa = (struct sockaddr *)((caddr_t)sa + incr); 6655 } 6656 out_now: 6657 return (added); 6658 } 6659 6660 int 6661 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6662 unsigned int totaddr, 6663 unsigned int *num_v4, unsigned int *num_v6, 6664 unsigned int limit) 6665 { 6666 struct sockaddr *sa; 6667 struct sctp_tcb *stcb; 6668 unsigned int incr, at, i; 6669 6670 at = 0; 6671 sa = addr; 6672 *num_v6 = *num_v4 = 0; 6673 /* account and validate addresses */ 6674 if (totaddr == 0) { 6675 return (EINVAL); 6676 } 6677 for (i = 0; i < totaddr; i++) { 6678 if (at + sizeof(struct sockaddr) > limit) { 6679 return (EINVAL); 6680 } 6681 switch (sa->sa_family) { 6682 #ifdef INET 6683 case AF_INET: 6684 incr = (unsigned int)sizeof(struct sockaddr_in); 6685 if (sa->sa_len != incr) { 6686 return (EINVAL); 6687 } 6688 (*num_v4) += 1; 6689 break; 6690 #endif 6691 #ifdef INET6 6692 case AF_INET6: 6693 { 6694 struct sockaddr_in6 *sin6; 6695 6696 sin6 = (struct sockaddr_in6 *)sa; 6697 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6698 /* Must be non-mapped for connectx */ 6699 return (EINVAL); 6700 } 6701 incr = (unsigned int)sizeof(struct sockaddr_in6); 6702 if (sa->sa_len != incr) { 6703 return (EINVAL); 6704 } 6705 (*num_v6) += 1; 6706 break; 6707 } 6708 #endif 6709 default: 6710 return (EINVAL); 6711 } 6712 if ((at + incr) > limit) { 6713 return (EINVAL); 6714 } 6715 SCTP_INP_INCR_REF(inp); 6716 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6717 if (stcb != NULL) { 6718 SCTP_TCB_UNLOCK(stcb); 6719 return (EALREADY); 6720 } else { 6721 SCTP_INP_DECR_REF(inp); 6722 } 6723 at += incr; 6724 sa = (struct 
sockaddr *)((caddr_t)sa + incr); 6725 } 6726 return (0); 6727 } 6728 6729 /* 6730 * sctp_bindx(ADD) for one address. 6731 * assumes all arguments are valid/checked by caller. 6732 */ 6733 void 6734 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6735 struct sockaddr *sa, uint32_t vrf_id, int *error, 6736 void *p) 6737 { 6738 #if defined(INET) && defined(INET6) 6739 struct sockaddr_in sin; 6740 #endif 6741 #ifdef INET6 6742 struct sockaddr_in6 *sin6; 6743 #endif 6744 #ifdef INET 6745 struct sockaddr_in *sinp; 6746 #endif 6747 struct sockaddr *addr_to_use; 6748 struct sctp_inpcb *lep; 6749 uint16_t port; 6750 6751 /* see if we're bound all already! */ 6752 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6753 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6754 *error = EINVAL; 6755 return; 6756 } 6757 switch (sa->sa_family) { 6758 #ifdef INET6 6759 case AF_INET6: 6760 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6761 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6762 *error = EINVAL; 6763 return; 6764 } 6765 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6766 /* can only bind v6 on PF_INET6 sockets */ 6767 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6768 *error = EINVAL; 6769 return; 6770 } 6771 sin6 = (struct sockaddr_in6 *)sa; 6772 port = sin6->sin6_port; 6773 #ifdef INET 6774 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6775 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6776 SCTP_IPV6_V6ONLY(inp)) { 6777 /* can't bind v4-mapped on PF_INET sockets */ 6778 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6779 *error = EINVAL; 6780 return; 6781 } 6782 in6_sin6_2_sin(&sin, sin6); 6783 addr_to_use = (struct sockaddr *)&sin; 6784 } else { 6785 addr_to_use = sa; 6786 } 6787 #else 6788 addr_to_use = sa; 6789 #endif 6790 break; 6791 #endif 6792 #ifdef INET 6793 case AF_INET: 6794 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6795 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6796 *error = EINVAL; 6797 return; 6798 } 6799 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6800 SCTP_IPV6_V6ONLY(inp)) { 6801 /* can't bind v4 on PF_INET sockets */ 6802 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6803 *error = EINVAL; 6804 return; 6805 } 6806 sinp = (struct sockaddr_in *)sa; 6807 port = sinp->sin_port; 6808 addr_to_use = sa; 6809 break; 6810 #endif 6811 default: 6812 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6813 *error = EINVAL; 6814 return; 6815 } 6816 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6817 if (p == NULL) { 6818 /* Can't get proc for Net/Open BSD */ 6819 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6820 *error = EINVAL; 6821 return; 6822 } 6823 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6824 return; 6825 } 6826 /* Validate the incoming port. */ 6827 if ((port != 0) && (port != inp->sctp_lport)) { 6828 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6829 *error = EINVAL; 6830 return; 6831 } 6832 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6833 if (lep == NULL) { 6834 /* add the address */ 6835 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6836 SCTP_ADD_IP_ADDRESS, vrf_id); 6837 } else { 6838 if (lep != inp) { 6839 *error = EADDRINUSE; 6840 } 6841 SCTP_INP_DECR_REF(lep); 6842 } 6843 } 6844 6845 /* 6846 * sctp_bindx(DELETE) for one address. 6847 * assumes all arguments are valid/checked by caller. 
6848 */ 6849 void 6850 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6851 struct sockaddr *sa, uint32_t vrf_id, int *error) 6852 { 6853 struct sockaddr *addr_to_use; 6854 #if defined(INET) && defined(INET6) 6855 struct sockaddr_in6 *sin6; 6856 struct sockaddr_in sin; 6857 #endif 6858 6859 /* see if we're bound all already! */ 6860 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6861 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6862 *error = EINVAL; 6863 return; 6864 } 6865 switch (sa->sa_family) { 6866 #ifdef INET6 6867 case AF_INET6: 6868 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6869 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6870 *error = EINVAL; 6871 return; 6872 } 6873 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6874 /* can only bind v6 on PF_INET6 sockets */ 6875 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6876 *error = EINVAL; 6877 return; 6878 } 6879 #ifdef INET 6880 sin6 = (struct sockaddr_in6 *)sa; 6881 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6882 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6883 SCTP_IPV6_V6ONLY(inp)) { 6884 /* can't bind mapped-v4 on PF_INET sockets */ 6885 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6886 *error = EINVAL; 6887 return; 6888 } 6889 in6_sin6_2_sin(&sin, sin6); 6890 addr_to_use = (struct sockaddr *)&sin; 6891 } else { 6892 addr_to_use = sa; 6893 } 6894 #else 6895 addr_to_use = sa; 6896 #endif 6897 break; 6898 #endif 6899 #ifdef INET 6900 case AF_INET: 6901 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6902 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6903 *error = EINVAL; 6904 return; 6905 } 6906 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6907 SCTP_IPV6_V6ONLY(inp)) { 6908 /* can't bind v4 on PF_INET sockets */ 6909 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6910 *error = EINVAL; 6911 return; 6912 } 6913 addr_to_use = sa; 6914 break; 6915 #endif 6916 default: 6917 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6918 *error = EINVAL; 6919 return; 6920 } 6921 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6922 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6923 vrf_id); 6924 } 6925 6926 /* 6927 * returns the valid local address count for an assoc, taking into account 6928 * all scoping rules 6929 */ 6930 int 6931 sctp_local_addr_count(struct sctp_tcb *stcb) 6932 { 6933 int loopback_scope; 6934 #if defined(INET) 6935 int ipv4_local_scope, ipv4_addr_legal; 6936 #endif 6937 #if defined(INET6) 6938 int local_scope, site_scope, ipv6_addr_legal; 6939 #endif 6940 struct sctp_vrf *vrf; 6941 struct sctp_ifn *sctp_ifn; 6942 struct sctp_ifa *sctp_ifa; 6943 int count = 0; 6944 6945 /* Turn on all the appropriate scopes */ 6946 loopback_scope = stcb->asoc.scope.loopback_scope; 6947 #if defined(INET) 6948 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6949 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6950 #endif 6951 #if defined(INET6) 6952 local_scope = stcb->asoc.scope.local_scope; 6953 site_scope = stcb->asoc.scope.site_scope; 6954 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6955 #endif 6956 SCTP_IPI_ADDR_RLOCK(); 6957 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6958 if (vrf == NULL) { 6959 /* no vrf, no addresses */ 6960 SCTP_IPI_ADDR_RUNLOCK(); 6961 return (0); 6962 } 6963 6964 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6965 /* 6966 * bound all case: go through all ifns on the vrf 6967 */ 6968 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6969 if ((loopback_scope == 0) && 6970 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6971 continue; 6972 } 6973 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6974 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6975 continue; 6976 switch (sctp_ifa->address.sa.sa_family) { 6977 #ifdef INET 6978 case AF_INET: 6979 if (ipv4_addr_legal) { 6980 struct sockaddr_in *sin; 6981 6982 sin = &sctp_ifa->address.sin; 6983 if (sin->sin_addr.s_addr == 0) { 6984 /* 6985 * skip unspecified 6986 * addrs 6987 */ 6988 continue; 6989 } 6990 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 6991 &sin->sin_addr) != 0) { 6992 continue; 6993 } 6994 if ((ipv4_local_scope == 0) && 6995 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 6996 continue; 6997 } 6998 /* count this one */ 6999 count++; 7000 } else { 7001 continue; 7002 } 7003 break; 7004 #endif 7005 #ifdef INET6 7006 case AF_INET6: 7007 if (ipv6_addr_legal) { 7008 struct sockaddr_in6 *sin6; 7009 7010 sin6 = &sctp_ifa->address.sin6; 7011 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 7012 continue; 7013 } 7014 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 7015 &sin6->sin6_addr) != 0) { 7016 continue; 7017 } 7018 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 7019 if (local_scope == 0) 7020 continue; 7021 if (sin6->sin6_scope_id == 0) { 7022 if (sa6_recoverscope(sin6) != 0) 7023 /* 7024 * 7025 * bad 7026 * link 7027 * 7028 * local 7029 * 7030 * address 7031 */ 7032 continue; 7033 } 7034 } 7035 if ((site_scope == 0) && 7036 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7037 continue; 7038 } 7039 /* count this one */ 7040 count++; 7041 } 7042 break; 7043 #endif 7044 default: 7045 /* TSNH */ 7046 break; 7047 } 7048 } 7049 } 7050 } else { 7051 /* 7052 * subset bound case 7053 */ 7054 struct sctp_laddr *laddr; 7055 7056 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7057 sctp_nxt_addr) { 7058 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7059 continue; 7060 } 7061 /* count this one */ 7062 count++; 7063 } 7064 } 7065 SCTP_IPI_ADDR_RUNLOCK(); 7066 return (count); 7067 } 7068 7069 #if defined(SCTP_LOCAL_TRACE_BUF) 7070 7071 void 7072 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7073 { 7074 uint32_t saveindex, newindex; 7075 7076 do { 7077 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7078 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7079 newindex = 1; 7080 } else { 7081 newindex = saveindex + 1; 7082 } 7083 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7084 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7085 saveindex = 0; 7086 } 7087 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7088 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7089 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7090 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7091 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7092 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7093 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7094 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7095 } 7096 7097 #endif 7098 static void 7099 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7100 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7101 { 7102 struct ip *iph; 7103 #ifdef INET6 7104 struct ip6_hdr *ip6; 7105 #endif 7106 struct mbuf *sp, *last; 7107 struct udphdr *uhdr; 7108 uint16_t port; 7109 7110 if ((m->m_flags & M_PKTHDR) == 0) { 7111 /* Can't handle one that is not a pkt hdr */ 7112 goto out; 7113 } 7114 /* Pull the src port */ 7115 iph = mtod(m, struct ip *); 7116 uhdr = (struct udphdr *)((caddr_t)iph + off); 7117 port = uhdr->uh_sport; 7118 /* 7119 * Split out the mbuf chain. Leave the IP header in m, place the 7120 * rest in the sp. 7121 */ 7122 sp = m_split(m, off, M_NOWAIT); 7123 if (sp == NULL) { 7124 /* Gak, drop packet, we can't do a split */ 7125 goto out; 7126 } 7127 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7128 /* Gak, packet can't have an SCTP header in it - too small */ 7129 m_freem(sp); 7130 goto out; 7131 } 7132 /* Now pull up the UDP header and SCTP header together */ 7133 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7134 if (sp == NULL) { 7135 /* Gak pullup failed */ 7136 goto out; 7137 } 7138 /* Trim out the UDP header */ 7139 m_adj(sp, sizeof(struct udphdr)); 7140 7141 /* Now reconstruct the mbuf chain */ 7142 for (last = m; last->m_next; last = last->m_next); 7143 last->m_next = sp; 7144 m->m_pkthdr.len += sp->m_pkthdr.len; 7145 /* 7146 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7147 * checksum and it was valid. Since CSUM_DATA_VALID == 7148 * CSUM_SCTP_VALID this would imply that the HW also verified the 7149 * SCTP checksum. Therefore, clear the bit. 
7150 */ 7151 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7152 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7153 m->m_pkthdr.len, 7154 if_name(m->m_pkthdr.rcvif), 7155 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7156 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7157 iph = mtod(m, struct ip *); 7158 switch (iph->ip_v) { 7159 #ifdef INET 7160 case IPVERSION: 7161 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7162 sctp_input_with_port(m, off, port); 7163 break; 7164 #endif 7165 #ifdef INET6 7166 case IPV6_VERSION >> 4: 7167 ip6 = mtod(m, struct ip6_hdr *); 7168 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7169 sctp6_input_with_port(&m, &off, port); 7170 break; 7171 #endif 7172 default: 7173 goto out; 7174 break; 7175 } 7176 return; 7177 out: 7178 m_freem(m); 7179 } 7180 7181 #ifdef INET 7182 static void 7183 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7184 { 7185 struct ip *outer_ip, *inner_ip; 7186 struct sctphdr *sh; 7187 struct icmp *icmp; 7188 struct udphdr *udp; 7189 struct sctp_inpcb *inp; 7190 struct sctp_tcb *stcb; 7191 struct sctp_nets *net; 7192 struct sctp_init_chunk *ch; 7193 struct sockaddr_in src, dst; 7194 uint8_t type, code; 7195 7196 inner_ip = (struct ip *)vip; 7197 icmp = (struct icmp *)((caddr_t)inner_ip - 7198 (sizeof(struct icmp) - sizeof(struct ip))); 7199 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7200 if (ntohs(outer_ip->ip_len) < 7201 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7202 return; 7203 } 7204 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7205 sh = (struct sctphdr *)(udp + 1); 7206 memset(&src, 0, sizeof(struct sockaddr_in)); 7207 src.sin_family = AF_INET; 7208 src.sin_len = sizeof(struct sockaddr_in); 7209 src.sin_port = sh->src_port; 7210 src.sin_addr = inner_ip->ip_src; 7211 memset(&dst, 0, sizeof(struct sockaddr_in)); 7212 dst.sin_family = AF_INET; 7213 dst.sin_len = sizeof(struct sockaddr_in); 7214 dst.sin_port = sh->dest_port; 7215 dst.sin_addr = inner_ip->ip_dst; 7216 /* 7217 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7218 * holds our local endpoint address. Thus we reverse the dst and the 7219 * src in the lookup. 7220 */ 7221 inp = NULL; 7222 net = NULL; 7223 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7224 (struct sockaddr *)&src, 7225 &inp, &net, 1, 7226 SCTP_DEFAULT_VRFID); 7227 if ((stcb != NULL) && 7228 (net != NULL) && 7229 (inp != NULL)) { 7230 /* Check the UDP port numbers */ 7231 if ((udp->uh_dport != net->port) || 7232 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7233 SCTP_TCB_UNLOCK(stcb); 7234 return; 7235 } 7236 /* Check the verification tag */ 7237 if (ntohl(sh->v_tag) != 0) { 7238 /* 7239 * This must be the verification tag used for 7240 * sending out packets. We don't consider packets 7241 * reflecting the verification tag. 7242 */ 7243 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7244 SCTP_TCB_UNLOCK(stcb); 7245 return; 7246 } 7247 } else { 7248 if (ntohs(outer_ip->ip_len) >= 7249 sizeof(struct ip) + 7250 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7251 /* 7252 * In this case we can check if we got an 7253 * INIT chunk and if the initiate tag 7254 * matches. 
7255 */ 7256 ch = (struct sctp_init_chunk *)(sh + 1); 7257 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7258 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7259 SCTP_TCB_UNLOCK(stcb); 7260 return; 7261 } 7262 } else { 7263 SCTP_TCB_UNLOCK(stcb); 7264 return; 7265 } 7266 } 7267 type = icmp->icmp_type; 7268 code = icmp->icmp_code; 7269 if ((type == ICMP_UNREACH) && 7270 (code == ICMP_UNREACH_PORT)) { 7271 code = ICMP_UNREACH_PROTOCOL; 7272 } 7273 sctp_notify(inp, stcb, net, type, code, 7274 ntohs(inner_ip->ip_len), 7275 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7276 } else { 7277 if ((stcb == NULL) && (inp != NULL)) { 7278 /* reduce ref-count */ 7279 SCTP_INP_WLOCK(inp); 7280 SCTP_INP_DECR_REF(inp); 7281 SCTP_INP_WUNLOCK(inp); 7282 } 7283 if (stcb) { 7284 SCTP_TCB_UNLOCK(stcb); 7285 } 7286 } 7287 return; 7288 } 7289 #endif 7290 7291 #ifdef INET6 7292 static void 7293 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7294 { 7295 struct ip6ctlparam *ip6cp; 7296 struct sctp_inpcb *inp; 7297 struct sctp_tcb *stcb; 7298 struct sctp_nets *net; 7299 struct sctphdr sh; 7300 struct udphdr udp; 7301 struct sockaddr_in6 src, dst; 7302 uint8_t type, code; 7303 7304 ip6cp = (struct ip6ctlparam *)d; 7305 /* 7306 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7307 */ 7308 if (ip6cp->ip6c_m == NULL) { 7309 return; 7310 } 7311 /* 7312 * Check if we can safely examine the ports and the verification tag 7313 * of the SCTP common header. 7314 */ 7315 if (ip6cp->ip6c_m->m_pkthdr.len < 7316 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7317 return; 7318 } 7319 /* Copy out the UDP header. */ 7320 memset(&udp, 0, sizeof(struct udphdr)); 7321 m_copydata(ip6cp->ip6c_m, 7322 ip6cp->ip6c_off, 7323 sizeof(struct udphdr), 7324 (caddr_t)&udp); 7325 /* Copy out the port numbers and the verification tag. */ 7326 memset(&sh, 0, sizeof(struct sctphdr)); 7327 m_copydata(ip6cp->ip6c_m, 7328 ip6cp->ip6c_off + sizeof(struct udphdr), 7329 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7330 (caddr_t)&sh); 7331 memset(&src, 0, sizeof(struct sockaddr_in6)); 7332 src.sin6_family = AF_INET6; 7333 src.sin6_len = sizeof(struct sockaddr_in6); 7334 src.sin6_port = sh.src_port; 7335 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7336 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7337 return; 7338 } 7339 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7340 dst.sin6_family = AF_INET6; 7341 dst.sin6_len = sizeof(struct sockaddr_in6); 7342 dst.sin6_port = sh.dest_port; 7343 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7344 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7345 return; 7346 } 7347 inp = NULL; 7348 net = NULL; 7349 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7350 (struct sockaddr *)&src, 7351 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7352 if ((stcb != NULL) && 7353 (net != NULL) && 7354 (inp != NULL)) { 7355 /* Check the UDP port numbers */ 7356 if ((udp.uh_dport != net->port) || 7357 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7358 SCTP_TCB_UNLOCK(stcb); 7359 return; 7360 } 7361 /* Check the verification tag */ 7362 if (ntohl(sh.v_tag) != 0) { 7363 /* 7364 * This must be the verification tag used for 7365 * sending out packets. We don't consider packets 7366 * reflecting the verification tag. 
7367 */ 7368 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7369 SCTP_TCB_UNLOCK(stcb); 7370 return; 7371 } 7372 } else { 7373 if (ip6cp->ip6c_m->m_pkthdr.len >= 7374 ip6cp->ip6c_off + sizeof(struct udphdr) + 7375 sizeof(struct sctphdr) + 7376 sizeof(struct sctp_chunkhdr) + 7377 offsetof(struct sctp_init, a_rwnd)) { 7378 /* 7379 * In this case we can check if we got an 7380 * INIT chunk and if the initiate tag 7381 * matches. 7382 */ 7383 uint32_t initiate_tag; 7384 uint8_t chunk_type; 7385 7386 m_copydata(ip6cp->ip6c_m, 7387 ip6cp->ip6c_off + 7388 sizeof(struct udphdr) + 7389 sizeof(struct sctphdr), 7390 sizeof(uint8_t), 7391 (caddr_t)&chunk_type); 7392 m_copydata(ip6cp->ip6c_m, 7393 ip6cp->ip6c_off + 7394 sizeof(struct udphdr) + 7395 sizeof(struct sctphdr) + 7396 sizeof(struct sctp_chunkhdr), 7397 sizeof(uint32_t), 7398 (caddr_t)&initiate_tag); 7399 if ((chunk_type != SCTP_INITIATION) || 7400 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7401 SCTP_TCB_UNLOCK(stcb); 7402 return; 7403 } 7404 } else { 7405 SCTP_TCB_UNLOCK(stcb); 7406 return; 7407 } 7408 } 7409 type = ip6cp->ip6c_icmp6->icmp6_type; 7410 code = ip6cp->ip6c_icmp6->icmp6_code; 7411 if ((type == ICMP6_DST_UNREACH) && 7412 (code == ICMP6_DST_UNREACH_NOPORT)) { 7413 type = ICMP6_PARAM_PROB; 7414 code = ICMP6_PARAMPROB_NEXTHEADER; 7415 } 7416 sctp6_notify(inp, stcb, net, type, code, 7417 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7418 } else { 7419 if ((stcb == NULL) && (inp != NULL)) { 7420 /* reduce inp's ref-count */ 7421 SCTP_INP_WLOCK(inp); 7422 SCTP_INP_DECR_REF(inp); 7423 SCTP_INP_WUNLOCK(inp); 7424 } 7425 if (stcb) { 7426 SCTP_TCB_UNLOCK(stcb); 7427 } 7428 } 7429 } 7430 #endif 7431 7432 void 7433 sctp_over_udp_stop(void) 7434 { 7435 /* 7436 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7437 * for writting! 7438 */ 7439 #ifdef INET 7440 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7441 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7442 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7443 } 7444 #endif 7445 #ifdef INET6 7446 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7447 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7448 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7449 } 7450 #endif 7451 } 7452 7453 int 7454 sctp_over_udp_start(void) 7455 { 7456 uint16_t port; 7457 int ret; 7458 #ifdef INET 7459 struct sockaddr_in sin; 7460 #endif 7461 #ifdef INET6 7462 struct sockaddr_in6 sin6; 7463 #endif 7464 /* 7465 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7466 * for writting! 7467 */ 7468 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7469 if (ntohs(port) == 0) { 7470 /* Must have a port set */ 7471 return (EINVAL); 7472 } 7473 #ifdef INET 7474 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7475 /* Already running -- must stop first */ 7476 return (EALREADY); 7477 } 7478 #endif 7479 #ifdef INET6 7480 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7481 /* Already running -- must stop first */ 7482 return (EALREADY); 7483 } 7484 #endif 7485 #ifdef INET 7486 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7487 SOCK_DGRAM, IPPROTO_UDP, 7488 curthread->td_ucred, curthread))) { 7489 sctp_over_udp_stop(); 7490 return (ret); 7491 } 7492 /* Call the special UDP hook. */ 7493 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7494 sctp_recv_udp_tunneled_packet, 7495 sctp_recv_icmp_tunneled_packet, 7496 NULL))) { 7497 sctp_over_udp_stop(); 7498 return (ret); 7499 } 7500 /* Ok, we have a socket, bind it to the port. 
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}

void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}
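
/*
 * Set the primary state of the association, keeping any substate bits.
 * Entering one of the shutdown states clears the SHUTDOWN_PENDING substate.
 */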
void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	    (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}