/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}

void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet,
				    lnet->flight_size, tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code whether a time is positive and
 * know that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
	uint64_t temp;
	uint32_t ticks;

	if (hz == 1000) {
		ticks = msecs;
	} else {
		temp = (((uint64_t)msecs * hz) + 999) / 1000;
		if (temp > UINT32_MAX) {
			ticks = UINT32_MAX;
		} else {
			ticks = (uint32_t)temp;
		}
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t msecs;

	if (hz == 1000) {
		msecs = ticks;
	} else {
		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
		if (temp > UINT32_MAX) {
			msecs = UINT32_MAX;
		} else {
			msecs = (uint32_t)temp;
		}
	}
	return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
	uint64_t temp;
	uint32_t ticks;

	temp = (uint64_t)secs * hz;
	if (temp > UINT32_MAX) {
		ticks = UINT32_MAX;
	} else {
		ticks = (uint32_t)temp;
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t secs;

	temp = ((uint64_t)ticks + (hz - 1)) / hz;
	if (temp > UINT32_MAX) {
		secs = UINT32_MAX;
	} else {
		secs = (uint32_t)temp;
	}
	return (secs);
}
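
/*
 * Worked example of the rounding above (assuming hz = 100, i.e. 10 ms per
 * tick): sctp_msecs_to_ticks(15) = (15 * 100 + 999) / 1000 = 2 ticks, and
 * sctp_ticks_to_msecs(2) = (2 * 1000 + 99) / 100 = 20 msecs, so a positive
 * timeout never rounds down to zero ticks.
 */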

/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}

/*
 * A list of sizes based on typical MTUs, used only if the next hop size is
 * not returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};
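
/*
 * Illustrative examples of how the table above is consumed by the helpers
 * below (values can be checked against the table): sctp_get_prev_mtu(1500)
 * returns 1492, sctp_get_next_mtu(1500) returns 1536, and
 * sctp_get_prev_mtu(50) returns 48, the largest multiple of 4 that is <= 50,
 * since 50 is below the smallest table entry.
 */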

/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller than or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4
 * smaller than or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	val &= 0xfffffffc;
	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbledygook in the random store, which is what we
	 * want. There is a danger that two callers will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use a random selection process to
	 * get the initial stream sequence number, using RFC1750 as a good
	 * guideline.
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}
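
/*
 * Summary of the two helpers above: sctp_select_initial_TSN() hands out 4
 * bytes at a time from the endpoint's HMAC-refilled random store, claiming
 * an offset with atomic_cmpset_int() instead of taking a lock, and
 * sctp_select_a_tag() simply redraws until it gets a non-zero value that,
 * when check is set, sctp_is_vtag_good() accepts as a usable verification
 * tag.
 */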

int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero.
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer.
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&inp->sctp_ep);
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimistic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	/* This will need to be adjusted */
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = inp->sctp_frag_point;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}

static void
sctp_iterator_work(struct sctp_iterator *it)
{
	struct epoch_tracker et;
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

	NET_EPOCH_ENTER(et);
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		NET_EPOCH_EXIT(et);
		return;
	}
select_a_new_ep:
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		    ("%s: stcb %p does not belong to inp %p, but inp %p",
		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}

static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL, /* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Free it if we are stopping, or put the work items
			 * back on the addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				sctp_asconf_iterator_end(asc, 0);
			} else {
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */

void
sctp_timeout_handler(void *t)
{
	struct epoch_tracker et;
	struct timeval tv;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
	int did_output;
	int type;
	int i, secret;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	KASSERT(tmr->self == NULL || tmr->self == tmr,
	    ("sctp_timeout_handler: tmr->self corrupted"));
	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
	    ("sctp_timeout_handler: invalid timer type %d", tmr->type));
	type = tmr->type;
	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
	    type, stcb, stcb->sctp_ep));
	if (inp) {
		SCTP_INP_INCR_REF(inp);
	}
	tmr->stopped_from = 0xa001;
	if (stcb) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d handler exiting due to CLOSED association.\n",
			    type);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa002;
	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to not being active.\n",
		    type);
		CURVNET_RESTORE();
		return;
	}

	tmr->stopped_from = 0xa003;
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d handler exiting due to CLOSED association.\n",
			    type);
			CURVNET_RESTORE();
			return;
		}
	} else if (inp != NULL) {
		SCTP_INP_WLOCK(inp);
	} else {
		SCTP_WQ_ADDR_LOCK();
	}

	/* Record in stopped_from which timeout occurred. */
	tmr->stopped_from = type;
	NET_EPOCH_ENTER(et);
	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_SEND:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * Safeguard. If there is something on the sent queue
			 * but no timers are running, something is wrong, so
			 * we start a timer on the first chunk on the sent
			 * queue on whatever net it is sent to.
			 */
1848 */ 1849 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1850 if (chk->whoTo != NULL) { 1851 break; 1852 } 1853 } 1854 if (chk != NULL) { 1855 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1856 } 1857 } 1858 break; 1859 case SCTP_TIMER_TYPE_INIT: 1860 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1861 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1862 type, inp, stcb, net)); 1863 SCTP_STAT_INCR(sctps_timoinit); 1864 stcb->asoc.timoinit++; 1865 if (sctp_t1init_timer(inp, stcb, net)) { 1866 /* no need to unlock on tcb its gone */ 1867 goto out_decr; 1868 } 1869 /* We do output but not here */ 1870 did_output = 0; 1871 break; 1872 case SCTP_TIMER_TYPE_RECV: 1873 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1874 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1875 type, inp, stcb, net)); 1876 SCTP_STAT_INCR(sctps_timosack); 1877 stcb->asoc.timosack++; 1878 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1879 #ifdef SCTP_AUDITING_ENABLED 1880 sctp_auditing(4, inp, stcb, NULL); 1881 #endif 1882 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1883 break; 1884 case SCTP_TIMER_TYPE_SHUTDOWN: 1885 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1886 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1887 type, inp, stcb, net)); 1888 SCTP_STAT_INCR(sctps_timoshutdown); 1889 stcb->asoc.timoshutdown++; 1890 if (sctp_shutdown_timer(inp, stcb, net)) { 1891 /* no need to unlock on tcb its gone */ 1892 goto out_decr; 1893 } 1894 #ifdef SCTP_AUDITING_ENABLED 1895 sctp_auditing(4, inp, stcb, net); 1896 #endif 1897 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1898 break; 1899 case SCTP_TIMER_TYPE_HEARTBEAT: 1900 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1901 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1902 type, inp, stcb, net)); 1903 SCTP_STAT_INCR(sctps_timoheartbeat); 1904 stcb->asoc.timoheartbeat++; 1905 if (sctp_heartbeat_timer(inp, stcb, net)) { 1906 /* no need to unlock on tcb its gone */ 1907 goto out_decr; 1908 } 1909 #ifdef SCTP_AUDITING_ENABLED 1910 sctp_auditing(4, inp, stcb, net); 1911 #endif 1912 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1913 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1914 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1915 } 1916 break; 1917 case SCTP_TIMER_TYPE_COOKIE: 1918 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1919 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1920 type, inp, stcb, net)); 1921 SCTP_STAT_INCR(sctps_timocookie); 1922 stcb->asoc.timocookie++; 1923 if (sctp_cookie_timer(inp, stcb, net)) { 1924 /* no need to unlock on tcb its gone */ 1925 goto out_decr; 1926 } 1927 #ifdef SCTP_AUDITING_ENABLED 1928 sctp_auditing(4, inp, stcb, net); 1929 #endif 1930 /* 1931 * We consider T3 and Cookie timer pretty much the same with 1932 * respect to where from in chunk_output. 
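 * That is why SCTP_OUTPUT_FROM_T3 is passed below for the cookie timer
 * as well.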
1933 */ 1934 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1935 break; 1936 case SCTP_TIMER_TYPE_NEWCOOKIE: 1937 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1938 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1939 type, inp, stcb, net)); 1940 SCTP_STAT_INCR(sctps_timosecret); 1941 (void)SCTP_GETTIME_TIMEVAL(&tv); 1942 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1943 inp->sctp_ep.last_secret_number = 1944 inp->sctp_ep.current_secret_number; 1945 inp->sctp_ep.current_secret_number++; 1946 if (inp->sctp_ep.current_secret_number >= 1947 SCTP_HOW_MANY_SECRETS) { 1948 inp->sctp_ep.current_secret_number = 0; 1949 } 1950 secret = (int)inp->sctp_ep.current_secret_number; 1951 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1952 inp->sctp_ep.secret_key[secret][i] = 1953 sctp_select_initial_TSN(&inp->sctp_ep); 1954 } 1955 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1956 did_output = 0; 1957 break; 1958 case SCTP_TIMER_TYPE_PATHMTURAISE: 1959 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1960 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1961 type, inp, stcb, net)); 1962 SCTP_STAT_INCR(sctps_timopathmtu); 1963 sctp_pathmtu_timer(inp, stcb, net); 1964 did_output = 0; 1965 break; 1966 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1967 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1968 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1969 type, inp, stcb, net)); 1970 if (sctp_shutdownack_timer(inp, stcb, net)) { 1971 /* no need to unlock on tcb its gone */ 1972 goto out_decr; 1973 } 1974 SCTP_STAT_INCR(sctps_timoshutdownack); 1975 stcb->asoc.timoshutdownack++; 1976 #ifdef SCTP_AUDITING_ENABLED 1977 sctp_auditing(4, inp, stcb, net); 1978 #endif 1979 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1980 break; 1981 case SCTP_TIMER_TYPE_ASCONF: 1982 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1983 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1984 type, inp, stcb, net)); 1985 SCTP_STAT_INCR(sctps_timoasconf); 1986 if (sctp_asconf_timer(inp, stcb, net)) { 1987 /* no need to unlock on tcb its gone */ 1988 goto out_decr; 1989 } 1990 #ifdef SCTP_AUDITING_ENABLED 1991 sctp_auditing(4, inp, stcb, net); 1992 #endif 1993 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 1994 break; 1995 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1996 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1997 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1998 type, inp, stcb, net)); 1999 SCTP_STAT_INCR(sctps_timoshutdownguard); 2000 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 2001 "Shutdown guard timer expired"); 2002 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2003 /* no need to unlock on tcb its gone */ 2004 goto out_decr; 2005 case SCTP_TIMER_TYPE_AUTOCLOSE: 2006 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2007 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2008 type, inp, stcb, net)); 2009 SCTP_STAT_INCR(sctps_timoautoclose); 2010 sctp_autoclose_timer(inp, stcb); 2011 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2012 did_output = 0; 2013 break; 2014 case SCTP_TIMER_TYPE_STRRESET: 2015 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2016 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2017 type, inp, stcb, net)); 2018 SCTP_STAT_INCR(sctps_timostrmrst); 2019 if (sctp_strreset_timer(inp, stcb)) { 2020 /* no need to unlock on tcb its gone */ 2021 goto out_decr; 2022 } 2023 
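/* Push out any stream reset request the timer handler queued for retransmission. */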
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2024 break; 2025 case SCTP_TIMER_TYPE_INPKILL: 2026 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2027 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2028 type, inp, stcb, net)); 2029 SCTP_STAT_INCR(sctps_timoinpkill); 2030 /* 2031 * special case, take away our increment since WE are the 2032 * killer 2033 */ 2034 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2035 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2036 SCTP_INP_DECR_REF(inp); 2037 SCTP_INP_WUNLOCK(inp); 2038 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2039 SCTP_CALLED_FROM_INPKILL_TIMER); 2040 inp = NULL; 2041 goto out_no_decr; 2042 case SCTP_TIMER_TYPE_ASOCKILL: 2043 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2044 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2045 type, inp, stcb, net)); 2046 SCTP_STAT_INCR(sctps_timoassockill); 2047 /* Can we free it yet? */ 2048 SCTP_INP_DECR_REF(inp); 2049 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2050 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2051 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2052 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2053 /* 2054 * free asoc, always unlocks (or destroy's) so prevent 2055 * duplicate unlock or unlock of a free mtx :-0 2056 */ 2057 stcb = NULL; 2058 goto out_no_decr; 2059 case SCTP_TIMER_TYPE_ADDR_WQ: 2060 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2061 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2062 type, inp, stcb, net)); 2063 sctp_handle_addr_wq(); 2064 break; 2065 case SCTP_TIMER_TYPE_PRIM_DELETED: 2066 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2067 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2068 type, inp, stcb, net)); 2069 SCTP_STAT_INCR(sctps_timodelprim); 2070 sctp_delete_prim_timer(inp, stcb); 2071 break; 2072 default: 2073 #ifdef INVARIANTS 2074 panic("Unknown timer type %d", type); 2075 #else 2076 goto get_out; 2077 #endif 2078 } 2079 #ifdef SCTP_AUDITING_ENABLED 2080 sctp_audit_log(0xF1, (uint8_t)type); 2081 if (inp) 2082 sctp_auditing(5, inp, stcb, net); 2083 #endif 2084 if ((did_output) && stcb) { 2085 /* 2086 * Now we need to clean up the control chunk chain if an 2087 * ECNE is on it. It must be marked as UNSENT again so next 2088 * call will continue to send it until such time that we get 2089 * a CWR, to remove it. It is, however, less likely that we 2090 * will find a ecn echo on the chain though. 2091 */ 2092 sctp_fix_ecn_echo(&stcb->asoc); 2093 } 2094 get_out: 2095 if (stcb) { 2096 SCTP_TCB_UNLOCK(stcb); 2097 } else if (inp != NULL) { 2098 SCTP_INP_WUNLOCK(inp); 2099 } else { 2100 SCTP_WQ_ADDR_UNLOCK(); 2101 } 2102 2103 out_decr: 2104 if (inp) { 2105 SCTP_INP_DECR_REF(inp); 2106 } 2107 2108 out_no_decr: 2109 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2110 CURVNET_RESTORE(); 2111 NET_EPOCH_EXIT(et); 2112 } 2113 2114 /*- 2115 * The following table shows which parameters must be provided 2116 * when calling sctp_timer_start(). For parameters not being 2117 * provided, NULL must be used. 
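 * For example (an illustrative call, not a quote from this file), arming
 * the heartbeat timer for a destination takes all three pointers,
 *     sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
 * while the address work-queue timer takes none,
 *     sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, NULL, NULL, NULL);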
2118 * 2119 * |Name |inp |stcb|net | 2120 * |-----------------------------|----|----|----| 2121 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2122 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2123 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2124 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2125 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2126 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2127 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2128 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2129 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2130 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2131 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2132 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2133 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2134 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2135 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2136 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2137 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2138 * 2139 */ 2140 2141 void 2142 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2143 struct sctp_nets *net) 2144 { 2145 struct sctp_timer *tmr; 2146 uint32_t to_ticks; 2147 uint32_t rndval, jitter; 2148 2149 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2150 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2151 t_type, stcb, stcb->sctp_ep)); 2152 tmr = NULL; 2153 to_ticks = 0; 2154 if (stcb != NULL) { 2155 SCTP_TCB_LOCK_ASSERT(stcb); 2156 } else if (inp != NULL) { 2157 SCTP_INP_WLOCK_ASSERT(inp); 2158 } else { 2159 SCTP_WQ_ADDR_LOCK_ASSERT(); 2160 } 2161 if (stcb != NULL) { 2162 /* 2163 * Don't restart timer on association that's about to be 2164 * killed. 2165 */ 2166 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2167 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2168 SCTPDBG(SCTP_DEBUG_TIMER2, 2169 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2170 t_type, inp, stcb, net); 2171 return; 2172 } 2173 /* Don't restart timer on net that's been removed. */ 2174 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2175 SCTPDBG(SCTP_DEBUG_TIMER2, 2176 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2177 t_type, inp, stcb, net); 2178 return; 2179 } 2180 } 2181 switch (t_type) { 2182 case SCTP_TIMER_TYPE_SEND: 2183 /* Here we use the RTO timer. */ 2184 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2185 #ifdef INVARIANTS 2186 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2187 t_type, inp, stcb, net); 2188 #else 2189 return; 2190 #endif 2191 } 2192 tmr = &net->rxt_timer; 2193 if (net->RTO == 0) { 2194 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2195 } else { 2196 to_ticks = sctp_msecs_to_ticks(net->RTO); 2197 } 2198 break; 2199 case SCTP_TIMER_TYPE_INIT: 2200 /* 2201 * Here we use the INIT timer default usually about 1 2202 * second. 2203 */ 2204 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2205 #ifdef INVARIANTS 2206 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2207 t_type, inp, stcb, net); 2208 #else 2209 return; 2210 #endif 2211 } 2212 tmr = &net->rxt_timer; 2213 if (net->RTO == 0) { 2214 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2215 } else { 2216 to_ticks = sctp_msecs_to_ticks(net->RTO); 2217 } 2218 break; 2219 case SCTP_TIMER_TYPE_RECV: 2220 /* 2221 * Here we use the Delayed-Ack timer value from the inp, 2222 * ususually about 200ms. 
*/ 2224 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2225 #ifdef INVARIANTS 2226 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2227 t_type, inp, stcb, net); 2228 #else 2229 return; 2230 #endif 2231 } 2232 tmr = &stcb->asoc.dack_timer; 2233 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2234 break; 2235 case SCTP_TIMER_TYPE_SHUTDOWN: 2236 /* Here we use the RTO of the destination. */ 2237 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2238 #ifdef INVARIANTS 2239 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2240 t_type, inp, stcb, net); 2241 #else 2242 return; 2243 #endif 2244 } 2245 tmr = &net->rxt_timer; 2246 if (net->RTO == 0) { 2247 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2248 } else { 2249 to_ticks = sctp_msecs_to_ticks(net->RTO); 2250 } 2251 break; 2252 case SCTP_TIMER_TYPE_HEARTBEAT: 2253 /* 2254 * The net is used here so that we can add in the RTO, even 2255 * though we use a different timer. We also add the HB delay 2256 * plus a random jitter. 2257 */ 2258 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2259 #ifdef INVARIANTS 2260 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2261 t_type, inp, stcb, net); 2262 #else 2263 return; 2264 #endif 2265 } 2266 if ((net->dest_state & SCTP_ADDR_NOHB) && 2267 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 2268 SCTPDBG(SCTP_DEBUG_TIMER2, 2269 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2270 t_type, inp, stcb, net); 2271 return; 2272 } 2273 tmr = &net->hb_timer; 2274 if (net->RTO == 0) { 2275 to_ticks = stcb->asoc.initial_rto; 2276 } else { 2277 to_ticks = net->RTO; 2278 } 2279 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2280 jitter = rndval % to_ticks; 2281 if (jitter >= (to_ticks >> 1)) { 2282 to_ticks = to_ticks + (jitter - (to_ticks >> 1)); 2283 } else { 2284 to_ticks = to_ticks - jitter; 2285 } 2286 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2287 !(net->dest_state & SCTP_ADDR_PF)) { 2288 to_ticks += net->heart_beat_delay; 2289 } 2290 /* 2291 * Now convert to_ticks, which is currently in ms, to real 2292 * ticks. 2293 */ 2294 to_ticks = sctp_msecs_to_ticks(to_ticks); 2295 break; 2296 case SCTP_TIMER_TYPE_COOKIE: 2297 /* 2298 * Here we can use the RTO timer from the network since one 2299 * RTT was complete. If a retransmission happened then we 2300 * will be using the RTO initial value. 2301 */ 2302 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2303 #ifdef INVARIANTS 2304 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2305 t_type, inp, stcb, net); 2306 #else 2307 return; 2308 #endif 2309 } 2310 tmr = &net->rxt_timer; 2311 if (net->RTO == 0) { 2312 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2313 } else { 2314 to_ticks = sctp_msecs_to_ticks(net->RTO); 2315 } 2316 break; 2317 case SCTP_TIMER_TYPE_NEWCOOKIE: 2318 /* 2319 * Nothing needed but the endpoint here, usually about 60 2320 * minutes. 2321 */ 2322 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2323 #ifdef INVARIANTS 2324 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2325 t_type, inp, stcb, net); 2326 #else 2327 return; 2328 #endif 2329 } 2330 tmr = &inp->sctp_ep.signature_change; 2331 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2332 break; 2333 case SCTP_TIMER_TYPE_PATHMTURAISE: 2334 /* 2335 * Here we use the value found in the EP for PMTUD, 2336 * usually about 10 minutes.
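 * The timer periodically re-probes the path so that the PMTU can be
 * raised again after it has been lowered.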
2337 */ 2338 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2339 #ifdef INVARIANTS 2340 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2341 t_type, inp, stcb, net); 2342 #else 2343 return; 2344 #endif 2345 } 2346 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2347 SCTPDBG(SCTP_DEBUG_TIMER2, 2348 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2349 t_type, inp, stcb, net); 2350 return; 2351 } 2352 tmr = &net->pmtu_timer; 2353 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2354 break; 2355 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2356 /* Here we use the RTO of the destination. */ 2357 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2358 #ifdef INVARIANTS 2359 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2360 t_type, inp, stcb, net); 2361 #else 2362 return; 2363 #endif 2364 } 2365 tmr = &net->rxt_timer; 2366 if (net->RTO == 0) { 2367 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2368 } else { 2369 to_ticks = sctp_msecs_to_ticks(net->RTO); 2370 } 2371 break; 2372 case SCTP_TIMER_TYPE_ASCONF: 2373 /* 2374 * Here the timer comes from the stcb but its value is from 2375 * the net's RTO. 2376 */ 2377 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2378 #ifdef INVARIANTS 2379 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2380 t_type, inp, stcb, net); 2381 #else 2382 return; 2383 #endif 2384 } 2385 tmr = &stcb->asoc.asconf_timer; 2386 if (net->RTO == 0) { 2387 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2388 } else { 2389 to_ticks = sctp_msecs_to_ticks(net->RTO); 2390 } 2391 break; 2392 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2393 /* 2394 * Here we use the endpoints shutdown guard timer usually 2395 * about 3 minutes. 2396 */ 2397 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2398 #ifdef INVARIANTS 2399 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2400 t_type, inp, stcb, net); 2401 #else 2402 return; 2403 #endif 2404 } 2405 tmr = &stcb->asoc.shut_guard_timer; 2406 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2407 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2408 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2409 } else { 2410 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2411 } 2412 } else { 2413 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2414 } 2415 break; 2416 case SCTP_TIMER_TYPE_AUTOCLOSE: 2417 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2418 #ifdef INVARIANTS 2419 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2420 t_type, inp, stcb, net); 2421 #else 2422 return; 2423 #endif 2424 } 2425 tmr = &stcb->asoc.autoclose_timer; 2426 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2427 break; 2428 case SCTP_TIMER_TYPE_STRRESET: 2429 /* 2430 * Here the timer comes from the stcb but its value is from 2431 * the net's RTO. 2432 */ 2433 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2434 #ifdef INVARIANTS 2435 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2436 t_type, inp, stcb, net); 2437 #else 2438 return; 2439 #endif 2440 } 2441 tmr = &stcb->asoc.strreset_timer; 2442 if (net->RTO == 0) { 2443 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2444 } else { 2445 to_ticks = sctp_msecs_to_ticks(net->RTO); 2446 } 2447 break; 2448 case SCTP_TIMER_TYPE_INPKILL: 2449 /* 2450 * The inp is setup to die. We re-use the signature_chage 2451 * timer since that has stopped and we are in the GONE 2452 * state. 
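 * The timeout is SCTP_INP_KILL_TIMEOUT milliseconds, converted to ticks
 * below.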
2453 */ 2454 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2455 #ifdef INVARIANTS 2456 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2457 t_type, inp, stcb, net); 2458 #else 2459 return; 2460 #endif 2461 } 2462 tmr = &inp->sctp_ep.signature_change; 2463 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2464 break; 2465 case SCTP_TIMER_TYPE_ASOCKILL: 2466 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2467 #ifdef INVARIANTS 2468 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2469 t_type, inp, stcb, net); 2470 #else 2471 return; 2472 #endif 2473 } 2474 tmr = &stcb->asoc.strreset_timer; 2475 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2476 break; 2477 case SCTP_TIMER_TYPE_ADDR_WQ: 2478 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2479 #ifdef INVARIANTS 2480 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2481 t_type, inp, stcb, net); 2482 #else 2483 return; 2484 #endif 2485 } 2486 /* Only 1 tick away :-) */ 2487 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2488 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2489 break; 2490 case SCTP_TIMER_TYPE_PRIM_DELETED: 2491 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2492 #ifdef INVARIANTS 2493 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2494 t_type, inp, stcb, net); 2495 #else 2496 return; 2497 #endif 2498 } 2499 tmr = &stcb->asoc.delete_prim_timer; 2500 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2501 break; 2502 default: 2503 #ifdef INVARIANTS 2504 panic("Unknown timer type %d", t_type); 2505 #else 2506 return; 2507 #endif 2508 } 2509 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2510 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2511 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2512 /* 2513 * We do NOT allow you to have it already running. If it is, 2514 * we leave the current one up unchanged. 2515 */ 2516 SCTPDBG(SCTP_DEBUG_TIMER2, 2517 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2518 t_type, inp, stcb, net); 2519 return; 2520 } 2521 /* At this point we can proceed. */ 2522 if (t_type == SCTP_TIMER_TYPE_SEND) { 2523 stcb->asoc.num_send_timers_up++; 2524 } 2525 tmr->stopped_from = 0; 2526 tmr->type = t_type; 2527 tmr->ep = (void *)inp; 2528 tmr->tcb = (void *)stcb; 2529 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2530 tmr->net = NULL; 2531 } else { 2532 tmr->net = (void *)net; 2533 } 2534 tmr->self = (void *)tmr; 2535 tmr->vnet = (void *)curvnet; 2536 tmr->ticks = sctp_get_tick_count(); 2537 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2538 SCTPDBG(SCTP_DEBUG_TIMER2, 2539 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2540 t_type, to_ticks, inp, stcb, net); 2541 } else { 2542 /* 2543 * This should not happen, since we checked for pending 2544 * above. 2545 */ 2546 SCTPDBG(SCTP_DEBUG_TIMER2, 2547 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2548 t_type, to_ticks, inp, stcb, net); 2549 } 2550 return; 2551 } 2552 2553 /*- 2554 * The following table shows which parameters must be provided 2555 * when calling sctp_timer_stop(). For parameters not being 2556 * provided, NULL must be used. 
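 * For example (illustrative only), a pending heartbeat timer is stopped
 * with
 *     sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, from);
 * where 'from' records the caller's location, e.g.
 * SCTP_FROM_SCTPUTIL + SCTP_LOC_1.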
2557 * 2558 * |Name |inp |stcb|net | 2559 * |-----------------------------|----|----|----| 2560 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2561 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2562 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2563 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2564 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2565 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2566 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2567 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2568 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2569 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2570 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2571 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2572 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2573 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2574 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2575 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2576 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2577 * 2578 */ 2579 2580 void 2581 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2582 struct sctp_nets *net, uint32_t from) 2583 { 2584 struct sctp_timer *tmr; 2585 2586 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2587 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2588 t_type, stcb, stcb->sctp_ep)); 2589 if (stcb != NULL) { 2590 SCTP_TCB_LOCK_ASSERT(stcb); 2591 } else if (inp != NULL) { 2592 SCTP_INP_WLOCK_ASSERT(inp); 2593 } else { 2594 SCTP_WQ_ADDR_LOCK_ASSERT(); 2595 } 2596 tmr = NULL; 2597 switch (t_type) { 2598 case SCTP_TIMER_TYPE_SEND: 2599 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2600 #ifdef INVARIANTS 2601 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2602 t_type, inp, stcb, net); 2603 #else 2604 return; 2605 #endif 2606 } 2607 tmr = &net->rxt_timer; 2608 break; 2609 case SCTP_TIMER_TYPE_INIT: 2610 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2611 #ifdef INVARIANTS 2612 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2613 t_type, inp, stcb, net); 2614 #else 2615 return; 2616 #endif 2617 } 2618 tmr = &net->rxt_timer; 2619 break; 2620 case SCTP_TIMER_TYPE_RECV: 2621 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2622 #ifdef INVARIANTS 2623 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2624 t_type, inp, stcb, net); 2625 #else 2626 return; 2627 #endif 2628 } 2629 tmr = &stcb->asoc.dack_timer; 2630 break; 2631 case SCTP_TIMER_TYPE_SHUTDOWN: 2632 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2633 #ifdef INVARIANTS 2634 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2635 t_type, inp, stcb, net); 2636 #else 2637 return; 2638 #endif 2639 } 2640 tmr = &net->rxt_timer; 2641 break; 2642 case SCTP_TIMER_TYPE_HEARTBEAT: 2643 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2644 #ifdef INVARIANTS 2645 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2646 t_type, inp, stcb, net); 2647 #else 2648 return; 2649 #endif 2650 } 2651 tmr = &net->hb_timer; 2652 break; 2653 case SCTP_TIMER_TYPE_COOKIE: 2654 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2655 #ifdef INVARIANTS 2656 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2657 t_type, inp, stcb, net); 2658 #else 2659 return; 2660 #endif 2661 } 2662 tmr = &net->rxt_timer; 2663 break; 2664 case SCTP_TIMER_TYPE_NEWCOOKIE: 2665 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2666 #ifdef INVARIANTS 2667 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2668 t_type, inp, stcb, net); 2669 #else 2670 
return; 2671 #endif 2672 } 2673 tmr = &inp->sctp_ep.signature_change; 2674 break; 2675 case SCTP_TIMER_TYPE_PATHMTURAISE: 2676 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2677 #ifdef INVARIANTS 2678 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2679 t_type, inp, stcb, net); 2680 #else 2681 return; 2682 #endif 2683 } 2684 tmr = &net->pmtu_timer; 2685 break; 2686 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2687 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2688 #ifdef INVARIANTS 2689 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2690 t_type, inp, stcb, net); 2691 #else 2692 return; 2693 #endif 2694 } 2695 tmr = &net->rxt_timer; 2696 break; 2697 case SCTP_TIMER_TYPE_ASCONF: 2698 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2699 #ifdef INVARIANTS 2700 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2701 t_type, inp, stcb, net); 2702 #else 2703 return; 2704 #endif 2705 } 2706 tmr = &stcb->asoc.asconf_timer; 2707 break; 2708 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2709 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2710 #ifdef INVARIANTS 2711 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2712 t_type, inp, stcb, net); 2713 #else 2714 return; 2715 #endif 2716 } 2717 tmr = &stcb->asoc.shut_guard_timer; 2718 break; 2719 case SCTP_TIMER_TYPE_AUTOCLOSE: 2720 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2721 #ifdef INVARIANTS 2722 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2723 t_type, inp, stcb, net); 2724 #else 2725 return; 2726 #endif 2727 } 2728 tmr = &stcb->asoc.autoclose_timer; 2729 break; 2730 case SCTP_TIMER_TYPE_STRRESET: 2731 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2732 #ifdef INVARIANTS 2733 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2734 t_type, inp, stcb, net); 2735 #else 2736 return; 2737 #endif 2738 } 2739 tmr = &stcb->asoc.strreset_timer; 2740 break; 2741 case SCTP_TIMER_TYPE_INPKILL: 2742 /* 2743 * The inp is setup to die. We re-use the signature_chage 2744 * timer since that has stopped and we are in the GONE 2745 * state. 
2746 */ 2747 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2748 #ifdef INVARIANTS 2749 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2750 t_type, inp, stcb, net); 2751 #else 2752 return; 2753 #endif 2754 } 2755 tmr = &inp->sctp_ep.signature_change; 2756 break; 2757 case SCTP_TIMER_TYPE_ASOCKILL: 2758 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2759 #ifdef INVARIANTS 2760 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2761 t_type, inp, stcb, net); 2762 #else 2763 return; 2764 #endif 2765 } 2766 tmr = &stcb->asoc.strreset_timer; 2767 break; 2768 case SCTP_TIMER_TYPE_ADDR_WQ: 2769 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2770 #ifdef INVARIANTS 2771 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2772 t_type, inp, stcb, net); 2773 #else 2774 return; 2775 #endif 2776 } 2777 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2778 break; 2779 case SCTP_TIMER_TYPE_PRIM_DELETED: 2780 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2781 #ifdef INVARIANTS 2782 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2783 t_type, inp, stcb, net); 2784 #else 2785 return; 2786 #endif 2787 } 2788 tmr = &stcb->asoc.delete_prim_timer; 2789 break; 2790 default: 2791 #ifdef INVARIANTS 2792 panic("Unknown timer type %d", t_type); 2793 #else 2794 return; 2795 #endif 2796 } 2797 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2798 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2799 (tmr->type != t_type)) { 2800 /* 2801 * Ok we have a timer that is under joint use. Cookie timer 2802 * per chance with the SEND timer. We therefore are NOT 2803 * running the timer that the caller wants stopped. So just 2804 * return. 2805 */ 2806 SCTPDBG(SCTP_DEBUG_TIMER2, 2807 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2808 t_type, inp, stcb, net); 2809 return; 2810 } 2811 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2812 stcb->asoc.num_send_timers_up--; 2813 if (stcb->asoc.num_send_timers_up < 0) { 2814 stcb->asoc.num_send_timers_up = 0; 2815 } 2816 } 2817 tmr->self = NULL; 2818 tmr->stopped_from = from; 2819 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2820 KASSERT(tmr->ep == inp, 2821 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2822 t_type, inp, tmr->ep)); 2823 KASSERT(tmr->tcb == stcb, 2824 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2825 t_type, stcb, tmr->tcb)); 2826 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2827 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2828 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2829 t_type, net, tmr->net)); 2830 SCTPDBG(SCTP_DEBUG_TIMER2, 2831 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2832 t_type, inp, stcb, net); 2833 tmr->ep = NULL; 2834 tmr->tcb = NULL; 2835 tmr->net = NULL; 2836 } else { 2837 SCTPDBG(SCTP_DEBUG_TIMER2, 2838 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2839 t_type, inp, stcb, net); 2840 } 2841 return; 2842 } 2843 2844 uint32_t 2845 sctp_calculate_len(struct mbuf *m) 2846 { 2847 uint32_t tlen = 0; 2848 struct mbuf *at; 2849 2850 at = m; 2851 while (at) { 2852 tlen += SCTP_BUF_LEN(at); 2853 at = SCTP_BUF_NEXT(at); 2854 } 2855 return (tlen); 2856 } 2857 2858 void 2859 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2860 struct sctp_association *asoc, uint32_t mtu) 2861 { 2862 /* 2863 * Reset the P-MTU size on this association, this involves changing 2864 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2865 * allow the 
DF flag to be cleared. 2866 */ 2867 struct sctp_tmit_chunk *chk; 2868 unsigned int eff_mtu, ovh; 2869 2870 asoc->smallest_mtu = mtu; 2871 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2872 ovh = SCTP_MIN_OVERHEAD; 2873 } else { 2874 ovh = SCTP_MIN_V4_OVERHEAD; 2875 } 2876 eff_mtu = mtu - ovh; 2877 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2878 if (chk->send_size > eff_mtu) { 2879 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2880 } 2881 } 2882 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2883 if (chk->send_size > eff_mtu) { 2884 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2885 } 2886 } 2887 } 2888 2889 2890 /* 2891 * Given an association and starting time of the current RTT period, update 2892 * RTO in number of msecs. net should point to the current network. 2893 * Return 1, if an RTO update was performed, return 0 if no update was 2894 * performed due to invalid starting point. 2895 */ 2896 2897 int 2898 sctp_calculate_rto(struct sctp_tcb *stcb, 2899 struct sctp_association *asoc, 2900 struct sctp_nets *net, 2901 struct timeval *old, 2902 int rtt_from_sack) 2903 { 2904 struct timeval now; 2905 uint64_t rtt_us; /* RTT in us */ 2906 int32_t rtt; /* RTT in ms */ 2907 uint32_t new_rto; 2908 int first_measure = 0; 2909 2910 /************************/ 2911 /* 1. calculate new RTT */ 2912 /************************/ 2913 /* get the current time */ 2914 if (stcb->asoc.use_precise_time) { 2915 (void)SCTP_GETPTIME_TIMEVAL(&now); 2916 } else { 2917 (void)SCTP_GETTIME_TIMEVAL(&now); 2918 } 2919 if ((old->tv_sec > now.tv_sec) || 2920 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2921 /* The starting point is in the future. */ 2922 return (0); 2923 } 2924 timevalsub(&now, old); 2925 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2926 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2927 /* The RTT is larger than a sane value. */ 2928 return (0); 2929 } 2930 /* store the current RTT in us */ 2931 net->rtt = rtt_us; 2932 /* compute rtt in ms */ 2933 rtt = (int32_t)(net->rtt / 1000); 2934 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2935 /* 2936 * Tell the CC module that a new update has just occurred 2937 * from a sack 2938 */ 2939 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2940 } 2941 /* 2942 * Do we need to determine the lan? We do this only on sacks i.e. 2943 * RTT being determined from data not non-data (HB/INIT->INITACK). 2944 */ 2945 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2946 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2947 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2948 net->lan_type = SCTP_LAN_INTERNET; 2949 } else { 2950 net->lan_type = SCTP_LAN_LOCAL; 2951 } 2952 } 2953 2954 /***************************/ 2955 /* 2. update RTTVAR & SRTT */ 2956 /***************************/ 2957 /*- 2958 * Compute the scaled average lastsa and the 2959 * scaled variance lastsv as described in van Jacobson 2960 * Paper "Congestion Avoidance and Control", Annex A. 
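 * In conventional notation the update below is (with the gains implied by
 * the shifts used here, nominally g = 1/8 and h = 1/4):
 *     SRTT   <- SRTT   + g * (RTT - SRTT)
 *     RTTVAR <- RTTVAR + h * (|RTT - SRTT| - RTTVAR)
 * and, since lastsv carries the scaled variance, the resulting
 * RTO = SRTT + 4 * RTTVAR.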
* 2961 * 2962 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2963 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2964 */ 2965 if (net->RTO_measured) { 2966 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2967 net->lastsa += rtt; 2968 if (rtt < 0) { 2969 rtt = -rtt; 2970 } 2971 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 2972 net->lastsv += rtt; 2973 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2974 rto_logging(net, SCTP_LOG_RTTVAR); 2975 } 2976 } else { 2977 /* First RTO measurement */ 2978 net->RTO_measured = 1; 2979 first_measure = 1; 2980 net->lastsa = rtt << SCTP_RTT_SHIFT; 2981 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 2982 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2983 rto_logging(net, SCTP_LOG_INITIAL_RTT); 2984 } 2985 } 2986 if (net->lastsv == 0) { 2987 net->lastsv = SCTP_CLOCK_GRANULARITY; 2988 } 2989 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 2990 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 2991 (stcb->asoc.sat_network_lockout == 0)) { 2992 stcb->asoc.sat_network = 1; 2993 } else if ((!first_measure) && stcb->asoc.sat_network) { 2994 stcb->asoc.sat_network = 0; 2995 stcb->asoc.sat_network_lockout = 1; 2996 } 2997 /* bound it, per C6/C7 in Section 5.3.1 */ 2998 if (new_rto < stcb->asoc.minrto) { 2999 new_rto = stcb->asoc.minrto; 3000 } 3001 if (new_rto > stcb->asoc.maxrto) { 3002 new_rto = stcb->asoc.maxrto; 3003 } 3004 net->RTO = new_rto; 3005 return (1); 3006 } 3007 3008 /* 3009 * Return a pointer to a contiguous piece of data from the given mbuf chain, 3010 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3011 * one mbuf, a copy is made at 'ptr'. The caller must ensure that the buffer 3012 * size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain. 3013 */ 3014 caddr_t 3015 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3016 { 3017 uint32_t count; 3018 uint8_t *ptr; 3019 3020 ptr = in_ptr; 3021 if ((off < 0) || (len <= 0)) 3022 return (NULL); 3023 3024 /* find the desired start location */ 3025 while ((m != NULL) && (off > 0)) { 3026 if (off < SCTP_BUF_LEN(m)) 3027 break; 3028 off -= SCTP_BUF_LEN(m); 3029 m = SCTP_BUF_NEXT(m); 3030 } 3031 if (m == NULL) 3032 return (NULL); 3033 3034 /* is the current mbuf large enough (e.g. contiguous)? */ 3035 if ((SCTP_BUF_LEN(m) - off) >= len) { 3036 return (mtod(m, caddr_t)+off); 3037 } else { 3038 /* else, it spans more than one mbuf, so save a temp copy... */ 3039 while ((m != NULL) && (len > 0)) { 3040 count = min(SCTP_BUF_LEN(m) - off, len); 3041 memcpy(ptr, mtod(m, caddr_t)+off, count); 3042 len -= count; 3043 ptr += count; 3044 off = 0; 3045 m = SCTP_BUF_NEXT(m); 3046 } 3047 if ((m == NULL) && (len > 0)) 3048 return (NULL); 3049 else 3050 return ((caddr_t)in_ptr); 3051 } 3052 } 3053 3054 3055 3056 struct sctp_paramhdr * 3057 sctp_get_next_param(struct mbuf *m, 3058 int offset, 3059 struct sctp_paramhdr *pull, 3060 int pull_limit) 3061 { 3062 /* This just provides a typed signature to Peter's Pull routine */ 3063 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3064 (uint8_t *)pull)); 3065 } 3066 3067 3068 struct mbuf * 3069 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3070 { 3071 struct mbuf *m_last; 3072 caddr_t dp; 3073 3074 if (padlen > 3) { 3075 return (NULL); 3076 } 3077 if (padlen <= M_TRAILINGSPACE(m)) { 3078 /* 3079 * The easy way.
We hope the majority of the time we hit 3080 * here :) 3081 */ 3082 m_last = m; 3083 } else { 3084 /* Hard way we must grow the mbuf chain */ 3085 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3086 if (m_last == NULL) { 3087 return (NULL); 3088 } 3089 SCTP_BUF_LEN(m_last) = 0; 3090 SCTP_BUF_NEXT(m_last) = NULL; 3091 SCTP_BUF_NEXT(m) = m_last; 3092 } 3093 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3094 SCTP_BUF_LEN(m_last) += padlen; 3095 memset(dp, 0, padlen); 3096 return (m_last); 3097 } 3098 3099 struct mbuf * 3100 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3101 { 3102 /* find the last mbuf in chain and pad it */ 3103 struct mbuf *m_at; 3104 3105 if (last_mbuf != NULL) { 3106 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3107 } else { 3108 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3109 if (SCTP_BUF_NEXT(m_at) == NULL) { 3110 return (sctp_add_pad_tombuf(m_at, padval)); 3111 } 3112 } 3113 } 3114 return (NULL); 3115 } 3116 3117 static void 3118 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3119 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked) 3120 { 3121 struct mbuf *m_notify; 3122 struct sctp_assoc_change *sac; 3123 struct sctp_queued_to_read *control; 3124 unsigned int notif_len; 3125 uint16_t abort_len; 3126 unsigned int i; 3127 3128 if (stcb == NULL) { 3129 return; 3130 } 3131 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3132 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3133 if (abort != NULL) { 3134 abort_len = ntohs(abort->ch.chunk_length); 3135 /* 3136 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3137 * contiguous. 3138 */ 3139 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3140 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3141 } 3142 } else { 3143 abort_len = 0; 3144 } 3145 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3146 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3147 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3148 notif_len += abort_len; 3149 } 3150 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3151 if (m_notify == NULL) { 3152 /* Retry with smaller value. 
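 * That is, fall back to just the fixed-size notification and drop the
 * optional sac_info payload.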
*/ 3153 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3154 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3155 if (m_notify == NULL) { 3156 goto set_error; 3157 } 3158 } 3159 SCTP_BUF_NEXT(m_notify) = NULL; 3160 sac = mtod(m_notify, struct sctp_assoc_change *); 3161 memset(sac, 0, notif_len); 3162 sac->sac_type = SCTP_ASSOC_CHANGE; 3163 sac->sac_flags = 0; 3164 sac->sac_length = sizeof(struct sctp_assoc_change); 3165 sac->sac_state = state; 3166 sac->sac_error = error; 3167 /* XXX verify these stream counts */ 3168 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3169 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3170 sac->sac_assoc_id = sctp_get_associd(stcb); 3171 if (notif_len > sizeof(struct sctp_assoc_change)) { 3172 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3173 i = 0; 3174 if (stcb->asoc.prsctp_supported == 1) { 3175 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3176 } 3177 if (stcb->asoc.auth_supported == 1) { 3178 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3179 } 3180 if (stcb->asoc.asconf_supported == 1) { 3181 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3182 } 3183 if (stcb->asoc.idata_supported == 1) { 3184 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3185 } 3186 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3187 if (stcb->asoc.reconfig_supported == 1) { 3188 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3189 } 3190 sac->sac_length += i; 3191 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3192 memcpy(sac->sac_info, abort, abort_len); 3193 sac->sac_length += abort_len; 3194 } 3195 } 3196 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3197 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3198 0, 0, stcb->asoc.context, 0, 0, 0, 3199 m_notify); 3200 if (control != NULL) { 3201 control->length = SCTP_BUF_LEN(m_notify); 3202 control->spec_flags = M_NOTIFICATION; 3203 /* not that we need this */ 3204 control->tail_mbuf = m_notify; 3205 sctp_add_to_readq(stcb->sctp_ep, stcb, 3206 control, 3207 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3208 so_locked); 3209 } else { 3210 sctp_m_freem(m_notify); 3211 } 3212 } 3213 /* 3214 * For 1-to-1 style sockets, we send up an error when an ABORT 3215 * comes in.
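 * Per the code below: a peer abort maps to ECONNREFUSED while still in
 * COOKIE-WAIT and to ECONNRESET afterwards; a local abort maps to
 * ETIMEDOUT in COOKIE-WAIT/COOKIE-ECHOED and to ECONNABORTED otherwise.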
3216 */ 3217 set_error: 3218 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3219 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3220 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3221 SOCK_LOCK(stcb->sctp_socket); 3222 if (from_peer) { 3223 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3224 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3225 stcb->sctp_socket->so_error = ECONNREFUSED; 3226 } else { 3227 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3228 stcb->sctp_socket->so_error = ECONNRESET; 3229 } 3230 } else { 3231 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 3232 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 3233 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3234 stcb->sctp_socket->so_error = ETIMEDOUT; 3235 } else { 3236 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3237 stcb->sctp_socket->so_error = ECONNABORTED; 3238 } 3239 } 3240 SOCK_UNLOCK(stcb->sctp_socket); 3241 } 3242 /* Wake ANY sleepers */ 3243 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3244 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3245 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3246 socantrcvmore(stcb->sctp_socket); 3247 } 3248 sorwakeup(stcb->sctp_socket); 3249 sowwakeup(stcb->sctp_socket); 3250 } 3251 3252 static void 3253 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3254 struct sockaddr *sa, uint32_t error, int so_locked) 3255 { 3256 struct mbuf *m_notify; 3257 struct sctp_paddr_change *spc; 3258 struct sctp_queued_to_read *control; 3259 3260 if ((stcb == NULL) || 3261 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3262 /* event not enabled */ 3263 return; 3264 } 3265 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3266 if (m_notify == NULL) 3267 return; 3268 SCTP_BUF_LEN(m_notify) = 0; 3269 spc = mtod(m_notify, struct sctp_paddr_change *); 3270 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3271 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3272 spc->spc_flags = 0; 3273 spc->spc_length = sizeof(struct sctp_paddr_change); 3274 switch (sa->sa_family) { 3275 #ifdef INET 3276 case AF_INET: 3277 #ifdef INET6 3278 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3279 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3280 (struct sockaddr_in6 *)&spc->spc_aaddr); 3281 } else { 3282 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3283 } 3284 #else 3285 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3286 #endif 3287 break; 3288 #endif 3289 #ifdef INET6 3290 case AF_INET6: 3291 { 3292 struct sockaddr_in6 *sin6; 3293 3294 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3295 3296 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3297 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3298 if (sin6->sin6_scope_id == 0) { 3299 /* recover scope_id for user */ 3300 (void)sa6_recoverscope(sin6); 3301 } else { 3302 /* clear embedded scope_id for user */ 3303 in6_clearscope(&sin6->sin6_addr); 3304 } 3305 } 3306 break; 3307 } 3308 #endif 3309 default: 3310 /* TSNH */ 3311 break; 3312 } 3313 spc->spc_state = state; 3314 spc->spc_error = error; 3315 spc->spc_assoc_id = sctp_get_associd(stcb); 3316 3317 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3318 SCTP_BUF_NEXT(m_notify) = NULL; 3319 3320 /* append to socket */ 3321 control = sctp_build_readq_entry(stcb, 
stcb->asoc.primary_destination, 3322 0, 0, stcb->asoc.context, 0, 0, 0, 3323 m_notify); 3324 if (control == NULL) { 3325 /* no memory */ 3326 sctp_m_freem(m_notify); 3327 return; 3328 } 3329 control->length = SCTP_BUF_LEN(m_notify); 3330 control->spec_flags = M_NOTIFICATION; 3331 /* not that we need this */ 3332 control->tail_mbuf = m_notify; 3333 sctp_add_to_readq(stcb->sctp_ep, stcb, 3334 control, 3335 &stcb->sctp_socket->so_rcv, 1, 3336 SCTP_READ_LOCK_NOT_HELD, 3337 so_locked); 3338 } 3339 3340 3341 static void 3342 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3343 struct sctp_tmit_chunk *chk, int so_locked) 3344 { 3345 struct mbuf *m_notify; 3346 struct sctp_send_failed *ssf; 3347 struct sctp_send_failed_event *ssfe; 3348 struct sctp_queued_to_read *control; 3349 struct sctp_chunkhdr *chkhdr; 3350 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3351 3352 if ((stcb == NULL) || 3353 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3354 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3355 /* event not enabled */ 3356 return; 3357 } 3358 3359 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3360 notifhdr_len = sizeof(struct sctp_send_failed_event); 3361 } else { 3362 notifhdr_len = sizeof(struct sctp_send_failed); 3363 } 3364 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3365 if (m_notify == NULL) 3366 /* no space left */ 3367 return; 3368 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3369 if (stcb->asoc.idata_supported) { 3370 chkhdr_len = sizeof(struct sctp_idata_chunk); 3371 } else { 3372 chkhdr_len = sizeof(struct sctp_data_chunk); 3373 } 3374 /* Use some defaults in case we can't access the chunk header */ 3375 if (chk->send_size >= chkhdr_len) { 3376 payload_len = chk->send_size - chkhdr_len; 3377 } else { 3378 payload_len = 0; 3379 } 3380 padding_len = 0; 3381 if (chk->data != NULL) { 3382 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3383 if (chkhdr != NULL) { 3384 chk_len = ntohs(chkhdr->chunk_length); 3385 if ((chk_len >= chkhdr_len) && 3386 (chk->send_size >= chk_len) && 3387 (chk->send_size - chk_len < 4)) { 3388 padding_len = chk->send_size - chk_len; 3389 payload_len = chk->send_size - chkhdr_len - padding_len; 3390 } 3391 } 3392 } 3393 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3394 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3395 memset(ssfe, 0, notifhdr_len); 3396 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3397 if (sent) { 3398 ssfe->ssfe_flags = SCTP_DATA_SENT; 3399 } else { 3400 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3401 } 3402 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3403 ssfe->ssfe_error = error; 3404 /* not exactly what the user sent in, but should be close :) */ 3405 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3406 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3407 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3408 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3409 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3410 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3411 } else { 3412 ssf = mtod(m_notify, struct sctp_send_failed *); 3413 memset(ssf, 0, notifhdr_len); 3414 ssf->ssf_type = SCTP_SEND_FAILED; 3415 if (sent) { 3416 ssf->ssf_flags = SCTP_DATA_SENT; 3417 } else { 3418 ssf->ssf_flags = SCTP_DATA_UNSENT; 3419 } 3420 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3421 ssf->ssf_error 
= error; 3422 /* not exactly what the user sent in, but should be close :) */ 3423 ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3424 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3425 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3426 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3427 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3428 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3429 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3430 } 3431 if (chk->data != NULL) { 3432 /* Trim off the sctp chunk header (it should be there) */ 3433 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3434 m_adj(chk->data, chkhdr_len); 3435 m_adj(chk->data, -padding_len); 3436 sctp_mbuf_crush(chk->data); 3437 chk->send_size -= (chkhdr_len + padding_len); 3438 } 3439 } 3440 SCTP_BUF_NEXT(m_notify) = chk->data; 3441 /* Steal off the mbuf */ 3442 chk->data = NULL; 3443 /* 3444 * For this case, we check the actual socket buffer, since the assoc 3445 * is going away we don't want to overfill the socket buffer for a 3446 * non-reader 3447 */ 3448 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3449 sctp_m_freem(m_notify); 3450 return; 3451 } 3452 /* append to socket */ 3453 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3454 0, 0, stcb->asoc.context, 0, 0, 0, 3455 m_notify); 3456 if (control == NULL) { 3457 /* no memory */ 3458 sctp_m_freem(m_notify); 3459 return; 3460 } 3461 control->length = SCTP_BUF_LEN(m_notify); 3462 control->spec_flags = M_NOTIFICATION; 3463 /* not that we need this */ 3464 control->tail_mbuf = m_notify; 3465 sctp_add_to_readq(stcb->sctp_ep, stcb, 3466 control, 3467 &stcb->sctp_socket->so_rcv, 1, 3468 SCTP_READ_LOCK_NOT_HELD, 3469 so_locked); 3470 } 3471 3472 3473 static void 3474 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3475 struct sctp_stream_queue_pending *sp, int so_locked) 3476 { 3477 struct mbuf *m_notify; 3478 struct sctp_send_failed *ssf; 3479 struct sctp_send_failed_event *ssfe; 3480 struct sctp_queued_to_read *control; 3481 int notifhdr_len; 3482 3483 if ((stcb == NULL) || 3484 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3485 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3486 /* event not enabled */ 3487 return; 3488 } 3489 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3490 notifhdr_len = sizeof(struct sctp_send_failed_event); 3491 } else { 3492 notifhdr_len = sizeof(struct sctp_send_failed); 3493 } 3494 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3495 if (m_notify == NULL) { 3496 /* no space left */ 3497 return; 3498 } 3499 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3500 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3501 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3502 memset(ssfe, 0, notifhdr_len); 3503 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3504 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3505 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3506 ssfe->ssfe_error = error; 3507 /* not exactly what the user sent in, but should be close :) */ 3508 ssfe->ssfe_info.snd_sid = sp->sid; 3509 if (sp->some_taken) { 3510 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3511 } else { 3512 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3513 } 3514 ssfe->ssfe_info.snd_ppid = sp->ppid; 3515 ssfe->ssfe_info.snd_context = sp->context; 3516 ssfe->ssfe_info.snd_assoc_id = 
sctp_get_associd(stcb); 3517 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3518 } else { 3519 ssf = mtod(m_notify, struct sctp_send_failed *); 3520 memset(ssf, 0, notifhdr_len); 3521 ssf->ssf_type = SCTP_SEND_FAILED; 3522 ssf->ssf_flags = SCTP_DATA_UNSENT; 3523 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3524 ssf->ssf_error = error; 3525 /* not exactly what the user sent in, but should be close :) */ 3526 ssf->ssf_info.sinfo_stream = sp->sid; 3527 ssf->ssf_info.sinfo_ssn = 0; 3528 if (sp->some_taken) { 3529 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3530 } else { 3531 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3532 } 3533 ssf->ssf_info.sinfo_ppid = sp->ppid; 3534 ssf->ssf_info.sinfo_context = sp->context; 3535 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3536 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3537 } 3538 SCTP_BUF_NEXT(m_notify) = sp->data; 3539 3540 /* Steal off the mbuf */ 3541 sp->data = NULL; 3542 /* 3543 * For this case, we check the actual socket buffer, since the assoc 3544 * is going away we don't want to overfill the socket buffer for a 3545 * non-reader 3546 */ 3547 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3548 sctp_m_freem(m_notify); 3549 return; 3550 } 3551 /* append to socket */ 3552 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3553 0, 0, stcb->asoc.context, 0, 0, 0, 3554 m_notify); 3555 if (control == NULL) { 3556 /* no memory */ 3557 sctp_m_freem(m_notify); 3558 return; 3559 } 3560 control->length = SCTP_BUF_LEN(m_notify); 3561 control->spec_flags = M_NOTIFICATION; 3562 /* not that we need this */ 3563 control->tail_mbuf = m_notify; 3564 sctp_add_to_readq(stcb->sctp_ep, stcb, 3565 control, 3566 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3567 } 3568 3569 3570 3571 static void 3572 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3573 { 3574 struct mbuf *m_notify; 3575 struct sctp_adaptation_event *sai; 3576 struct sctp_queued_to_read *control; 3577 3578 if ((stcb == NULL) || 3579 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3580 /* event not enabled */ 3581 return; 3582 } 3583 3584 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3585 if (m_notify == NULL) 3586 /* no space left */ 3587 return; 3588 SCTP_BUF_LEN(m_notify) = 0; 3589 sai = mtod(m_notify, struct sctp_adaptation_event *); 3590 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3591 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3592 sai->sai_flags = 0; 3593 sai->sai_length = sizeof(struct sctp_adaptation_event); 3594 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3595 sai->sai_assoc_id = sctp_get_associd(stcb); 3596 3597 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3598 SCTP_BUF_NEXT(m_notify) = NULL; 3599 3600 /* append to socket */ 3601 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3602 0, 0, stcb->asoc.context, 0, 0, 0, 3603 m_notify); 3604 if (control == NULL) { 3605 /* no memory */ 3606 sctp_m_freem(m_notify); 3607 return; 3608 } 3609 control->length = SCTP_BUF_LEN(m_notify); 3610 control->spec_flags = M_NOTIFICATION; 3611 /* not that we need this */ 3612 control->tail_mbuf = m_notify; 3613 sctp_add_to_readq(stcb->sctp_ep, stcb, 3614 control, 3615 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3616 } 3617 3618 /* This always must be called with the read-queue LOCKED in the INP */ 3619 static void 3620 
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3621 uint32_t val, int so_locked) 3622 { 3623 struct mbuf *m_notify; 3624 struct sctp_pdapi_event *pdapi; 3625 struct sctp_queued_to_read *control; 3626 struct sockbuf *sb; 3627 3628 if ((stcb == NULL) || 3629 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3630 /* event not enabled */ 3631 return; 3632 } 3633 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3634 return; 3635 } 3636 3637 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3638 if (m_notify == NULL) 3639 /* no space left */ 3640 return; 3641 SCTP_BUF_LEN(m_notify) = 0; 3642 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3643 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3644 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3645 pdapi->pdapi_flags = 0; 3646 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3647 pdapi->pdapi_indication = error; 3648 pdapi->pdapi_stream = (val >> 16); 3649 pdapi->pdapi_seq = (val & 0x0000ffff); 3650 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3651 3652 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3653 SCTP_BUF_NEXT(m_notify) = NULL; 3654 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3655 0, 0, stcb->asoc.context, 0, 0, 0, 3656 m_notify); 3657 if (control == NULL) { 3658 /* no memory */ 3659 sctp_m_freem(m_notify); 3660 return; 3661 } 3662 control->length = SCTP_BUF_LEN(m_notify); 3663 control->spec_flags = M_NOTIFICATION; 3664 /* not that we need this */ 3665 control->tail_mbuf = m_notify; 3666 sb = &stcb->sctp_socket->so_rcv; 3667 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3668 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3669 } 3670 sctp_sballoc(stcb, sb, m_notify); 3671 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3672 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3673 } 3674 control->end_added = 1; 3675 if (stcb->asoc.control_pdapi) 3676 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3677 else { 3678 /* we really should not see this case */ 3679 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3680 } 3681 if (stcb->sctp_ep && stcb->sctp_socket) { 3682 /* This should always be the case */ 3683 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3684 } 3685 } 3686 3687 static void 3688 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3689 { 3690 struct mbuf *m_notify; 3691 struct sctp_shutdown_event *sse; 3692 struct sctp_queued_to_read *control; 3693 3694 /* 3695 * For TCP model AND UDP connected sockets we will send an error up 3696 * when an SHUTDOWN completes 3697 */ 3698 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3699 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3700 /* mark socket closed for read/write and wakeup! 
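Only the send direction is actually marked closed here, via socantsendmore(); the receive side is left readable so the SCTP_SHUTDOWN_EVENT queued below can still be delivered to the application.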
*/ 3701 socantsendmore(stcb->sctp_socket); 3702 } 3703 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3704 /* event not enabled */ 3705 return; 3706 } 3707 3708 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3709 if (m_notify == NULL) 3710 /* no space left */ 3711 return; 3712 sse = mtod(m_notify, struct sctp_shutdown_event *); 3713 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3714 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3715 sse->sse_flags = 0; 3716 sse->sse_length = sizeof(struct sctp_shutdown_event); 3717 sse->sse_assoc_id = sctp_get_associd(stcb); 3718 3719 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3720 SCTP_BUF_NEXT(m_notify) = NULL; 3721 3722 /* append to socket */ 3723 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3724 0, 0, stcb->asoc.context, 0, 0, 0, 3725 m_notify); 3726 if (control == NULL) { 3727 /* no memory */ 3728 sctp_m_freem(m_notify); 3729 return; 3730 } 3731 control->length = SCTP_BUF_LEN(m_notify); 3732 control->spec_flags = M_NOTIFICATION; 3733 /* not that we need this */ 3734 control->tail_mbuf = m_notify; 3735 sctp_add_to_readq(stcb->sctp_ep, stcb, 3736 control, 3737 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3738 } 3739 3740 static void 3741 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3742 int so_locked) 3743 { 3744 struct mbuf *m_notify; 3745 struct sctp_sender_dry_event *event; 3746 struct sctp_queued_to_read *control; 3747 3748 if ((stcb == NULL) || 3749 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3750 /* event not enabled */ 3751 return; 3752 } 3753 3754 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3755 if (m_notify == NULL) { 3756 /* no space left */ 3757 return; 3758 } 3759 SCTP_BUF_LEN(m_notify) = 0; 3760 event = mtod(m_notify, struct sctp_sender_dry_event *); 3761 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3762 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3763 event->sender_dry_flags = 0; 3764 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3765 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3766 3767 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3768 SCTP_BUF_NEXT(m_notify) = NULL; 3769 3770 /* append to socket */ 3771 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3772 0, 0, stcb->asoc.context, 0, 0, 0, 3773 m_notify); 3774 if (control == NULL) { 3775 /* no memory */ 3776 sctp_m_freem(m_notify); 3777 return; 3778 } 3779 control->length = SCTP_BUF_LEN(m_notify); 3780 control->spec_flags = M_NOTIFICATION; 3781 /* not that we need this */ 3782 control->tail_mbuf = m_notify; 3783 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3784 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3785 } 3786 3787 3788 void 3789 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3790 { 3791 struct mbuf *m_notify; 3792 struct sctp_queued_to_read *control; 3793 struct sctp_stream_change_event *stradd; 3794 3795 if ((stcb == NULL) || 3796 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3797 /* event not enabled */ 3798 return; 3799 } 3800 if ((stcb->asoc.peer_req_out) && flag) { 3801 /* Peer made the request, don't tell the local user */ 3802 stcb->asoc.peer_req_out = 0; 3803 return; 3804 } 3805 stcb->asoc.peer_req_out = 0; 3806 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3807 if (m_notify == NULL) 3808 /* no space left */ 3809 return; 3810 SCTP_BUF_LEN(m_notify) = 0; 3811 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3812 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3813 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3814 stradd->strchange_flags = flag; 3815 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3816 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3817 stradd->strchange_instrms = numberin; 3818 stradd->strchange_outstrms = numberout; 3819 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3820 SCTP_BUF_NEXT(m_notify) = NULL; 3821 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3822 /* no space */ 3823 sctp_m_freem(m_notify); 3824 return; 3825 } 3826 /* append to socket */ 3827 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3828 0, 0, stcb->asoc.context, 0, 0, 0, 3829 m_notify); 3830 if (control == NULL) { 3831 /* no memory */ 3832 sctp_m_freem(m_notify); 3833 return; 3834 } 3835 control->length = SCTP_BUF_LEN(m_notify); 3836 control->spec_flags = M_NOTIFICATION; 3837 /* not that we need this */ 3838 control->tail_mbuf = m_notify; 3839 sctp_add_to_readq(stcb->sctp_ep, stcb, 3840 control, 3841 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3842 } 3843 3844 void 3845 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3846 { 3847 struct mbuf *m_notify; 3848 struct sctp_queued_to_read *control; 3849 struct sctp_assoc_reset_event *strasoc; 3850 3851 if ((stcb == NULL) || 3852 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3853 /* event not enabled */ 3854 return; 3855 } 3856 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3857 if (m_notify == NULL) 3858 /* no space left */ 3859 return; 3860 SCTP_BUF_LEN(m_notify) = 0; 3861 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3862 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3863 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3864 strasoc->assocreset_flags = flag; 3865 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3866 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3867 strasoc->assocreset_local_tsn = sending_tsn; 3868 strasoc->assocreset_remote_tsn = recv_tsn; 3869 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3870 SCTP_BUF_NEXT(m_notify) = NULL; 3871 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3872 /* no space */ 3873 sctp_m_freem(m_notify); 3874 return; 3875 } 3876 /* append to socket */ 3877 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3878 0, 0, stcb->asoc.context, 0, 0, 0, 3879 m_notify); 3880 if (control == NULL) { 3881 /* no memory */ 3882 sctp_m_freem(m_notify); 3883 return; 3884 } 3885 control->length = SCTP_BUF_LEN(m_notify); 3886 control->spec_flags = M_NOTIFICATION; 3887 /* not that we need this */ 3888 control->tail_mbuf = m_notify; 3889 sctp_add_to_readq(stcb->sctp_ep, stcb, 3890 control, 3891 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3892 } 3893 3894 3895 3896 static void 3897 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3898 int number_entries, uint16_t *list, int flag) 3899 { 3900 struct mbuf *m_notify; 3901 struct 
sctp_queued_to_read *control; 3902 struct sctp_stream_reset_event *strreset; 3903 int len; 3904 3905 if ((stcb == NULL) || 3906 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3907 /* event not enabled */ 3908 return; 3909 } 3910 3911 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3912 if (m_notify == NULL) 3913 /* no space left */ 3914 return; 3915 SCTP_BUF_LEN(m_notify) = 0; 3916 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3917 if (len > M_TRAILINGSPACE(m_notify)) { 3918 /* never enough room */ 3919 sctp_m_freem(m_notify); 3920 return; 3921 } 3922 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3923 memset(strreset, 0, len); 3924 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3925 strreset->strreset_flags = flag; 3926 strreset->strreset_length = len; 3927 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3928 if (number_entries) { 3929 int i; 3930 3931 for (i = 0; i < number_entries; i++) { 3932 strreset->strreset_stream_list[i] = ntohs(list[i]); 3933 } 3934 } 3935 SCTP_BUF_LEN(m_notify) = len; 3936 SCTP_BUF_NEXT(m_notify) = NULL; 3937 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3938 /* no space */ 3939 sctp_m_freem(m_notify); 3940 return; 3941 } 3942 /* append to socket */ 3943 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3944 0, 0, stcb->asoc.context, 0, 0, 0, 3945 m_notify); 3946 if (control == NULL) { 3947 /* no memory */ 3948 sctp_m_freem(m_notify); 3949 return; 3950 } 3951 control->length = SCTP_BUF_LEN(m_notify); 3952 control->spec_flags = M_NOTIFICATION; 3953 /* not that we need this */ 3954 control->tail_mbuf = m_notify; 3955 sctp_add_to_readq(stcb->sctp_ep, stcb, 3956 control, 3957 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3958 } 3959 3960 3961 static void 3962 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3963 { 3964 struct mbuf *m_notify; 3965 struct sctp_remote_error *sre; 3966 struct sctp_queued_to_read *control; 3967 unsigned int notif_len; 3968 uint16_t chunk_len; 3969 3970 if ((stcb == NULL) || 3971 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3972 return; 3973 } 3974 if (chunk != NULL) { 3975 chunk_len = ntohs(chunk->ch.chunk_length); 3976 /* 3977 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3978 * contiguous. 3979 */ 3980 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 3981 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 3982 } 3983 } else { 3984 chunk_len = 0; 3985 } 3986 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 3987 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3988 if (m_notify == NULL) { 3989 /* Retry with smaller value. 
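The payload-sized allocation failed, so fall back to a bare struct sctp_remote_error header; in that case the offending chunk contents are simply left out of the notification (the copy below is guarded by notif_len).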
*/ 3990 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 3991 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3992 if (m_notify == NULL) { 3993 return; 3994 } 3995 } 3996 SCTP_BUF_NEXT(m_notify) = NULL; 3997 sre = mtod(m_notify, struct sctp_remote_error *); 3998 memset(sre, 0, notif_len); 3999 sre->sre_type = SCTP_REMOTE_ERROR; 4000 sre->sre_flags = 0; 4001 sre->sre_length = sizeof(struct sctp_remote_error); 4002 sre->sre_error = error; 4003 sre->sre_assoc_id = sctp_get_associd(stcb); 4004 if (notif_len > sizeof(struct sctp_remote_error)) { 4005 memcpy(sre->sre_data, chunk, chunk_len); 4006 sre->sre_length += chunk_len; 4007 } 4008 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4009 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4010 0, 0, stcb->asoc.context, 0, 0, 0, 4011 m_notify); 4012 if (control != NULL) { 4013 control->length = SCTP_BUF_LEN(m_notify); 4014 control->spec_flags = M_NOTIFICATION; 4015 /* not that we need this */ 4016 control->tail_mbuf = m_notify; 4017 sctp_add_to_readq(stcb->sctp_ep, stcb, 4018 control, 4019 &stcb->sctp_socket->so_rcv, 1, 4020 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4021 } else { 4022 sctp_m_freem(m_notify); 4023 } 4024 } 4025 4026 4027 void 4028 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4029 uint32_t error, void *data, int so_locked) 4030 { 4031 if ((stcb == NULL) || 4032 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4033 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4034 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4035 /* If the socket is gone we are out of here */ 4036 return; 4037 } 4038 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4039 return; 4040 } 4041 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4042 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4043 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4044 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4045 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4046 /* Don't report these in front states */ 4047 return; 4048 } 4049 } 4050 switch (notification) { 4051 case SCTP_NOTIFY_ASSOC_UP: 4052 if (stcb->asoc.assoc_up_sent == 0) { 4053 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 4054 stcb->asoc.assoc_up_sent = 1; 4055 } 4056 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4057 sctp_notify_adaptation_layer(stcb); 4058 } 4059 if (stcb->asoc.auth_supported == 0) { 4060 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4061 NULL, so_locked); 4062 } 4063 break; 4064 case SCTP_NOTIFY_ASSOC_DOWN: 4065 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 4066 break; 4067 case SCTP_NOTIFY_INTERFACE_DOWN: 4068 { 4069 struct sctp_nets *net; 4070 4071 net = (struct sctp_nets *)data; 4072 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4073 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4074 break; 4075 } 4076 case SCTP_NOTIFY_INTERFACE_UP: 4077 { 4078 struct sctp_nets *net; 4079 4080 net = (struct sctp_nets *)data; 4081 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4082 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4083 break; 4084 } 4085 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4086 { 4087 struct sctp_nets *net; 4088 4089 net = (struct sctp_nets *)data; 4090 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4091 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4092 break; 4093 } 4094 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4095 
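/* data points at a struct sctp_stream_queue_pending that never left the stream output queue, so use the sp-based failure report */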
sctp_notify_send_failed2(stcb, error, 4096 (struct sctp_stream_queue_pending *)data, so_locked); 4097 break; 4098 case SCTP_NOTIFY_SENT_DG_FAIL: 4099 sctp_notify_send_failed(stcb, 1, error, 4100 (struct sctp_tmit_chunk *)data, so_locked); 4101 break; 4102 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4103 sctp_notify_send_failed(stcb, 0, error, 4104 (struct sctp_tmit_chunk *)data, so_locked); 4105 break; 4106 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4107 { 4108 uint32_t val; 4109 4110 val = *((uint32_t *)data); 4111 4112 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4113 break; 4114 } 4115 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4116 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4117 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4118 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked); 4119 } else { 4120 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked); 4121 } 4122 break; 4123 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4124 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4125 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4126 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked); 4127 } else { 4128 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked); 4129 } 4130 break; 4131 case SCTP_NOTIFY_ASSOC_RESTART: 4132 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked); 4133 if (stcb->asoc.auth_supported == 0) { 4134 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4135 NULL, so_locked); 4136 } 4137 break; 4138 case SCTP_NOTIFY_STR_RESET_SEND: 4139 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4140 break; 4141 case SCTP_NOTIFY_STR_RESET_RECV: 4142 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4143 break; 4144 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4145 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4146 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4147 break; 4148 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4149 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4150 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4151 break; 4152 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4153 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4154 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4155 break; 4156 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4157 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4158 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4159 break; 4160 case SCTP_NOTIFY_ASCONF_ADD_IP: 4161 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4162 error, so_locked); 4163 break; 4164 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4165 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4166 error, so_locked); 4167 break; 4168 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4169 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4170 error, so_locked); 4171 break; 4172 case SCTP_NOTIFY_PEER_SHUTDOWN: 4173 sctp_notify_shutdown_event(stcb); 4174 break; 4175 case SCTP_NOTIFY_AUTH_NEW_KEY: 4176 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4177 (uint16_t)(uintptr_t)data, 4178 so_locked); 4179 break; 4180 case SCTP_NOTIFY_AUTH_FREE_KEY: 4181 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4182 (uint16_t)(uintptr_t)data, 4183 so_locked); 4184 break; 4185 case SCTP_NOTIFY_NO_PEER_AUTH: 4186 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4187 
(uint16_t)(uintptr_t)data, 4188 so_locked); 4189 break; 4190 case SCTP_NOTIFY_SENDER_DRY: 4191 sctp_notify_sender_dry_event(stcb, so_locked); 4192 break; 4193 case SCTP_NOTIFY_REMOTE_ERROR: 4194 sctp_notify_remote_error(stcb, error, data); 4195 break; 4196 default: 4197 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4198 __func__, notification, notification); 4199 break; 4200 } /* end switch */ 4201 } 4202 4203 void 4204 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked) 4205 { 4206 struct sctp_association *asoc; 4207 struct sctp_stream_out *outs; 4208 struct sctp_tmit_chunk *chk, *nchk; 4209 struct sctp_stream_queue_pending *sp, *nsp; 4210 int i; 4211 4212 if (stcb == NULL) { 4213 return; 4214 } 4215 asoc = &stcb->asoc; 4216 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4217 /* already being freed */ 4218 return; 4219 } 4220 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4221 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4222 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4223 return; 4224 } 4225 /* now through all the gunk freeing chunks */ 4226 if (holds_lock == 0) { 4227 SCTP_TCB_SEND_LOCK(stcb); 4228 } 4229 /* sent queue SHOULD be empty */ 4230 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4231 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4232 asoc->sent_queue_cnt--; 4233 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4234 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4235 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4236 #ifdef INVARIANTS 4237 } else { 4238 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4239 #endif 4240 } 4241 } 4242 if (chk->data != NULL) { 4243 sctp_free_bufspace(stcb, asoc, chk, 1); 4244 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4245 error, chk, so_locked); 4246 if (chk->data) { 4247 sctp_m_freem(chk->data); 4248 chk->data = NULL; 4249 } 4250 } 4251 sctp_free_a_chunk(stcb, chk, so_locked); 4252 /* sa_ignore FREED_MEMORY */ 4253 } 4254 /* pending send queue SHOULD be empty */ 4255 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4256 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4257 asoc->send_queue_cnt--; 4258 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4259 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4260 #ifdef INVARIANTS 4261 } else { 4262 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4263 #endif 4264 } 4265 if (chk->data != NULL) { 4266 sctp_free_bufspace(stcb, asoc, chk, 1); 4267 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4268 error, chk, so_locked); 4269 if (chk->data) { 4270 sctp_m_freem(chk->data); 4271 chk->data = NULL; 4272 } 4273 } 4274 sctp_free_a_chunk(stcb, chk, so_locked); 4275 /* sa_ignore FREED_MEMORY */ 4276 } 4277 for (i = 0; i < asoc->streamoutcnt; i++) { 4278 /* For each stream */ 4279 outs = &asoc->strmout[i]; 4280 /* clean up any sends there */ 4281 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4282 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4283 TAILQ_REMOVE(&outs->outqueue, sp, next); 4284 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1); 4285 sctp_free_spbufspace(stcb, asoc, sp); 4286 if (sp->data) { 4287 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4288 error, (void *)sp, so_locked); 4289 if (sp->data) { 4290 sctp_m_freem(sp->data); 4291 sp->data = NULL; 4292 sp->tail_mbuf = NULL; 4293 sp->length = 0; 4294 } 4295 } 4296 if (sp->net) { 4297 
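/* release the reference this pending message held on its destination net */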
sctp_free_remote_addr(sp->net); 4298 sp->net = NULL; 4299 } 4300 /* Free the chunk */ 4301 sctp_free_a_strmoq(stcb, sp, so_locked); 4302 /* sa_ignore FREED_MEMORY */ 4303 } 4304 } 4305 4306 if (holds_lock == 0) { 4307 SCTP_TCB_SEND_UNLOCK(stcb); 4308 } 4309 } 4310 4311 void 4312 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error, 4313 struct sctp_abort_chunk *abort, int so_locked) 4314 { 4315 if (stcb == NULL) { 4316 return; 4317 } 4318 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4319 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4320 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4321 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 4322 } 4323 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4324 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4325 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4326 return; 4327 } 4328 /* Tell them we lost the asoc */ 4329 sctp_report_all_outbound(stcb, error, 0, so_locked); 4330 if (from_peer) { 4331 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4332 } else { 4333 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4334 } 4335 } 4336 4337 void 4338 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4339 struct mbuf *m, int iphlen, 4340 struct sockaddr *src, struct sockaddr *dst, 4341 struct sctphdr *sh, struct mbuf *op_err, 4342 uint8_t mflowtype, uint32_t mflowid, 4343 uint32_t vrf_id, uint16_t port) 4344 { 4345 uint32_t vtag; 4346 4347 vtag = 0; 4348 if (stcb != NULL) { 4349 vtag = stcb->asoc.peer_vtag; 4350 vrf_id = stcb->asoc.vrf_id; 4351 } 4352 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4353 mflowtype, mflowid, inp->fibnum, 4354 vrf_id, port); 4355 if (stcb != NULL) { 4356 /* We have a TCB to abort, send notification too */ 4357 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 4358 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4359 /* Ok, now lets free it */ 4360 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4361 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4362 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4363 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4364 } 4365 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4366 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4367 } 4368 } 4369 #ifdef SCTP_ASOCLOG_OF_TSNS 4370 void 4371 sctp_print_out_track_log(struct sctp_tcb *stcb) 4372 { 4373 #ifdef NOSIY_PRINTS 4374 int i; 4375 4376 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4377 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4378 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4379 SCTP_PRINTF("None rcvd\n"); 4380 goto none_in; 4381 } 4382 if (stcb->asoc.tsn_in_wrapped) { 4383 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4384 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4385 stcb->asoc.in_tsnlog[i].tsn, 4386 stcb->asoc.in_tsnlog[i].strm, 4387 stcb->asoc.in_tsnlog[i].seq, 4388 stcb->asoc.in_tsnlog[i].flgs, 4389 stcb->asoc.in_tsnlog[i].sz); 4390 } 4391 } 4392 if (stcb->asoc.tsn_in_at) { 4393 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4394 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4395 stcb->asoc.in_tsnlog[i].tsn, 4396 stcb->asoc.in_tsnlog[i].strm, 4397 stcb->asoc.in_tsnlog[i].seq, 4398 stcb->asoc.in_tsnlog[i].flgs, 4399 stcb->asoc.in_tsnlog[i].sz); 4400 } 4401 } 4402 none_in: 4403 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4404 if ((stcb->asoc.tsn_out_at == 0) && 4405 
(stcb->asoc.tsn_out_wrapped == 0)) { 4406 SCTP_PRINTF("None sent\n"); 4407 } 4408 if (stcb->asoc.tsn_out_wrapped) { 4409 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4410 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4411 stcb->asoc.out_tsnlog[i].tsn, 4412 stcb->asoc.out_tsnlog[i].strm, 4413 stcb->asoc.out_tsnlog[i].seq, 4414 stcb->asoc.out_tsnlog[i].flgs, 4415 stcb->asoc.out_tsnlog[i].sz); 4416 } 4417 } 4418 if (stcb->asoc.tsn_out_at) { 4419 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4420 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4421 stcb->asoc.out_tsnlog[i].tsn, 4422 stcb->asoc.out_tsnlog[i].strm, 4423 stcb->asoc.out_tsnlog[i].seq, 4424 stcb->asoc.out_tsnlog[i].flgs, 4425 stcb->asoc.out_tsnlog[i].sz); 4426 } 4427 } 4428 #endif 4429 } 4430 #endif 4431 4432 void 4433 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4434 struct mbuf *op_err, 4435 int so_locked) 4436 { 4437 4438 if (stcb == NULL) { 4439 /* Got to have a TCB */ 4440 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4441 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4442 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4443 SCTP_CALLED_DIRECTLY_NOCMPSET); 4444 } 4445 } 4446 return; 4447 } else { 4448 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4449 } 4450 /* notify the peer */ 4451 sctp_send_abort_tcb(stcb, op_err, so_locked); 4452 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4453 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4454 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4455 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4456 } 4457 /* notify the ulp */ 4458 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4459 sctp_abort_notification(stcb, 0, 0, NULL, so_locked); 4460 } 4461 /* now free the asoc */ 4462 #ifdef SCTP_ASOCLOG_OF_TSNS 4463 sctp_print_out_track_log(stcb); 4464 #endif 4465 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4466 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4467 } 4468 4469 void 4470 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4471 struct sockaddr *src, struct sockaddr *dst, 4472 struct sctphdr *sh, struct sctp_inpcb *inp, 4473 struct mbuf *cause, 4474 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4475 uint32_t vrf_id, uint16_t port) 4476 { 4477 struct sctp_chunkhdr *ch, chunk_buf; 4478 unsigned int chk_length; 4479 int contains_init_chunk; 4480 4481 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4482 /* Generate a TO address for future reference */ 4483 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4484 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4485 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4486 SCTP_CALLED_DIRECTLY_NOCMPSET); 4487 } 4488 } 4489 contains_init_chunk = 0; 4490 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4491 sizeof(*ch), (uint8_t *)&chunk_buf); 4492 while (ch != NULL) { 4493 chk_length = ntohs(ch->chunk_length); 4494 if (chk_length < sizeof(*ch)) { 4495 /* break to abort land */ 4496 break; 4497 } 4498 switch (ch->chunk_type) { 4499 case SCTP_INIT: 4500 contains_init_chunk = 1; 4501 break; 4502 case SCTP_PACKET_DROPPED: 4503 /* we don't respond to pkt-dropped */ 4504 return; 4505 case SCTP_ABORT_ASSOCIATION: 4506 /* we don't respond with an ABORT to an ABORT */ 4507 return; 4508 case SCTP_SHUTDOWN_COMPLETE: 4509 /* 4510 * we ignore it since we are not waiting for it and 4511 * peer is gone 4512 */ 4513 return; 4514 case SCTP_SHUTDOWN_ACK: 4515 sctp_send_shutdown_complete2(src, dst, sh, 4516 mflowtype, mflowid, fibnum, 4517 vrf_id, port); 4518 return; 4519 
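/* any other chunk type falls through; once the scan completes an ABORT is sent below unless the sctp_blackhole sysctl suppresses it */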
default: 4520 break; 4521 } 4522 offset += SCTP_SIZE32(chk_length); 4523 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4524 sizeof(*ch), (uint8_t *)&chunk_buf); 4525 } 4526 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4527 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4528 (contains_init_chunk == 0))) { 4529 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4530 mflowtype, mflowid, fibnum, 4531 vrf_id, port); 4532 } 4533 } 4534 4535 /* 4536 * check the inbound datagram to make sure there is not an abort inside it, 4537 * if there is return 1, else return 0. 4538 */ 4539 int 4540 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill) 4541 { 4542 struct sctp_chunkhdr *ch; 4543 struct sctp_init_chunk *init_chk, chunk_buf; 4544 int offset; 4545 unsigned int chk_length; 4546 4547 offset = iphlen + sizeof(struct sctphdr); 4548 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4549 (uint8_t *)&chunk_buf); 4550 while (ch != NULL) { 4551 chk_length = ntohs(ch->chunk_length); 4552 if (chk_length < sizeof(*ch)) { 4553 /* packet is probably corrupt */ 4554 break; 4555 } 4556 /* we seem to be ok, is it an abort? */ 4557 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4558 /* yep, tell them */ 4559 return (1); 4560 } 4561 if (ch->chunk_type == SCTP_INITIATION) { 4562 /* need to update the Vtag */ 4563 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4564 offset, sizeof(*init_chk), (uint8_t *)&chunk_buf); 4565 if (init_chk != NULL) { 4566 *vtagfill = ntohl(init_chk->init.initiate_tag); 4567 } 4568 } 4569 /* Nope, move to the next chunk */ 4570 offset += SCTP_SIZE32(chk_length); 4571 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4572 sizeof(*ch), (uint8_t *)&chunk_buf); 4573 } 4574 return (0); 4575 } 4576 4577 /* 4578 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4579 * set (i.e. it's 0) so, create this function to compare link local scopes 4580 */ 4581 #ifdef INET6 4582 uint32_t 4583 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4584 { 4585 struct sockaddr_in6 a, b; 4586 4587 /* save copies */ 4588 a = *addr1; 4589 b = *addr2; 4590 4591 if (a.sin6_scope_id == 0) 4592 if (sa6_recoverscope(&a)) { 4593 /* can't get scope, so can't match */ 4594 return (0); 4595 } 4596 if (b.sin6_scope_id == 0) 4597 if (sa6_recoverscope(&b)) { 4598 /* can't get scope, so can't match */ 4599 return (0); 4600 } 4601 if (a.sin6_scope_id != b.sin6_scope_id) 4602 return (0); 4603 4604 return (1); 4605 } 4606 4607 /* 4608 * returns a sockaddr_in6 with embedded scope recovered and removed 4609 */ 4610 struct sockaddr_in6 * 4611 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4612 { 4613 /* check and strip embedded scope junk */ 4614 if (addr->sin6_family == AF_INET6) { 4615 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4616 if (addr->sin6_scope_id == 0) { 4617 *store = *addr; 4618 if (!sa6_recoverscope(store)) { 4619 /* use the recovered scope */ 4620 addr = store; 4621 } 4622 } else { 4623 /* else, return the original "to" addr */ 4624 in6_clearscope(&addr->sin6_addr); 4625 } 4626 } 4627 } 4628 return (addr); 4629 } 4630 #endif 4631 4632 /* 4633 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4634 * if same, 0 if not 4635 */ 4636 int 4637 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4638 { 4639 4640 /* must be valid */ 4641 if (sa1 == NULL || sa2 == NULL) 4642 return (0); 4643 4644 /* must be the same family */ 4645 if (sa1->sa_family != sa2->sa_family) 4646 return (0); 4647 4648 switch (sa1->sa_family) { 4649 #ifdef INET6 4650 case AF_INET6: 4651 { 4652 /* IPv6 addresses */ 4653 struct sockaddr_in6 *sin6_1, *sin6_2; 4654 4655 sin6_1 = (struct sockaddr_in6 *)sa1; 4656 sin6_2 = (struct sockaddr_in6 *)sa2; 4657 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4658 sin6_2)); 4659 } 4660 #endif 4661 #ifdef INET 4662 case AF_INET: 4663 { 4664 /* IPv4 addresses */ 4665 struct sockaddr_in *sin_1, *sin_2; 4666 4667 sin_1 = (struct sockaddr_in *)sa1; 4668 sin_2 = (struct sockaddr_in *)sa2; 4669 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4670 } 4671 #endif 4672 default: 4673 /* we don't do these... */ 4674 return (0); 4675 } 4676 } 4677 4678 void 4679 sctp_print_address(struct sockaddr *sa) 4680 { 4681 #ifdef INET6 4682 char ip6buf[INET6_ADDRSTRLEN]; 4683 #endif 4684 4685 switch (sa->sa_family) { 4686 #ifdef INET6 4687 case AF_INET6: 4688 { 4689 struct sockaddr_in6 *sin6; 4690 4691 sin6 = (struct sockaddr_in6 *)sa; 4692 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4693 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4694 ntohs(sin6->sin6_port), 4695 sin6->sin6_scope_id); 4696 break; 4697 } 4698 #endif 4699 #ifdef INET 4700 case AF_INET: 4701 { 4702 struct sockaddr_in *sin; 4703 unsigned char *p; 4704 4705 sin = (struct sockaddr_in *)sa; 4706 p = (unsigned char *)&sin->sin_addr; 4707 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4708 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4709 break; 4710 } 4711 #endif 4712 default: 4713 SCTP_PRINTF("?\n"); 4714 break; 4715 } 4716 } 4717 4718 void 4719 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4720 struct sctp_inpcb *new_inp, 4721 struct sctp_tcb *stcb, 4722 int waitflags) 4723 { 4724 /* 4725 * go through our old INP and pull off any control structures that 4726 * belong to stcb and move then to the new inp. 4727 */ 4728 struct socket *old_so, *new_so; 4729 struct sctp_queued_to_read *control, *nctl; 4730 struct sctp_readhead tmp_queue; 4731 struct mbuf *m; 4732 int error = 0; 4733 4734 old_so = old_inp->sctp_socket; 4735 new_so = new_inp->sctp_socket; 4736 TAILQ_INIT(&tmp_queue); 4737 error = sblock(&old_so->so_rcv, waitflags); 4738 if (error) { 4739 /* 4740 * Gak, can't get sblock, we have a problem. data will be 4741 * left stranded.. and we don't dare look at it since the 4742 * other thread may be reading something. Oh well, its a 4743 * screwed up app that does a peeloff OR a accept while 4744 * reading from the main socket... actually its only the 4745 * peeloff() case, since I think read will fail on a 4746 * listening socket.. 4747 */ 4748 return; 4749 } 4750 /* lock the socket buffers */ 4751 SCTP_INP_READ_LOCK(old_inp); 4752 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4753 /* Pull off all for out target stcb */ 4754 if (control->stcb == stcb) { 4755 /* remove it we want it */ 4756 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4757 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4758 m = control->data; 4759 while (m) { 4760 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4761 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4762 } 4763 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4764 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4765 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4766 } 4767 m = SCTP_BUF_NEXT(m); 4768 } 4769 } 4770 } 4771 SCTP_INP_READ_UNLOCK(old_inp); 4772 /* Remove the sb-lock on the old socket */ 4773 4774 sbunlock(&old_so->so_rcv); 4775 /* Now we move them over to the new socket buffer */ 4776 SCTP_INP_READ_LOCK(new_inp); 4777 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4778 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4779 m = control->data; 4780 while (m) { 4781 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4782 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4783 } 4784 sctp_sballoc(stcb, &new_so->so_rcv, m); 4785 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4786 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4787 } 4788 m = SCTP_BUF_NEXT(m); 4789 } 4790 } 4791 SCTP_INP_READ_UNLOCK(new_inp); 4792 } 4793 4794 void 4795 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4796 struct sctp_tcb *stcb, 4797 int so_locked 4798 SCTP_UNUSED 4799 ) 4800 { 4801 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4802 sctp_sorwakeup(inp, inp->sctp_socket); 4803 } 4804 } 4805 4806 void 4807 sctp_add_to_readq(struct sctp_inpcb *inp, 4808 struct sctp_tcb *stcb, 4809 struct sctp_queued_to_read *control, 4810 struct sockbuf *sb, 4811 int end, 4812 int inp_read_lock_held, 4813 int so_locked) 4814 { 4815 /* 4816 * Here we must place the control on the end of the socket read 4817 * queue AND increment sb_cc so that select will work properly on 4818 * read. 4819 */ 4820 struct mbuf *m, *prev = NULL; 4821 4822 if (inp == NULL) { 4823 /* Gak, TSNH!! */ 4824 #ifdef INVARIANTS 4825 panic("Gak, inp NULL on add_to_readq"); 4826 #endif 4827 return; 4828 } 4829 if (inp_read_lock_held == 0) 4830 SCTP_INP_READ_LOCK(inp); 4831 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4832 if (!control->on_strm_q) { 4833 sctp_free_remote_addr(control->whoFrom); 4834 if (control->data) { 4835 sctp_m_freem(control->data); 4836 control->data = NULL; 4837 } 4838 sctp_free_a_readq(stcb, control); 4839 } 4840 if (inp_read_lock_held == 0) 4841 SCTP_INP_READ_UNLOCK(inp); 4842 return; 4843 } 4844 if (!(control->spec_flags & M_NOTIFICATION)) { 4845 atomic_add_int(&inp->total_recvs, 1); 4846 if (!control->do_not_ref_stcb) { 4847 atomic_add_int(&stcb->total_recvs, 1); 4848 } 4849 } 4850 m = control->data; 4851 control->held_length = 0; 4852 control->length = 0; 4853 while (m) { 4854 if (SCTP_BUF_LEN(m) == 0) { 4855 /* Skip mbufs with NO length */ 4856 if (prev == NULL) { 4857 /* First one */ 4858 control->data = sctp_m_free(m); 4859 m = control->data; 4860 } else { 4861 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4862 m = SCTP_BUF_NEXT(prev); 4863 } 4864 if (m == NULL) { 4865 control->tail_mbuf = prev; 4866 } 4867 continue; 4868 } 4869 prev = m; 4870 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4871 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4872 } 4873 sctp_sballoc(stcb, sb, m); 4874 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4875 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 4876 } 4877 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4878 m = SCTP_BUF_NEXT(m); 4879 } 4880 if (prev != NULL) { 4881 control->tail_mbuf = prev; 4882 } else { 4883 /* Everything got collapsed out?? */ 4884 if (!control->on_strm_q) { 4885 sctp_free_remote_addr(control->whoFrom); 4886 sctp_free_a_readq(stcb, control); 4887 } 4888 if (inp_read_lock_held == 0) 4889 SCTP_INP_READ_UNLOCK(inp); 4890 return; 4891 } 4892 if (end) { 4893 control->end_added = 1; 4894 } 4895 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4896 control->on_read_q = 1; 4897 if (inp_read_lock_held == 0) 4898 SCTP_INP_READ_UNLOCK(inp); 4899 if (inp && inp->sctp_socket) { 4900 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4901 } 4902 } 4903 4904 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4905 *************ALTERNATE ROUTING CODE 4906 */ 4907 4908 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4909 *************ALTERNATE ROUTING CODE 4910 */ 4911 4912 struct mbuf * 4913 sctp_generate_cause(uint16_t code, char *info) 4914 { 4915 struct mbuf *m; 4916 struct sctp_gen_error_cause *cause; 4917 size_t info_len; 4918 uint16_t len; 4919 4920 if ((code == 0) || (info == NULL)) { 4921 return (NULL); 4922 } 4923 info_len = strlen(info); 4924 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4925 return (NULL); 4926 } 4927 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4928 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4929 if (m != NULL) { 4930 SCTP_BUF_LEN(m) = len; 4931 cause = mtod(m, struct sctp_gen_error_cause *); 4932 cause->code = htons(code); 4933 cause->length = htons(len); 4934 memcpy(cause->info, info, info_len); 4935 } 4936 return (m); 4937 } 4938 4939 struct mbuf * 4940 sctp_generate_no_user_data_cause(uint32_t tsn) 4941 { 4942 struct mbuf *m; 4943 struct sctp_error_no_user_data *no_user_data_cause; 4944 uint16_t len; 4945 4946 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 4947 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4948 if (m != NULL) { 4949 SCTP_BUF_LEN(m) = len; 4950 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 4951 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 4952 no_user_data_cause->cause.length = htons(len); 4953 no_user_data_cause->tsn = htonl(tsn); 4954 } 4955 return (m); 4956 } 4957 4958 #ifdef SCTP_MBCNT_LOGGING 4959 void 4960 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4961 struct sctp_tmit_chunk *tp1, int chk_cnt) 4962 { 4963 if (tp1->data == NULL) { 4964 return; 4965 } 4966 asoc->chunks_on_out_queue -= chk_cnt; 4967 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 4968 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4969 asoc->total_output_queue_size, 4970 tp1->book_size, 4971 0, 4972 tp1->mbcnt); 4973 } 4974 if (asoc->total_output_queue_size >= tp1->book_size) { 4975 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4976 } else { 4977 asoc->total_output_queue_size = 0; 4978 } 4979 4980 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4981 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4982 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 4983 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 4984 } else { 4985 stcb->sctp_socket->so_snd.sb_cc = 0; 4986 4987 } 4988 } 4989 } 4990 4991 #endif 4992 4993 int 4994 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 4995 uint8_t sent, int 
so_locked) 4996 { 4997 struct sctp_stream_out *strq; 4998 struct sctp_tmit_chunk *chk = NULL, *tp2; 4999 struct sctp_stream_queue_pending *sp; 5000 uint32_t mid; 5001 uint16_t sid; 5002 uint8_t foundeom = 0; 5003 int ret_sz = 0; 5004 int notdone; 5005 int do_wakeup_routine = 0; 5006 5007 sid = tp1->rec.data.sid; 5008 mid = tp1->rec.data.mid; 5009 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5010 stcb->asoc.abandoned_sent[0]++; 5011 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5012 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5013 #if defined(SCTP_DETAILED_STR_STATS) 5014 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5015 #endif 5016 } else { 5017 stcb->asoc.abandoned_unsent[0]++; 5018 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5019 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5020 #if defined(SCTP_DETAILED_STR_STATS) 5021 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5022 #endif 5023 } 5024 do { 5025 ret_sz += tp1->book_size; 5026 if (tp1->data != NULL) { 5027 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5028 sctp_flight_size_decrease(tp1); 5029 sctp_total_flight_decrease(stcb, tp1); 5030 } 5031 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5032 stcb->asoc.peers_rwnd += tp1->send_size; 5033 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5034 if (sent) { 5035 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5036 } else { 5037 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5038 } 5039 if (tp1->data) { 5040 sctp_m_freem(tp1->data); 5041 tp1->data = NULL; 5042 } 5043 do_wakeup_routine = 1; 5044 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5045 stcb->asoc.sent_queue_cnt_removeable--; 5046 } 5047 } 5048 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5049 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5050 SCTP_DATA_NOT_FRAG) { 5051 /* not frag'ed we ae done */ 5052 notdone = 0; 5053 foundeom = 1; 5054 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5055 /* end of frag, we are done */ 5056 notdone = 0; 5057 foundeom = 1; 5058 } else { 5059 /* 5060 * Its a begin or middle piece, we must mark all of 5061 * it 5062 */ 5063 notdone = 1; 5064 tp1 = TAILQ_NEXT(tp1, sctp_next); 5065 } 5066 } while (tp1 && notdone); 5067 if (foundeom == 0) { 5068 /* 5069 * The multi-part message was scattered across the send and 5070 * sent queue. 5071 */ 5072 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5073 if ((tp1->rec.data.sid != sid) || 5074 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5075 break; 5076 } 5077 /* 5078 * save to chk in case we have some on stream out 5079 * queue. If so and we have an un-transmitted one we 5080 * don't have to fudge the TSN. 5081 */ 5082 chk = tp1; 5083 ret_sz += tp1->book_size; 5084 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5085 if (sent) { 5086 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5087 } else { 5088 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5089 } 5090 if (tp1->data) { 5091 sctp_m_freem(tp1->data); 5092 tp1->data = NULL; 5093 } 5094 /* No flight involved here book the size to 0 */ 5095 tp1->book_size = 0; 5096 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5097 foundeom = 1; 5098 } 5099 do_wakeup_routine = 1; 5100 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5101 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5102 /* 5103 * on to the sent queue so we can wait for it to be 5104 * passed by. 
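* The chunk was already marked SCTP_FORWARD_TSN_SKIP, so parking it on the sent queue keeps its TSN accounted for until a FORWARD-TSN can advance past it.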
5105 */ 5106 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5107 sctp_next); 5108 stcb->asoc.send_queue_cnt--; 5109 stcb->asoc.sent_queue_cnt++; 5110 } 5111 } 5112 if (foundeom == 0) { 5113 /* 5114 * Still no eom found. That means there is stuff left on the 5115 * stream out queue.. yuck. 5116 */ 5117 SCTP_TCB_SEND_LOCK(stcb); 5118 strq = &stcb->asoc.strmout[sid]; 5119 sp = TAILQ_FIRST(&strq->outqueue); 5120 if (sp != NULL) { 5121 sp->discard_rest = 1; 5122 /* 5123 * We may need to put a chunk on the queue that 5124 * holds the TSN that would have been sent with the 5125 * LAST bit. 5126 */ 5127 if (chk == NULL) { 5128 /* Yep, we have to */ 5129 sctp_alloc_a_chunk(stcb, chk); 5130 if (chk == NULL) { 5131 /* 5132 * we are hosed. All we can do is 5133 * nothing.. which will cause an 5134 * abort if the peer is paying 5135 * attention. 5136 */ 5137 goto oh_well; 5138 } 5139 memset(chk, 0, sizeof(*chk)); 5140 chk->rec.data.rcv_flags = 0; 5141 chk->sent = SCTP_FORWARD_TSN_SKIP; 5142 chk->asoc = &stcb->asoc; 5143 if (stcb->asoc.idata_supported == 0) { 5144 if (sp->sinfo_flags & SCTP_UNORDERED) { 5145 chk->rec.data.mid = 0; 5146 } else { 5147 chk->rec.data.mid = strq->next_mid_ordered; 5148 } 5149 } else { 5150 if (sp->sinfo_flags & SCTP_UNORDERED) { 5151 chk->rec.data.mid = strq->next_mid_unordered; 5152 } else { 5153 chk->rec.data.mid = strq->next_mid_ordered; 5154 } 5155 } 5156 chk->rec.data.sid = sp->sid; 5157 chk->rec.data.ppid = sp->ppid; 5158 chk->rec.data.context = sp->context; 5159 chk->flags = sp->act_flags; 5160 chk->whoTo = NULL; 5161 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5162 strq->chunks_on_queues++; 5163 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5164 stcb->asoc.sent_queue_cnt++; 5165 stcb->asoc.pr_sctp_cnt++; 5166 } 5167 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5168 if (sp->sinfo_flags & SCTP_UNORDERED) { 5169 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5170 } 5171 if (stcb->asoc.idata_supported == 0) { 5172 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5173 strq->next_mid_ordered++; 5174 } 5175 } else { 5176 if (sp->sinfo_flags & SCTP_UNORDERED) { 5177 strq->next_mid_unordered++; 5178 } else { 5179 strq->next_mid_ordered++; 5180 } 5181 } 5182 oh_well: 5183 if (sp->data) { 5184 /* 5185 * Pull any data to free up the SB and allow 5186 * sender to "add more" while we will throw 5187 * away :-) 5188 */ 5189 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5190 ret_sz += sp->length; 5191 do_wakeup_routine = 1; 5192 sp->some_taken = 1; 5193 sctp_m_freem(sp->data); 5194 sp->data = NULL; 5195 sp->tail_mbuf = NULL; 5196 sp->length = 0; 5197 } 5198 } 5199 SCTP_TCB_SEND_UNLOCK(stcb); 5200 } 5201 if (do_wakeup_routine) { 5202 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5203 } 5204 return (ret_sz); 5205 } 5206 5207 /* 5208 * checks to see if the given address, sa, is one that is currently known by 5209 * the kernel note: can't distinguish the same address on multiple interfaces 5210 * and doesn't handle multiple addresses with different zone/scope id's note: 5211 * ifa_ifwithaddr() compares the entire sockaddr struct 5212 */ 5213 struct sctp_ifa * 5214 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5215 int holds_lock) 5216 { 5217 struct sctp_laddr *laddr; 5218 5219 if (holds_lock == 0) { 5220 SCTP_INP_RLOCK(inp); 5221 } 5222 5223 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5224 if (laddr->ifa == NULL) 5225 continue; 5226 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5227 continue; 5228 
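/* address family matches; compare the actual address per family */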
#ifdef INET 5229 if (addr->sa_family == AF_INET) { 5230 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5231 laddr->ifa->address.sin.sin_addr.s_addr) { 5232 /* found him. */ 5233 break; 5234 } 5235 } 5236 #endif 5237 #ifdef INET6 5238 if (addr->sa_family == AF_INET6) { 5239 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5240 &laddr->ifa->address.sin6)) { 5241 /* found him. */ 5242 break; 5243 } 5244 } 5245 #endif 5246 } 5247 if (holds_lock == 0) { 5248 SCTP_INP_RUNLOCK(inp); 5249 } 5250 if (laddr != NULL) { 5251 return (laddr->ifa); 5252 } else { 5253 return (NULL); 5254 } 5255 } 5256 5257 uint32_t 5258 sctp_get_ifa_hash_val(struct sockaddr *addr) 5259 { 5260 switch (addr->sa_family) { 5261 #ifdef INET 5262 case AF_INET: 5263 { 5264 struct sockaddr_in *sin; 5265 5266 sin = (struct sockaddr_in *)addr; 5267 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5268 } 5269 #endif 5270 #ifdef INET6 5271 case AF_INET6: 5272 { 5273 struct sockaddr_in6 *sin6; 5274 uint32_t hash_of_addr; 5275 5276 sin6 = (struct sockaddr_in6 *)addr; 5277 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5278 sin6->sin6_addr.s6_addr32[1] + 5279 sin6->sin6_addr.s6_addr32[2] + 5280 sin6->sin6_addr.s6_addr32[3]); 5281 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5282 return (hash_of_addr); 5283 } 5284 #endif 5285 default: 5286 break; 5287 } 5288 return (0); 5289 } 5290 5291 struct sctp_ifa * 5292 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5293 { 5294 struct sctp_ifa *sctp_ifap; 5295 struct sctp_vrf *vrf; 5296 struct sctp_ifalist *hash_head; 5297 uint32_t hash_of_addr; 5298 5299 if (holds_lock == 0) { 5300 SCTP_IPI_ADDR_RLOCK(); 5301 } else { 5302 SCTP_IPI_ADDR_LOCK_ASSERT(); 5303 } 5304 5305 vrf = sctp_find_vrf(vrf_id); 5306 if (vrf == NULL) { 5307 if (holds_lock == 0) 5308 SCTP_IPI_ADDR_RUNLOCK(); 5309 return (NULL); 5310 } 5311 5312 hash_of_addr = sctp_get_ifa_hash_val(addr); 5313 5314 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5315 if (hash_head == NULL) { 5316 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5317 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5318 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5319 sctp_print_address(addr); 5320 SCTP_PRINTF("No such bucket for address\n"); 5321 if (holds_lock == 0) 5322 SCTP_IPI_ADDR_RUNLOCK(); 5323 5324 return (NULL); 5325 } 5326 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5327 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5328 continue; 5329 #ifdef INET 5330 if (addr->sa_family == AF_INET) { 5331 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5332 sctp_ifap->address.sin.sin_addr.s_addr) { 5333 /* found him. */ 5334 break; 5335 } 5336 } 5337 #endif 5338 #ifdef INET6 5339 if (addr->sa_family == AF_INET6) { 5340 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5341 &sctp_ifap->address.sin6)) { 5342 /* found him. */ 5343 break; 5344 } 5345 } 5346 #endif 5347 } 5348 if (holds_lock == 0) 5349 SCTP_IPI_ADDR_RUNLOCK(); 5350 return (sctp_ifap); 5351 } 5352 5353 static void 5354 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5355 uint32_t rwnd_req) 5356 { 5357 /* User pulled some data, do we need a rwnd update? 
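A window-update SACK is only sent once the newly opened window (rwnd minus my_last_reported_rwnd) has grown by at least rwnd_req; smaller amounts just accumulate in freed_by_sorcv_sincelast for the next pass.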
*/ 5358 struct epoch_tracker et; 5359 int r_unlocked = 0; 5360 uint32_t dif, rwnd; 5361 struct socket *so = NULL; 5362 5363 if (stcb == NULL) 5364 return; 5365 5366 atomic_add_int(&stcb->asoc.refcnt, 1); 5367 5368 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5369 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5370 /* Pre-check If we are freeing no update */ 5371 goto no_lock; 5372 } 5373 SCTP_INP_INCR_REF(stcb->sctp_ep); 5374 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5375 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5376 goto out; 5377 } 5378 so = stcb->sctp_socket; 5379 if (so == NULL) { 5380 goto out; 5381 } 5382 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5383 /* Have you have freed enough to look */ 5384 *freed_so_far = 0; 5385 /* Yep, its worth a look and the lock overhead */ 5386 5387 /* Figure out what the rwnd would be */ 5388 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5389 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5390 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5391 } else { 5392 dif = 0; 5393 } 5394 if (dif >= rwnd_req) { 5395 if (hold_rlock) { 5396 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5397 r_unlocked = 1; 5398 } 5399 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5400 /* 5401 * One last check before we allow the guy possibly 5402 * to get in. There is a race, where the guy has not 5403 * reached the gate. In that case 5404 */ 5405 goto out; 5406 } 5407 SCTP_TCB_LOCK(stcb); 5408 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5409 /* No reports here */ 5410 SCTP_TCB_UNLOCK(stcb); 5411 goto out; 5412 } 5413 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5414 NET_EPOCH_ENTER(et); 5415 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5416 5417 sctp_chunk_output(stcb->sctp_ep, stcb, 5418 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5419 /* make sure no timer is running */ 5420 NET_EPOCH_EXIT(et); 5421 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5422 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5423 SCTP_TCB_UNLOCK(stcb); 5424 } else { 5425 /* Update how much we have pending */ 5426 stcb->freed_by_sorcv_sincelast = dif; 5427 } 5428 out: 5429 if (so && r_unlocked && hold_rlock) { 5430 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5431 } 5432 5433 SCTP_INP_DECR_REF(stcb->sctp_ep); 5434 no_lock: 5435 atomic_add_int(&stcb->asoc.refcnt, -1); 5436 return; 5437 } 5438 5439 int 5440 sctp_sorecvmsg(struct socket *so, 5441 struct uio *uio, 5442 struct mbuf **mp, 5443 struct sockaddr *from, 5444 int fromlen, 5445 int *msg_flags, 5446 struct sctp_sndrcvinfo *sinfo, 5447 int filling_sinfo) 5448 { 5449 /* 5450 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5451 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5452 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
5453 * On the way out we may send out any combination of: 5454 * MSG_NOTIFICATION MSG_EOR 5455 * 5456 */ 5457 struct sctp_inpcb *inp = NULL; 5458 ssize_t my_len = 0; 5459 ssize_t cp_len = 0; 5460 int error = 0; 5461 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5462 struct mbuf *m = NULL; 5463 struct sctp_tcb *stcb = NULL; 5464 int wakeup_read_socket = 0; 5465 int freecnt_applied = 0; 5466 int out_flags = 0, in_flags = 0; 5467 int block_allowed = 1; 5468 uint32_t freed_so_far = 0; 5469 ssize_t copied_so_far = 0; 5470 int in_eeor_mode = 0; 5471 int no_rcv_needed = 0; 5472 uint32_t rwnd_req = 0; 5473 int hold_sblock = 0; 5474 int hold_rlock = 0; 5475 ssize_t slen = 0; 5476 uint32_t held_length = 0; 5477 int sockbuf_lock = 0; 5478 5479 if (uio == NULL) { 5480 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5481 return (EINVAL); 5482 } 5483 5484 if (msg_flags) { 5485 in_flags = *msg_flags; 5486 if (in_flags & MSG_PEEK) 5487 SCTP_STAT_INCR(sctps_read_peeks); 5488 } else { 5489 in_flags = 0; 5490 } 5491 slen = uio->uio_resid; 5492 5493 /* Pull in and set up our int flags */ 5494 if (in_flags & MSG_OOB) { 5495 /* Out of band's NOT supported */ 5496 return (EOPNOTSUPP); 5497 } 5498 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5499 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5500 return (EINVAL); 5501 } 5502 if ((in_flags & (MSG_DONTWAIT 5503 | MSG_NBIO 5504 )) || 5505 SCTP_SO_IS_NBIO(so)) { 5506 block_allowed = 0; 5507 } 5508 /* setup the endpoint */ 5509 inp = (struct sctp_inpcb *)so->so_pcb; 5510 if (inp == NULL) { 5511 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5512 return (EFAULT); 5513 } 5514 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5515 /* Must be at least a MTU's worth */ 5516 if (rwnd_req < SCTP_MIN_RWND) 5517 rwnd_req = SCTP_MIN_RWND; 5518 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5519 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5520 sctp_misc_ints(SCTP_SORECV_ENTER, 5521 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5522 } 5523 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5524 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5525 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5526 } 5527 5528 5529 error = sblock(&so->so_rcv, (block_allowed ? 
SBL_WAIT : 0)); 5530 if (error) { 5531 goto release_unlocked; 5532 } 5533 sockbuf_lock = 1; 5534 restart: 5535 5536 restart_nosblocks: 5537 if (hold_sblock == 0) { 5538 SOCKBUF_LOCK(&so->so_rcv); 5539 hold_sblock = 1; 5540 } 5541 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5542 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5543 goto out; 5544 } 5545 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5546 if (so->so_error) { 5547 error = so->so_error; 5548 if ((in_flags & MSG_PEEK) == 0) 5549 so->so_error = 0; 5550 goto out; 5551 } else { 5552 if (so->so_rcv.sb_cc == 0) { 5553 /* indicate EOF */ 5554 error = 0; 5555 goto out; 5556 } 5557 } 5558 } 5559 if (so->so_rcv.sb_cc <= held_length) { 5560 if (so->so_error) { 5561 error = so->so_error; 5562 if ((in_flags & MSG_PEEK) == 0) { 5563 so->so_error = 0; 5564 } 5565 goto out; 5566 } 5567 if ((so->so_rcv.sb_cc == 0) && 5568 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5569 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5570 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5571 /* 5572 * For active open side clear flags for 5573 * re-use passive open is blocked by 5574 * connect. 5575 */ 5576 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5577 /* 5578 * You were aborted, passive side 5579 * always hits here 5580 */ 5581 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5582 error = ECONNRESET; 5583 } 5584 so->so_state &= ~(SS_ISCONNECTING | 5585 SS_ISDISCONNECTING | 5586 SS_ISCONFIRMING | 5587 SS_ISCONNECTED); 5588 if (error == 0) { 5589 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5590 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5591 error = ENOTCONN; 5592 } 5593 } 5594 goto out; 5595 } 5596 } 5597 if (block_allowed) { 5598 error = sbwait(&so->so_rcv); 5599 if (error) { 5600 goto out; 5601 } 5602 held_length = 0; 5603 goto restart_nosblocks; 5604 } else { 5605 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5606 error = EWOULDBLOCK; 5607 goto out; 5608 } 5609 } 5610 if (hold_sblock == 1) { 5611 SOCKBUF_UNLOCK(&so->so_rcv); 5612 hold_sblock = 0; 5613 } 5614 /* we possibly have data we can read */ 5615 /* sa_ignore FREED_MEMORY */ 5616 control = TAILQ_FIRST(&inp->read_queue); 5617 if (control == NULL) { 5618 /* 5619 * This could be happening since the appender did the 5620 * increment but as not yet did the tailq insert onto the 5621 * read_queue 5622 */ 5623 if (hold_rlock == 0) { 5624 SCTP_INP_READ_LOCK(inp); 5625 } 5626 control = TAILQ_FIRST(&inp->read_queue); 5627 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5628 #ifdef INVARIANTS 5629 panic("Huh, its non zero and nothing on control?"); 5630 #endif 5631 so->so_rcv.sb_cc = 0; 5632 } 5633 SCTP_INP_READ_UNLOCK(inp); 5634 hold_rlock = 0; 5635 goto restart; 5636 } 5637 5638 if ((control->length == 0) && 5639 (control->do_not_ref_stcb)) { 5640 /* 5641 * Clean up code for freeing assoc that left behind a 5642 * pdapi.. maybe a peer in EEOR that just closed after 5643 * sending and never indicated a EOR. 5644 */ 5645 if (hold_rlock == 0) { 5646 hold_rlock = 1; 5647 SCTP_INP_READ_LOCK(inp); 5648 } 5649 control->held_length = 0; 5650 if (control->data) { 5651 /* Hmm there is data here .. 
fix */
            struct mbuf *m_tmp;
            int cnt = 0;

            m_tmp = control->data;
            while (m_tmp) {
                cnt += SCTP_BUF_LEN(m_tmp);
                if (SCTP_BUF_NEXT(m_tmp) == NULL) {
                    control->tail_mbuf = m_tmp;
                    control->end_added = 1;
                }
                m_tmp = SCTP_BUF_NEXT(m_tmp);
            }
            control->length = cnt;
        } else {
            /* remove it */
            TAILQ_REMOVE(&inp->read_queue, control, next);
            /* Add back any hidden data */
            sctp_free_remote_addr(control->whoFrom);
            sctp_free_a_readq(stcb, control);
        }
        if (hold_rlock) {
            hold_rlock = 0;
            SCTP_INP_READ_UNLOCK(inp);
        }
        goto restart;
    }
    if ((control->length == 0) &&
        (control->end_added == 1)) {
        /*
         * Do we also need to check for (control->pdapi_aborted ==
         * 1)?
         */
        if (hold_rlock == 0) {
            hold_rlock = 1;
            SCTP_INP_READ_LOCK(inp);
        }
        TAILQ_REMOVE(&inp->read_queue, control, next);
        if (control->data) {
#ifdef INVARIANTS
            panic("control->data not null but control->length == 0");
#else
            SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
            sctp_m_freem(control->data);
            control->data = NULL;
#endif
        }
        if (control->aux_data) {
            sctp_m_free(control->aux_data);
            control->aux_data = NULL;
        }
#ifdef INVARIANTS
        if (control->on_strm_q) {
            panic("About to free ctl:%p so:%p and its in %d",
                control, so, control->on_strm_q);
        }
#endif
        sctp_free_remote_addr(control->whoFrom);
        sctp_free_a_readq(stcb, control);
        if (hold_rlock) {
            hold_rlock = 0;
            SCTP_INP_READ_UNLOCK(inp);
        }
        goto restart;
    }
    if (control->length == 0) {
        if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
            (filling_sinfo)) {
            /* find a more suitable one than this */
            ctl = TAILQ_NEXT(control, next);
            while (ctl) {
                if ((ctl->stcb != control->stcb) && (ctl->length) &&
                    (ctl->some_taken ||
                    (ctl->spec_flags & M_NOTIFICATION) ||
                    ((ctl->do_not_ref_stcb == 0) &&
                    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
                    ) {
                    /*-
                     * If we have a different TCB next, and there is data
                     * present. If we have already taken some (pdapi), OR we can
                     * ref the tcb and no delivery has started on this stream, we
                     * take it. Note we allow a notification on a different
                     * assoc to be delivered.
                     */
                    control = ctl;
                    goto found_one;
                } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
                    (ctl->length) &&
                    ((ctl->some_taken) ||
                    ((ctl->do_not_ref_stcb == 0) &&
                    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
                    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
                    /*-
                     * If we have the same tcb, and there is data present, and we
                     * have the strm interleave feature present. Then if we have
                     * taken some (pdapi) or we can refer to that tcb AND we have
                     * not started a delivery for this stream, we can take it.
                     * Note we do NOT allow a notification on the same assoc to
                     * be delivered.
                     */
                    control = ctl;
                    goto found_one;
                }
                ctl = TAILQ_NEXT(ctl, next);
            }
        }
        /*
         * If we reach here, no suitable replacement is available
         * <or> fragment interleave is NOT on. So stuff the sb_cc
         * into our held count, and it's time to sleep again.
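 * held_length records what was in the socket buffer when we gave up,
 * so the next wakeup can tell whether anything new has arrived.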
5761 */ 5762 held_length = so->so_rcv.sb_cc; 5763 control->held_length = so->so_rcv.sb_cc; 5764 goto restart; 5765 } 5766 /* Clear the held length since there is something to read */ 5767 control->held_length = 0; 5768 found_one: 5769 /* 5770 * If we reach here, control has a some data for us to read off. 5771 * Note that stcb COULD be NULL. 5772 */ 5773 if (hold_rlock == 0) { 5774 hold_rlock = 1; 5775 SCTP_INP_READ_LOCK(inp); 5776 } 5777 control->some_taken++; 5778 stcb = control->stcb; 5779 if (stcb) { 5780 if ((control->do_not_ref_stcb == 0) && 5781 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5782 if (freecnt_applied == 0) 5783 stcb = NULL; 5784 } else if (control->do_not_ref_stcb == 0) { 5785 /* you can't free it on me please */ 5786 /* 5787 * The lock on the socket buffer protects us so the 5788 * free code will stop. But since we used the 5789 * socketbuf lock and the sender uses the tcb_lock 5790 * to increment, we need to use the atomic add to 5791 * the refcnt 5792 */ 5793 if (freecnt_applied) { 5794 #ifdef INVARIANTS 5795 panic("refcnt already incremented"); 5796 #else 5797 SCTP_PRINTF("refcnt already incremented?\n"); 5798 #endif 5799 } else { 5800 atomic_add_int(&stcb->asoc.refcnt, 1); 5801 freecnt_applied = 1; 5802 } 5803 /* 5804 * Setup to remember how much we have not yet told 5805 * the peer our rwnd has opened up. Note we grab the 5806 * value from the tcb from last time. Note too that 5807 * sack sending clears this when a sack is sent, 5808 * which is fine. Once we hit the rwnd_req, we then 5809 * will go to the sctp_user_rcvd() that will not 5810 * lock until it KNOWs it MUST send a WUP-SACK. 5811 */ 5812 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5813 stcb->freed_by_sorcv_sincelast = 0; 5814 } 5815 } 5816 if (stcb && 5817 ((control->spec_flags & M_NOTIFICATION) == 0) && 5818 control->do_not_ref_stcb == 0) { 5819 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5820 } 5821 5822 /* First lets get off the sinfo and sockaddr info */ 5823 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5824 sinfo->sinfo_stream = control->sinfo_stream; 5825 sinfo->sinfo_ssn = (uint16_t)control->mid; 5826 sinfo->sinfo_flags = control->sinfo_flags; 5827 sinfo->sinfo_ppid = control->sinfo_ppid; 5828 sinfo->sinfo_context = control->sinfo_context; 5829 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5830 sinfo->sinfo_tsn = control->sinfo_tsn; 5831 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5832 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5833 nxt = TAILQ_NEXT(control, next); 5834 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5835 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5836 struct sctp_extrcvinfo *s_extra; 5837 5838 s_extra = (struct sctp_extrcvinfo *)sinfo; 5839 if ((nxt) && 5840 (nxt->length)) { 5841 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5842 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5843 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5844 } 5845 if (nxt->spec_flags & M_NOTIFICATION) { 5846 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5847 } 5848 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5849 s_extra->serinfo_next_length = nxt->length; 5850 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5851 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5852 if (nxt->tail_mbuf != NULL) { 5853 if (nxt->end_added) { 5854 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5855 } 5856 } 5857 } else { 5858 /* 5859 * we explicitly 0 this, since the memcpy 5860 * got some other things 
beyond the older 5861 * sinfo_ that is on the control's structure 5862 * :-D 5863 */ 5864 nxt = NULL; 5865 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5866 s_extra->serinfo_next_aid = 0; 5867 s_extra->serinfo_next_length = 0; 5868 s_extra->serinfo_next_ppid = 0; 5869 s_extra->serinfo_next_stream = 0; 5870 } 5871 } 5872 /* 5873 * update off the real current cum-ack, if we have an stcb. 5874 */ 5875 if ((control->do_not_ref_stcb == 0) && stcb) 5876 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5877 /* 5878 * mask off the high bits, we keep the actual chunk bits in 5879 * there. 5880 */ 5881 sinfo->sinfo_flags &= 0x00ff; 5882 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5883 sinfo->sinfo_flags |= SCTP_UNORDERED; 5884 } 5885 } 5886 #ifdef SCTP_ASOCLOG_OF_TSNS 5887 { 5888 int index, newindex; 5889 struct sctp_pcbtsn_rlog *entry; 5890 5891 do { 5892 index = inp->readlog_index; 5893 newindex = index + 1; 5894 if (newindex >= SCTP_READ_LOG_SIZE) { 5895 newindex = 0; 5896 } 5897 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5898 entry = &inp->readlog[index]; 5899 entry->vtag = control->sinfo_assoc_id; 5900 entry->strm = control->sinfo_stream; 5901 entry->seq = (uint16_t)control->mid; 5902 entry->sz = control->length; 5903 entry->flgs = control->sinfo_flags; 5904 } 5905 #endif 5906 if ((fromlen > 0) && (from != NULL)) { 5907 union sctp_sockstore store; 5908 size_t len; 5909 5910 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5911 #ifdef INET6 5912 case AF_INET6: 5913 len = sizeof(struct sockaddr_in6); 5914 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5915 store.sin6.sin6_port = control->port_from; 5916 break; 5917 #endif 5918 #ifdef INET 5919 case AF_INET: 5920 #ifdef INET6 5921 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5922 len = sizeof(struct sockaddr_in6); 5923 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5924 &store.sin6); 5925 store.sin6.sin6_port = control->port_from; 5926 } else { 5927 len = sizeof(struct sockaddr_in); 5928 store.sin = control->whoFrom->ro._l_addr.sin; 5929 store.sin.sin_port = control->port_from; 5930 } 5931 #else 5932 len = sizeof(struct sockaddr_in); 5933 store.sin = control->whoFrom->ro._l_addr.sin; 5934 store.sin.sin_port = control->port_from; 5935 #endif 5936 break; 5937 #endif 5938 default: 5939 len = 0; 5940 break; 5941 } 5942 memcpy(from, &store, min((size_t)fromlen, len)); 5943 #ifdef INET6 5944 { 5945 struct sockaddr_in6 lsa6, *from6; 5946 5947 from6 = (struct sockaddr_in6 *)from; 5948 sctp_recover_scope_mac(from6, (&lsa6)); 5949 } 5950 #endif 5951 } 5952 if (hold_rlock) { 5953 SCTP_INP_READ_UNLOCK(inp); 5954 hold_rlock = 0; 5955 } 5956 if (hold_sblock) { 5957 SOCKBUF_UNLOCK(&so->so_rcv); 5958 hold_sblock = 0; 5959 } 5960 /* now copy out what data we can */ 5961 if (mp == NULL) { 5962 /* copy out each mbuf in the chain up to length */ 5963 get_more_data: 5964 m = control->data; 5965 while (m) { 5966 /* Move out all we can */ 5967 cp_len = uio->uio_resid; 5968 my_len = SCTP_BUF_LEN(m); 5969 if (cp_len > my_len) { 5970 /* not enough in this buf */ 5971 cp_len = my_len; 5972 } 5973 if (hold_rlock) { 5974 SCTP_INP_READ_UNLOCK(inp); 5975 hold_rlock = 0; 5976 } 5977 if (cp_len > 0) 5978 error = uiomove(mtod(m, char *), (int)cp_len, uio); 5979 /* re-read */ 5980 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5981 goto release; 5982 } 5983 5984 if ((control->do_not_ref_stcb == 0) && stcb && 5985 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5986 no_rcv_needed = 1; 5987 } 5988 if 
(error) { 5989 /* error we are out of here */ 5990 goto release; 5991 } 5992 SCTP_INP_READ_LOCK(inp); 5993 hold_rlock = 1; 5994 if (cp_len == SCTP_BUF_LEN(m)) { 5995 if ((SCTP_BUF_NEXT(m) == NULL) && 5996 (control->end_added)) { 5997 out_flags |= MSG_EOR; 5998 if ((control->do_not_ref_stcb == 0) && 5999 (control->stcb != NULL) && 6000 ((control->spec_flags & M_NOTIFICATION) == 0)) 6001 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6002 } 6003 if (control->spec_flags & M_NOTIFICATION) { 6004 out_flags |= MSG_NOTIFICATION; 6005 } 6006 /* we ate up the mbuf */ 6007 if (in_flags & MSG_PEEK) { 6008 /* just looking */ 6009 m = SCTP_BUF_NEXT(m); 6010 copied_so_far += cp_len; 6011 } else { 6012 /* dispose of the mbuf */ 6013 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6014 sctp_sblog(&so->so_rcv, 6015 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6016 } 6017 sctp_sbfree(control, stcb, &so->so_rcv, m); 6018 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6019 sctp_sblog(&so->so_rcv, 6020 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6021 } 6022 copied_so_far += cp_len; 6023 freed_so_far += (uint32_t)cp_len; 6024 freed_so_far += MSIZE; 6025 atomic_subtract_int(&control->length, cp_len); 6026 control->data = sctp_m_free(m); 6027 m = control->data; 6028 /* 6029 * been through it all, must hold sb 6030 * lock ok to null tail 6031 */ 6032 if (control->data == NULL) { 6033 #ifdef INVARIANTS 6034 if ((control->end_added == 0) || 6035 (TAILQ_NEXT(control, next) == NULL)) { 6036 /* 6037 * If the end is not 6038 * added, OR the 6039 * next is NOT null 6040 * we MUST have the 6041 * lock. 6042 */ 6043 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6044 panic("Hmm we don't own the lock?"); 6045 } 6046 } 6047 #endif 6048 control->tail_mbuf = NULL; 6049 #ifdef INVARIANTS 6050 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6051 panic("end_added, nothing left and no MSG_EOR"); 6052 } 6053 #endif 6054 } 6055 } 6056 } else { 6057 /* Do we need to trim the mbuf? */ 6058 if (control->spec_flags & M_NOTIFICATION) { 6059 out_flags |= MSG_NOTIFICATION; 6060 } 6061 if ((in_flags & MSG_PEEK) == 0) { 6062 SCTP_BUF_RESV_UF(m, cp_len); 6063 SCTP_BUF_LEN(m) -= (int)cp_len; 6064 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6065 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6066 } 6067 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 6068 if ((control->do_not_ref_stcb == 0) && 6069 stcb) { 6070 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 6071 } 6072 copied_so_far += cp_len; 6073 freed_so_far += (uint32_t)cp_len; 6074 freed_so_far += MSIZE; 6075 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6076 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6077 SCTP_LOG_SBRESULT, 0); 6078 } 6079 atomic_subtract_int(&control->length, cp_len); 6080 } else { 6081 copied_so_far += cp_len; 6082 } 6083 } 6084 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6085 break; 6086 } 6087 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6088 (control->do_not_ref_stcb == 0) && 6089 (freed_so_far >= rwnd_req)) { 6090 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6091 } 6092 } /* end while(m) */ 6093 /* 6094 * At this point we have looked at it all and we either have 6095 * a MSG_EOR/or read all the user wants... <OR> 6096 * control->length == 0. 
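 * If MSG_EOR is set (and this is not a peek) the control is released
 * below; otherwise we either return what we have or keep waiting for
 * the rest of a partially delivered (pd-api) message.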
 */
        if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
            /* we are done with this control */
            if (control->length == 0) {
                if (control->data) {
#ifdef INVARIANTS
                    panic("control->data not null at read eor?");
#else
                    SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
                    sctp_m_freem(control->data);
                    control->data = NULL;
#endif
                }
        done_with_control:
                if (hold_rlock == 0) {
                    SCTP_INP_READ_LOCK(inp);
                    hold_rlock = 1;
                }
                TAILQ_REMOVE(&inp->read_queue, control, next);
                /* Add back any hidden data */
                if (control->held_length) {
                    held_length = 0;
                    control->held_length = 0;
                    wakeup_read_socket = 1;
                }
                if (control->aux_data) {
                    sctp_m_free(control->aux_data);
                    control->aux_data = NULL;
                }
                no_rcv_needed = control->do_not_ref_stcb;
                sctp_free_remote_addr(control->whoFrom);
                control->data = NULL;
#ifdef INVARIANTS
                if (control->on_strm_q) {
                    panic("About to free ctl:%p so:%p and its in %d",
                        control, so, control->on_strm_q);
                }
#endif
                sctp_free_a_readq(stcb, control);
                control = NULL;
                if ((freed_so_far >= rwnd_req) &&
                    (no_rcv_needed == 0))
                    sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);

            } else {
                /*
                 * The user did not read all of this
                 * message, turn off the returned MSG_EOR
                 * since we are leaving more behind on the
                 * control to read.
                 */
#ifdef INVARIANTS
                if (control->end_added &&
                    (control->data == NULL) &&
                    (control->tail_mbuf == NULL)) {
                    panic("Gak, control->length is corrupt?");
                }
#endif
                no_rcv_needed = control->do_not_ref_stcb;
                out_flags &= ~MSG_EOR;
            }
        }
        if (out_flags & MSG_EOR) {
            goto release;
        }
        if ((uio->uio_resid == 0) ||
            ((in_eeor_mode) &&
            (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
            goto release;
        }
        /*
         * If I hit here the receiver wants more and this message is
         * NOT done (pd-api). So two questions: can we block? If not,
         * we are done. Did the user NOT set MSG_WAITALL?
         */
        if (block_allowed == 0) {
            goto release;
        }
        /*
         * We need to wait for more data; a few things: - We don't
         * sbunlock() so we don't get someone else reading. - We
         * must be sure to account for the case where what is added
         * is NOT to our control when we wakeup.
         */

        /*
         * Do we need to tell the transport a rwnd update might be
         * needed before we go to sleep?
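 * Only worth doing if we have freed at least rwnd_req bytes and we
 * can still safely reference the stcb.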
6185 */ 6186 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6187 ((freed_so_far >= rwnd_req) && 6188 (control->do_not_ref_stcb == 0) && 6189 (no_rcv_needed == 0))) { 6190 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6191 } 6192 wait_some_more: 6193 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6194 goto release; 6195 } 6196 6197 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6198 goto release; 6199 6200 if (hold_rlock == 1) { 6201 SCTP_INP_READ_UNLOCK(inp); 6202 hold_rlock = 0; 6203 } 6204 if (hold_sblock == 0) { 6205 SOCKBUF_LOCK(&so->so_rcv); 6206 hold_sblock = 1; 6207 } 6208 if ((copied_so_far) && (control->length == 0) && 6209 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6210 goto release; 6211 } 6212 if (so->so_rcv.sb_cc <= control->held_length) { 6213 error = sbwait(&so->so_rcv); 6214 if (error) { 6215 goto release; 6216 } 6217 control->held_length = 0; 6218 } 6219 if (hold_sblock) { 6220 SOCKBUF_UNLOCK(&so->so_rcv); 6221 hold_sblock = 0; 6222 } 6223 if (control->length == 0) { 6224 /* still nothing here */ 6225 if (control->end_added == 1) { 6226 /* he aborted, or is done i.e.did a shutdown */ 6227 out_flags |= MSG_EOR; 6228 if (control->pdapi_aborted) { 6229 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6230 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6231 6232 out_flags |= MSG_TRUNC; 6233 } else { 6234 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6235 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6236 } 6237 goto done_with_control; 6238 } 6239 if (so->so_rcv.sb_cc > held_length) { 6240 control->held_length = so->so_rcv.sb_cc; 6241 held_length = 0; 6242 } 6243 goto wait_some_more; 6244 } else if (control->data == NULL) { 6245 /* 6246 * we must re-sync since data is probably being 6247 * added 6248 */ 6249 SCTP_INP_READ_LOCK(inp); 6250 if ((control->length > 0) && (control->data == NULL)) { 6251 /* 6252 * big trouble.. we have the lock and its 6253 * corrupt? 6254 */ 6255 #ifdef INVARIANTS 6256 panic("Impossible data==NULL length !=0"); 6257 #endif 6258 out_flags |= MSG_EOR; 6259 out_flags |= MSG_TRUNC; 6260 control->length = 0; 6261 SCTP_INP_READ_UNLOCK(inp); 6262 goto done_with_control; 6263 } 6264 SCTP_INP_READ_UNLOCK(inp); 6265 /* We will fall around to get more data */ 6266 } 6267 goto get_more_data; 6268 } else { 6269 /*- 6270 * Give caller back the mbuf chain, 6271 * store in uio_resid the length 6272 */ 6273 wakeup_read_socket = 0; 6274 if ((control->end_added == 0) || 6275 (TAILQ_NEXT(control, next) == NULL)) { 6276 /* Need to get rlock */ 6277 if (hold_rlock == 0) { 6278 SCTP_INP_READ_LOCK(inp); 6279 hold_rlock = 1; 6280 } 6281 } 6282 if (control->end_added) { 6283 out_flags |= MSG_EOR; 6284 if ((control->do_not_ref_stcb == 0) && 6285 (control->stcb != NULL) && 6286 ((control->spec_flags & M_NOTIFICATION) == 0)) 6287 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6288 } 6289 if (control->spec_flags & M_NOTIFICATION) { 6290 out_flags |= MSG_NOTIFICATION; 6291 } 6292 uio->uio_resid = control->length; 6293 *mp = control->data; 6294 m = control->data; 6295 while (m) { 6296 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6297 sctp_sblog(&so->so_rcv, 6298 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6299 } 6300 sctp_sbfree(control, stcb, &so->so_rcv, m); 6301 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6302 freed_so_far += MSIZE; 6303 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6304 sctp_sblog(&so->so_rcv, 6305 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6306 } 6307 m = SCTP_BUF_NEXT(m); 6308 } 6309 control->data = control->tail_mbuf = NULL; 6310 control->length = 0; 6311 if (out_flags & MSG_EOR) { 6312 /* Done with this control */ 6313 goto done_with_control; 6314 } 6315 } 6316 release: 6317 if (hold_rlock == 1) { 6318 SCTP_INP_READ_UNLOCK(inp); 6319 hold_rlock = 0; 6320 } 6321 if (hold_sblock == 1) { 6322 SOCKBUF_UNLOCK(&so->so_rcv); 6323 hold_sblock = 0; 6324 } 6325 6326 sbunlock(&so->so_rcv); 6327 sockbuf_lock = 0; 6328 6329 release_unlocked: 6330 if (hold_sblock) { 6331 SOCKBUF_UNLOCK(&so->so_rcv); 6332 hold_sblock = 0; 6333 } 6334 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6335 if ((freed_so_far >= rwnd_req) && 6336 (control && (control->do_not_ref_stcb == 0)) && 6337 (no_rcv_needed == 0)) 6338 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6339 } 6340 out: 6341 if (msg_flags) { 6342 *msg_flags = out_flags; 6343 } 6344 if (((out_flags & MSG_EOR) == 0) && 6345 ((in_flags & MSG_PEEK) == 0) && 6346 (sinfo) && 6347 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6348 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6349 struct sctp_extrcvinfo *s_extra; 6350 6351 s_extra = (struct sctp_extrcvinfo *)sinfo; 6352 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6353 } 6354 if (hold_rlock == 1) { 6355 SCTP_INP_READ_UNLOCK(inp); 6356 } 6357 if (hold_sblock) { 6358 SOCKBUF_UNLOCK(&so->so_rcv); 6359 } 6360 if (sockbuf_lock) { 6361 sbunlock(&so->so_rcv); 6362 } 6363 6364 if (freecnt_applied) { 6365 /* 6366 * The lock on the socket buffer protects us so the free 6367 * code will stop. But since we used the socketbuf lock and 6368 * the sender uses the tcb_lock to increment, we need to use 6369 * the atomic add to the refcnt. 6370 */ 6371 if (stcb == NULL) { 6372 #ifdef INVARIANTS 6373 panic("stcb for refcnt has gone NULL?"); 6374 goto stage_left; 6375 #else 6376 goto stage_left; 6377 #endif 6378 } 6379 /* Save the value back for next time */ 6380 stcb->freed_by_sorcv_sincelast = freed_so_far; 6381 atomic_add_int(&stcb->asoc.refcnt, -1); 6382 } 6383 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6384 if (stcb) { 6385 sctp_misc_ints(SCTP_SORECV_DONE, 6386 freed_so_far, 6387 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6388 stcb->asoc.my_rwnd, 6389 so->so_rcv.sb_cc); 6390 } else { 6391 sctp_misc_ints(SCTP_SORECV_DONE, 6392 freed_so_far, 6393 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6394 0, 6395 so->so_rcv.sb_cc); 6396 } 6397 } 6398 stage_left: 6399 if (wakeup_read_socket) { 6400 sctp_sorwakeup(inp, so); 6401 } 6402 return (error); 6403 } 6404 6405 6406 #ifdef SCTP_MBUF_LOGGING 6407 struct mbuf * 6408 sctp_m_free(struct mbuf *m) 6409 { 6410 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6411 sctp_log_mb(m, SCTP_MBUF_IFREE); 6412 } 6413 return (m_free(m)); 6414 } 6415 6416 void 6417 sctp_m_freem(struct mbuf *mb) 6418 { 6419 while (mb != NULL) 6420 mb = sctp_m_free(mb); 6421 } 6422 6423 #endif 6424 6425 int 6426 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6427 { 6428 /* 6429 * Given a local address. For all associations that holds the 6430 * address, request a peer-set-primary. 
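 * This is done asynchronously: queue a work item carrying the ifa and
 * SCTP_SET_PRIM_ADDR, then start the address work-queue timer so the
 * iterator issues the actual request.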
6431 */ 6432 struct sctp_ifa *ifa; 6433 struct sctp_laddr *wi; 6434 6435 ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); 6436 if (ifa == NULL) { 6437 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6438 return (EADDRNOTAVAIL); 6439 } 6440 /* 6441 * Now that we have the ifa we must awaken the iterator with this 6442 * message. 6443 */ 6444 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6445 if (wi == NULL) { 6446 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6447 return (ENOMEM); 6448 } 6449 /* Now incr the count and int wi structure */ 6450 SCTP_INCR_LADDR_COUNT(); 6451 memset(wi, 0, sizeof(*wi)); 6452 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6453 wi->ifa = ifa; 6454 wi->action = SCTP_SET_PRIM_ADDR; 6455 atomic_add_int(&ifa->refcount, 1); 6456 6457 /* Now add it to the work queue */ 6458 SCTP_WQ_ADDR_LOCK(); 6459 /* 6460 * Should this really be a tailq? As it is we will process the 6461 * newest first :-0 6462 */ 6463 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6464 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6465 (struct sctp_inpcb *)NULL, 6466 (struct sctp_tcb *)NULL, 6467 (struct sctp_nets *)NULL); 6468 SCTP_WQ_ADDR_UNLOCK(); 6469 return (0); 6470 } 6471 6472 6473 int 6474 sctp_soreceive(struct socket *so, 6475 struct sockaddr **psa, 6476 struct uio *uio, 6477 struct mbuf **mp0, 6478 struct mbuf **controlp, 6479 int *flagsp) 6480 { 6481 int error, fromlen; 6482 uint8_t sockbuf[256]; 6483 struct sockaddr *from; 6484 struct sctp_extrcvinfo sinfo; 6485 int filling_sinfo = 1; 6486 int flags; 6487 struct sctp_inpcb *inp; 6488 6489 inp = (struct sctp_inpcb *)so->so_pcb; 6490 /* pickup the assoc we are reading from */ 6491 if (inp == NULL) { 6492 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6493 return (EINVAL); 6494 } 6495 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6496 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6497 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6498 (controlp == NULL)) { 6499 /* user does not want the sndrcv ctl */ 6500 filling_sinfo = 0; 6501 } 6502 if (psa) { 6503 from = (struct sockaddr *)sockbuf; 6504 fromlen = sizeof(sockbuf); 6505 from->sa_len = 0; 6506 } else { 6507 from = NULL; 6508 fromlen = 0; 6509 } 6510 6511 if (filling_sinfo) { 6512 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6513 } 6514 if (flagsp != NULL) { 6515 flags = *flagsp; 6516 } else { 6517 flags = 0; 6518 } 6519 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6520 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6521 if (flagsp != NULL) { 6522 *flagsp = flags; 6523 } 6524 if (controlp != NULL) { 6525 /* copy back the sinfo in a CMSG format */ 6526 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6527 *controlp = sctp_build_ctl_nchunk(inp, 6528 (struct sctp_sndrcvinfo *)&sinfo); 6529 } else { 6530 *controlp = NULL; 6531 } 6532 } 6533 if (psa) { 6534 /* copy back the address info */ 6535 if (from && from->sa_len) { 6536 *psa = sodupsockaddr(from, M_NOWAIT); 6537 } else { 6538 *psa = NULL; 6539 } 6540 } 6541 return (error); 6542 } 6543 6544 6545 6546 6547 6548 int 6549 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6550 int totaddr, int *error) 6551 { 6552 int added = 0; 6553 int i; 6554 struct sctp_inpcb *inp; 6555 struct sockaddr *sa; 6556 size_t incr = 0; 6557 #ifdef INET 6558 struct sockaddr_in *sin; 6559 #endif 6560 #ifdef INET6 6561 struct sockaddr_in6 *sin6; 6562 #endif 6563 
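    /*
     * Walk the packed array of sockaddrs, adding each one to the
     * association as a confirmed remote address; on any failure the
     * association is freed and *error is set.
     */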
6564 sa = addr; 6565 inp = stcb->sctp_ep; 6566 *error = 0; 6567 for (i = 0; i < totaddr; i++) { 6568 switch (sa->sa_family) { 6569 #ifdef INET 6570 case AF_INET: 6571 incr = sizeof(struct sockaddr_in); 6572 sin = (struct sockaddr_in *)sa; 6573 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6574 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6575 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6576 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6577 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6578 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6579 *error = EINVAL; 6580 goto out_now; 6581 } 6582 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6583 SCTP_DONOT_SETSCOPE, 6584 SCTP_ADDR_IS_CONFIRMED)) { 6585 /* assoc gone no un-lock */ 6586 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6587 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6588 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6589 *error = ENOBUFS; 6590 goto out_now; 6591 } 6592 added++; 6593 break; 6594 #endif 6595 #ifdef INET6 6596 case AF_INET6: 6597 incr = sizeof(struct sockaddr_in6); 6598 sin6 = (struct sockaddr_in6 *)sa; 6599 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6600 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6601 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6602 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6603 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6604 *error = EINVAL; 6605 goto out_now; 6606 } 6607 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6608 SCTP_DONOT_SETSCOPE, 6609 SCTP_ADDR_IS_CONFIRMED)) { 6610 /* assoc gone no un-lock */ 6611 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6612 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6613 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6614 *error = ENOBUFS; 6615 goto out_now; 6616 } 6617 added++; 6618 break; 6619 #endif 6620 default: 6621 break; 6622 } 6623 sa = (struct sockaddr *)((caddr_t)sa + incr); 6624 } 6625 out_now: 6626 return (added); 6627 } 6628 6629 int 6630 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6631 unsigned int totaddr, 6632 unsigned int *num_v4, unsigned int *num_v6, 6633 unsigned int limit) 6634 { 6635 struct sockaddr *sa; 6636 struct sctp_tcb *stcb; 6637 unsigned int incr, at, i; 6638 6639 at = 0; 6640 sa = addr; 6641 *num_v6 = *num_v4 = 0; 6642 /* account and validate addresses */ 6643 if (totaddr == 0) { 6644 return (EINVAL); 6645 } 6646 for (i = 0; i < totaddr; i++) { 6647 if (at + sizeof(struct sockaddr) > limit) { 6648 return (EINVAL); 6649 } 6650 switch (sa->sa_family) { 6651 #ifdef INET 6652 case AF_INET: 6653 incr = (unsigned int)sizeof(struct sockaddr_in); 6654 if (sa->sa_len != incr) { 6655 return (EINVAL); 6656 } 6657 (*num_v4) += 1; 6658 break; 6659 #endif 6660 #ifdef INET6 6661 case AF_INET6: 6662 { 6663 struct sockaddr_in6 *sin6; 6664 6665 sin6 = (struct sockaddr_in6 *)sa; 6666 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6667 /* Must be non-mapped for connectx */ 6668 return (EINVAL); 6669 } 6670 incr = (unsigned int)sizeof(struct sockaddr_in6); 6671 if (sa->sa_len != incr) { 6672 return (EINVAL); 6673 } 6674 (*num_v6) += 1; 6675 break; 6676 } 6677 #endif 6678 default: 6679 return (EINVAL); 6680 } 6681 if ((at + incr) > limit) { 6682 return (EINVAL); 6683 } 6684 SCTP_INP_INCR_REF(inp); 6685 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6686 if (stcb != NULL) { 6687 SCTP_TCB_UNLOCK(stcb); 6688 return (EALREADY); 6689 } else { 6690 SCTP_INP_DECR_REF(inp); 6691 } 6692 at += incr; 6693 sa = (struct 
sockaddr *)((caddr_t)sa + incr); 6694 } 6695 return (0); 6696 } 6697 6698 /* 6699 * sctp_bindx(ADD) for one address. 6700 * assumes all arguments are valid/checked by caller. 6701 */ 6702 void 6703 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6704 struct sockaddr *sa, uint32_t vrf_id, int *error, 6705 void *p) 6706 { 6707 #if defined(INET) && defined(INET6) 6708 struct sockaddr_in sin; 6709 #endif 6710 #ifdef INET6 6711 struct sockaddr_in6 *sin6; 6712 #endif 6713 #ifdef INET 6714 struct sockaddr_in *sinp; 6715 #endif 6716 struct sockaddr *addr_to_use; 6717 struct sctp_inpcb *lep; 6718 uint16_t port; 6719 6720 /* see if we're bound all already! */ 6721 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6722 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6723 *error = EINVAL; 6724 return; 6725 } 6726 switch (sa->sa_family) { 6727 #ifdef INET6 6728 case AF_INET6: 6729 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6730 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6731 *error = EINVAL; 6732 return; 6733 } 6734 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6735 /* can only bind v6 on PF_INET6 sockets */ 6736 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6737 *error = EINVAL; 6738 return; 6739 } 6740 sin6 = (struct sockaddr_in6 *)sa; 6741 port = sin6->sin6_port; 6742 #ifdef INET 6743 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6744 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6745 SCTP_IPV6_V6ONLY(inp)) { 6746 /* can't bind v4-mapped on PF_INET sockets */ 6747 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6748 *error = EINVAL; 6749 return; 6750 } 6751 in6_sin6_2_sin(&sin, sin6); 6752 addr_to_use = (struct sockaddr *)&sin; 6753 } else { 6754 addr_to_use = sa; 6755 } 6756 #else 6757 addr_to_use = sa; 6758 #endif 6759 break; 6760 #endif 6761 #ifdef INET 6762 case AF_INET: 6763 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6764 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6765 *error = EINVAL; 6766 return; 6767 } 6768 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6769 SCTP_IPV6_V6ONLY(inp)) { 6770 /* can't bind v4 on PF_INET sockets */ 6771 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6772 *error = EINVAL; 6773 return; 6774 } 6775 sinp = (struct sockaddr_in *)sa; 6776 port = sinp->sin_port; 6777 addr_to_use = sa; 6778 break; 6779 #endif 6780 default: 6781 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6782 *error = EINVAL; 6783 return; 6784 } 6785 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6786 if (p == NULL) { 6787 /* Can't get proc for Net/Open BSD */ 6788 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6789 *error = EINVAL; 6790 return; 6791 } 6792 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6793 return; 6794 } 6795 /* Validate the incoming port. */ 6796 if ((port != 0) && (port != inp->sctp_lport)) { 6797 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6798 *error = EINVAL; 6799 return; 6800 } 6801 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6802 if (lep == NULL) { 6803 /* add the address */ 6804 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6805 SCTP_ADD_IP_ADDRESS, vrf_id); 6806 } else { 6807 if (lep != inp) { 6808 *error = EADDRINUSE; 6809 } 6810 SCTP_INP_DECR_REF(lep); 6811 } 6812 } 6813 6814 /* 6815 * sctp_bindx(DELETE) for one address. 6816 * assumes all arguments are valid/checked by caller. 
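 * As in the ADD case, a v4-mapped IPv6 address is converted to a
 * plain IPv4 address before the delete is handed to sctp_addr_mgmt_ep_sa().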
6817 */ 6818 void 6819 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6820 struct sockaddr *sa, uint32_t vrf_id, int *error) 6821 { 6822 struct sockaddr *addr_to_use; 6823 #if defined(INET) && defined(INET6) 6824 struct sockaddr_in6 *sin6; 6825 struct sockaddr_in sin; 6826 #endif 6827 6828 /* see if we're bound all already! */ 6829 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6830 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6831 *error = EINVAL; 6832 return; 6833 } 6834 switch (sa->sa_family) { 6835 #ifdef INET6 6836 case AF_INET6: 6837 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6838 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6839 *error = EINVAL; 6840 return; 6841 } 6842 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6843 /* can only bind v6 on PF_INET6 sockets */ 6844 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6845 *error = EINVAL; 6846 return; 6847 } 6848 #ifdef INET 6849 sin6 = (struct sockaddr_in6 *)sa; 6850 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6851 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6852 SCTP_IPV6_V6ONLY(inp)) { 6853 /* can't bind mapped-v4 on PF_INET sockets */ 6854 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6855 *error = EINVAL; 6856 return; 6857 } 6858 in6_sin6_2_sin(&sin, sin6); 6859 addr_to_use = (struct sockaddr *)&sin; 6860 } else { 6861 addr_to_use = sa; 6862 } 6863 #else 6864 addr_to_use = sa; 6865 #endif 6866 break; 6867 #endif 6868 #ifdef INET 6869 case AF_INET: 6870 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6871 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6872 *error = EINVAL; 6873 return; 6874 } 6875 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6876 SCTP_IPV6_V6ONLY(inp)) { 6877 /* can't bind v4 on PF_INET sockets */ 6878 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6879 *error = EINVAL; 6880 return; 6881 } 6882 addr_to_use = sa; 6883 break; 6884 #endif 6885 default: 6886 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6887 *error = EINVAL; 6888 return; 6889 } 6890 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6891 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6892 vrf_id); 6893 } 6894 6895 /* 6896 * returns the valid local address count for an assoc, taking into account 6897 * all scoping rules 6898 */ 6899 int 6900 sctp_local_addr_count(struct sctp_tcb *stcb) 6901 { 6902 int loopback_scope; 6903 #if defined(INET) 6904 int ipv4_local_scope, ipv4_addr_legal; 6905 #endif 6906 #if defined(INET6) 6907 int local_scope, site_scope, ipv6_addr_legal; 6908 #endif 6909 struct sctp_vrf *vrf; 6910 struct sctp_ifn *sctp_ifn; 6911 struct sctp_ifa *sctp_ifa; 6912 int count = 0; 6913 6914 /* Turn on all the appropriate scopes */ 6915 loopback_scope = stcb->asoc.scope.loopback_scope; 6916 #if defined(INET) 6917 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6918 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6919 #endif 6920 #if defined(INET6) 6921 local_scope = stcb->asoc.scope.local_scope; 6922 site_scope = stcb->asoc.scope.site_scope; 6923 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6924 #endif 6925 SCTP_IPI_ADDR_RLOCK(); 6926 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6927 if (vrf == NULL) { 6928 /* no vrf, no addresses */ 6929 SCTP_IPI_ADDR_RUNLOCK(); 6930 return (0); 6931 } 6932 6933 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6934 /* 6935 * bound all case: go through all ifns on the vrf 6936 */ 6937 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6938 if ((loopback_scope == 0) && 6939 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6940 continue; 6941 } 6942 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6943 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6944 continue; 6945 switch (sctp_ifa->address.sa.sa_family) { 6946 #ifdef INET 6947 case AF_INET: 6948 if (ipv4_addr_legal) { 6949 struct sockaddr_in *sin; 6950 6951 sin = &sctp_ifa->address.sin; 6952 if (sin->sin_addr.s_addr == 0) { 6953 /* 6954 * skip unspecified 6955 * addrs 6956 */ 6957 continue; 6958 } 6959 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 6960 &sin->sin_addr) != 0) { 6961 continue; 6962 } 6963 if ((ipv4_local_scope == 0) && 6964 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 6965 continue; 6966 } 6967 /* count this one */ 6968 count++; 6969 } else { 6970 continue; 6971 } 6972 break; 6973 #endif 6974 #ifdef INET6 6975 case AF_INET6: 6976 if (ipv6_addr_legal) { 6977 struct sockaddr_in6 *sin6; 6978 6979 sin6 = &sctp_ifa->address.sin6; 6980 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 6981 continue; 6982 } 6983 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 6984 &sin6->sin6_addr) != 0) { 6985 continue; 6986 } 6987 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 6988 if (local_scope == 0) 6989 continue; 6990 if (sin6->sin6_scope_id == 0) { 6991 if (sa6_recoverscope(sin6) != 0) 6992 /* 6993 * 6994 * bad 6995 * link 6996 * 6997 * local 6998 * 6999 * address 7000 */ 7001 continue; 7002 } 7003 } 7004 if ((site_scope == 0) && 7005 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7006 continue; 7007 } 7008 /* count this one */ 7009 count++; 7010 } 7011 break; 7012 #endif 7013 default: 7014 /* TSNH */ 7015 break; 7016 } 7017 } 7018 } 7019 } else { 7020 /* 7021 * subset bound case 7022 */ 7023 struct sctp_laddr *laddr; 7024 7025 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7026 sctp_nxt_addr) { 7027 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7028 continue; 7029 } 7030 /* count this one */ 7031 count++; 7032 } 7033 } 7034 SCTP_IPI_ADDR_RUNLOCK(); 7035 return (count); 7036 } 7037 7038 #if defined(SCTP_LOCAL_TRACE_BUF) 7039 7040 void 7041 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7042 { 7043 uint32_t saveindex, newindex; 7044 7045 do { 7046 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7047 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7048 newindex = 1; 7049 } else { 7050 newindex = saveindex + 1; 7051 } 7052 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7053 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7054 saveindex = 0; 7055 } 7056 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7057 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7058 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7059 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7060 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7061 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7062 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7063 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7064 } 7065 7066 #endif 7067 static void 7068 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7069 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7070 { 7071 struct ip *iph; 7072 #ifdef INET6 7073 struct ip6_hdr *ip6; 7074 #endif 7075 struct mbuf *sp, *last; 7076 struct udphdr *uhdr; 7077 uint16_t port; 7078 7079 if ((m->m_flags & M_PKTHDR) == 0) { 7080 /* Can't handle one that is not a pkt hdr */ 7081 goto out; 7082 } 7083 /* Pull the src port */ 7084 iph = mtod(m, struct ip *); 7085 uhdr = (struct udphdr *)((caddr_t)iph + off); 7086 port = uhdr->uh_sport; 7087 /* 7088 * Split out the mbuf chain. Leave the IP header in m, place the 7089 * rest in the sp. 7090 */ 7091 sp = m_split(m, off, M_NOWAIT); 7092 if (sp == NULL) { 7093 /* Gak, drop packet, we can't do a split */ 7094 goto out; 7095 } 7096 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7097 /* Gak, packet can't have an SCTP header in it - too small */ 7098 m_freem(sp); 7099 goto out; 7100 } 7101 /* Now pull up the UDP header and SCTP header together */ 7102 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7103 if (sp == NULL) { 7104 /* Gak pullup failed */ 7105 goto out; 7106 } 7107 /* Trim out the UDP header */ 7108 m_adj(sp, sizeof(struct udphdr)); 7109 7110 /* Now reconstruct the mbuf chain */ 7111 for (last = m; last->m_next; last = last->m_next); 7112 last->m_next = sp; 7113 m->m_pkthdr.len += sp->m_pkthdr.len; 7114 /* 7115 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7116 * checksum and it was valid. Since CSUM_DATA_VALID == 7117 * CSUM_SCTP_VALID this would imply that the HW also verified the 7118 * SCTP checksum. Therefore, clear the bit. 
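 * Clearing it here means the SCTP input path will verify the CRC32c
 * itself instead of trusting the hardware's UDP result.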
7119 */ 7120 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7121 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7122 m->m_pkthdr.len, 7123 if_name(m->m_pkthdr.rcvif), 7124 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7125 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7126 iph = mtod(m, struct ip *); 7127 switch (iph->ip_v) { 7128 #ifdef INET 7129 case IPVERSION: 7130 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7131 sctp_input_with_port(m, off, port); 7132 break; 7133 #endif 7134 #ifdef INET6 7135 case IPV6_VERSION >> 4: 7136 ip6 = mtod(m, struct ip6_hdr *); 7137 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7138 sctp6_input_with_port(&m, &off, port); 7139 break; 7140 #endif 7141 default: 7142 goto out; 7143 break; 7144 } 7145 return; 7146 out: 7147 m_freem(m); 7148 } 7149 7150 #ifdef INET 7151 static void 7152 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7153 { 7154 struct ip *outer_ip, *inner_ip; 7155 struct sctphdr *sh; 7156 struct icmp *icmp; 7157 struct udphdr *udp; 7158 struct sctp_inpcb *inp; 7159 struct sctp_tcb *stcb; 7160 struct sctp_nets *net; 7161 struct sctp_init_chunk *ch; 7162 struct sockaddr_in src, dst; 7163 uint8_t type, code; 7164 7165 inner_ip = (struct ip *)vip; 7166 icmp = (struct icmp *)((caddr_t)inner_ip - 7167 (sizeof(struct icmp) - sizeof(struct ip))); 7168 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7169 if (ntohs(outer_ip->ip_len) < 7170 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7171 return; 7172 } 7173 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7174 sh = (struct sctphdr *)(udp + 1); 7175 memset(&src, 0, sizeof(struct sockaddr_in)); 7176 src.sin_family = AF_INET; 7177 src.sin_len = sizeof(struct sockaddr_in); 7178 src.sin_port = sh->src_port; 7179 src.sin_addr = inner_ip->ip_src; 7180 memset(&dst, 0, sizeof(struct sockaddr_in)); 7181 dst.sin_family = AF_INET; 7182 dst.sin_len = sizeof(struct sockaddr_in); 7183 dst.sin_port = sh->dest_port; 7184 dst.sin_addr = inner_ip->ip_dst; 7185 /* 7186 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7187 * holds our local endpoint address. Thus we reverse the dst and the 7188 * src in the lookup. 7189 */ 7190 inp = NULL; 7191 net = NULL; 7192 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7193 (struct sockaddr *)&src, 7194 &inp, &net, 1, 7195 SCTP_DEFAULT_VRFID); 7196 if ((stcb != NULL) && 7197 (net != NULL) && 7198 (inp != NULL)) { 7199 /* Check the UDP port numbers */ 7200 if ((udp->uh_dport != net->port) || 7201 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7202 SCTP_TCB_UNLOCK(stcb); 7203 return; 7204 } 7205 /* Check the verification tag */ 7206 if (ntohl(sh->v_tag) != 0) { 7207 /* 7208 * This must be the verification tag used for 7209 * sending out packets. We don't consider packets 7210 * reflecting the verification tag. 7211 */ 7212 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7213 SCTP_TCB_UNLOCK(stcb); 7214 return; 7215 } 7216 } else { 7217 if (ntohs(outer_ip->ip_len) >= 7218 sizeof(struct ip) + 7219 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7220 /* 7221 * In this case we can check if we got an 7222 * INIT chunk and if the initiate tag 7223 * matches. 
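 * (A zero v_tag is only expected on packets carrying an INIT, so the
 * INIT's initiate_tag is the only value we can match against our own
 * vtag.)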
7224 */ 7225 ch = (struct sctp_init_chunk *)(sh + 1); 7226 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7227 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7228 SCTP_TCB_UNLOCK(stcb); 7229 return; 7230 } 7231 } else { 7232 SCTP_TCB_UNLOCK(stcb); 7233 return; 7234 } 7235 } 7236 type = icmp->icmp_type; 7237 code = icmp->icmp_code; 7238 if ((type == ICMP_UNREACH) && 7239 (code == ICMP_UNREACH_PORT)) { 7240 code = ICMP_UNREACH_PROTOCOL; 7241 } 7242 sctp_notify(inp, stcb, net, type, code, 7243 ntohs(inner_ip->ip_len), 7244 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7245 } else { 7246 if ((stcb == NULL) && (inp != NULL)) { 7247 /* reduce ref-count */ 7248 SCTP_INP_WLOCK(inp); 7249 SCTP_INP_DECR_REF(inp); 7250 SCTP_INP_WUNLOCK(inp); 7251 } 7252 if (stcb) { 7253 SCTP_TCB_UNLOCK(stcb); 7254 } 7255 } 7256 return; 7257 } 7258 #endif 7259 7260 #ifdef INET6 7261 static void 7262 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7263 { 7264 struct ip6ctlparam *ip6cp; 7265 struct sctp_inpcb *inp; 7266 struct sctp_tcb *stcb; 7267 struct sctp_nets *net; 7268 struct sctphdr sh; 7269 struct udphdr udp; 7270 struct sockaddr_in6 src, dst; 7271 uint8_t type, code; 7272 7273 ip6cp = (struct ip6ctlparam *)d; 7274 /* 7275 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7276 */ 7277 if (ip6cp->ip6c_m == NULL) { 7278 return; 7279 } 7280 /* 7281 * Check if we can safely examine the ports and the verification tag 7282 * of the SCTP common header. 7283 */ 7284 if (ip6cp->ip6c_m->m_pkthdr.len < 7285 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7286 return; 7287 } 7288 /* Copy out the UDP header. */ 7289 memset(&udp, 0, sizeof(struct udphdr)); 7290 m_copydata(ip6cp->ip6c_m, 7291 ip6cp->ip6c_off, 7292 sizeof(struct udphdr), 7293 (caddr_t)&udp); 7294 /* Copy out the port numbers and the verification tag. */ 7295 memset(&sh, 0, sizeof(struct sctphdr)); 7296 m_copydata(ip6cp->ip6c_m, 7297 ip6cp->ip6c_off + sizeof(struct udphdr), 7298 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7299 (caddr_t)&sh); 7300 memset(&src, 0, sizeof(struct sockaddr_in6)); 7301 src.sin6_family = AF_INET6; 7302 src.sin6_len = sizeof(struct sockaddr_in6); 7303 src.sin6_port = sh.src_port; 7304 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7305 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7306 return; 7307 } 7308 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7309 dst.sin6_family = AF_INET6; 7310 dst.sin6_len = sizeof(struct sockaddr_in6); 7311 dst.sin6_port = sh.dest_port; 7312 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7313 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7314 return; 7315 } 7316 inp = NULL; 7317 net = NULL; 7318 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7319 (struct sockaddr *)&src, 7320 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7321 if ((stcb != NULL) && 7322 (net != NULL) && 7323 (inp != NULL)) { 7324 /* Check the UDP port numbers */ 7325 if ((udp.uh_dport != net->port) || 7326 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7327 SCTP_TCB_UNLOCK(stcb); 7328 return; 7329 } 7330 /* Check the verification tag */ 7331 if (ntohl(sh.v_tag) != 0) { 7332 /* 7333 * This must be the verification tag used for 7334 * sending out packets. We don't consider packets 7335 * reflecting the verification tag. 
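 * (Same rule as the IPv4 handler above: a non-zero reflected v_tag
 * must match the peer's vtag before the ICMPv6 message is trusted.)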
7336 */ 7337 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7338 SCTP_TCB_UNLOCK(stcb); 7339 return; 7340 } 7341 } else { 7342 if (ip6cp->ip6c_m->m_pkthdr.len >= 7343 ip6cp->ip6c_off + sizeof(struct udphdr) + 7344 sizeof(struct sctphdr) + 7345 sizeof(struct sctp_chunkhdr) + 7346 offsetof(struct sctp_init, a_rwnd)) { 7347 /* 7348 * In this case we can check if we got an 7349 * INIT chunk and if the initiate tag 7350 * matches. 7351 */ 7352 uint32_t initiate_tag; 7353 uint8_t chunk_type; 7354 7355 m_copydata(ip6cp->ip6c_m, 7356 ip6cp->ip6c_off + 7357 sizeof(struct udphdr) + 7358 sizeof(struct sctphdr), 7359 sizeof(uint8_t), 7360 (caddr_t)&chunk_type); 7361 m_copydata(ip6cp->ip6c_m, 7362 ip6cp->ip6c_off + 7363 sizeof(struct udphdr) + 7364 sizeof(struct sctphdr) + 7365 sizeof(struct sctp_chunkhdr), 7366 sizeof(uint32_t), 7367 (caddr_t)&initiate_tag); 7368 if ((chunk_type != SCTP_INITIATION) || 7369 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7370 SCTP_TCB_UNLOCK(stcb); 7371 return; 7372 } 7373 } else { 7374 SCTP_TCB_UNLOCK(stcb); 7375 return; 7376 } 7377 } 7378 type = ip6cp->ip6c_icmp6->icmp6_type; 7379 code = ip6cp->ip6c_icmp6->icmp6_code; 7380 if ((type == ICMP6_DST_UNREACH) && 7381 (code == ICMP6_DST_UNREACH_NOPORT)) { 7382 type = ICMP6_PARAM_PROB; 7383 code = ICMP6_PARAMPROB_NEXTHEADER; 7384 } 7385 sctp6_notify(inp, stcb, net, type, code, 7386 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7387 } else { 7388 if ((stcb == NULL) && (inp != NULL)) { 7389 /* reduce inp's ref-count */ 7390 SCTP_INP_WLOCK(inp); 7391 SCTP_INP_DECR_REF(inp); 7392 SCTP_INP_WUNLOCK(inp); 7393 } 7394 if (stcb) { 7395 SCTP_TCB_UNLOCK(stcb); 7396 } 7397 } 7398 } 7399 #endif 7400 7401 void 7402 sctp_over_udp_stop(void) 7403 { 7404 /* 7405 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7406 * for writting! 7407 */ 7408 #ifdef INET 7409 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7410 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7411 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7412 } 7413 #endif 7414 #ifdef INET6 7415 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7416 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7417 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7418 } 7419 #endif 7420 } 7421 7422 int 7423 sctp_over_udp_start(void) 7424 { 7425 uint16_t port; 7426 int ret; 7427 #ifdef INET 7428 struct sockaddr_in sin; 7429 #endif 7430 #ifdef INET6 7431 struct sockaddr_in6 sin6; 7432 #endif 7433 /* 7434 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7435 * for writting! 7436 */ 7437 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7438 if (ntohs(port) == 0) { 7439 /* Must have a port set */ 7440 return (EINVAL); 7441 } 7442 #ifdef INET 7443 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7444 /* Already running -- must stop first */ 7445 return (EALREADY); 7446 } 7447 #endif 7448 #ifdef INET6 7449 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7450 /* Already running -- must stop first */ 7451 return (EALREADY); 7452 } 7453 #endif 7454 #ifdef INET 7455 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7456 SOCK_DGRAM, IPPROTO_UDP, 7457 curthread->td_ucred, curthread))) { 7458 sctp_over_udp_stop(); 7459 return (ret); 7460 } 7461 /* Call the special UDP hook. */ 7462 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7463 sctp_recv_udp_tunneled_packet, 7464 sctp_recv_icmp_tunneled_packet, 7465 NULL))) { 7466 sctp_over_udp_stop(); 7467 return (ret); 7468 } 7469 /* Ok, we have a socket, bind it to the port. 
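 * The socket is bound to the wildcard address on the sysctl-configured
 * tunneling port; the IPv6 socket below is set up the same way.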

void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}

int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}
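
/*
 * Worked examples for sctp_min_mtu(), illustrating the "zero means
 * unknown, so skip it" convention implemented above (values are only
 * illustrative):
 *
 *	sctp_min_mtu(1500, 1280, 9000) == 1280
 *	sctp_min_mtu(0, 1500, 1280)    == 1280	(the zero is ignored)
 *	sctp_min_mtu(0, 0, 1500)       == 1500
 *	sctp_min_mtu(0, 0, 0)          == 0	(no MTU known)
 */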

void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}

void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	    (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
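
/*
 * Note on the two helpers above: asoc.state packs the base association
 * state (the SCTP_STATE_MASK bits) together with substate flags, which is
 * why sctp_set_state() replaces only the masked bits while
 * sctp_add_substate() only ORs flags in.  With KDTRACE_HOOKS enabled, both
 * fire the state__change SDT probe on interesting transitions.  A hedged
 * sketch of observing it from userland (the exact DTrace probe name and
 * argument layout are assumptions inferred from the SCTP_PROBE6() calls
 * above, not verified here):
 *
 *	# dtrace -n 'sctp:::state-change { printf("old state %x", arg5); }'
 */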