/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
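/*
 * Utility routines for the SCTP implementation: trace logging helpers
 * (active when SCTP_LOCAL_TRACE_BUF is defined), optional auditing support,
 * tick/time conversions, the MTU step table, association initialization,
 * the endpoint/association iterator, and the common timeout handler.
 */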
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
	struct mbuf *mat;

	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
		sctp_log_mb(mat, from);
	}
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
		    (uint32_t)sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}

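/*
 * Consistency check of an association: recompute the retransmission count
 * and the flight size from the sent queue, compare them with the cached
 * totals, and log and correct any mismatch that is found, printing an audit
 * report when one was detected.
 */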
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{
	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way we can test in the code the time to be positive and
 * know that this corresponds to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
	uint64_t temp;
	uint32_t ticks;

	if (hz == 1000) {
		ticks = msecs;
	} else {
		temp = (((uint64_t)msecs * hz) + 999) / 1000;
		if (temp > UINT32_MAX) {
			ticks = UINT32_MAX;
		} else {
			ticks = (uint32_t)temp;
		}
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t msecs;

	if (hz == 1000) {
		msecs = ticks;
	} else {
		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
		if (temp > UINT32_MAX) {
			msecs = UINT32_MAX;
		} else {
			msecs = (uint32_t)temp;
		}
	}
	return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
	uint64_t temp;
	uint32_t ticks;

	temp = (uint64_t)secs * hz;
	if (temp > UINT32_MAX) {
		ticks = UINT32_MAX;
	} else {
		ticks = (uint32_t)temp;
	}
	return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
	uint64_t temp;
	uint32_t secs;

	temp = ((uint64_t)ticks + (hz - 1)) / hz;
	if (temp > UINT32_MAX) {
		secs = UINT32_MAX;
	} else {
		secs = (uint32_t)temp;
	}
	return (secs);
}

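/*
 * Example of the round-up behaviour above: with hz = 100, a 250 ms timeout
 * maps to (250 * 100 + 999) / 1000 = 25 ticks, and a single tick maps back
 * to (1 * 1000 + 99) / 100 = 10 ms, so any positive time always yields a
 * positive number of ticks and vice versa.
 */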
/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};

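/*
 * sctp_get_prev_mtu() and sctp_get_next_mtu() below step through this
 * table: for example, starting from an MTU of 1500, the previous value is
 * 1492 and the next value is 1536.
 */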
/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	val &= 0xfffffffc;
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4
 * smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	val &= 0xfffffffc;
	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbledygook in the random store, which is what we
	 * want. There is a danger that two callers will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}

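/*
 * Map the kernel's internal association state (SCTP_STATE_* flags) to the
 * state values reported to userland via the socket API.
 */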
int32_t
sctp_map_assoc_state(int kernel_state)
{
	int32_t user_state;

	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
		user_state = SCTP_CLOSED;
	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
		user_state = SCTP_SHUTDOWN_PENDING;
	} else {
		switch (kernel_state & SCTP_STATE_MASK) {
		case SCTP_STATE_EMPTY:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_INUSE:
			user_state = SCTP_CLOSED;
			break;
		case SCTP_STATE_COOKIE_WAIT:
			user_state = SCTP_COOKIE_WAIT;
			break;
		case SCTP_STATE_COOKIE_ECHOED:
			user_state = SCTP_COOKIE_ECHOED;
			break;
		case SCTP_STATE_OPEN:
			user_state = SCTP_ESTABLISHED;
			break;
		case SCTP_STATE_SHUTDOWN_SENT:
			user_state = SCTP_SHUTDOWN_SENT;
			break;
		case SCTP_STATE_SHUTDOWN_RECEIVED:
			user_state = SCTP_SHUTDOWN_RECEIVED;
			break;
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			user_state = SCTP_SHUTDOWN_ACK_SENT;
			break;
		default:
			user_state = SCTP_CLOSED;
			break;
		}
	}
	return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_supported = inp->ecn_supported;
	asoc->prsctp_supported = inp->prsctp_supported;
	asoc->auth_supported = inp->auth_supported;
	asoc->asconf_supported = inp->asconf_supported;
	asoc->reconfig_supported = inp->reconfig_supported;
	asoc->nrsack_supported = inp->nrsack_supported;
	asoc->pktdrop_supported = inp->pktdrop_supported;
	asoc->idata_supported = inp->idata_supported;
	asoc->sctp_cmt_pf = (uint8_t)0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
	asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&inp->sctp_ep);
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimistic here */
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	/* This will need to be adjusted */
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->default_mtu = inp->sctp_ep.default_mtu;
	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = inp->sctp_frag_point;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    o_strms;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		asoc->strmout[i].next_mid_ordered = 0;
		asoc->strmout[i].next_mid_unordered = 0;
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
			asoc->strmout[i].abandoned_sent[j] = 0;
			asoc->strmout[i].abandoned_unsent[j] = 0;
		}
#else
		asoc->strmout[i].abandoned_sent[0] = 0;
		asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
		asoc->strmout[i].sid = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].state = SCTP_STREAM_OPENING;
		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
		asoc->abandoned_unsent[i] = 0;
		asoc->abandoned_sent[i] = 0;
	}
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}

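/*
 * The iterator below walks every endpoint (or a single one when
 * SCTP_ITERATOR_DO_SINGLE_INP is set) and every association matching the
 * requested PCB flags, features, and association state, invoking the
 * caller-supplied callbacks. After SCTP_ITERATOR_MAX_AT_ONCE associations
 * it briefly drops and reacquires the global locks so other threads can
 * make progress during a long walk.
 */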
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	struct epoch_tracker et;
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

	NET_EPOCH_ENTER(et);
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		NET_EPOCH_EXIT(et);
		return;
	}
select_a_new_ep:
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		    ("%s: stcb %p does not belong to inp %p, but inp %p",
		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it;

	/* This function is called with the WQ lock in place */
	sctp_it_ctl.iterator_running = 1;
	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
		/* now let's work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}

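/*
 * Drain the global address work queue filled by routing socket events and
 * hand the collected addresses to an ASCONF iterator run over all bound-all
 * endpoints. If the iterator cannot be started, the entries are freed when
 * the stack is being torn down, or put back on the work queue otherwise.
 */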
static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL,	/* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Freeing if we are stopping or put back on the
			 * addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				sctp_asconf_iterator_end(asc, 0);
			} else {
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}

/*-
 * The following table shows which pointers for the inp, stcb, or net are
 * stored for each timer after it was started.
 *
 *|Name                         |Timer                        |inp |stcb|net |
 *|-----------------------------|-----------------------------|----|----|----|
 *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
 *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
 *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
 */

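/*
 * Common callout handler for all SCTP timers: it sanity checks the timer,
 * takes the needed references and locks for the inp/stcb/net it refers to,
 * records the type in stopped_from, and then dispatches on the timer type
 * in the switch below. Type handlers that destroy the association jump
 * straight to the exit path instead of unlocking a freed stcb.
 */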
void
sctp_timeout_handler(void *t)
{
	struct epoch_tracker et;
	struct timeval tv;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
	int did_output;
	int type;
	int i, secret;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	KASSERT(tmr->self == NULL || tmr->self == tmr,
	    ("sctp_timeout_handler: tmr->self corrupted"));
	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
	    ("sctp_timeout_handler: invalid timer type %d", tmr->type));
	type = tmr->type;
	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
	    ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
	    type, stcb, stcb->sctp_ep));
	if (inp) {
		SCTP_INP_INCR_REF(inp);
	}
	tmr->stopped_from = 0xa001;
	if (stcb) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d handler exiting due to CLOSED association.\n",
			    type);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa002;
	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "Timer type %d handler exiting due to not being active.\n",
		    type);
		CURVNET_RESTORE();
		return;
	}

	tmr->stopped_from = 0xa003;
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "Timer type %d handler exiting due to CLOSED association.\n",
			    type);
			CURVNET_RESTORE();
			return;
		}
	} else if (inp != NULL) {
		SCTP_INP_WLOCK(inp);
	} else {
		SCTP_WQ_ADDR_LOCK();
	}

	/* Record in stopped_from which timeout occurred. */
	tmr->stopped_from = type;
	NET_EPOCH_ENTER(et);
	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_SEND:
		KASSERT(inp != NULL && stcb != NULL && net != NULL,
		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
		    type, inp, stcb, net));
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb, it's gone */
			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * Safeguard. If there are chunks on the sent queue
			 * but no timers running, something is wrong... so
			 * we start a timer on the first chunk on the send
			 * queue on whatever net it is sent to.
			 */
1848 */ 1849 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1850 if (chk->whoTo != NULL) { 1851 break; 1852 } 1853 } 1854 if (chk != NULL) { 1855 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1856 } 1857 } 1858 break; 1859 case SCTP_TIMER_TYPE_INIT: 1860 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1861 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1862 type, inp, stcb, net)); 1863 SCTP_STAT_INCR(sctps_timoinit); 1864 stcb->asoc.timoinit++; 1865 if (sctp_t1init_timer(inp, stcb, net)) { 1866 /* no need to unlock on tcb its gone */ 1867 goto out_decr; 1868 } 1869 /* We do output but not here */ 1870 did_output = 0; 1871 break; 1872 case SCTP_TIMER_TYPE_RECV: 1873 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1874 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1875 type, inp, stcb, net)); 1876 SCTP_STAT_INCR(sctps_timosack); 1877 stcb->asoc.timosack++; 1878 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1879 #ifdef SCTP_AUDITING_ENABLED 1880 sctp_auditing(4, inp, stcb, NULL); 1881 #endif 1882 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1883 break; 1884 case SCTP_TIMER_TYPE_SHUTDOWN: 1885 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1886 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1887 type, inp, stcb, net)); 1888 SCTP_STAT_INCR(sctps_timoshutdown); 1889 stcb->asoc.timoshutdown++; 1890 if (sctp_shutdown_timer(inp, stcb, net)) { 1891 /* no need to unlock on tcb its gone */ 1892 goto out_decr; 1893 } 1894 #ifdef SCTP_AUDITING_ENABLED 1895 sctp_auditing(4, inp, stcb, net); 1896 #endif 1897 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1898 break; 1899 case SCTP_TIMER_TYPE_HEARTBEAT: 1900 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1901 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1902 type, inp, stcb, net)); 1903 SCTP_STAT_INCR(sctps_timoheartbeat); 1904 stcb->asoc.timoheartbeat++; 1905 if (sctp_heartbeat_timer(inp, stcb, net)) { 1906 /* no need to unlock on tcb its gone */ 1907 goto out_decr; 1908 } 1909 #ifdef SCTP_AUDITING_ENABLED 1910 sctp_auditing(4, inp, stcb, net); 1911 #endif 1912 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1913 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1914 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1915 } 1916 break; 1917 case SCTP_TIMER_TYPE_COOKIE: 1918 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1919 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1920 type, inp, stcb, net)); 1921 SCTP_STAT_INCR(sctps_timocookie); 1922 stcb->asoc.timocookie++; 1923 if (sctp_cookie_timer(inp, stcb, net)) { 1924 /* no need to unlock on tcb its gone */ 1925 goto out_decr; 1926 } 1927 #ifdef SCTP_AUDITING_ENABLED 1928 sctp_auditing(4, inp, stcb, net); 1929 #endif 1930 /* 1931 * We consider T3 and Cookie timer pretty much the same with 1932 * respect to where from in chunk_output. 
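		 * Both therefore pass SCTP_OUTPUT_FROM_T3 to
		 * sctp_chunk_output() below.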
1933 */ 1934 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1935 break; 1936 case SCTP_TIMER_TYPE_NEWCOOKIE: 1937 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1938 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1939 type, inp, stcb, net)); 1940 SCTP_STAT_INCR(sctps_timosecret); 1941 (void)SCTP_GETTIME_TIMEVAL(&tv); 1942 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1943 inp->sctp_ep.last_secret_number = 1944 inp->sctp_ep.current_secret_number; 1945 inp->sctp_ep.current_secret_number++; 1946 if (inp->sctp_ep.current_secret_number >= 1947 SCTP_HOW_MANY_SECRETS) { 1948 inp->sctp_ep.current_secret_number = 0; 1949 } 1950 secret = (int)inp->sctp_ep.current_secret_number; 1951 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1952 inp->sctp_ep.secret_key[secret][i] = 1953 sctp_select_initial_TSN(&inp->sctp_ep); 1954 } 1955 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1956 did_output = 0; 1957 break; 1958 case SCTP_TIMER_TYPE_PATHMTURAISE: 1959 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1960 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1961 type, inp, stcb, net)); 1962 SCTP_STAT_INCR(sctps_timopathmtu); 1963 sctp_pathmtu_timer(inp, stcb, net); 1964 did_output = 0; 1965 break; 1966 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1967 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1968 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1969 type, inp, stcb, net)); 1970 if (sctp_shutdownack_timer(inp, stcb, net)) { 1971 /* no need to unlock on tcb its gone */ 1972 goto out_decr; 1973 } 1974 SCTP_STAT_INCR(sctps_timoshutdownack); 1975 stcb->asoc.timoshutdownack++; 1976 #ifdef SCTP_AUDITING_ENABLED 1977 sctp_auditing(4, inp, stcb, net); 1978 #endif 1979 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1980 break; 1981 case SCTP_TIMER_TYPE_ASCONF: 1982 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1983 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1984 type, inp, stcb, net)); 1985 SCTP_STAT_INCR(sctps_timoasconf); 1986 if (sctp_asconf_timer(inp, stcb, net)) { 1987 /* no need to unlock on tcb its gone */ 1988 goto out_decr; 1989 } 1990 #ifdef SCTP_AUDITING_ENABLED 1991 sctp_auditing(4, inp, stcb, net); 1992 #endif 1993 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 1994 break; 1995 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1996 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1997 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1998 type, inp, stcb, net)); 1999 SCTP_STAT_INCR(sctps_timoshutdownguard); 2000 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 2001 "Shutdown guard timer expired"); 2002 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2003 /* no need to unlock on tcb its gone */ 2004 goto out_decr; 2005 case SCTP_TIMER_TYPE_AUTOCLOSE: 2006 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2007 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2008 type, inp, stcb, net)); 2009 SCTP_STAT_INCR(sctps_timoautoclose); 2010 sctp_autoclose_timer(inp, stcb); 2011 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2012 did_output = 0; 2013 break; 2014 case SCTP_TIMER_TYPE_STRRESET: 2015 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2016 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2017 type, inp, stcb, net)); 2018 SCTP_STAT_INCR(sctps_timostrmrst); 2019 if (sctp_strreset_timer(inp, stcb)) { 2020 /* no need to unlock on tcb its gone */ 2021 goto out_decr; 2022 } 2023 
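		/* Push the re-queued stream reset request out again. */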
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2024 break; 2025 case SCTP_TIMER_TYPE_INPKILL: 2026 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2027 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2028 type, inp, stcb, net)); 2029 SCTP_STAT_INCR(sctps_timoinpkill); 2030 /* 2031 * special case, take away our increment since WE are the 2032 * killer 2033 */ 2034 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2035 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2036 SCTP_INP_DECR_REF(inp); 2037 SCTP_INP_WUNLOCK(inp); 2038 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2039 SCTP_CALLED_FROM_INPKILL_TIMER); 2040 inp = NULL; 2041 goto out_no_decr; 2042 case SCTP_TIMER_TYPE_ASOCKILL: 2043 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2044 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2045 type, inp, stcb, net)); 2046 SCTP_STAT_INCR(sctps_timoassockill); 2047 /* Can we free it yet? */ 2048 SCTP_INP_DECR_REF(inp); 2049 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2050 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2051 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2052 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2053 /* 2054 * free asoc, always unlocks (or destroy's) so prevent 2055 * duplicate unlock or unlock of a free mtx :-0 2056 */ 2057 stcb = NULL; 2058 goto out_no_decr; 2059 case SCTP_TIMER_TYPE_ADDR_WQ: 2060 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2061 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2062 type, inp, stcb, net)); 2063 sctp_handle_addr_wq(); 2064 break; 2065 case SCTP_TIMER_TYPE_PRIM_DELETED: 2066 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2067 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2068 type, inp, stcb, net)); 2069 SCTP_STAT_INCR(sctps_timodelprim); 2070 sctp_delete_prim_timer(inp, stcb); 2071 break; 2072 default: 2073 #ifdef INVARIANTS 2074 panic("Unknown timer type %d", type); 2075 #else 2076 goto get_out; 2077 #endif 2078 } 2079 #ifdef SCTP_AUDITING_ENABLED 2080 sctp_audit_log(0xF1, (uint8_t)type); 2081 if (inp) 2082 sctp_auditing(5, inp, stcb, net); 2083 #endif 2084 if ((did_output) && stcb) { 2085 /* 2086 * Now we need to clean up the control chunk chain if an 2087 * ECNE is on it. It must be marked as UNSENT again so next 2088 * call will continue to send it until such time that we get 2089 * a CWR, to remove it. It is, however, less likely that we 2090 * will find a ecn echo on the chain though. 2091 */ 2092 sctp_fix_ecn_echo(&stcb->asoc); 2093 } 2094 get_out: 2095 if (stcb) { 2096 SCTP_TCB_UNLOCK(stcb); 2097 } else if (inp != NULL) { 2098 SCTP_INP_WUNLOCK(inp); 2099 } else { 2100 SCTP_WQ_ADDR_UNLOCK(); 2101 } 2102 2103 out_decr: 2104 if (inp) { 2105 SCTP_INP_DECR_REF(inp); 2106 } 2107 2108 out_no_decr: 2109 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2110 CURVNET_RESTORE(); 2111 NET_EPOCH_EXIT(et); 2112 } 2113 2114 /*- 2115 * The following table shows which parameters must be provided 2116 * when calling sctp_timer_start(). For parameters not being 2117 * provided, NULL must be used. 
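 * Passing a pointer for a parameter marked "No" below, or omitting a
 * required one, panics under INVARIANTS and otherwise makes the call a
 * no-op.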
2118 * 2119 * |Name |inp |stcb|net | 2120 * |-----------------------------|----|----|----| 2121 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2122 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2123 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2124 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2125 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2126 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2127 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2128 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2129 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2130 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2131 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2132 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2133 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2134 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2135 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2136 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2137 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2138 * 2139 */ 2140 2141 void 2142 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2143 struct sctp_nets *net) 2144 { 2145 struct sctp_timer *tmr; 2146 uint32_t to_ticks; 2147 uint32_t rndval, jitter; 2148 2149 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2150 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2151 t_type, stcb, stcb->sctp_ep)); 2152 tmr = NULL; 2153 to_ticks = 0; 2154 if (stcb != NULL) { 2155 SCTP_TCB_LOCK_ASSERT(stcb); 2156 } else if (inp != NULL) { 2157 SCTP_INP_WLOCK_ASSERT(inp); 2158 } else { 2159 SCTP_WQ_ADDR_LOCK_ASSERT(); 2160 } 2161 if (stcb != NULL) { 2162 /* 2163 * Don't restart timer on association that's about to be 2164 * killed. 2165 */ 2166 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2167 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2168 SCTPDBG(SCTP_DEBUG_TIMER2, 2169 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2170 t_type, inp, stcb, net); 2171 return; 2172 } 2173 /* Don't restart timer on net that's been removed. */ 2174 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2175 SCTPDBG(SCTP_DEBUG_TIMER2, 2176 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2177 t_type, inp, stcb, net); 2178 return; 2179 } 2180 } 2181 switch (t_type) { 2182 case SCTP_TIMER_TYPE_SEND: 2183 /* Here we use the RTO timer. */ 2184 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2185 #ifdef INVARIANTS 2186 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2187 t_type, inp, stcb, net); 2188 #else 2189 return; 2190 #endif 2191 } 2192 tmr = &net->rxt_timer; 2193 if (net->RTO == 0) { 2194 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2195 } else { 2196 to_ticks = sctp_msecs_to_ticks(net->RTO); 2197 } 2198 break; 2199 case SCTP_TIMER_TYPE_INIT: 2200 /* 2201 * Here we use the INIT timer default usually about 1 2202 * second. 2203 */ 2204 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2205 #ifdef INVARIANTS 2206 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2207 t_type, inp, stcb, net); 2208 #else 2209 return; 2210 #endif 2211 } 2212 tmr = &net->rxt_timer; 2213 if (net->RTO == 0) { 2214 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2215 } else { 2216 to_ticks = sctp_msecs_to_ticks(net->RTO); 2217 } 2218 break; 2219 case SCTP_TIMER_TYPE_RECV: 2220 /* 2221 * Here we use the Delayed-Ack timer value from the inp, 2222 * ususually about 200ms. 
2223 */ 2224 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2225 #ifdef INVARIANTS 2226 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2227 t_type, inp, stcb, net); 2228 #else 2229 return; 2230 #endif 2231 } 2232 tmr = &stcb->asoc.dack_timer; 2233 to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); 2234 break; 2235 case SCTP_TIMER_TYPE_SHUTDOWN: 2236 /* Here we use the RTO of the destination. */ 2237 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2238 #ifdef INVARIANTS 2239 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2240 t_type, inp, stcb, net); 2241 #else 2242 return; 2243 #endif 2244 } 2245 tmr = &net->rxt_timer; 2246 if (net->RTO == 0) { 2247 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2248 } else { 2249 to_ticks = sctp_msecs_to_ticks(net->RTO); 2250 } 2251 break; 2252 case SCTP_TIMER_TYPE_HEARTBEAT: 2253 /* 2254 * The net is used here so that we can add in the RTO. Even 2255 * though we use a different timer. We also add the HB timer 2256 * PLUS a random jitter. 2257 */ 2258 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2259 #ifdef INVARIANTS 2260 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2261 t_type, inp, stcb, net); 2262 #else 2263 return; 2264 #endif 2265 } 2266 if ((net->dest_state & SCTP_ADDR_NOHB) && 2267 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 2268 SCTPDBG(SCTP_DEBUG_TIMER2, 2269 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2270 t_type, inp, stcb, net); 2271 return; 2272 } 2273 tmr = &net->hb_timer; 2274 if (net->RTO == 0) { 2275 to_ticks = stcb->asoc.initial_rto; 2276 } else { 2277 to_ticks = net->RTO; 2278 } 2279 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 2280 jitter = rndval % to_ticks; 2281 if (jitter >= (to_ticks >> 1)) { 2282 to_ticks = to_ticks + (jitter - (to_ticks >> 1)); 2283 } else { 2284 to_ticks = to_ticks - jitter; 2285 } 2286 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 2287 !(net->dest_state & SCTP_ADDR_PF)) { 2288 to_ticks += net->heart_beat_delay; 2289 } 2290 /* 2291 * Now we must convert the to_ticks that are now in ms to 2292 * ticks. 2293 */ 2294 to_ticks = sctp_msecs_to_ticks(to_ticks); 2295 break; 2296 case SCTP_TIMER_TYPE_COOKIE: 2297 /* 2298 * Here we can use the RTO timer from the network since one 2299 * RTT was complete. If a retransmission happened then we 2300 * will be using the RTO initial value. 2301 */ 2302 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2303 #ifdef INVARIANTS 2304 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2305 t_type, inp, stcb, net); 2306 #else 2307 return; 2308 #endif 2309 } 2310 tmr = &net->rxt_timer; 2311 if (net->RTO == 0) { 2312 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2313 } else { 2314 to_ticks = sctp_msecs_to_ticks(net->RTO); 2315 } 2316 break; 2317 case SCTP_TIMER_TYPE_NEWCOOKIE: 2318 /* 2319 * Nothing needed but the endpoint here ususually about 60 2320 * minutes. 2321 */ 2322 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2323 #ifdef INVARIANTS 2324 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2325 t_type, inp, stcb, net); 2326 #else 2327 return; 2328 #endif 2329 } 2330 tmr = &inp->sctp_ep.signature_change; 2331 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 2332 break; 2333 case SCTP_TIMER_TYPE_PATHMTURAISE: 2334 /* 2335 * Here we use the value found in the EP for PMTUD, 2336 * ususually about 10 minutes. 
2337 */ 2338 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2339 #ifdef INVARIANTS 2340 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2341 t_type, inp, stcb, net); 2342 #else 2343 return; 2344 #endif 2345 } 2346 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2347 SCTPDBG(SCTP_DEBUG_TIMER2, 2348 "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", 2349 t_type, inp, stcb, net); 2350 return; 2351 } 2352 tmr = &net->pmtu_timer; 2353 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2354 break; 2355 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2356 /* Here we use the RTO of the destination. */ 2357 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2358 #ifdef INVARIANTS 2359 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2360 t_type, inp, stcb, net); 2361 #else 2362 return; 2363 #endif 2364 } 2365 tmr = &net->rxt_timer; 2366 if (net->RTO == 0) { 2367 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2368 } else { 2369 to_ticks = sctp_msecs_to_ticks(net->RTO); 2370 } 2371 break; 2372 case SCTP_TIMER_TYPE_ASCONF: 2373 /* 2374 * Here the timer comes from the stcb but its value is from 2375 * the net's RTO. 2376 */ 2377 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2378 #ifdef INVARIANTS 2379 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2380 t_type, inp, stcb, net); 2381 #else 2382 return; 2383 #endif 2384 } 2385 tmr = &stcb->asoc.asconf_timer; 2386 if (net->RTO == 0) { 2387 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2388 } else { 2389 to_ticks = sctp_msecs_to_ticks(net->RTO); 2390 } 2391 break; 2392 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2393 /* 2394 * Here we use the endpoints shutdown guard timer usually 2395 * about 3 minutes. 2396 */ 2397 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2398 #ifdef INVARIANTS 2399 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2400 t_type, inp, stcb, net); 2401 #else 2402 return; 2403 #endif 2404 } 2405 tmr = &stcb->asoc.shut_guard_timer; 2406 if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { 2407 if (stcb->asoc.maxrto < UINT32_MAX / 5) { 2408 to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); 2409 } else { 2410 to_ticks = sctp_msecs_to_ticks(UINT32_MAX); 2411 } 2412 } else { 2413 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2414 } 2415 break; 2416 case SCTP_TIMER_TYPE_AUTOCLOSE: 2417 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2418 #ifdef INVARIANTS 2419 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2420 t_type, inp, stcb, net); 2421 #else 2422 return; 2423 #endif 2424 } 2425 tmr = &stcb->asoc.autoclose_timer; 2426 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2427 break; 2428 case SCTP_TIMER_TYPE_STRRESET: 2429 /* 2430 * Here the timer comes from the stcb but its value is from 2431 * the net's RTO. 2432 */ 2433 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2434 #ifdef INVARIANTS 2435 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2436 t_type, inp, stcb, net); 2437 #else 2438 return; 2439 #endif 2440 } 2441 tmr = &stcb->asoc.strreset_timer; 2442 if (net->RTO == 0) { 2443 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2444 } else { 2445 to_ticks = sctp_msecs_to_ticks(net->RTO); 2446 } 2447 break; 2448 case SCTP_TIMER_TYPE_INPKILL: 2449 /* 2450 * The inp is setup to die. We re-use the signature_chage 2451 * timer since that has stopped and we are in the GONE 2452 * state. 
2453 */ 2454 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2455 #ifdef INVARIANTS 2456 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2457 t_type, inp, stcb, net); 2458 #else 2459 return; 2460 #endif 2461 } 2462 tmr = &inp->sctp_ep.signature_change; 2463 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2464 break; 2465 case SCTP_TIMER_TYPE_ASOCKILL: 2466 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2467 #ifdef INVARIANTS 2468 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2469 t_type, inp, stcb, net); 2470 #else 2471 return; 2472 #endif 2473 } 2474 tmr = &stcb->asoc.strreset_timer; 2475 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2476 break; 2477 case SCTP_TIMER_TYPE_ADDR_WQ: 2478 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2479 #ifdef INVARIANTS 2480 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2481 t_type, inp, stcb, net); 2482 #else 2483 return; 2484 #endif 2485 } 2486 /* Only 1 tick away :-) */ 2487 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2488 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2489 break; 2490 case SCTP_TIMER_TYPE_PRIM_DELETED: 2491 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2492 #ifdef INVARIANTS 2493 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2494 t_type, inp, stcb, net); 2495 #else 2496 return; 2497 #endif 2498 } 2499 tmr = &stcb->asoc.delete_prim_timer; 2500 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2501 break; 2502 default: 2503 #ifdef INVARIANTS 2504 panic("Unknown timer type %d", t_type); 2505 #else 2506 return; 2507 #endif 2508 } 2509 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2510 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2511 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2512 /* 2513 * We do NOT allow you to have it already running. If it is, 2514 * we leave the current one up unchanged. 2515 */ 2516 SCTPDBG(SCTP_DEBUG_TIMER2, 2517 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2518 t_type, inp, stcb, net); 2519 return; 2520 } 2521 /* At this point we can proceed. */ 2522 if (t_type == SCTP_TIMER_TYPE_SEND) { 2523 stcb->asoc.num_send_timers_up++; 2524 } 2525 tmr->stopped_from = 0; 2526 tmr->type = t_type; 2527 tmr->ep = (void *)inp; 2528 tmr->tcb = (void *)stcb; 2529 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2530 tmr->net = NULL; 2531 } else { 2532 tmr->net = (void *)net; 2533 } 2534 tmr->self = (void *)tmr; 2535 tmr->vnet = (void *)curvnet; 2536 tmr->ticks = sctp_get_tick_count(); 2537 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2538 SCTPDBG(SCTP_DEBUG_TIMER2, 2539 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2540 t_type, to_ticks, inp, stcb, net); 2541 } else { 2542 /* 2543 * This should not happen, since we checked for pending 2544 * above. 2545 */ 2546 SCTPDBG(SCTP_DEBUG_TIMER2, 2547 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2548 t_type, to_ticks, inp, stcb, net); 2549 } 2550 return; 2551 } 2552 2553 /*- 2554 * The following table shows which parameters must be provided 2555 * when calling sctp_timer_stop(). For parameters not being 2556 * provided, NULL must be used. 
2557 * 2558 * |Name |inp |stcb|net | 2559 * |-----------------------------|----|----|----| 2560 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2561 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2562 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2563 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2564 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2565 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2566 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2567 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2568 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2569 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2570 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2571 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2572 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2573 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2574 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2575 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2576 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2577 * 2578 */ 2579 2580 void 2581 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2582 struct sctp_nets *net, uint32_t from) 2583 { 2584 struct sctp_timer *tmr; 2585 2586 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2587 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2588 t_type, stcb, stcb->sctp_ep)); 2589 if (stcb != NULL) { 2590 SCTP_TCB_LOCK_ASSERT(stcb); 2591 } else if (inp != NULL) { 2592 SCTP_INP_WLOCK_ASSERT(inp); 2593 } else { 2594 SCTP_WQ_ADDR_LOCK_ASSERT(); 2595 } 2596 tmr = NULL; 2597 switch (t_type) { 2598 case SCTP_TIMER_TYPE_SEND: 2599 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2600 #ifdef INVARIANTS 2601 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2602 t_type, inp, stcb, net); 2603 #else 2604 return; 2605 #endif 2606 } 2607 tmr = &net->rxt_timer; 2608 break; 2609 case SCTP_TIMER_TYPE_INIT: 2610 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2611 #ifdef INVARIANTS 2612 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2613 t_type, inp, stcb, net); 2614 #else 2615 return; 2616 #endif 2617 } 2618 tmr = &net->rxt_timer; 2619 break; 2620 case SCTP_TIMER_TYPE_RECV: 2621 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2622 #ifdef INVARIANTS 2623 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2624 t_type, inp, stcb, net); 2625 #else 2626 return; 2627 #endif 2628 } 2629 tmr = &stcb->asoc.dack_timer; 2630 break; 2631 case SCTP_TIMER_TYPE_SHUTDOWN: 2632 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2633 #ifdef INVARIANTS 2634 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2635 t_type, inp, stcb, net); 2636 #else 2637 return; 2638 #endif 2639 } 2640 tmr = &net->rxt_timer; 2641 break; 2642 case SCTP_TIMER_TYPE_HEARTBEAT: 2643 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2644 #ifdef INVARIANTS 2645 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2646 t_type, inp, stcb, net); 2647 #else 2648 return; 2649 #endif 2650 } 2651 tmr = &net->hb_timer; 2652 break; 2653 case SCTP_TIMER_TYPE_COOKIE: 2654 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2655 #ifdef INVARIANTS 2656 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2657 t_type, inp, stcb, net); 2658 #else 2659 return; 2660 #endif 2661 } 2662 tmr = &net->rxt_timer; 2663 break; 2664 case SCTP_TIMER_TYPE_NEWCOOKIE: 2665 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2666 #ifdef INVARIANTS 2667 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2668 t_type, inp, stcb, net); 2669 #else 2670 
return; 2671 #endif 2672 } 2673 tmr = &inp->sctp_ep.signature_change; 2674 break; 2675 case SCTP_TIMER_TYPE_PATHMTURAISE: 2676 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2677 #ifdef INVARIANTS 2678 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2679 t_type, inp, stcb, net); 2680 #else 2681 return; 2682 #endif 2683 } 2684 tmr = &net->pmtu_timer; 2685 break; 2686 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2687 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2688 #ifdef INVARIANTS 2689 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2690 t_type, inp, stcb, net); 2691 #else 2692 return; 2693 #endif 2694 } 2695 tmr = &net->rxt_timer; 2696 break; 2697 case SCTP_TIMER_TYPE_ASCONF: 2698 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2699 #ifdef INVARIANTS 2700 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2701 t_type, inp, stcb, net); 2702 #else 2703 return; 2704 #endif 2705 } 2706 tmr = &stcb->asoc.asconf_timer; 2707 break; 2708 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2709 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2710 #ifdef INVARIANTS 2711 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2712 t_type, inp, stcb, net); 2713 #else 2714 return; 2715 #endif 2716 } 2717 tmr = &stcb->asoc.shut_guard_timer; 2718 break; 2719 case SCTP_TIMER_TYPE_AUTOCLOSE: 2720 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2721 #ifdef INVARIANTS 2722 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2723 t_type, inp, stcb, net); 2724 #else 2725 return; 2726 #endif 2727 } 2728 tmr = &stcb->asoc.autoclose_timer; 2729 break; 2730 case SCTP_TIMER_TYPE_STRRESET: 2731 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2732 #ifdef INVARIANTS 2733 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2734 t_type, inp, stcb, net); 2735 #else 2736 return; 2737 #endif 2738 } 2739 tmr = &stcb->asoc.strreset_timer; 2740 break; 2741 case SCTP_TIMER_TYPE_INPKILL: 2742 /* 2743 * The inp is setup to die. We re-use the signature_chage 2744 * timer since that has stopped and we are in the GONE 2745 * state. 
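		 * sctp_timeout_handler() issues this stop for its INPKILL
		 * case right before freeing the inp.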
2746 */ 2747 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2748 #ifdef INVARIANTS 2749 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2750 t_type, inp, stcb, net); 2751 #else 2752 return; 2753 #endif 2754 } 2755 tmr = &inp->sctp_ep.signature_change; 2756 break; 2757 case SCTP_TIMER_TYPE_ASOCKILL: 2758 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2759 #ifdef INVARIANTS 2760 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2761 t_type, inp, stcb, net); 2762 #else 2763 return; 2764 #endif 2765 } 2766 tmr = &stcb->asoc.strreset_timer; 2767 break; 2768 case SCTP_TIMER_TYPE_ADDR_WQ: 2769 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2770 #ifdef INVARIANTS 2771 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2772 t_type, inp, stcb, net); 2773 #else 2774 return; 2775 #endif 2776 } 2777 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2778 break; 2779 case SCTP_TIMER_TYPE_PRIM_DELETED: 2780 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2781 #ifdef INVARIANTS 2782 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2783 t_type, inp, stcb, net); 2784 #else 2785 return; 2786 #endif 2787 } 2788 tmr = &stcb->asoc.delete_prim_timer; 2789 break; 2790 default: 2791 #ifdef INVARIANTS 2792 panic("Unknown timer type %d", t_type); 2793 #else 2794 return; 2795 #endif 2796 } 2797 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2798 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2799 (tmr->type != t_type)) { 2800 /* 2801 * Ok we have a timer that is under joint use. Cookie timer 2802 * per chance with the SEND timer. We therefore are NOT 2803 * running the timer that the caller wants stopped. So just 2804 * return. 2805 */ 2806 SCTPDBG(SCTP_DEBUG_TIMER2, 2807 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2808 t_type, inp, stcb, net); 2809 return; 2810 } 2811 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2812 stcb->asoc.num_send_timers_up--; 2813 if (stcb->asoc.num_send_timers_up < 0) { 2814 stcb->asoc.num_send_timers_up = 0; 2815 } 2816 } 2817 tmr->self = NULL; 2818 tmr->stopped_from = from; 2819 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2820 KASSERT(tmr->ep == inp, 2821 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2822 t_type, inp, tmr->ep)); 2823 KASSERT(tmr->tcb == stcb, 2824 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2825 t_type, stcb, tmr->tcb)); 2826 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2827 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2828 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2829 t_type, net, tmr->net)); 2830 SCTPDBG(SCTP_DEBUG_TIMER2, 2831 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2832 t_type, inp, stcb, net); 2833 tmr->ep = NULL; 2834 tmr->tcb = NULL; 2835 tmr->net = NULL; 2836 } else { 2837 SCTPDBG(SCTP_DEBUG_TIMER2, 2838 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2839 t_type, inp, stcb, net); 2840 } 2841 return; 2842 } 2843 2844 uint32_t 2845 sctp_calculate_len(struct mbuf *m) 2846 { 2847 uint32_t tlen = 0; 2848 struct mbuf *at; 2849 2850 at = m; 2851 while (at) { 2852 tlen += SCTP_BUF_LEN(at); 2853 at = SCTP_BUF_NEXT(at); 2854 } 2855 return (tlen); 2856 } 2857 2858 void 2859 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2860 struct sctp_association *asoc, uint32_t mtu) 2861 { 2862 /* 2863 * Reset the P-MTU size on this association, this involves changing 2864 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2865 * allow the 
DF flag to be cleared. 2866 */ 2867 struct sctp_tmit_chunk *chk; 2868 unsigned int eff_mtu, ovh; 2869 2870 asoc->smallest_mtu = mtu; 2871 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2872 ovh = SCTP_MIN_OVERHEAD; 2873 } else { 2874 ovh = SCTP_MIN_V4_OVERHEAD; 2875 } 2876 eff_mtu = mtu - ovh; 2877 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2878 if (chk->send_size > eff_mtu) { 2879 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2880 } 2881 } 2882 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2883 if (chk->send_size > eff_mtu) { 2884 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2885 } 2886 } 2887 } 2888 2889 2890 /* 2891 * Given an association and starting time of the current RTT period, update 2892 * RTO in number of msecs. net should point to the current network. 2893 * Return 1, if an RTO update was performed, return 0 if no update was 2894 * performed due to invalid starting point. 2895 */ 2896 2897 int 2898 sctp_calculate_rto(struct sctp_tcb *stcb, 2899 struct sctp_association *asoc, 2900 struct sctp_nets *net, 2901 struct timeval *old, 2902 int rtt_from_sack) 2903 { 2904 struct timeval now; 2905 uint64_t rtt_us; /* RTT in us */ 2906 int32_t rtt; /* RTT in ms */ 2907 uint32_t new_rto; 2908 int first_measure = 0; 2909 2910 /************************/ 2911 /* 1. calculate new RTT */ 2912 /************************/ 2913 /* get the current time */ 2914 if (stcb->asoc.use_precise_time) { 2915 (void)SCTP_GETPTIME_TIMEVAL(&now); 2916 } else { 2917 (void)SCTP_GETTIME_TIMEVAL(&now); 2918 } 2919 if ((old->tv_sec > now.tv_sec) || 2920 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2921 /* The starting point is in the future. */ 2922 return (0); 2923 } 2924 timevalsub(&now, old); 2925 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2926 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2927 /* The RTT is larger than a sane value. */ 2928 return (0); 2929 } 2930 /* store the current RTT in us */ 2931 net->rtt = rtt_us; 2932 /* compute rtt in ms */ 2933 rtt = (int32_t)(net->rtt / 1000); 2934 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2935 /* 2936 * Tell the CC module that a new update has just occurred 2937 * from a sack 2938 */ 2939 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2940 } 2941 /* 2942 * Do we need to determine the lan? We do this only on sacks i.e. 2943 * RTT being determined from data not non-data (HB/INIT->INITACK). 2944 */ 2945 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2946 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2947 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2948 net->lan_type = SCTP_LAN_INTERNET; 2949 } else { 2950 net->lan_type = SCTP_LAN_LOCAL; 2951 } 2952 } 2953 2954 /***************************/ 2955 /* 2. update RTTVAR & SRTT */ 2956 /***************************/ 2957 /*- 2958 * Compute the scaled average lastsa and the 2959 * scaled variance lastsv as described in van Jacobson 2960 * Paper "Congestion Avoidance and Control", Annex A. 
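	 *
	 * Written out, the update performed below is
	 *
	 *   SRTT   <- SRTT   + (RTT - SRTT) / 2^SCTP_RTT_SHIFT
	 *   RTTVAR <- RTTVAR + (|RTT - SRTT| - RTTVAR) / 2^SCTP_RTT_VAR_SHIFT
	 *   RTO    <- SRTT + 2^SCTP_RTT_VAR_SHIFT * RTTVAR
	 *
	 * with lastsa and lastsv holding SRTT and RTTVAR pre-scaled by the
	 * corresponding shifts: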
2961 * 2962 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2963 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2964 */ 2965 if (net->RTO_measured) { 2966 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2967 net->lastsa += rtt; 2968 if (rtt < 0) { 2969 rtt = -rtt; 2970 } 2971 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 2972 net->lastsv += rtt; 2973 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2974 rto_logging(net, SCTP_LOG_RTTVAR); 2975 } 2976 } else { 2977 /* First RTO measurment */ 2978 net->RTO_measured = 1; 2979 first_measure = 1; 2980 net->lastsa = rtt << SCTP_RTT_SHIFT; 2981 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 2982 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2983 rto_logging(net, SCTP_LOG_INITIAL_RTT); 2984 } 2985 } 2986 if (net->lastsv == 0) { 2987 net->lastsv = SCTP_CLOCK_GRANULARITY; 2988 } 2989 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 2990 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 2991 (stcb->asoc.sat_network_lockout == 0)) { 2992 stcb->asoc.sat_network = 1; 2993 } else if ((!first_measure) && stcb->asoc.sat_network) { 2994 stcb->asoc.sat_network = 0; 2995 stcb->asoc.sat_network_lockout = 1; 2996 } 2997 /* bound it, per C6/C7 in Section 5.3.1 */ 2998 if (new_rto < stcb->asoc.minrto) { 2999 new_rto = stcb->asoc.minrto; 3000 } 3001 if (new_rto > stcb->asoc.maxrto) { 3002 new_rto = stcb->asoc.maxrto; 3003 } 3004 net->RTO = new_rto; 3005 return (1); 3006 } 3007 3008 /* 3009 * return a pointer to a contiguous piece of data from the given mbuf chain 3010 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3011 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 3012 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 3013 */ 3014 caddr_t 3015 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3016 { 3017 uint32_t count; 3018 uint8_t *ptr; 3019 3020 ptr = in_ptr; 3021 if ((off < 0) || (len <= 0)) 3022 return (NULL); 3023 3024 /* find the desired start location */ 3025 while ((m != NULL) && (off > 0)) { 3026 if (off < SCTP_BUF_LEN(m)) 3027 break; 3028 off -= SCTP_BUF_LEN(m); 3029 m = SCTP_BUF_NEXT(m); 3030 } 3031 if (m == NULL) 3032 return (NULL); 3033 3034 /* is the current mbuf large enough (eg. contiguous)? */ 3035 if ((SCTP_BUF_LEN(m) - off) >= len) { 3036 return (mtod(m, caddr_t)+off); 3037 } else { 3038 /* else, it spans more than one mbuf, so save a temp copy... */ 3039 while ((m != NULL) && (len > 0)) { 3040 count = min(SCTP_BUF_LEN(m) - off, len); 3041 memcpy(ptr, mtod(m, caddr_t)+off, count); 3042 len -= count; 3043 ptr += count; 3044 off = 0; 3045 m = SCTP_BUF_NEXT(m); 3046 } 3047 if ((m == NULL) && (len > 0)) 3048 return (NULL); 3049 else 3050 return ((caddr_t)in_ptr); 3051 } 3052 } 3053 3054 3055 3056 struct sctp_paramhdr * 3057 sctp_get_next_param(struct mbuf *m, 3058 int offset, 3059 struct sctp_paramhdr *pull, 3060 int pull_limit) 3061 { 3062 /* This just provides a typed signature to Peter's Pull routine */ 3063 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3064 (uint8_t *)pull)); 3065 } 3066 3067 3068 struct mbuf * 3069 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3070 { 3071 struct mbuf *m_last; 3072 caddr_t dp; 3073 3074 if (padlen > 3) { 3075 return (NULL); 3076 } 3077 if (padlen <= M_TRAILINGSPACE(m)) { 3078 /* 3079 * The easy way. 
We hope the majority of the time we hit 3080 * here :) 3081 */ 3082 m_last = m; 3083 } else { 3084 /* Hard way we must grow the mbuf chain */ 3085 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3086 if (m_last == NULL) { 3087 return (NULL); 3088 } 3089 SCTP_BUF_LEN(m_last) = 0; 3090 SCTP_BUF_NEXT(m_last) = NULL; 3091 SCTP_BUF_NEXT(m) = m_last; 3092 } 3093 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3094 SCTP_BUF_LEN(m_last) += padlen; 3095 memset(dp, 0, padlen); 3096 return (m_last); 3097 } 3098 3099 struct mbuf * 3100 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3101 { 3102 /* find the last mbuf in chain and pad it */ 3103 struct mbuf *m_at; 3104 3105 if (last_mbuf != NULL) { 3106 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3107 } else { 3108 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3109 if (SCTP_BUF_NEXT(m_at) == NULL) { 3110 return (sctp_add_pad_tombuf(m_at, padval)); 3111 } 3112 } 3113 } 3114 return (NULL); 3115 } 3116 3117 static void 3118 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3119 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked) 3120 { 3121 struct mbuf *m_notify; 3122 struct sctp_assoc_change *sac; 3123 struct sctp_queued_to_read *control; 3124 unsigned int notif_len; 3125 uint16_t abort_len; 3126 unsigned int i; 3127 3128 if (stcb == NULL) { 3129 return; 3130 } 3131 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3132 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3133 if (abort != NULL) { 3134 abort_len = ntohs(abort->ch.chunk_length); 3135 /* 3136 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3137 * contiguous. 3138 */ 3139 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3140 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3141 } 3142 } else { 3143 abort_len = 0; 3144 } 3145 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3146 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3147 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3148 notif_len += abort_len; 3149 } 3150 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3151 if (m_notify == NULL) { 3152 /* Retry with smaller value. 
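			 * Fall back to the base sctp_assoc_change notification
			 * without the optional sac_info bytes.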
*/ 3153 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3154 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3155 if (m_notify == NULL) { 3156 goto set_error; 3157 } 3158 } 3159 SCTP_BUF_NEXT(m_notify) = NULL; 3160 sac = mtod(m_notify, struct sctp_assoc_change *); 3161 memset(sac, 0, notif_len); 3162 sac->sac_type = SCTP_ASSOC_CHANGE; 3163 sac->sac_flags = 0; 3164 sac->sac_length = sizeof(struct sctp_assoc_change); 3165 sac->sac_state = state; 3166 sac->sac_error = error; 3167 /* XXX verify these stream counts */ 3168 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3169 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3170 sac->sac_assoc_id = sctp_get_associd(stcb); 3171 if (notif_len > sizeof(struct sctp_assoc_change)) { 3172 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3173 i = 0; 3174 if (stcb->asoc.prsctp_supported == 1) { 3175 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3176 } 3177 if (stcb->asoc.auth_supported == 1) { 3178 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3179 } 3180 if (stcb->asoc.asconf_supported == 1) { 3181 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3182 } 3183 if (stcb->asoc.idata_supported == 1) { 3184 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3185 } 3186 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3187 if (stcb->asoc.reconfig_supported == 1) { 3188 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3189 } 3190 sac->sac_length += i; 3191 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3192 memcpy(sac->sac_info, abort, abort_len); 3193 sac->sac_length += abort_len; 3194 } 3195 } 3196 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3197 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3198 0, 0, stcb->asoc.context, 0, 0, 0, 3199 m_notify); 3200 if (control != NULL) { 3201 control->length = SCTP_BUF_LEN(m_notify); 3202 control->spec_flags = M_NOTIFICATION; 3203 /* not that we need this */ 3204 control->tail_mbuf = m_notify; 3205 sctp_add_to_readq(stcb->sctp_ep, stcb, 3206 control, 3207 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3208 so_locked); 3209 } else { 3210 sctp_m_freem(m_notify); 3211 } 3212 } 3213 /* 3214 * For 1-to-1 style sockets, we send up and error when an ABORT 3215 * comes in. 
3216 */ 3217 set_error: 3218 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3219 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3220 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3221 SOCK_LOCK(stcb->sctp_socket); 3222 if (from_peer) { 3223 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3224 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3225 stcb->sctp_socket->so_error = ECONNREFUSED; 3226 } else { 3227 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3228 stcb->sctp_socket->so_error = ECONNRESET; 3229 } 3230 } else { 3231 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 3232 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 3233 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3234 stcb->sctp_socket->so_error = ETIMEDOUT; 3235 } else { 3236 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3237 stcb->sctp_socket->so_error = ECONNABORTED; 3238 } 3239 } 3240 SOCK_UNLOCK(stcb->sctp_socket); 3241 } 3242 /* Wake ANY sleepers */ 3243 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3244 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3245 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3246 socantrcvmore(stcb->sctp_socket); 3247 } 3248 sorwakeup(stcb->sctp_socket); 3249 sowwakeup(stcb->sctp_socket); 3250 } 3251 3252 static void 3253 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3254 struct sockaddr *sa, uint32_t error, int so_locked) 3255 { 3256 struct mbuf *m_notify; 3257 struct sctp_paddr_change *spc; 3258 struct sctp_queued_to_read *control; 3259 3260 if ((stcb == NULL) || 3261 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3262 /* event not enabled */ 3263 return; 3264 } 3265 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3266 if (m_notify == NULL) 3267 return; 3268 SCTP_BUF_LEN(m_notify) = 0; 3269 spc = mtod(m_notify, struct sctp_paddr_change *); 3270 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3271 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3272 spc->spc_flags = 0; 3273 spc->spc_length = sizeof(struct sctp_paddr_change); 3274 switch (sa->sa_family) { 3275 #ifdef INET 3276 case AF_INET: 3277 #ifdef INET6 3278 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3279 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3280 (struct sockaddr_in6 *)&spc->spc_aaddr); 3281 } else { 3282 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3283 } 3284 #else 3285 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3286 #endif 3287 break; 3288 #endif 3289 #ifdef INET6 3290 case AF_INET6: 3291 { 3292 struct sockaddr_in6 *sin6; 3293 3294 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3295 3296 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3297 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3298 if (sin6->sin6_scope_id == 0) { 3299 /* recover scope_id for user */ 3300 (void)sa6_recoverscope(sin6); 3301 } else { 3302 /* clear embedded scope_id for user */ 3303 in6_clearscope(&sin6->sin6_addr); 3304 } 3305 } 3306 break; 3307 } 3308 #endif 3309 default: 3310 /* TSNH */ 3311 break; 3312 } 3313 spc->spc_state = state; 3314 spc->spc_error = error; 3315 spc->spc_assoc_id = sctp_get_associd(stcb); 3316 3317 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3318 SCTP_BUF_NEXT(m_notify) = NULL; 3319 3320 /* append to socket */ 3321 control = sctp_build_readq_entry(stcb, 
stcb->asoc.primary_destination, 3322 0, 0, stcb->asoc.context, 0, 0, 0, 3323 m_notify); 3324 if (control == NULL) { 3325 /* no memory */ 3326 sctp_m_freem(m_notify); 3327 return; 3328 } 3329 control->length = SCTP_BUF_LEN(m_notify); 3330 control->spec_flags = M_NOTIFICATION; 3331 /* not that we need this */ 3332 control->tail_mbuf = m_notify; 3333 sctp_add_to_readq(stcb->sctp_ep, stcb, 3334 control, 3335 &stcb->sctp_socket->so_rcv, 1, 3336 SCTP_READ_LOCK_NOT_HELD, 3337 so_locked); 3338 } 3339 3340 3341 static void 3342 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3343 struct sctp_tmit_chunk *chk, int so_locked) 3344 { 3345 struct mbuf *m_notify; 3346 struct sctp_send_failed *ssf; 3347 struct sctp_send_failed_event *ssfe; 3348 struct sctp_queued_to_read *control; 3349 struct sctp_chunkhdr *chkhdr; 3350 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3351 3352 if ((stcb == NULL) || 3353 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3354 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3355 /* event not enabled */ 3356 return; 3357 } 3358 3359 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3360 notifhdr_len = sizeof(struct sctp_send_failed_event); 3361 } else { 3362 notifhdr_len = sizeof(struct sctp_send_failed); 3363 } 3364 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3365 if (m_notify == NULL) 3366 /* no space left */ 3367 return; 3368 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3369 if (stcb->asoc.idata_supported) { 3370 chkhdr_len = sizeof(struct sctp_idata_chunk); 3371 } else { 3372 chkhdr_len = sizeof(struct sctp_data_chunk); 3373 } 3374 /* Use some defaults in case we can't access the chunk header */ 3375 if (chk->send_size >= chkhdr_len) { 3376 payload_len = chk->send_size - chkhdr_len; 3377 } else { 3378 payload_len = 0; 3379 } 3380 padding_len = 0; 3381 if (chk->data != NULL) { 3382 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3383 if (chkhdr != NULL) { 3384 chk_len = ntohs(chkhdr->chunk_length); 3385 if ((chk_len >= chkhdr_len) && 3386 (chk->send_size >= chk_len) && 3387 (chk->send_size - chk_len < 4)) { 3388 padding_len = chk->send_size - chk_len; 3389 payload_len = chk->send_size - chkhdr_len - padding_len; 3390 } 3391 } 3392 } 3393 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3394 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3395 memset(ssfe, 0, notifhdr_len); 3396 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3397 if (sent) { 3398 ssfe->ssfe_flags = SCTP_DATA_SENT; 3399 } else { 3400 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3401 } 3402 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3403 ssfe->ssfe_error = error; 3404 /* not exactly what the user sent in, but should be close :) */ 3405 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3406 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3407 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3408 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3409 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3410 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3411 } else { 3412 ssf = mtod(m_notify, struct sctp_send_failed *); 3413 memset(ssf, 0, notifhdr_len); 3414 ssf->ssf_type = SCTP_SEND_FAILED; 3415 if (sent) { 3416 ssf->ssf_flags = SCTP_DATA_SENT; 3417 } else { 3418 ssf->ssf_flags = SCTP_DATA_UNSENT; 3419 } 3420 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3421 ssf->ssf_error 
= error; 3422 /* not exactly what the user sent in, but should be close :) */ 3423 ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3424 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3425 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3426 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3427 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3428 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3429 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3430 } 3431 if (chk->data != NULL) { 3432 /* Trim off the sctp chunk header (it should be there) */ 3433 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3434 m_adj(chk->data, chkhdr_len); 3435 m_adj(chk->data, -padding_len); 3436 sctp_mbuf_crush(chk->data); 3437 chk->send_size -= (chkhdr_len + padding_len); 3438 } 3439 } 3440 SCTP_BUF_NEXT(m_notify) = chk->data; 3441 /* Steal off the mbuf */ 3442 chk->data = NULL; 3443 /* 3444 * For this case, we check the actual socket buffer, since the assoc 3445 * is going away we don't want to overfill the socket buffer for a 3446 * non-reader 3447 */ 3448 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3449 sctp_m_freem(m_notify); 3450 return; 3451 } 3452 /* append to socket */ 3453 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3454 0, 0, stcb->asoc.context, 0, 0, 0, 3455 m_notify); 3456 if (control == NULL) { 3457 /* no memory */ 3458 sctp_m_freem(m_notify); 3459 return; 3460 } 3461 control->length = SCTP_BUF_LEN(m_notify); 3462 control->spec_flags = M_NOTIFICATION; 3463 /* not that we need this */ 3464 control->tail_mbuf = m_notify; 3465 sctp_add_to_readq(stcb->sctp_ep, stcb, 3466 control, 3467 &stcb->sctp_socket->so_rcv, 1, 3468 SCTP_READ_LOCK_NOT_HELD, 3469 so_locked); 3470 } 3471 3472 3473 static void 3474 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3475 struct sctp_stream_queue_pending *sp, int so_locked) 3476 { 3477 struct mbuf *m_notify; 3478 struct sctp_send_failed *ssf; 3479 struct sctp_send_failed_event *ssfe; 3480 struct sctp_queued_to_read *control; 3481 int notifhdr_len; 3482 3483 if ((stcb == NULL) || 3484 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3485 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3486 /* event not enabled */ 3487 return; 3488 } 3489 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3490 notifhdr_len = sizeof(struct sctp_send_failed_event); 3491 } else { 3492 notifhdr_len = sizeof(struct sctp_send_failed); 3493 } 3494 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3495 if (m_notify == NULL) { 3496 /* no space left */ 3497 return; 3498 } 3499 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3500 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3501 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3502 memset(ssfe, 0, notifhdr_len); 3503 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3504 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3505 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3506 ssfe->ssfe_error = error; 3507 /* not exactly what the user sent in, but should be close :) */ 3508 ssfe->ssfe_info.snd_sid = sp->sid; 3509 if (sp->some_taken) { 3510 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3511 } else { 3512 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3513 } 3514 ssfe->ssfe_info.snd_ppid = sp->ppid; 3515 ssfe->ssfe_info.snd_context = sp->context; 3516 ssfe->ssfe_info.snd_assoc_id = 
sctp_get_associd(stcb); 3517 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3518 } else { 3519 ssf = mtod(m_notify, struct sctp_send_failed *); 3520 memset(ssf, 0, notifhdr_len); 3521 ssf->ssf_type = SCTP_SEND_FAILED; 3522 ssf->ssf_flags = SCTP_DATA_UNSENT; 3523 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3524 ssf->ssf_error = error; 3525 /* not exactly what the user sent in, but should be close :) */ 3526 ssf->ssf_info.sinfo_stream = sp->sid; 3527 ssf->ssf_info.sinfo_ssn = 0; 3528 if (sp->some_taken) { 3529 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3530 } else { 3531 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3532 } 3533 ssf->ssf_info.sinfo_ppid = sp->ppid; 3534 ssf->ssf_info.sinfo_context = sp->context; 3535 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3536 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3537 } 3538 SCTP_BUF_NEXT(m_notify) = sp->data; 3539 3540 /* Steal off the mbuf */ 3541 sp->data = NULL; 3542 /* 3543 * For this case, we check the actual socket buffer, since the assoc 3544 * is going away we don't want to overfill the socket buffer for a 3545 * non-reader 3546 */ 3547 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3548 sctp_m_freem(m_notify); 3549 return; 3550 } 3551 /* append to socket */ 3552 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3553 0, 0, stcb->asoc.context, 0, 0, 0, 3554 m_notify); 3555 if (control == NULL) { 3556 /* no memory */ 3557 sctp_m_freem(m_notify); 3558 return; 3559 } 3560 control->length = SCTP_BUF_LEN(m_notify); 3561 control->spec_flags = M_NOTIFICATION; 3562 /* not that we need this */ 3563 control->tail_mbuf = m_notify; 3564 sctp_add_to_readq(stcb->sctp_ep, stcb, 3565 control, 3566 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3567 } 3568 3569 3570 3571 static void 3572 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3573 { 3574 struct mbuf *m_notify; 3575 struct sctp_adaptation_event *sai; 3576 struct sctp_queued_to_read *control; 3577 3578 if ((stcb == NULL) || 3579 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3580 /* event not enabled */ 3581 return; 3582 } 3583 3584 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3585 if (m_notify == NULL) 3586 /* no space left */ 3587 return; 3588 SCTP_BUF_LEN(m_notify) = 0; 3589 sai = mtod(m_notify, struct sctp_adaptation_event *); 3590 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3591 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3592 sai->sai_flags = 0; 3593 sai->sai_length = sizeof(struct sctp_adaptation_event); 3594 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3595 sai->sai_assoc_id = sctp_get_associd(stcb); 3596 3597 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3598 SCTP_BUF_NEXT(m_notify) = NULL; 3599 3600 /* append to socket */ 3601 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3602 0, 0, stcb->asoc.context, 0, 0, 0, 3603 m_notify); 3604 if (control == NULL) { 3605 /* no memory */ 3606 sctp_m_freem(m_notify); 3607 return; 3608 } 3609 control->length = SCTP_BUF_LEN(m_notify); 3610 control->spec_flags = M_NOTIFICATION; 3611 /* not that we need this */ 3612 control->tail_mbuf = m_notify; 3613 sctp_add_to_readq(stcb->sctp_ep, stcb, 3614 control, 3615 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3616 } 3617 3618 /* This always must be called with the read-queue LOCKED in the INP */ 3619 static void 3620 
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3621 uint32_t val, int so_locked) 3622 { 3623 struct mbuf *m_notify; 3624 struct sctp_pdapi_event *pdapi; 3625 struct sctp_queued_to_read *control; 3626 struct sockbuf *sb; 3627 3628 if ((stcb == NULL) || 3629 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3630 /* event not enabled */ 3631 return; 3632 } 3633 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3634 return; 3635 } 3636 3637 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3638 if (m_notify == NULL) 3639 /* no space left */ 3640 return; 3641 SCTP_BUF_LEN(m_notify) = 0; 3642 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3643 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3644 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3645 pdapi->pdapi_flags = 0; 3646 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3647 pdapi->pdapi_indication = error; 3648 pdapi->pdapi_stream = (val >> 16); 3649 pdapi->pdapi_seq = (val & 0x0000ffff); 3650 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3651 3652 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3653 SCTP_BUF_NEXT(m_notify) = NULL; 3654 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3655 0, 0, stcb->asoc.context, 0, 0, 0, 3656 m_notify); 3657 if (control == NULL) { 3658 /* no memory */ 3659 sctp_m_freem(m_notify); 3660 return; 3661 } 3662 control->length = SCTP_BUF_LEN(m_notify); 3663 control->spec_flags = M_NOTIFICATION; 3664 /* not that we need this */ 3665 control->tail_mbuf = m_notify; 3666 sb = &stcb->sctp_socket->so_rcv; 3667 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3668 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3669 } 3670 sctp_sballoc(stcb, sb, m_notify); 3671 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3672 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3673 } 3674 control->end_added = 1; 3675 if (stcb->asoc.control_pdapi) 3676 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3677 else { 3678 /* we really should not see this case */ 3679 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3680 } 3681 if (stcb->sctp_ep && stcb->sctp_socket) { 3682 /* This should always be the case */ 3683 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3684 } 3685 } 3686 3687 static void 3688 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3689 { 3690 struct mbuf *m_notify; 3691 struct sctp_shutdown_event *sse; 3692 struct sctp_queued_to_read *control; 3693 3694 /* 3695 * For TCP model AND UDP connected sockets we will send an error up 3696 * when an SHUTDOWN completes 3697 */ 3698 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3699 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3700 /* mark socket closed for read/write and wakeup! 
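(socantsendmore() flags the socket's send side as shut down and wakes any blocked writer)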
*/ 3701 socantsendmore(stcb->sctp_socket); 3702 } 3703 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3704 /* event not enabled */ 3705 return; 3706 } 3707 3708 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3709 if (m_notify == NULL) 3710 /* no space left */ 3711 return; 3712 sse = mtod(m_notify, struct sctp_shutdown_event *); 3713 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3714 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3715 sse->sse_flags = 0; 3716 sse->sse_length = sizeof(struct sctp_shutdown_event); 3717 sse->sse_assoc_id = sctp_get_associd(stcb); 3718 3719 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3720 SCTP_BUF_NEXT(m_notify) = NULL; 3721 3722 /* append to socket */ 3723 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3724 0, 0, stcb->asoc.context, 0, 0, 0, 3725 m_notify); 3726 if (control == NULL) { 3727 /* no memory */ 3728 sctp_m_freem(m_notify); 3729 return; 3730 } 3731 control->length = SCTP_BUF_LEN(m_notify); 3732 control->spec_flags = M_NOTIFICATION; 3733 /* not that we need this */ 3734 control->tail_mbuf = m_notify; 3735 sctp_add_to_readq(stcb->sctp_ep, stcb, 3736 control, 3737 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3738 } 3739 3740 static void 3741 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3742 int so_locked) 3743 { 3744 struct mbuf *m_notify; 3745 struct sctp_sender_dry_event *event; 3746 struct sctp_queued_to_read *control; 3747 3748 if ((stcb == NULL) || 3749 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3750 /* event not enabled */ 3751 return; 3752 } 3753 3754 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3755 if (m_notify == NULL) { 3756 /* no space left */ 3757 return; 3758 } 3759 SCTP_BUF_LEN(m_notify) = 0; 3760 event = mtod(m_notify, struct sctp_sender_dry_event *); 3761 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3762 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3763 event->sender_dry_flags = 0; 3764 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3765 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3766 3767 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3768 SCTP_BUF_NEXT(m_notify) = NULL; 3769 3770 /* append to socket */ 3771 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3772 0, 0, stcb->asoc.context, 0, 0, 0, 3773 m_notify); 3774 if (control == NULL) { 3775 /* no memory */ 3776 sctp_m_freem(m_notify); 3777 return; 3778 } 3779 control->length = SCTP_BUF_LEN(m_notify); 3780 control->spec_flags = M_NOTIFICATION; 3781 /* not that we need this */ 3782 control->tail_mbuf = m_notify; 3783 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3784 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3785 } 3786 3787 3788 void 3789 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3790 { 3791 struct mbuf *m_notify; 3792 struct sctp_queued_to_read *control; 3793 struct sctp_stream_change_event *stradd; 3794 3795 if ((stcb == NULL) || 3796 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3797 /* event not enabled */ 3798 return; 3799 } 3800 if ((stcb->asoc.peer_req_out) && flag) { 3801 /* Peer made the request, don't tell the local user */ 3802 stcb->asoc.peer_req_out = 0; 3803 return; 3804 } 3805 stcb->asoc.peer_req_out = 0; 3806 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3807 if (m_notify == NULL) 3808 /* no space left */ 3809 return; 3810 SCTP_BUF_LEN(m_notify) = 0; 3811 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3812 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3813 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3814 stradd->strchange_flags = flag; 3815 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3816 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3817 stradd->strchange_instrms = numberin; 3818 stradd->strchange_outstrms = numberout; 3819 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3820 SCTP_BUF_NEXT(m_notify) = NULL; 3821 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3822 /* no space */ 3823 sctp_m_freem(m_notify); 3824 return; 3825 } 3826 /* append to socket */ 3827 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3828 0, 0, stcb->asoc.context, 0, 0, 0, 3829 m_notify); 3830 if (control == NULL) { 3831 /* no memory */ 3832 sctp_m_freem(m_notify); 3833 return; 3834 } 3835 control->length = SCTP_BUF_LEN(m_notify); 3836 control->spec_flags = M_NOTIFICATION; 3837 /* not that we need this */ 3838 control->tail_mbuf = m_notify; 3839 sctp_add_to_readq(stcb->sctp_ep, stcb, 3840 control, 3841 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3842 } 3843 3844 void 3845 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3846 { 3847 struct mbuf *m_notify; 3848 struct sctp_queued_to_read *control; 3849 struct sctp_assoc_reset_event *strasoc; 3850 3851 if ((stcb == NULL) || 3852 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3853 /* event not enabled */ 3854 return; 3855 } 3856 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3857 if (m_notify == NULL) 3858 /* no space left */ 3859 return; 3860 SCTP_BUF_LEN(m_notify) = 0; 3861 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3862 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3863 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3864 strasoc->assocreset_flags = flag; 3865 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3866 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3867 strasoc->assocreset_local_tsn = sending_tsn; 3868 strasoc->assocreset_remote_tsn = recv_tsn; 3869 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3870 SCTP_BUF_NEXT(m_notify) = NULL; 3871 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3872 /* no space */ 3873 sctp_m_freem(m_notify); 3874 return; 3875 } 3876 /* append to socket */ 3877 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3878 0, 0, stcb->asoc.context, 0, 0, 0, 3879 m_notify); 3880 if (control == NULL) { 3881 /* no memory */ 3882 sctp_m_freem(m_notify); 3883 return; 3884 } 3885 control->length = SCTP_BUF_LEN(m_notify); 3886 control->spec_flags = M_NOTIFICATION; 3887 /* not that we need this */ 3888 control->tail_mbuf = m_notify; 3889 sctp_add_to_readq(stcb->sctp_ep, stcb, 3890 control, 3891 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3892 } 3893 3894 3895 3896 static void 3897 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3898 int number_entries, uint16_t *list, int flag) 3899 { 3900 struct mbuf *m_notify; 3901 struct 
sctp_queued_to_read *control; 3902 struct sctp_stream_reset_event *strreset; 3903 int len; 3904 3905 if ((stcb == NULL) || 3906 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3907 /* event not enabled */ 3908 return; 3909 } 3910 3911 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3912 if (m_notify == NULL) 3913 /* no space left */ 3914 return; 3915 SCTP_BUF_LEN(m_notify) = 0; 3916 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3917 if (len > M_TRAILINGSPACE(m_notify)) { 3918 /* never enough room */ 3919 sctp_m_freem(m_notify); 3920 return; 3921 } 3922 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3923 memset(strreset, 0, len); 3924 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3925 strreset->strreset_flags = flag; 3926 strreset->strreset_length = len; 3927 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3928 if (number_entries) { 3929 int i; 3930 3931 for (i = 0; i < number_entries; i++) { 3932 strreset->strreset_stream_list[i] = ntohs(list[i]); 3933 } 3934 } 3935 SCTP_BUF_LEN(m_notify) = len; 3936 SCTP_BUF_NEXT(m_notify) = NULL; 3937 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3938 /* no space */ 3939 sctp_m_freem(m_notify); 3940 return; 3941 } 3942 /* append to socket */ 3943 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3944 0, 0, stcb->asoc.context, 0, 0, 0, 3945 m_notify); 3946 if (control == NULL) { 3947 /* no memory */ 3948 sctp_m_freem(m_notify); 3949 return; 3950 } 3951 control->length = SCTP_BUF_LEN(m_notify); 3952 control->spec_flags = M_NOTIFICATION; 3953 /* not that we need this */ 3954 control->tail_mbuf = m_notify; 3955 sctp_add_to_readq(stcb->sctp_ep, stcb, 3956 control, 3957 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3958 } 3959 3960 3961 static void 3962 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3963 { 3964 struct mbuf *m_notify; 3965 struct sctp_remote_error *sre; 3966 struct sctp_queued_to_read *control; 3967 unsigned int notif_len; 3968 uint16_t chunk_len; 3969 3970 if ((stcb == NULL) || 3971 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3972 return; 3973 } 3974 if (chunk != NULL) { 3975 chunk_len = ntohs(chunk->ch.chunk_length); 3976 /* 3977 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3978 * contiguous. 3979 */ 3980 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 3981 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 3982 } 3983 } else { 3984 chunk_len = 0; 3985 } 3986 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 3987 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3988 if (m_notify == NULL) { 3989 /* Retry with smaller value. 
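Fall back to just the sctp_remote_error header and drop the chunk payload.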
*/ 3990 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 3991 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3992 if (m_notify == NULL) { 3993 return; 3994 } 3995 } 3996 SCTP_BUF_NEXT(m_notify) = NULL; 3997 sre = mtod(m_notify, struct sctp_remote_error *); 3998 memset(sre, 0, notif_len); 3999 sre->sre_type = SCTP_REMOTE_ERROR; 4000 sre->sre_flags = 0; 4001 sre->sre_length = sizeof(struct sctp_remote_error); 4002 sre->sre_error = error; 4003 sre->sre_assoc_id = sctp_get_associd(stcb); 4004 if (notif_len > sizeof(struct sctp_remote_error)) { 4005 memcpy(sre->sre_data, chunk, chunk_len); 4006 sre->sre_length += chunk_len; 4007 } 4008 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4009 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4010 0, 0, stcb->asoc.context, 0, 0, 0, 4011 m_notify); 4012 if (control != NULL) { 4013 control->length = SCTP_BUF_LEN(m_notify); 4014 control->spec_flags = M_NOTIFICATION; 4015 /* not that we need this */ 4016 control->tail_mbuf = m_notify; 4017 sctp_add_to_readq(stcb->sctp_ep, stcb, 4018 control, 4019 &stcb->sctp_socket->so_rcv, 1, 4020 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4021 } else { 4022 sctp_m_freem(m_notify); 4023 } 4024 } 4025 4026 4027 void 4028 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4029 uint32_t error, void *data, int so_locked) 4030 { 4031 if ((stcb == NULL) || 4032 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4033 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4034 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4035 /* If the socket is gone we are out of here */ 4036 return; 4037 } 4038 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4039 return; 4040 } 4041 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4042 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4043 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4044 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4045 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4046 /* Don't report these in front states */ 4047 return; 4048 } 4049 } 4050 switch (notification) { 4051 case SCTP_NOTIFY_ASSOC_UP: 4052 if (stcb->asoc.assoc_up_sent == 0) { 4053 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 4054 stcb->asoc.assoc_up_sent = 1; 4055 } 4056 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4057 sctp_notify_adaptation_layer(stcb); 4058 } 4059 if (stcb->asoc.auth_supported == 0) { 4060 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4061 NULL, so_locked); 4062 } 4063 break; 4064 case SCTP_NOTIFY_ASSOC_DOWN: 4065 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 4066 break; 4067 case SCTP_NOTIFY_INTERFACE_DOWN: 4068 { 4069 struct sctp_nets *net; 4070 4071 net = (struct sctp_nets *)data; 4072 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4073 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4074 break; 4075 } 4076 case SCTP_NOTIFY_INTERFACE_UP: 4077 { 4078 struct sctp_nets *net; 4079 4080 net = (struct sctp_nets *)data; 4081 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4082 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4083 break; 4084 } 4085 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4086 { 4087 struct sctp_nets *net; 4088 4089 net = (struct sctp_nets *)data; 4090 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4091 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4092 break; 4093 } 4094 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4095 
sctp_notify_send_failed2(stcb, error, 4096 (struct sctp_stream_queue_pending *)data, so_locked); 4097 break; 4098 case SCTP_NOTIFY_SENT_DG_FAIL: 4099 sctp_notify_send_failed(stcb, 1, error, 4100 (struct sctp_tmit_chunk *)data, so_locked); 4101 break; 4102 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4103 sctp_notify_send_failed(stcb, 0, error, 4104 (struct sctp_tmit_chunk *)data, so_locked); 4105 break; 4106 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4107 { 4108 uint32_t val; 4109 4110 val = *((uint32_t *)data); 4111 4112 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4113 break; 4114 } 4115 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4116 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4117 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4118 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked); 4119 } else { 4120 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked); 4121 } 4122 break; 4123 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4124 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4125 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4126 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked); 4127 } else { 4128 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked); 4129 } 4130 break; 4131 case SCTP_NOTIFY_ASSOC_RESTART: 4132 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked); 4133 if (stcb->asoc.auth_supported == 0) { 4134 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4135 NULL, so_locked); 4136 } 4137 break; 4138 case SCTP_NOTIFY_STR_RESET_SEND: 4139 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4140 break; 4141 case SCTP_NOTIFY_STR_RESET_RECV: 4142 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4143 break; 4144 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4145 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4146 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4147 break; 4148 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4149 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4150 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4151 break; 4152 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4153 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4154 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4155 break; 4156 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4157 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4158 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4159 break; 4160 case SCTP_NOTIFY_ASCONF_ADD_IP: 4161 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4162 error, so_locked); 4163 break; 4164 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4165 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4166 error, so_locked); 4167 break; 4168 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4169 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4170 error, so_locked); 4171 break; 4172 case SCTP_NOTIFY_PEER_SHUTDOWN: 4173 sctp_notify_shutdown_event(stcb); 4174 break; 4175 case SCTP_NOTIFY_AUTH_NEW_KEY: 4176 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4177 (uint16_t)(uintptr_t)data, 4178 so_locked); 4179 break; 4180 case SCTP_NOTIFY_AUTH_FREE_KEY: 4181 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4182 (uint16_t)(uintptr_t)data, 4183 so_locked); 4184 break; 4185 case SCTP_NOTIFY_NO_PEER_AUTH: 4186 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4187 
(uint16_t)(uintptr_t)data, 4188 so_locked); 4189 break; 4190 case SCTP_NOTIFY_SENDER_DRY: 4191 sctp_notify_sender_dry_event(stcb, so_locked); 4192 break; 4193 case SCTP_NOTIFY_REMOTE_ERROR: 4194 sctp_notify_remote_error(stcb, error, data); 4195 break; 4196 default: 4197 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4198 __func__, notification, notification); 4199 break; 4200 } /* end switch */ 4201 } 4202 4203 void 4204 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked) 4205 { 4206 struct sctp_association *asoc; 4207 struct sctp_stream_out *outs; 4208 struct sctp_tmit_chunk *chk, *nchk; 4209 struct sctp_stream_queue_pending *sp, *nsp; 4210 int i; 4211 4212 if (stcb == NULL) { 4213 return; 4214 } 4215 asoc = &stcb->asoc; 4216 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4217 /* already being freed */ 4218 return; 4219 } 4220 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4221 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4222 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4223 return; 4224 } 4225 /* now through all the gunk freeing chunks */ 4226 if (holds_lock == 0) { 4227 SCTP_TCB_SEND_LOCK(stcb); 4228 } 4229 /* sent queue SHOULD be empty */ 4230 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4231 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4232 asoc->sent_queue_cnt--; 4233 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4234 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4235 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4236 #ifdef INVARIANTS 4237 } else { 4238 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4239 #endif 4240 } 4241 } 4242 if (chk->data != NULL) { 4243 sctp_free_bufspace(stcb, asoc, chk, 1); 4244 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4245 error, chk, so_locked); 4246 if (chk->data) { 4247 sctp_m_freem(chk->data); 4248 chk->data = NULL; 4249 } 4250 } 4251 sctp_free_a_chunk(stcb, chk, so_locked); 4252 /* sa_ignore FREED_MEMORY */ 4253 } 4254 /* pending send queue SHOULD be empty */ 4255 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4256 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4257 asoc->send_queue_cnt--; 4258 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4259 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4260 #ifdef INVARIANTS 4261 } else { 4262 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4263 #endif 4264 } 4265 if (chk->data != NULL) { 4266 sctp_free_bufspace(stcb, asoc, chk, 1); 4267 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4268 error, chk, so_locked); 4269 if (chk->data) { 4270 sctp_m_freem(chk->data); 4271 chk->data = NULL; 4272 } 4273 } 4274 sctp_free_a_chunk(stcb, chk, so_locked); 4275 /* sa_ignore FREED_MEMORY */ 4276 } 4277 for (i = 0; i < asoc->streamoutcnt; i++) { 4278 /* For each stream */ 4279 outs = &asoc->strmout[i]; 4280 /* clean up any sends there */ 4281 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4282 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4283 TAILQ_REMOVE(&outs->outqueue, sp, next); 4284 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1); 4285 sctp_free_spbufspace(stcb, asoc, sp); 4286 if (sp->data) { 4287 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4288 error, (void *)sp, so_locked); 4289 if (sp->data) { 4290 sctp_m_freem(sp->data); 4291 sp->data = NULL; 4292 sp->tail_mbuf = NULL; 4293 sp->length = 0; 4294 } 4295 } 4296 if (sp->net) { 4297 
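/* drop this pending send's reference on its destination net */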
sctp_free_remote_addr(sp->net); 4298 sp->net = NULL; 4299 } 4300 /* Free the chunk */ 4301 sctp_free_a_strmoq(stcb, sp, so_locked); 4302 /* sa_ignore FREED_MEMORY */ 4303 } 4304 } 4305 4306 if (holds_lock == 0) { 4307 SCTP_TCB_SEND_UNLOCK(stcb); 4308 } 4309 } 4310 4311 void 4312 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error, 4313 struct sctp_abort_chunk *abort, int so_locked) 4314 { 4315 if (stcb == NULL) { 4316 return; 4317 } 4318 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4319 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4320 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4321 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 4322 } 4323 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4324 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4325 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4326 return; 4327 } 4328 /* Tell them we lost the asoc */ 4329 sctp_report_all_outbound(stcb, error, 0, so_locked); 4330 if (from_peer) { 4331 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4332 } else { 4333 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4334 } 4335 } 4336 4337 void 4338 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4339 struct mbuf *m, int iphlen, 4340 struct sockaddr *src, struct sockaddr *dst, 4341 struct sctphdr *sh, struct mbuf *op_err, 4342 uint8_t mflowtype, uint32_t mflowid, 4343 uint32_t vrf_id, uint16_t port) 4344 { 4345 uint32_t vtag; 4346 4347 vtag = 0; 4348 if (stcb != NULL) { 4349 vtag = stcb->asoc.peer_vtag; 4350 vrf_id = stcb->asoc.vrf_id; 4351 } 4352 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4353 mflowtype, mflowid, inp->fibnum, 4354 vrf_id, port); 4355 if (stcb != NULL) { 4356 /* We have a TCB to abort, send notification too */ 4357 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 4358 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4359 /* Ok, now lets free it */ 4360 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4361 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4362 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4363 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4364 } 4365 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4366 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4367 } 4368 } 4369 #ifdef SCTP_ASOCLOG_OF_TSNS 4370 void 4371 sctp_print_out_track_log(struct sctp_tcb *stcb) 4372 { 4373 #ifdef NOSIY_PRINTS 4374 int i; 4375 4376 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4377 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4378 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4379 SCTP_PRINTF("None rcvd\n"); 4380 goto none_in; 4381 } 4382 if (stcb->asoc.tsn_in_wrapped) { 4383 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4384 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4385 stcb->asoc.in_tsnlog[i].tsn, 4386 stcb->asoc.in_tsnlog[i].strm, 4387 stcb->asoc.in_tsnlog[i].seq, 4388 stcb->asoc.in_tsnlog[i].flgs, 4389 stcb->asoc.in_tsnlog[i].sz); 4390 } 4391 } 4392 if (stcb->asoc.tsn_in_at) { 4393 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4394 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4395 stcb->asoc.in_tsnlog[i].tsn, 4396 stcb->asoc.in_tsnlog[i].strm, 4397 stcb->asoc.in_tsnlog[i].seq, 4398 stcb->asoc.in_tsnlog[i].flgs, 4399 stcb->asoc.in_tsnlog[i].sz); 4400 } 4401 } 4402 none_in: 4403 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4404 if ((stcb->asoc.tsn_out_at == 0) && 4405 
(stcb->asoc.tsn_out_wrapped == 0)) { 4406 SCTP_PRINTF("None sent\n"); 4407 } 4408 if (stcb->asoc.tsn_out_wrapped) { 4409 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4410 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4411 stcb->asoc.out_tsnlog[i].tsn, 4412 stcb->asoc.out_tsnlog[i].strm, 4413 stcb->asoc.out_tsnlog[i].seq, 4414 stcb->asoc.out_tsnlog[i].flgs, 4415 stcb->asoc.out_tsnlog[i].sz); 4416 } 4417 } 4418 if (stcb->asoc.tsn_out_at) { 4419 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4420 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4421 stcb->asoc.out_tsnlog[i].tsn, 4422 stcb->asoc.out_tsnlog[i].strm, 4423 stcb->asoc.out_tsnlog[i].seq, 4424 stcb->asoc.out_tsnlog[i].flgs, 4425 stcb->asoc.out_tsnlog[i].sz); 4426 } 4427 } 4428 #endif 4429 } 4430 #endif 4431 4432 void 4433 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4434 struct mbuf *op_err, 4435 int so_locked) 4436 { 4437 4438 if (stcb == NULL) { 4439 /* Got to have a TCB */ 4440 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4441 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4442 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4443 SCTP_CALLED_DIRECTLY_NOCMPSET); 4444 } 4445 } 4446 return; 4447 } else { 4448 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4449 } 4450 /* notify the peer */ 4451 sctp_send_abort_tcb(stcb, op_err, so_locked); 4452 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4453 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4454 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4455 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4456 } 4457 /* notify the ulp */ 4458 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4459 sctp_abort_notification(stcb, 0, 0, NULL, so_locked); 4460 } 4461 /* now free the asoc */ 4462 #ifdef SCTP_ASOCLOG_OF_TSNS 4463 sctp_print_out_track_log(stcb); 4464 #endif 4465 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4466 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4467 } 4468 4469 void 4470 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4471 struct sockaddr *src, struct sockaddr *dst, 4472 struct sctphdr *sh, struct sctp_inpcb *inp, 4473 struct mbuf *cause, 4474 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4475 uint32_t vrf_id, uint16_t port) 4476 { 4477 struct sctp_chunkhdr *ch, chunk_buf; 4478 unsigned int chk_length; 4479 int contains_init_chunk; 4480 4481 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4482 /* Generate a TO address for future reference */ 4483 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4484 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4485 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4486 SCTP_CALLED_DIRECTLY_NOCMPSET); 4487 } 4488 } 4489 contains_init_chunk = 0; 4490 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4491 sizeof(*ch), (uint8_t *)&chunk_buf); 4492 while (ch != NULL) { 4493 chk_length = ntohs(ch->chunk_length); 4494 if (chk_length < sizeof(*ch)) { 4495 /* break to abort land */ 4496 break; 4497 } 4498 switch (ch->chunk_type) { 4499 case SCTP_INIT: 4500 contains_init_chunk = 1; 4501 break; 4502 case SCTP_PACKET_DROPPED: 4503 /* we don't respond to pkt-dropped */ 4504 return; 4505 case SCTP_ABORT_ASSOCIATION: 4506 /* we don't respond with an ABORT to an ABORT */ 4507 return; 4508 case SCTP_SHUTDOWN_COMPLETE: 4509 /* 4510 * we ignore it since we are not waiting for it and 4511 * peer is gone 4512 */ 4513 return; 4514 case SCTP_SHUTDOWN_ACK: 4515 sctp_send_shutdown_complete2(src, dst, sh, 4516 mflowtype, mflowid, fibnum, 4517 vrf_id, port); 4518 return; 4519 
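/* any other chunk type is simply skipped; an OOTB ABORT may still be sent once the scan completes */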
default: 4520 break; 4521 } 4522 offset += SCTP_SIZE32(chk_length); 4523 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4524 sizeof(*ch), (uint8_t *)&chunk_buf); 4525 } 4526 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4527 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4528 (contains_init_chunk == 0))) { 4529 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4530 mflowtype, mflowid, fibnum, 4531 vrf_id, port); 4532 } 4533 } 4534 4535 /* 4536 * check the inbound datagram to make sure there is not an abort inside it, 4537 * if there is return 1, else return 0. 4538 */ 4539 int 4540 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill) 4541 { 4542 struct sctp_chunkhdr *ch; 4543 struct sctp_init_chunk *init_chk, chunk_buf; 4544 int offset; 4545 unsigned int chk_length; 4546 4547 offset = iphlen + sizeof(struct sctphdr); 4548 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4549 (uint8_t *)&chunk_buf); 4550 while (ch != NULL) { 4551 chk_length = ntohs(ch->chunk_length); 4552 if (chk_length < sizeof(*ch)) { 4553 /* packet is probably corrupt */ 4554 break; 4555 } 4556 /* we seem to be ok, is it an abort? */ 4557 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4558 /* yep, tell them */ 4559 return (1); 4560 } 4561 if (ch->chunk_type == SCTP_INITIATION) { 4562 /* need to update the Vtag */ 4563 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4564 offset, sizeof(*init_chk), (uint8_t *)&chunk_buf); 4565 if (init_chk != NULL) { 4566 *vtagfill = ntohl(init_chk->init.initiate_tag); 4567 } 4568 } 4569 /* Nope, move to the next chunk */ 4570 offset += SCTP_SIZE32(chk_length); 4571 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4572 sizeof(*ch), (uint8_t *)&chunk_buf); 4573 } 4574 return (0); 4575 } 4576 4577 /* 4578 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4579 * set (i.e. it's 0) so, create this function to compare link local scopes 4580 */ 4581 #ifdef INET6 4582 uint32_t 4583 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4584 { 4585 struct sockaddr_in6 a, b; 4586 4587 /* save copies */ 4588 a = *addr1; 4589 b = *addr2; 4590 4591 if (a.sin6_scope_id == 0) 4592 if (sa6_recoverscope(&a)) { 4593 /* can't get scope, so can't match */ 4594 return (0); 4595 } 4596 if (b.sin6_scope_id == 0) 4597 if (sa6_recoverscope(&b)) { 4598 /* can't get scope, so can't match */ 4599 return (0); 4600 } 4601 if (a.sin6_scope_id != b.sin6_scope_id) 4602 return (0); 4603 4604 return (1); 4605 } 4606 4607 /* 4608 * returns a sockaddr_in6 with embedded scope recovered and removed 4609 */ 4610 struct sockaddr_in6 * 4611 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4612 { 4613 /* check and strip embedded scope junk */ 4614 if (addr->sin6_family == AF_INET6) { 4615 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4616 if (addr->sin6_scope_id == 0) { 4617 *store = *addr; 4618 if (!sa6_recoverscope(store)) { 4619 /* use the recovered scope */ 4620 addr = store; 4621 } 4622 } else { 4623 /* else, return the original "to" addr */ 4624 in6_clearscope(&addr->sin6_addr); 4625 } 4626 } 4627 } 4628 return (addr); 4629 } 4630 #endif 4631 4632 /* 4633 * are the two addresses the same? 
currently a "scopeless" check; returns 1 4634 * if same, 0 if not 4635 */ 4636 int 4637 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4638 { 4639 4640 /* must be valid */ 4641 if (sa1 == NULL || sa2 == NULL) 4642 return (0); 4643 4644 /* must be the same family */ 4645 if (sa1->sa_family != sa2->sa_family) 4646 return (0); 4647 4648 switch (sa1->sa_family) { 4649 #ifdef INET6 4650 case AF_INET6: 4651 { 4652 /* IPv6 addresses */ 4653 struct sockaddr_in6 *sin6_1, *sin6_2; 4654 4655 sin6_1 = (struct sockaddr_in6 *)sa1; 4656 sin6_2 = (struct sockaddr_in6 *)sa2; 4657 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4658 sin6_2)); 4659 } 4660 #endif 4661 #ifdef INET 4662 case AF_INET: 4663 { 4664 /* IPv4 addresses */ 4665 struct sockaddr_in *sin_1, *sin_2; 4666 4667 sin_1 = (struct sockaddr_in *)sa1; 4668 sin_2 = (struct sockaddr_in *)sa2; 4669 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4670 } 4671 #endif 4672 default: 4673 /* we don't do these... */ 4674 return (0); 4675 } 4676 } 4677 4678 void 4679 sctp_print_address(struct sockaddr *sa) 4680 { 4681 #ifdef INET6 4682 char ip6buf[INET6_ADDRSTRLEN]; 4683 #endif 4684 4685 switch (sa->sa_family) { 4686 #ifdef INET6 4687 case AF_INET6: 4688 { 4689 struct sockaddr_in6 *sin6; 4690 4691 sin6 = (struct sockaddr_in6 *)sa; 4692 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4693 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4694 ntohs(sin6->sin6_port), 4695 sin6->sin6_scope_id); 4696 break; 4697 } 4698 #endif 4699 #ifdef INET 4700 case AF_INET: 4701 { 4702 struct sockaddr_in *sin; 4703 unsigned char *p; 4704 4705 sin = (struct sockaddr_in *)sa; 4706 p = (unsigned char *)&sin->sin_addr; 4707 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4708 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4709 break; 4710 } 4711 #endif 4712 default: 4713 SCTP_PRINTF("?\n"); 4714 break; 4715 } 4716 } 4717 4718 void 4719 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4720 struct sctp_inpcb *new_inp, 4721 struct sctp_tcb *stcb, 4722 int waitflags) 4723 { 4724 /* 4725 * go through our old INP and pull off any control structures that 4726 * belong to stcb and move them to the new inp. 4727 */ 4728 struct socket *old_so, *new_so; 4729 struct sctp_queued_to_read *control, *nctl; 4730 struct sctp_readhead tmp_queue; 4731 struct mbuf *m; 4732 int error = 0; 4733 4734 old_so = old_inp->sctp_socket; 4735 new_so = new_inp->sctp_socket; 4736 TAILQ_INIT(&tmp_queue); 4737 error = sblock(&old_so->so_rcv, waitflags); 4738 if (error) { 4739 /* 4740 * Gak, can't get sblock, we have a problem. data will be 4741 * left stranded.. and we don't dare look at it since the 4742 * other thread may be reading something. Oh well, it's a 4743 * screwed up app that does a peeloff OR an accept while 4744 * reading from the main socket... actually it's only the 4745 * peeloff() case, since I think read will fail on a 4746 * listening socket.. 4747 */ 4748 return; 4749 } 4750 /* lock the socket buffers */ 4751 SCTP_INP_READ_LOCK(old_inp); 4752 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4753 /* Pull off all for our target stcb */ 4754 if (control->stcb == stcb) { 4755 /* remove it, we want it */ 4756 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4757 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4758 m = control->data; 4759 while (m) { 4760 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4761 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ?
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4762 } 4763 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4764 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4765 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4766 } 4767 m = SCTP_BUF_NEXT(m); 4768 } 4769 } 4770 } 4771 SCTP_INP_READ_UNLOCK(old_inp); 4772 /* Remove the sb-lock on the old socket */ 4773 4774 sbunlock(&old_so->so_rcv); 4775 /* Now we move them over to the new socket buffer */ 4776 SCTP_INP_READ_LOCK(new_inp); 4777 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4778 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4779 m = control->data; 4780 while (m) { 4781 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4782 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4783 } 4784 sctp_sballoc(stcb, &new_so->so_rcv, m); 4785 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4786 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4787 } 4788 m = SCTP_BUF_NEXT(m); 4789 } 4790 } 4791 SCTP_INP_READ_UNLOCK(new_inp); 4792 } 4793 4794 void 4795 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4796 struct sctp_tcb *stcb, 4797 int so_locked 4798 SCTP_UNUSED 4799 ) 4800 { 4801 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4802 sctp_sorwakeup(inp, inp->sctp_socket); 4803 } 4804 } 4805 4806 void 4807 sctp_add_to_readq(struct sctp_inpcb *inp, 4808 struct sctp_tcb *stcb, 4809 struct sctp_queued_to_read *control, 4810 struct sockbuf *sb, 4811 int end, 4812 int inp_read_lock_held, 4813 int so_locked) 4814 { 4815 /* 4816 * Here we must place the control on the end of the socket read 4817 * queue AND increment sb_cc so that select will work properly on 4818 * read. 4819 */ 4820 struct mbuf *m, *prev = NULL; 4821 4822 if (inp == NULL) { 4823 /* Gak, TSNH!! */ 4824 #ifdef INVARIANTS 4825 panic("Gak, inp NULL on add_to_readq"); 4826 #endif 4827 return; 4828 } 4829 if (inp_read_lock_held == 0) 4830 SCTP_INP_READ_LOCK(inp); 4831 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4832 if (!control->on_strm_q) { 4833 sctp_free_remote_addr(control->whoFrom); 4834 if (control->data) { 4835 sctp_m_freem(control->data); 4836 control->data = NULL; 4837 } 4838 sctp_free_a_readq(stcb, control); 4839 } 4840 if (inp_read_lock_held == 0) 4841 SCTP_INP_READ_UNLOCK(inp); 4842 return; 4843 } 4844 if (!(control->spec_flags & M_NOTIFICATION)) { 4845 atomic_add_int(&inp->total_recvs, 1); 4846 if (!control->do_not_ref_stcb) { 4847 atomic_add_int(&stcb->total_recvs, 1); 4848 } 4849 } 4850 m = control->data; 4851 control->held_length = 0; 4852 control->length = 0; 4853 while (m) { 4854 if (SCTP_BUF_LEN(m) == 0) { 4855 /* Skip mbufs with NO length */ 4856 if (prev == NULL) { 4857 /* First one */ 4858 control->data = sctp_m_free(m); 4859 m = control->data; 4860 } else { 4861 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4862 m = SCTP_BUF_NEXT(prev); 4863 } 4864 if (m == NULL) { 4865 control->tail_mbuf = prev; 4866 } 4867 continue; 4868 } 4869 prev = m; 4870 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4871 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4872 } 4873 sctp_sballoc(stcb, sb, m); 4874 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4875 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 4876 } 4877 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4878 m = SCTP_BUF_NEXT(m); 4879 } 4880 if (prev != NULL) { 4881 control->tail_mbuf = prev; 4882 } else { 4883 /* Everything got collapsed out?? */ 4884 if (!control->on_strm_q) { 4885 sctp_free_remote_addr(control->whoFrom); 4886 sctp_free_a_readq(stcb, control); 4887 } 4888 if (inp_read_lock_held == 0) 4889 SCTP_INP_READ_UNLOCK(inp); 4890 return; 4891 } 4892 if (end) { 4893 control->end_added = 1; 4894 } 4895 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4896 control->on_read_q = 1; 4897 if (inp_read_lock_held == 0) 4898 SCTP_INP_READ_UNLOCK(inp); 4899 if (inp && inp->sctp_socket) { 4900 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4901 } 4902 } 4903 4904 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4905 *************ALTERNATE ROUTING CODE 4906 */ 4907 4908 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4909 *************ALTERNATE ROUTING CODE 4910 */ 4911 4912 struct mbuf * 4913 sctp_generate_cause(uint16_t code, char *info) 4914 { 4915 struct mbuf *m; 4916 struct sctp_gen_error_cause *cause; 4917 size_t info_len; 4918 uint16_t len; 4919 4920 if ((code == 0) || (info == NULL)) { 4921 return (NULL); 4922 } 4923 info_len = strlen(info); 4924 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4925 return (NULL); 4926 } 4927 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4928 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4929 if (m != NULL) { 4930 SCTP_BUF_LEN(m) = len; 4931 cause = mtod(m, struct sctp_gen_error_cause *); 4932 cause->code = htons(code); 4933 cause->length = htons(len); 4934 memcpy(cause->info, info, info_len); 4935 } 4936 return (m); 4937 } 4938 4939 struct mbuf * 4940 sctp_generate_no_user_data_cause(uint32_t tsn) 4941 { 4942 struct mbuf *m; 4943 struct sctp_error_no_user_data *no_user_data_cause; 4944 uint16_t len; 4945 4946 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 4947 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4948 if (m != NULL) { 4949 SCTP_BUF_LEN(m) = len; 4950 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 4951 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 4952 no_user_data_cause->cause.length = htons(len); 4953 no_user_data_cause->tsn = htonl(tsn); 4954 } 4955 return (m); 4956 } 4957 4958 #ifdef SCTP_MBCNT_LOGGING 4959 void 4960 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4961 struct sctp_tmit_chunk *tp1, int chk_cnt) 4962 { 4963 if (tp1->data == NULL) { 4964 return; 4965 } 4966 asoc->chunks_on_out_queue -= chk_cnt; 4967 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 4968 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4969 asoc->total_output_queue_size, 4970 tp1->book_size, 4971 0, 4972 tp1->mbcnt); 4973 } 4974 if (asoc->total_output_queue_size >= tp1->book_size) { 4975 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4976 } else { 4977 asoc->total_output_queue_size = 0; 4978 } 4979 4980 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4981 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4982 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 4983 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 4984 } else { 4985 stcb->sctp_socket->so_snd.sb_cc = 0; 4986 4987 } 4988 } 4989 } 4990 4991 #endif 4992 4993 int 4994 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 4995 uint8_t sent, int 
so_locked) 4996 { 4997 struct sctp_stream_out *strq; 4998 struct sctp_tmit_chunk *chk = NULL, *tp2; 4999 struct sctp_stream_queue_pending *sp; 5000 uint32_t mid; 5001 uint16_t sid; 5002 uint8_t foundeom = 0; 5003 int ret_sz = 0; 5004 int notdone; 5005 int do_wakeup_routine = 0; 5006 5007 sid = tp1->rec.data.sid; 5008 mid = tp1->rec.data.mid; 5009 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5010 stcb->asoc.abandoned_sent[0]++; 5011 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5012 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5013 #if defined(SCTP_DETAILED_STR_STATS) 5014 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5015 #endif 5016 } else { 5017 stcb->asoc.abandoned_unsent[0]++; 5018 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5019 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5020 #if defined(SCTP_DETAILED_STR_STATS) 5021 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5022 #endif 5023 } 5024 do { 5025 ret_sz += tp1->book_size; 5026 if (tp1->data != NULL) { 5027 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5028 sctp_flight_size_decrease(tp1); 5029 sctp_total_flight_decrease(stcb, tp1); 5030 } 5031 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5032 stcb->asoc.peers_rwnd += tp1->send_size; 5033 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5034 if (sent) { 5035 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5036 } else { 5037 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5038 } 5039 if (tp1->data) { 5040 sctp_m_freem(tp1->data); 5041 tp1->data = NULL; 5042 } 5043 do_wakeup_routine = 1; 5044 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5045 stcb->asoc.sent_queue_cnt_removeable--; 5046 } 5047 } 5048 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5049 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5050 SCTP_DATA_NOT_FRAG) { 5051 /* not frag'ed we ae done */ 5052 notdone = 0; 5053 foundeom = 1; 5054 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5055 /* end of frag, we are done */ 5056 notdone = 0; 5057 foundeom = 1; 5058 } else { 5059 /* 5060 * Its a begin or middle piece, we must mark all of 5061 * it 5062 */ 5063 notdone = 1; 5064 tp1 = TAILQ_NEXT(tp1, sctp_next); 5065 } 5066 } while (tp1 && notdone); 5067 if (foundeom == 0) { 5068 /* 5069 * The multi-part message was scattered across the send and 5070 * sent queue. 5071 */ 5072 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5073 if ((tp1->rec.data.sid != sid) || 5074 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5075 break; 5076 } 5077 /* 5078 * save to chk in case we have some on stream out 5079 * queue. If so and we have an un-transmitted one we 5080 * don't have to fudge the TSN. 5081 */ 5082 chk = tp1; 5083 ret_sz += tp1->book_size; 5084 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5085 if (sent) { 5086 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5087 } else { 5088 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5089 } 5090 if (tp1->data) { 5091 sctp_m_freem(tp1->data); 5092 tp1->data = NULL; 5093 } 5094 /* No flight involved here book the size to 0 */ 5095 tp1->book_size = 0; 5096 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5097 foundeom = 1; 5098 } 5099 do_wakeup_routine = 1; 5100 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5101 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5102 /* 5103 * on to the sent queue so we can wait for it to be 5104 * passed by. 
5105 */ 5106 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5107 sctp_next); 5108 stcb->asoc.send_queue_cnt--; 5109 stcb->asoc.sent_queue_cnt++; 5110 } 5111 } 5112 if (foundeom == 0) { 5113 /* 5114 * Still no eom found. That means there is stuff left on the 5115 * stream out queue.. yuck. 5116 */ 5117 SCTP_TCB_SEND_LOCK(stcb); 5118 strq = &stcb->asoc.strmout[sid]; 5119 sp = TAILQ_FIRST(&strq->outqueue); 5120 if (sp != NULL) { 5121 sp->discard_rest = 1; 5122 /* 5123 * We may need to put a chunk on the queue that 5124 * holds the TSN that would have been sent with the 5125 * LAST bit. 5126 */ 5127 if (chk == NULL) { 5128 /* Yep, we have to */ 5129 sctp_alloc_a_chunk(stcb, chk); 5130 if (chk == NULL) { 5131 /* 5132 * we are hosed. All we can do is 5133 * nothing.. which will cause an 5134 * abort if the peer is paying 5135 * attention. 5136 */ 5137 goto oh_well; 5138 } 5139 memset(chk, 0, sizeof(*chk)); 5140 chk->rec.data.rcv_flags = 0; 5141 chk->sent = SCTP_FORWARD_TSN_SKIP; 5142 chk->asoc = &stcb->asoc; 5143 if (stcb->asoc.idata_supported == 0) { 5144 if (sp->sinfo_flags & SCTP_UNORDERED) { 5145 chk->rec.data.mid = 0; 5146 } else { 5147 chk->rec.data.mid = strq->next_mid_ordered; 5148 } 5149 } else { 5150 if (sp->sinfo_flags & SCTP_UNORDERED) { 5151 chk->rec.data.mid = strq->next_mid_unordered; 5152 } else { 5153 chk->rec.data.mid = strq->next_mid_ordered; 5154 } 5155 } 5156 chk->rec.data.sid = sp->sid; 5157 chk->rec.data.ppid = sp->ppid; 5158 chk->rec.data.context = sp->context; 5159 chk->flags = sp->act_flags; 5160 chk->whoTo = NULL; 5161 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5162 strq->chunks_on_queues++; 5163 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5164 stcb->asoc.sent_queue_cnt++; 5165 stcb->asoc.pr_sctp_cnt++; 5166 } 5167 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5168 if (sp->sinfo_flags & SCTP_UNORDERED) { 5169 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5170 } 5171 if (stcb->asoc.idata_supported == 0) { 5172 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5173 strq->next_mid_ordered++; 5174 } 5175 } else { 5176 if (sp->sinfo_flags & SCTP_UNORDERED) { 5177 strq->next_mid_unordered++; 5178 } else { 5179 strq->next_mid_ordered++; 5180 } 5181 } 5182 oh_well: 5183 if (sp->data) { 5184 /* 5185 * Pull any data to free up the SB and allow 5186 * sender to "add more" while we will throw 5187 * away :-) 5188 */ 5189 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5190 ret_sz += sp->length; 5191 do_wakeup_routine = 1; 5192 sp->some_taken = 1; 5193 sctp_m_freem(sp->data); 5194 sp->data = NULL; 5195 sp->tail_mbuf = NULL; 5196 sp->length = 0; 5197 } 5198 } 5199 SCTP_TCB_SEND_UNLOCK(stcb); 5200 } 5201 if (do_wakeup_routine) { 5202 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5203 } 5204 return (ret_sz); 5205 } 5206 5207 /* 5208 * checks to see if the given address, sa, is one that is currently known by 5209 * the kernel note: can't distinguish the same address on multiple interfaces 5210 * and doesn't handle multiple addresses with different zone/scope id's note: 5211 * ifa_ifwithaddr() compares the entire sockaddr struct 5212 */ 5213 struct sctp_ifa * 5214 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5215 int holds_lock) 5216 { 5217 struct sctp_laddr *laddr; 5218 5219 if (holds_lock == 0) { 5220 SCTP_INP_RLOCK(inp); 5221 } 5222 5223 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5224 if (laddr->ifa == NULL) 5225 continue; 5226 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5227 continue; 5228 
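/* families match; compare the actual addresses below */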
#ifdef INET 5229 if (addr->sa_family == AF_INET) { 5230 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5231 laddr->ifa->address.sin.sin_addr.s_addr) { 5232 /* found him. */ 5233 break; 5234 } 5235 } 5236 #endif 5237 #ifdef INET6 5238 if (addr->sa_family == AF_INET6) { 5239 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5240 &laddr->ifa->address.sin6)) { 5241 /* found him. */ 5242 break; 5243 } 5244 } 5245 #endif 5246 } 5247 if (holds_lock == 0) { 5248 SCTP_INP_RUNLOCK(inp); 5249 } 5250 return (laddr->ifa); 5251 } 5252 5253 uint32_t 5254 sctp_get_ifa_hash_val(struct sockaddr *addr) 5255 { 5256 switch (addr->sa_family) { 5257 #ifdef INET 5258 case AF_INET: 5259 { 5260 struct sockaddr_in *sin; 5261 5262 sin = (struct sockaddr_in *)addr; 5263 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5264 } 5265 #endif 5266 #ifdef INET6 5267 case AF_INET6: 5268 { 5269 struct sockaddr_in6 *sin6; 5270 uint32_t hash_of_addr; 5271 5272 sin6 = (struct sockaddr_in6 *)addr; 5273 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5274 sin6->sin6_addr.s6_addr32[1] + 5275 sin6->sin6_addr.s6_addr32[2] + 5276 sin6->sin6_addr.s6_addr32[3]); 5277 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5278 return (hash_of_addr); 5279 } 5280 #endif 5281 default: 5282 break; 5283 } 5284 return (0); 5285 } 5286 5287 struct sctp_ifa * 5288 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5289 { 5290 struct sctp_ifa *sctp_ifap; 5291 struct sctp_vrf *vrf; 5292 struct sctp_ifalist *hash_head; 5293 uint32_t hash_of_addr; 5294 5295 if (holds_lock == 0) 5296 SCTP_IPI_ADDR_RLOCK(); 5297 5298 vrf = sctp_find_vrf(vrf_id); 5299 if (vrf == NULL) { 5300 if (holds_lock == 0) 5301 SCTP_IPI_ADDR_RUNLOCK(); 5302 return (NULL); 5303 } 5304 5305 hash_of_addr = sctp_get_ifa_hash_val(addr); 5306 5307 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5308 if (hash_head == NULL) { 5309 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5310 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5311 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5312 sctp_print_address(addr); 5313 SCTP_PRINTF("No such bucket for address\n"); 5314 if (holds_lock == 0) 5315 SCTP_IPI_ADDR_RUNLOCK(); 5316 5317 return (NULL); 5318 } 5319 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5320 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5321 continue; 5322 #ifdef INET 5323 if (addr->sa_family == AF_INET) { 5324 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5325 sctp_ifap->address.sin.sin_addr.s_addr) { 5326 /* found him. */ 5327 break; 5328 } 5329 } 5330 #endif 5331 #ifdef INET6 5332 if (addr->sa_family == AF_INET6) { 5333 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5334 &sctp_ifap->address.sin6)) { 5335 /* found him. */ 5336 break; 5337 } 5338 } 5339 #endif 5340 } 5341 if (holds_lock == 0) 5342 SCTP_IPI_ADDR_RUNLOCK(); 5343 return (sctp_ifap); 5344 } 5345 5346 static void 5347 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5348 uint32_t rwnd_req) 5349 { 5350 /* User pulled some data, do we need a rwnd update? 
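Only worth the lock overhead once the window has grown by at least rwnd_req bytes since the last report.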
*/ 5351 struct epoch_tracker et; 5352 int r_unlocked = 0; 5353 uint32_t dif, rwnd; 5354 struct socket *so = NULL; 5355 5356 if (stcb == NULL) 5357 return; 5358 5359 atomic_add_int(&stcb->asoc.refcnt, 1); 5360 5361 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5362 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5363 /* Pre-check If we are freeing no update */ 5364 goto no_lock; 5365 } 5366 SCTP_INP_INCR_REF(stcb->sctp_ep); 5367 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5368 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5369 goto out; 5370 } 5371 so = stcb->sctp_socket; 5372 if (so == NULL) { 5373 goto out; 5374 } 5375 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5376 /* Have you have freed enough to look */ 5377 *freed_so_far = 0; 5378 /* Yep, its worth a look and the lock overhead */ 5379 5380 /* Figure out what the rwnd would be */ 5381 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5382 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5383 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5384 } else { 5385 dif = 0; 5386 } 5387 if (dif >= rwnd_req) { 5388 if (hold_rlock) { 5389 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5390 r_unlocked = 1; 5391 } 5392 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5393 /* 5394 * One last check before we allow the guy possibly 5395 * to get in. There is a race, where the guy has not 5396 * reached the gate. In that case 5397 */ 5398 goto out; 5399 } 5400 SCTP_TCB_LOCK(stcb); 5401 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5402 /* No reports here */ 5403 SCTP_TCB_UNLOCK(stcb); 5404 goto out; 5405 } 5406 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5407 NET_EPOCH_ENTER(et); 5408 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5409 5410 sctp_chunk_output(stcb->sctp_ep, stcb, 5411 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5412 /* make sure no timer is running */ 5413 NET_EPOCH_EXIT(et); 5414 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5415 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5416 SCTP_TCB_UNLOCK(stcb); 5417 } else { 5418 /* Update how much we have pending */ 5419 stcb->freed_by_sorcv_sincelast = dif; 5420 } 5421 out: 5422 if (so && r_unlocked && hold_rlock) { 5423 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5424 } 5425 5426 SCTP_INP_DECR_REF(stcb->sctp_ep); 5427 no_lock: 5428 atomic_add_int(&stcb->asoc.refcnt, -1); 5429 return; 5430 } 5431 5432 int 5433 sctp_sorecvmsg(struct socket *so, 5434 struct uio *uio, 5435 struct mbuf **mp, 5436 struct sockaddr *from, 5437 int fromlen, 5438 int *msg_flags, 5439 struct sctp_sndrcvinfo *sinfo, 5440 int filling_sinfo) 5441 { 5442 /* 5443 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5444 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5445 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
5446 * On the way out we may send out any combination of: 5447 * MSG_NOTIFICATION MSG_EOR 5448 * 5449 */ 5450 struct sctp_inpcb *inp = NULL; 5451 ssize_t my_len = 0; 5452 ssize_t cp_len = 0; 5453 int error = 0; 5454 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5455 struct mbuf *m = NULL; 5456 struct sctp_tcb *stcb = NULL; 5457 int wakeup_read_socket = 0; 5458 int freecnt_applied = 0; 5459 int out_flags = 0, in_flags = 0; 5460 int block_allowed = 1; 5461 uint32_t freed_so_far = 0; 5462 ssize_t copied_so_far = 0; 5463 int in_eeor_mode = 0; 5464 int no_rcv_needed = 0; 5465 uint32_t rwnd_req = 0; 5466 int hold_sblock = 0; 5467 int hold_rlock = 0; 5468 ssize_t slen = 0; 5469 uint32_t held_length = 0; 5470 int sockbuf_lock = 0; 5471 5472 if (uio == NULL) { 5473 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5474 return (EINVAL); 5475 } 5476 5477 if (msg_flags) { 5478 in_flags = *msg_flags; 5479 if (in_flags & MSG_PEEK) 5480 SCTP_STAT_INCR(sctps_read_peeks); 5481 } else { 5482 in_flags = 0; 5483 } 5484 slen = uio->uio_resid; 5485 5486 /* Pull in and set up our int flags */ 5487 if (in_flags & MSG_OOB) { 5488 /* Out of band's NOT supported */ 5489 return (EOPNOTSUPP); 5490 } 5491 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5492 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5493 return (EINVAL); 5494 } 5495 if ((in_flags & (MSG_DONTWAIT 5496 | MSG_NBIO 5497 )) || 5498 SCTP_SO_IS_NBIO(so)) { 5499 block_allowed = 0; 5500 } 5501 /* setup the endpoint */ 5502 inp = (struct sctp_inpcb *)so->so_pcb; 5503 if (inp == NULL) { 5504 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5505 return (EFAULT); 5506 } 5507 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5508 /* Must be at least a MTU's worth */ 5509 if (rwnd_req < SCTP_MIN_RWND) 5510 rwnd_req = SCTP_MIN_RWND; 5511 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5512 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5513 sctp_misc_ints(SCTP_SORECV_ENTER, 5514 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5515 } 5516 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5517 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5518 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5519 } 5520 5521 5522 error = sblock(&so->so_rcv, (block_allowed ? 
SBL_WAIT : 0)); 5523 if (error) { 5524 goto release_unlocked; 5525 } 5526 sockbuf_lock = 1; 5527 restart: 5528 5529 restart_nosblocks: 5530 if (hold_sblock == 0) { 5531 SOCKBUF_LOCK(&so->so_rcv); 5532 hold_sblock = 1; 5533 } 5534 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5535 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5536 goto out; 5537 } 5538 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5539 if (so->so_error) { 5540 error = so->so_error; 5541 if ((in_flags & MSG_PEEK) == 0) 5542 so->so_error = 0; 5543 goto out; 5544 } else { 5545 if (so->so_rcv.sb_cc == 0) { 5546 /* indicate EOF */ 5547 error = 0; 5548 goto out; 5549 } 5550 } 5551 } 5552 if (so->so_rcv.sb_cc <= held_length) { 5553 if (so->so_error) { 5554 error = so->so_error; 5555 if ((in_flags & MSG_PEEK) == 0) { 5556 so->so_error = 0; 5557 } 5558 goto out; 5559 } 5560 if ((so->so_rcv.sb_cc == 0) && 5561 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5562 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5563 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5564 /* 5565 * For active open side clear flags for 5566 * re-use passive open is blocked by 5567 * connect. 5568 */ 5569 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5570 /* 5571 * You were aborted, passive side 5572 * always hits here 5573 */ 5574 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5575 error = ECONNRESET; 5576 } 5577 so->so_state &= ~(SS_ISCONNECTING | 5578 SS_ISDISCONNECTING | 5579 SS_ISCONFIRMING | 5580 SS_ISCONNECTED); 5581 if (error == 0) { 5582 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5583 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5584 error = ENOTCONN; 5585 } 5586 } 5587 goto out; 5588 } 5589 } 5590 if (block_allowed) { 5591 error = sbwait(&so->so_rcv); 5592 if (error) { 5593 goto out; 5594 } 5595 held_length = 0; 5596 goto restart_nosblocks; 5597 } else { 5598 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5599 error = EWOULDBLOCK; 5600 goto out; 5601 } 5602 } 5603 if (hold_sblock == 1) { 5604 SOCKBUF_UNLOCK(&so->so_rcv); 5605 hold_sblock = 0; 5606 } 5607 /* we possibly have data we can read */ 5608 /* sa_ignore FREED_MEMORY */ 5609 control = TAILQ_FIRST(&inp->read_queue); 5610 if (control == NULL) { 5611 /* 5612 * This could be happening since the appender did the 5613 * increment but as not yet did the tailq insert onto the 5614 * read_queue 5615 */ 5616 if (hold_rlock == 0) { 5617 SCTP_INP_READ_LOCK(inp); 5618 } 5619 control = TAILQ_FIRST(&inp->read_queue); 5620 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5621 #ifdef INVARIANTS 5622 panic("Huh, its non zero and nothing on control?"); 5623 #endif 5624 so->so_rcv.sb_cc = 0; 5625 } 5626 SCTP_INP_READ_UNLOCK(inp); 5627 hold_rlock = 0; 5628 goto restart; 5629 } 5630 5631 if ((control->length == 0) && 5632 (control->do_not_ref_stcb)) { 5633 /* 5634 * Clean up code for freeing assoc that left behind a 5635 * pdapi.. maybe a peer in EEOR that just closed after 5636 * sending and never indicated a EOR. 5637 */ 5638 if (hold_rlock == 0) { 5639 hold_rlock = 1; 5640 SCTP_INP_READ_LOCK(inp); 5641 } 5642 control->held_length = 0; 5643 if (control->data) { 5644 /* Hmm there is data here .. 
fix */ 5645 struct mbuf *m_tmp; 5646 int cnt = 0; 5647 5648 m_tmp = control->data; 5649 while (m_tmp) { 5650 cnt += SCTP_BUF_LEN(m_tmp); 5651 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5652 control->tail_mbuf = m_tmp; 5653 control->end_added = 1; 5654 } 5655 m_tmp = SCTP_BUF_NEXT(m_tmp); 5656 } 5657 control->length = cnt; 5658 } else { 5659 /* remove it */ 5660 TAILQ_REMOVE(&inp->read_queue, control, next); 5661 /* Add back any hiddend data */ 5662 sctp_free_remote_addr(control->whoFrom); 5663 sctp_free_a_readq(stcb, control); 5664 } 5665 if (hold_rlock) { 5666 hold_rlock = 0; 5667 SCTP_INP_READ_UNLOCK(inp); 5668 } 5669 goto restart; 5670 } 5671 if ((control->length == 0) && 5672 (control->end_added == 1)) { 5673 /* 5674 * Do we also need to check for (control->pdapi_aborted == 5675 * 1)? 5676 */ 5677 if (hold_rlock == 0) { 5678 hold_rlock = 1; 5679 SCTP_INP_READ_LOCK(inp); 5680 } 5681 TAILQ_REMOVE(&inp->read_queue, control, next); 5682 if (control->data) { 5683 #ifdef INVARIANTS 5684 panic("control->data not null but control->length == 0"); 5685 #else 5686 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n"); 5687 sctp_m_freem(control->data); 5688 control->data = NULL; 5689 #endif 5690 } 5691 if (control->aux_data) { 5692 sctp_m_free(control->aux_data); 5693 control->aux_data = NULL; 5694 } 5695 #ifdef INVARIANTS 5696 if (control->on_strm_q) { 5697 panic("About to free ctl:%p so:%p and its in %d", 5698 control, so, control->on_strm_q); 5699 } 5700 #endif 5701 sctp_free_remote_addr(control->whoFrom); 5702 sctp_free_a_readq(stcb, control); 5703 if (hold_rlock) { 5704 hold_rlock = 0; 5705 SCTP_INP_READ_UNLOCK(inp); 5706 } 5707 goto restart; 5708 } 5709 if (control->length == 0) { 5710 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5711 (filling_sinfo)) { 5712 /* find a more suitable one then this */ 5713 ctl = TAILQ_NEXT(control, next); 5714 while (ctl) { 5715 if ((ctl->stcb != control->stcb) && (ctl->length) && 5716 (ctl->some_taken || 5717 (ctl->spec_flags & M_NOTIFICATION) || 5718 ((ctl->do_not_ref_stcb == 0) && 5719 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5720 ) { 5721 /*- 5722 * If we have a different TCB next, and there is data 5723 * present. If we have already taken some (pdapi), OR we can 5724 * ref the tcb and no delivery as started on this stream, we 5725 * take it. Note we allow a notification on a different 5726 * assoc to be delivered.. 5727 */ 5728 control = ctl; 5729 goto found_one; 5730 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5731 (ctl->length) && 5732 ((ctl->some_taken) || 5733 ((ctl->do_not_ref_stcb == 0) && 5734 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5735 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5736 /*- 5737 * If we have the same tcb, and there is data present, and we 5738 * have the strm interleave feature present. Then if we have 5739 * taken some (pdapi) or we can refer to tht tcb AND we have 5740 * not started a delivery for this stream, we can take it. 5741 * Note we do NOT allow a notificaiton on the same assoc to 5742 * be delivered. 5743 */ 5744 control = ctl; 5745 goto found_one; 5746 } 5747 ctl = TAILQ_NEXT(ctl, next); 5748 } 5749 } 5750 /* 5751 * if we reach here, not suitable replacement is available 5752 * <or> fragment interleave is NOT on. So stuff the sb_cc 5753 * into the our held count, and its time to sleep again. 
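 * Stashing sb_cc in the held count makes the length check at the top of the
 * restart path treat the data we just rejected as if it were not there, so
 * we block (or return EWOULDBLOCK) until something new arrives.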
5754 */ 5755 held_length = so->so_rcv.sb_cc; 5756 control->held_length = so->so_rcv.sb_cc; 5757 goto restart; 5758 } 5759 /* Clear the held length since there is something to read */ 5760 control->held_length = 0; 5761 found_one: 5762 /* 5763 * If we reach here, control has a some data for us to read off. 5764 * Note that stcb COULD be NULL. 5765 */ 5766 if (hold_rlock == 0) { 5767 hold_rlock = 1; 5768 SCTP_INP_READ_LOCK(inp); 5769 } 5770 control->some_taken++; 5771 stcb = control->stcb; 5772 if (stcb) { 5773 if ((control->do_not_ref_stcb == 0) && 5774 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5775 if (freecnt_applied == 0) 5776 stcb = NULL; 5777 } else if (control->do_not_ref_stcb == 0) { 5778 /* you can't free it on me please */ 5779 /* 5780 * The lock on the socket buffer protects us so the 5781 * free code will stop. But since we used the 5782 * socketbuf lock and the sender uses the tcb_lock 5783 * to increment, we need to use the atomic add to 5784 * the refcnt 5785 */ 5786 if (freecnt_applied) { 5787 #ifdef INVARIANTS 5788 panic("refcnt already incremented"); 5789 #else 5790 SCTP_PRINTF("refcnt already incremented?\n"); 5791 #endif 5792 } else { 5793 atomic_add_int(&stcb->asoc.refcnt, 1); 5794 freecnt_applied = 1; 5795 } 5796 /* 5797 * Setup to remember how much we have not yet told 5798 * the peer our rwnd has opened up. Note we grab the 5799 * value from the tcb from last time. Note too that 5800 * sack sending clears this when a sack is sent, 5801 * which is fine. Once we hit the rwnd_req, we then 5802 * will go to the sctp_user_rcvd() that will not 5803 * lock until it KNOWs it MUST send a WUP-SACK. 5804 */ 5805 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5806 stcb->freed_by_sorcv_sincelast = 0; 5807 } 5808 } 5809 if (stcb && 5810 ((control->spec_flags & M_NOTIFICATION) == 0) && 5811 control->do_not_ref_stcb == 0) { 5812 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5813 } 5814 5815 /* First lets get off the sinfo and sockaddr info */ 5816 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5817 sinfo->sinfo_stream = control->sinfo_stream; 5818 sinfo->sinfo_ssn = (uint16_t)control->mid; 5819 sinfo->sinfo_flags = control->sinfo_flags; 5820 sinfo->sinfo_ppid = control->sinfo_ppid; 5821 sinfo->sinfo_context = control->sinfo_context; 5822 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5823 sinfo->sinfo_tsn = control->sinfo_tsn; 5824 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5825 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5826 nxt = TAILQ_NEXT(control, next); 5827 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5828 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5829 struct sctp_extrcvinfo *s_extra; 5830 5831 s_extra = (struct sctp_extrcvinfo *)sinfo; 5832 if ((nxt) && 5833 (nxt->length)) { 5834 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5835 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5836 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5837 } 5838 if (nxt->spec_flags & M_NOTIFICATION) { 5839 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5840 } 5841 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5842 s_extra->serinfo_next_length = nxt->length; 5843 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5844 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5845 if (nxt->tail_mbuf != NULL) { 5846 if (nxt->end_added) { 5847 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5848 } 5849 } 5850 } else { 5851 /* 5852 * we explicitly 0 this, since the memcpy 5853 * got some other things 
beyond the older 5854 * sinfo_ that is on the control's structure 5855 * :-D 5856 */ 5857 nxt = NULL; 5858 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5859 s_extra->serinfo_next_aid = 0; 5860 s_extra->serinfo_next_length = 0; 5861 s_extra->serinfo_next_ppid = 0; 5862 s_extra->serinfo_next_stream = 0; 5863 } 5864 } 5865 /* 5866 * update off the real current cum-ack, if we have an stcb. 5867 */ 5868 if ((control->do_not_ref_stcb == 0) && stcb) 5869 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5870 /* 5871 * mask off the high bits, we keep the actual chunk bits in 5872 * there. 5873 */ 5874 sinfo->sinfo_flags &= 0x00ff; 5875 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5876 sinfo->sinfo_flags |= SCTP_UNORDERED; 5877 } 5878 } 5879 #ifdef SCTP_ASOCLOG_OF_TSNS 5880 { 5881 int index, newindex; 5882 struct sctp_pcbtsn_rlog *entry; 5883 5884 do { 5885 index = inp->readlog_index; 5886 newindex = index + 1; 5887 if (newindex >= SCTP_READ_LOG_SIZE) { 5888 newindex = 0; 5889 } 5890 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5891 entry = &inp->readlog[index]; 5892 entry->vtag = control->sinfo_assoc_id; 5893 entry->strm = control->sinfo_stream; 5894 entry->seq = (uint16_t)control->mid; 5895 entry->sz = control->length; 5896 entry->flgs = control->sinfo_flags; 5897 } 5898 #endif 5899 if ((fromlen > 0) && (from != NULL)) { 5900 union sctp_sockstore store; 5901 size_t len; 5902 5903 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5904 #ifdef INET6 5905 case AF_INET6: 5906 len = sizeof(struct sockaddr_in6); 5907 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5908 store.sin6.sin6_port = control->port_from; 5909 break; 5910 #endif 5911 #ifdef INET 5912 case AF_INET: 5913 #ifdef INET6 5914 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5915 len = sizeof(struct sockaddr_in6); 5916 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5917 &store.sin6); 5918 store.sin6.sin6_port = control->port_from; 5919 } else { 5920 len = sizeof(struct sockaddr_in); 5921 store.sin = control->whoFrom->ro._l_addr.sin; 5922 store.sin.sin_port = control->port_from; 5923 } 5924 #else 5925 len = sizeof(struct sockaddr_in); 5926 store.sin = control->whoFrom->ro._l_addr.sin; 5927 store.sin.sin_port = control->port_from; 5928 #endif 5929 break; 5930 #endif 5931 default: 5932 len = 0; 5933 break; 5934 } 5935 memcpy(from, &store, min((size_t)fromlen, len)); 5936 #ifdef INET6 5937 { 5938 struct sockaddr_in6 lsa6, *from6; 5939 5940 from6 = (struct sockaddr_in6 *)from; 5941 sctp_recover_scope_mac(from6, (&lsa6)); 5942 } 5943 #endif 5944 } 5945 if (hold_rlock) { 5946 SCTP_INP_READ_UNLOCK(inp); 5947 hold_rlock = 0; 5948 } 5949 if (hold_sblock) { 5950 SOCKBUF_UNLOCK(&so->so_rcv); 5951 hold_sblock = 0; 5952 } 5953 /* now copy out what data we can */ 5954 if (mp == NULL) { 5955 /* copy out each mbuf in the chain up to length */ 5956 get_more_data: 5957 m = control->data; 5958 while (m) { 5959 /* Move out all we can */ 5960 cp_len = uio->uio_resid; 5961 my_len = SCTP_BUF_LEN(m); 5962 if (cp_len > my_len) { 5963 /* not enough in this buf */ 5964 cp_len = my_len; 5965 } 5966 if (hold_rlock) { 5967 SCTP_INP_READ_UNLOCK(inp); 5968 hold_rlock = 0; 5969 } 5970 if (cp_len > 0) 5971 error = uiomove(mtod(m, char *), (int)cp_len, uio); 5972 /* re-read */ 5973 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5974 goto release; 5975 } 5976 5977 if ((control->do_not_ref_stcb == 0) && stcb && 5978 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5979 no_rcv_needed = 1; 5980 } 5981 if 
(error) { 5982 /* error we are out of here */ 5983 goto release; 5984 } 5985 SCTP_INP_READ_LOCK(inp); 5986 hold_rlock = 1; 5987 if (cp_len == SCTP_BUF_LEN(m)) { 5988 if ((SCTP_BUF_NEXT(m) == NULL) && 5989 (control->end_added)) { 5990 out_flags |= MSG_EOR; 5991 if ((control->do_not_ref_stcb == 0) && 5992 (control->stcb != NULL) && 5993 ((control->spec_flags & M_NOTIFICATION) == 0)) 5994 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5995 } 5996 if (control->spec_flags & M_NOTIFICATION) { 5997 out_flags |= MSG_NOTIFICATION; 5998 } 5999 /* we ate up the mbuf */ 6000 if (in_flags & MSG_PEEK) { 6001 /* just looking */ 6002 m = SCTP_BUF_NEXT(m); 6003 copied_so_far += cp_len; 6004 } else { 6005 /* dispose of the mbuf */ 6006 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6007 sctp_sblog(&so->so_rcv, 6008 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6009 } 6010 sctp_sbfree(control, stcb, &so->so_rcv, m); 6011 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6012 sctp_sblog(&so->so_rcv, 6013 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6014 } 6015 copied_so_far += cp_len; 6016 freed_so_far += (uint32_t)cp_len; 6017 freed_so_far += MSIZE; 6018 atomic_subtract_int(&control->length, cp_len); 6019 control->data = sctp_m_free(m); 6020 m = control->data; 6021 /* 6022 * been through it all, must hold sb 6023 * lock ok to null tail 6024 */ 6025 if (control->data == NULL) { 6026 #ifdef INVARIANTS 6027 if ((control->end_added == 0) || 6028 (TAILQ_NEXT(control, next) == NULL)) { 6029 /* 6030 * If the end is not 6031 * added, OR the 6032 * next is NOT null 6033 * we MUST have the 6034 * lock. 6035 */ 6036 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6037 panic("Hmm we don't own the lock?"); 6038 } 6039 } 6040 #endif 6041 control->tail_mbuf = NULL; 6042 #ifdef INVARIANTS 6043 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6044 panic("end_added, nothing left and no MSG_EOR"); 6045 } 6046 #endif 6047 } 6048 } 6049 } else { 6050 /* Do we need to trim the mbuf? */ 6051 if (control->spec_flags & M_NOTIFICATION) { 6052 out_flags |= MSG_NOTIFICATION; 6053 } 6054 if ((in_flags & MSG_PEEK) == 0) { 6055 SCTP_BUF_RESV_UF(m, cp_len); 6056 SCTP_BUF_LEN(m) -= (int)cp_len; 6057 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6058 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6059 } 6060 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 6061 if ((control->do_not_ref_stcb == 0) && 6062 stcb) { 6063 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 6064 } 6065 copied_so_far += cp_len; 6066 freed_so_far += (uint32_t)cp_len; 6067 freed_so_far += MSIZE; 6068 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6069 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6070 SCTP_LOG_SBRESULT, 0); 6071 } 6072 atomic_subtract_int(&control->length, cp_len); 6073 } else { 6074 copied_so_far += cp_len; 6075 } 6076 } 6077 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6078 break; 6079 } 6080 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6081 (control->do_not_ref_stcb == 0) && 6082 (freed_so_far >= rwnd_req)) { 6083 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6084 } 6085 } /* end while(m) */ 6086 /* 6087 * At this point we have looked at it all and we either have 6088 * a MSG_EOR/or read all the user wants... <OR> 6089 * control->length == 0. 
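 * Below we either retire a fully consumed message, hand back what we have,
 * or, for a still incomplete partial delivery, decide whether we are
 * allowed to block and wait for the rest.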
6090 */ 6091 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6092 /* we are done with this control */ 6093 if (control->length == 0) { 6094 if (control->data) { 6095 #ifdef INVARIANTS 6096 panic("control->data not null at read eor?"); 6097 #else 6098 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n"); 6099 sctp_m_freem(control->data); 6100 control->data = NULL; 6101 #endif 6102 } 6103 done_with_control: 6104 if (hold_rlock == 0) { 6105 SCTP_INP_READ_LOCK(inp); 6106 hold_rlock = 1; 6107 } 6108 TAILQ_REMOVE(&inp->read_queue, control, next); 6109 /* Add back any hiddend data */ 6110 if (control->held_length) { 6111 held_length = 0; 6112 control->held_length = 0; 6113 wakeup_read_socket = 1; 6114 } 6115 if (control->aux_data) { 6116 sctp_m_free(control->aux_data); 6117 control->aux_data = NULL; 6118 } 6119 no_rcv_needed = control->do_not_ref_stcb; 6120 sctp_free_remote_addr(control->whoFrom); 6121 control->data = NULL; 6122 #ifdef INVARIANTS 6123 if (control->on_strm_q) { 6124 panic("About to free ctl:%p so:%p and its in %d", 6125 control, so, control->on_strm_q); 6126 } 6127 #endif 6128 sctp_free_a_readq(stcb, control); 6129 control = NULL; 6130 if ((freed_so_far >= rwnd_req) && 6131 (no_rcv_needed == 0)) 6132 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6133 6134 } else { 6135 /* 6136 * The user did not read all of this 6137 * message, turn off the returned MSG_EOR 6138 * since we are leaving more behind on the 6139 * control to read. 6140 */ 6141 #ifdef INVARIANTS 6142 if (control->end_added && 6143 (control->data == NULL) && 6144 (control->tail_mbuf == NULL)) { 6145 panic("Gak, control->length is corrupt?"); 6146 } 6147 #endif 6148 no_rcv_needed = control->do_not_ref_stcb; 6149 out_flags &= ~MSG_EOR; 6150 } 6151 } 6152 if (out_flags & MSG_EOR) { 6153 goto release; 6154 } 6155 if ((uio->uio_resid == 0) || 6156 ((in_eeor_mode) && 6157 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6158 goto release; 6159 } 6160 /* 6161 * If I hit here the receiver wants more and this message is 6162 * NOT done (pd-api). So two questions. Can we block? if not 6163 * we are done. Did the user NOT set MSG_WAITALL? 6164 */ 6165 if (block_allowed == 0) { 6166 goto release; 6167 } 6168 /* 6169 * We need to wait for more data a few things: - We don't 6170 * sbunlock() so we don't get someone else reading. - We 6171 * must be sure to account for the case where what is added 6172 * is NOT to our control when we wakeup. 6173 */ 6174 6175 /* 6176 * Do we need to tell the transport a rwnd update might be 6177 * needed before we go to sleep? 
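 * We only bother when enough has been freed since the last report (the
 * freed_so_far >= rwnd_req test below); sctp_user_rcvd() re-checks the
 * actual window delta before sending anything.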
6178 */ 6179 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6180 ((freed_so_far >= rwnd_req) && 6181 (control->do_not_ref_stcb == 0) && 6182 (no_rcv_needed == 0))) { 6183 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6184 } 6185 wait_some_more: 6186 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6187 goto release; 6188 } 6189 6190 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6191 goto release; 6192 6193 if (hold_rlock == 1) { 6194 SCTP_INP_READ_UNLOCK(inp); 6195 hold_rlock = 0; 6196 } 6197 if (hold_sblock == 0) { 6198 SOCKBUF_LOCK(&so->so_rcv); 6199 hold_sblock = 1; 6200 } 6201 if ((copied_so_far) && (control->length == 0) && 6202 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6203 goto release; 6204 } 6205 if (so->so_rcv.sb_cc <= control->held_length) { 6206 error = sbwait(&so->so_rcv); 6207 if (error) { 6208 goto release; 6209 } 6210 control->held_length = 0; 6211 } 6212 if (hold_sblock) { 6213 SOCKBUF_UNLOCK(&so->so_rcv); 6214 hold_sblock = 0; 6215 } 6216 if (control->length == 0) { 6217 /* still nothing here */ 6218 if (control->end_added == 1) { 6219 /* he aborted, or is done i.e.did a shutdown */ 6220 out_flags |= MSG_EOR; 6221 if (control->pdapi_aborted) { 6222 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6223 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6224 6225 out_flags |= MSG_TRUNC; 6226 } else { 6227 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6228 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6229 } 6230 goto done_with_control; 6231 } 6232 if (so->so_rcv.sb_cc > held_length) { 6233 control->held_length = so->so_rcv.sb_cc; 6234 held_length = 0; 6235 } 6236 goto wait_some_more; 6237 } else if (control->data == NULL) { 6238 /* 6239 * we must re-sync since data is probably being 6240 * added 6241 */ 6242 SCTP_INP_READ_LOCK(inp); 6243 if ((control->length > 0) && (control->data == NULL)) { 6244 /* 6245 * big trouble.. we have the lock and its 6246 * corrupt? 6247 */ 6248 #ifdef INVARIANTS 6249 panic("Impossible data==NULL length !=0"); 6250 #endif 6251 out_flags |= MSG_EOR; 6252 out_flags |= MSG_TRUNC; 6253 control->length = 0; 6254 SCTP_INP_READ_UNLOCK(inp); 6255 goto done_with_control; 6256 } 6257 SCTP_INP_READ_UNLOCK(inp); 6258 /* We will fall around to get more data */ 6259 } 6260 goto get_more_data; 6261 } else { 6262 /*- 6263 * Give caller back the mbuf chain, 6264 * store in uio_resid the length 6265 */ 6266 wakeup_read_socket = 0; 6267 if ((control->end_added == 0) || 6268 (TAILQ_NEXT(control, next) == NULL)) { 6269 /* Need to get rlock */ 6270 if (hold_rlock == 0) { 6271 SCTP_INP_READ_LOCK(inp); 6272 hold_rlock = 1; 6273 } 6274 } 6275 if (control->end_added) { 6276 out_flags |= MSG_EOR; 6277 if ((control->do_not_ref_stcb == 0) && 6278 (control->stcb != NULL) && 6279 ((control->spec_flags & M_NOTIFICATION) == 0)) 6280 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6281 } 6282 if (control->spec_flags & M_NOTIFICATION) { 6283 out_flags |= MSG_NOTIFICATION; 6284 } 6285 uio->uio_resid = control->length; 6286 *mp = control->data; 6287 m = control->data; 6288 while (m) { 6289 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6290 sctp_sblog(&so->so_rcv, 6291 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6292 } 6293 sctp_sbfree(control, stcb, &so->so_rcv, m); 6294 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6295 freed_so_far += MSIZE; 6296 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6297 sctp_sblog(&so->so_rcv, 6298 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6299 } 6300 m = SCTP_BUF_NEXT(m); 6301 } 6302 control->data = control->tail_mbuf = NULL; 6303 control->length = 0; 6304 if (out_flags & MSG_EOR) { 6305 /* Done with this control */ 6306 goto done_with_control; 6307 } 6308 } 6309 release: 6310 if (hold_rlock == 1) { 6311 SCTP_INP_READ_UNLOCK(inp); 6312 hold_rlock = 0; 6313 } 6314 if (hold_sblock == 1) { 6315 SOCKBUF_UNLOCK(&so->so_rcv); 6316 hold_sblock = 0; 6317 } 6318 6319 sbunlock(&so->so_rcv); 6320 sockbuf_lock = 0; 6321 6322 release_unlocked: 6323 if (hold_sblock) { 6324 SOCKBUF_UNLOCK(&so->so_rcv); 6325 hold_sblock = 0; 6326 } 6327 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6328 if ((freed_so_far >= rwnd_req) && 6329 (control && (control->do_not_ref_stcb == 0)) && 6330 (no_rcv_needed == 0)) 6331 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6332 } 6333 out: 6334 if (msg_flags) { 6335 *msg_flags = out_flags; 6336 } 6337 if (((out_flags & MSG_EOR) == 0) && 6338 ((in_flags & MSG_PEEK) == 0) && 6339 (sinfo) && 6340 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6341 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6342 struct sctp_extrcvinfo *s_extra; 6343 6344 s_extra = (struct sctp_extrcvinfo *)sinfo; 6345 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6346 } 6347 if (hold_rlock == 1) { 6348 SCTP_INP_READ_UNLOCK(inp); 6349 } 6350 if (hold_sblock) { 6351 SOCKBUF_UNLOCK(&so->so_rcv); 6352 } 6353 if (sockbuf_lock) { 6354 sbunlock(&so->so_rcv); 6355 } 6356 6357 if (freecnt_applied) { 6358 /* 6359 * The lock on the socket buffer protects us so the free 6360 * code will stop. But since we used the socketbuf lock and 6361 * the sender uses the tcb_lock to increment, we need to use 6362 * the atomic add to the refcnt. 6363 */ 6364 if (stcb == NULL) { 6365 #ifdef INVARIANTS 6366 panic("stcb for refcnt has gone NULL?"); 6367 goto stage_left; 6368 #else 6369 goto stage_left; 6370 #endif 6371 } 6372 /* Save the value back for next time */ 6373 stcb->freed_by_sorcv_sincelast = freed_so_far; 6374 atomic_add_int(&stcb->asoc.refcnt, -1); 6375 } 6376 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6377 if (stcb) { 6378 sctp_misc_ints(SCTP_SORECV_DONE, 6379 freed_so_far, 6380 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6381 stcb->asoc.my_rwnd, 6382 so->so_rcv.sb_cc); 6383 } else { 6384 sctp_misc_ints(SCTP_SORECV_DONE, 6385 freed_so_far, 6386 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6387 0, 6388 so->so_rcv.sb_cc); 6389 } 6390 } 6391 stage_left: 6392 if (wakeup_read_socket) { 6393 sctp_sorwakeup(inp, so); 6394 } 6395 return (error); 6396 } 6397 6398 6399 #ifdef SCTP_MBUF_LOGGING 6400 struct mbuf * 6401 sctp_m_free(struct mbuf *m) 6402 { 6403 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6404 sctp_log_mb(m, SCTP_MBUF_IFREE); 6405 } 6406 return (m_free(m)); 6407 } 6408 6409 void 6410 sctp_m_freem(struct mbuf *mb) 6411 { 6412 while (mb != NULL) 6413 mb = sctp_m_free(mb); 6414 } 6415 6416 #endif 6417 6418 int 6419 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6420 { 6421 /* 6422 * Given a local address. For all associations that holds the 6423 * address, request a peer-set-primary. 
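 * The associations are not walked here directly; a work item is queued on
 * the address work queue and the ADDR_WQ timer is started so the iterator
 * performs the peer-set-primary requests asynchronously.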
6424 */ 6425 struct sctp_ifa *ifa; 6426 struct sctp_laddr *wi; 6427 6428 ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0); 6429 if (ifa == NULL) { 6430 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6431 return (EADDRNOTAVAIL); 6432 } 6433 /* 6434 * Now that we have the ifa we must awaken the iterator with this 6435 * message. 6436 */ 6437 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6438 if (wi == NULL) { 6439 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6440 return (ENOMEM); 6441 } 6442 /* Now incr the count and int wi structure */ 6443 SCTP_INCR_LADDR_COUNT(); 6444 memset(wi, 0, sizeof(*wi)); 6445 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6446 wi->ifa = ifa; 6447 wi->action = SCTP_SET_PRIM_ADDR; 6448 atomic_add_int(&ifa->refcount, 1); 6449 6450 /* Now add it to the work queue */ 6451 SCTP_WQ_ADDR_LOCK(); 6452 /* 6453 * Should this really be a tailq? As it is we will process the 6454 * newest first :-0 6455 */ 6456 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6457 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6458 (struct sctp_inpcb *)NULL, 6459 (struct sctp_tcb *)NULL, 6460 (struct sctp_nets *)NULL); 6461 SCTP_WQ_ADDR_UNLOCK(); 6462 return (0); 6463 } 6464 6465 6466 int 6467 sctp_soreceive(struct socket *so, 6468 struct sockaddr **psa, 6469 struct uio *uio, 6470 struct mbuf **mp0, 6471 struct mbuf **controlp, 6472 int *flagsp) 6473 { 6474 int error, fromlen; 6475 uint8_t sockbuf[256]; 6476 struct sockaddr *from; 6477 struct sctp_extrcvinfo sinfo; 6478 int filling_sinfo = 1; 6479 int flags; 6480 struct sctp_inpcb *inp; 6481 6482 inp = (struct sctp_inpcb *)so->so_pcb; 6483 /* pickup the assoc we are reading from */ 6484 if (inp == NULL) { 6485 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6486 return (EINVAL); 6487 } 6488 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6489 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6490 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6491 (controlp == NULL)) { 6492 /* user does not want the sndrcv ctl */ 6493 filling_sinfo = 0; 6494 } 6495 if (psa) { 6496 from = (struct sockaddr *)sockbuf; 6497 fromlen = sizeof(sockbuf); 6498 from->sa_len = 0; 6499 } else { 6500 from = NULL; 6501 fromlen = 0; 6502 } 6503 6504 if (filling_sinfo) { 6505 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6506 } 6507 if (flagsp != NULL) { 6508 flags = *flagsp; 6509 } else { 6510 flags = 0; 6511 } 6512 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6513 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6514 if (flagsp != NULL) { 6515 *flagsp = flags; 6516 } 6517 if (controlp != NULL) { 6518 /* copy back the sinfo in a CMSG format */ 6519 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6520 *controlp = sctp_build_ctl_nchunk(inp, 6521 (struct sctp_sndrcvinfo *)&sinfo); 6522 } else { 6523 *controlp = NULL; 6524 } 6525 } 6526 if (psa) { 6527 /* copy back the address info */ 6528 if (from && from->sa_len) { 6529 *psa = sodupsockaddr(from, M_NOWAIT); 6530 } else { 6531 *psa = NULL; 6532 } 6533 } 6534 return (error); 6535 } 6536 6537 6538 6539 6540 6541 int 6542 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6543 int totaddr, int *error) 6544 { 6545 int added = 0; 6546 int i; 6547 struct sctp_inpcb *inp; 6548 struct sockaddr *sa; 6549 size_t incr = 0; 6550 #ifdef INET 6551 struct sockaddr_in *sin; 6552 #endif 6553 #ifdef INET6 6554 struct sockaddr_in6 *sin6; 6555 #endif 6556 6557 sa = addr; 6558 
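	/*
	 * Walk the caller-supplied packed address list; every valid address
	 * is added as a confirmed remote address, and any invalid one frees
	 * the association and aborts the whole operation.
	 */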
inp = stcb->sctp_ep; 6559 *error = 0; 6560 for (i = 0; i < totaddr; i++) { 6561 switch (sa->sa_family) { 6562 #ifdef INET 6563 case AF_INET: 6564 incr = sizeof(struct sockaddr_in); 6565 sin = (struct sockaddr_in *)sa; 6566 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6567 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6568 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6569 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6570 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6571 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6572 *error = EINVAL; 6573 goto out_now; 6574 } 6575 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6576 SCTP_DONOT_SETSCOPE, 6577 SCTP_ADDR_IS_CONFIRMED)) { 6578 /* assoc gone no un-lock */ 6579 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6580 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6581 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6582 *error = ENOBUFS; 6583 goto out_now; 6584 } 6585 added++; 6586 break; 6587 #endif 6588 #ifdef INET6 6589 case AF_INET6: 6590 incr = sizeof(struct sockaddr_in6); 6591 sin6 = (struct sockaddr_in6 *)sa; 6592 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6593 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6594 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6595 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6596 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6597 *error = EINVAL; 6598 goto out_now; 6599 } 6600 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6601 SCTP_DONOT_SETSCOPE, 6602 SCTP_ADDR_IS_CONFIRMED)) { 6603 /* assoc gone no un-lock */ 6604 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6605 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6606 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6607 *error = ENOBUFS; 6608 goto out_now; 6609 } 6610 added++; 6611 break; 6612 #endif 6613 default: 6614 break; 6615 } 6616 sa = (struct sockaddr *)((caddr_t)sa + incr); 6617 } 6618 out_now: 6619 return (added); 6620 } 6621 6622 int 6623 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6624 unsigned int totaddr, 6625 unsigned int *num_v4, unsigned int *num_v6, 6626 unsigned int limit) 6627 { 6628 struct sockaddr *sa; 6629 struct sctp_tcb *stcb; 6630 unsigned int incr, at, i; 6631 6632 at = 0; 6633 sa = addr; 6634 *num_v6 = *num_v4 = 0; 6635 /* account and validate addresses */ 6636 if (totaddr == 0) { 6637 return (EINVAL); 6638 } 6639 for (i = 0; i < totaddr; i++) { 6640 if (at + sizeof(struct sockaddr) > limit) { 6641 return (EINVAL); 6642 } 6643 switch (sa->sa_family) { 6644 #ifdef INET 6645 case AF_INET: 6646 incr = (unsigned int)sizeof(struct sockaddr_in); 6647 if (sa->sa_len != incr) { 6648 return (EINVAL); 6649 } 6650 (*num_v4) += 1; 6651 break; 6652 #endif 6653 #ifdef INET6 6654 case AF_INET6: 6655 { 6656 struct sockaddr_in6 *sin6; 6657 6658 sin6 = (struct sockaddr_in6 *)sa; 6659 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6660 /* Must be non-mapped for connectx */ 6661 return (EINVAL); 6662 } 6663 incr = (unsigned int)sizeof(struct sockaddr_in6); 6664 if (sa->sa_len != incr) { 6665 return (EINVAL); 6666 } 6667 (*num_v6) += 1; 6668 break; 6669 } 6670 #endif 6671 default: 6672 return (EINVAL); 6673 } 6674 if ((at + incr) > limit) { 6675 return (EINVAL); 6676 } 6677 SCTP_INP_INCR_REF(inp); 6678 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6679 if (stcb != NULL) { 6680 SCTP_TCB_UNLOCK(stcb); 6681 return (EALREADY); 6682 } else { 6683 SCTP_INP_DECR_REF(inp); 6684 } 6685 at += incr; 6686 sa = (struct sockaddr *)((caddr_t)sa + 
incr); 6687 } 6688 return (0); 6689 } 6690 6691 /* 6692 * sctp_bindx(ADD) for one address. 6693 * assumes all arguments are valid/checked by caller. 6694 */ 6695 void 6696 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6697 struct sockaddr *sa, uint32_t vrf_id, int *error, 6698 void *p) 6699 { 6700 #if defined(INET) && defined(INET6) 6701 struct sockaddr_in sin; 6702 #endif 6703 #ifdef INET6 6704 struct sockaddr_in6 *sin6; 6705 #endif 6706 #ifdef INET 6707 struct sockaddr_in *sinp; 6708 #endif 6709 struct sockaddr *addr_to_use; 6710 struct sctp_inpcb *lep; 6711 uint16_t port; 6712 6713 /* see if we're bound all already! */ 6714 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6715 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6716 *error = EINVAL; 6717 return; 6718 } 6719 switch (sa->sa_family) { 6720 #ifdef INET6 6721 case AF_INET6: 6722 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6723 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6724 *error = EINVAL; 6725 return; 6726 } 6727 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6728 /* can only bind v6 on PF_INET6 sockets */ 6729 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6730 *error = EINVAL; 6731 return; 6732 } 6733 sin6 = (struct sockaddr_in6 *)sa; 6734 port = sin6->sin6_port; 6735 #ifdef INET 6736 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6737 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6738 SCTP_IPV6_V6ONLY(inp)) { 6739 /* can't bind v4-mapped on PF_INET sockets */ 6740 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6741 *error = EINVAL; 6742 return; 6743 } 6744 in6_sin6_2_sin(&sin, sin6); 6745 addr_to_use = (struct sockaddr *)&sin; 6746 } else { 6747 addr_to_use = sa; 6748 } 6749 #else 6750 addr_to_use = sa; 6751 #endif 6752 break; 6753 #endif 6754 #ifdef INET 6755 case AF_INET: 6756 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6757 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6758 *error = EINVAL; 6759 return; 6760 } 6761 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6762 SCTP_IPV6_V6ONLY(inp)) { 6763 /* can't bind v4 on PF_INET sockets */ 6764 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6765 *error = EINVAL; 6766 return; 6767 } 6768 sinp = (struct sockaddr_in *)sa; 6769 port = sinp->sin_port; 6770 addr_to_use = sa; 6771 break; 6772 #endif 6773 default: 6774 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6775 *error = EINVAL; 6776 return; 6777 } 6778 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6779 if (p == NULL) { 6780 /* Can't get proc for Net/Open BSD */ 6781 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6782 *error = EINVAL; 6783 return; 6784 } 6785 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6786 return; 6787 } 6788 /* Validate the incoming port. */ 6789 if ((port != 0) && (port != inp->sctp_lport)) { 6790 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6791 *error = EINVAL; 6792 return; 6793 } 6794 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6795 if (lep == NULL) { 6796 /* add the address */ 6797 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6798 SCTP_ADD_IP_ADDRESS, vrf_id); 6799 } else { 6800 if (lep != inp) { 6801 *error = EADDRINUSE; 6802 } 6803 SCTP_INP_DECR_REF(lep); 6804 } 6805 } 6806 6807 /* 6808 * sctp_bindx(DELETE) for one address. 6809 * assumes all arguments are valid/checked by caller. 
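 * As in the ADD case, a v4-mapped IPv6 address is converted to a plain
 * IPv4 sockaddr before it is handed to the address management code.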
6810 */ 6811 void 6812 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6813 struct sockaddr *sa, uint32_t vrf_id, int *error) 6814 { 6815 struct sockaddr *addr_to_use; 6816 #if defined(INET) && defined(INET6) 6817 struct sockaddr_in6 *sin6; 6818 struct sockaddr_in sin; 6819 #endif 6820 6821 /* see if we're bound all already! */ 6822 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6823 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6824 *error = EINVAL; 6825 return; 6826 } 6827 switch (sa->sa_family) { 6828 #ifdef INET6 6829 case AF_INET6: 6830 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6831 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6832 *error = EINVAL; 6833 return; 6834 } 6835 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6836 /* can only bind v6 on PF_INET6 sockets */ 6837 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6838 *error = EINVAL; 6839 return; 6840 } 6841 #ifdef INET 6842 sin6 = (struct sockaddr_in6 *)sa; 6843 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6844 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6845 SCTP_IPV6_V6ONLY(inp)) { 6846 /* can't bind mapped-v4 on PF_INET sockets */ 6847 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6848 *error = EINVAL; 6849 return; 6850 } 6851 in6_sin6_2_sin(&sin, sin6); 6852 addr_to_use = (struct sockaddr *)&sin; 6853 } else { 6854 addr_to_use = sa; 6855 } 6856 #else 6857 addr_to_use = sa; 6858 #endif 6859 break; 6860 #endif 6861 #ifdef INET 6862 case AF_INET: 6863 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6864 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6865 *error = EINVAL; 6866 return; 6867 } 6868 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6869 SCTP_IPV6_V6ONLY(inp)) { 6870 /* can't bind v4 on PF_INET sockets */ 6871 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6872 *error = EINVAL; 6873 return; 6874 } 6875 addr_to_use = sa; 6876 break; 6877 #endif 6878 default: 6879 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6880 *error = EINVAL; 6881 return; 6882 } 6883 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6884 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6885 vrf_id); 6886 } 6887 6888 /* 6889 * returns the valid local address count for an assoc, taking into account 6890 * all scoping rules 6891 */ 6892 int 6893 sctp_local_addr_count(struct sctp_tcb *stcb) 6894 { 6895 int loopback_scope; 6896 #if defined(INET) 6897 int ipv4_local_scope, ipv4_addr_legal; 6898 #endif 6899 #if defined(INET6) 6900 int local_scope, site_scope, ipv6_addr_legal; 6901 #endif 6902 struct sctp_vrf *vrf; 6903 struct sctp_ifn *sctp_ifn; 6904 struct sctp_ifa *sctp_ifa; 6905 int count = 0; 6906 6907 /* Turn on all the appropriate scopes */ 6908 loopback_scope = stcb->asoc.scope.loopback_scope; 6909 #if defined(INET) 6910 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6911 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6912 #endif 6913 #if defined(INET6) 6914 local_scope = stcb->asoc.scope.local_scope; 6915 site_scope = stcb->asoc.scope.site_scope; 6916 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6917 #endif 6918 SCTP_IPI_ADDR_RLOCK(); 6919 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6920 if (vrf == NULL) { 6921 /* no vrf, no addresses */ 6922 SCTP_IPI_ADDR_RUNLOCK(); 6923 return (0); 6924 } 6925 6926 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6927 /* 6928 * bound all case: go through all ifns on the vrf 6929 */ 6930 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6931 if ((loopback_scope == 0) && 6932 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6933 continue; 6934 } 6935 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6936 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6937 continue; 6938 switch (sctp_ifa->address.sa.sa_family) { 6939 #ifdef INET 6940 case AF_INET: 6941 if (ipv4_addr_legal) { 6942 struct sockaddr_in *sin; 6943 6944 sin = &sctp_ifa->address.sin; 6945 if (sin->sin_addr.s_addr == 0) { 6946 /* 6947 * skip unspecified 6948 * addrs 6949 */ 6950 continue; 6951 } 6952 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 6953 &sin->sin_addr) != 0) { 6954 continue; 6955 } 6956 if ((ipv4_local_scope == 0) && 6957 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 6958 continue; 6959 } 6960 /* count this one */ 6961 count++; 6962 } else { 6963 continue; 6964 } 6965 break; 6966 #endif 6967 #ifdef INET6 6968 case AF_INET6: 6969 if (ipv6_addr_legal) { 6970 struct sockaddr_in6 *sin6; 6971 6972 sin6 = &sctp_ifa->address.sin6; 6973 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 6974 continue; 6975 } 6976 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 6977 &sin6->sin6_addr) != 0) { 6978 continue; 6979 } 6980 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 6981 if (local_scope == 0) 6982 continue; 6983 if (sin6->sin6_scope_id == 0) { 6984 if (sa6_recoverscope(sin6) != 0) 6985 /* 6986 * 6987 * bad 6988 * link 6989 * 6990 * local 6991 * 6992 * address 6993 */ 6994 continue; 6995 } 6996 } 6997 if ((site_scope == 0) && 6998 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 6999 continue; 7000 } 7001 /* count this one */ 7002 count++; 7003 } 7004 break; 7005 #endif 7006 default: 7007 /* TSNH */ 7008 break; 7009 } 7010 } 7011 } 7012 } else { 7013 /* 7014 * subset bound case 7015 */ 7016 struct sctp_laddr *laddr; 7017 7018 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7019 sctp_nxt_addr) { 7020 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7021 continue; 7022 } 7023 /* count this one */ 7024 count++; 7025 } 7026 } 7027 SCTP_IPI_ADDR_RUNLOCK(); 7028 return (count); 7029 } 7030 7031 #if defined(SCTP_LOCAL_TRACE_BUF) 7032 7033 void 7034 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7035 { 7036 uint32_t saveindex, newindex; 7037 7038 do { 7039 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7040 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7041 newindex = 1; 7042 } else { 7043 newindex = saveindex + 1; 7044 } 7045 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7046 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7047 saveindex = 0; 7048 } 7049 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7050 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7051 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7052 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7053 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7054 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7055 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7056 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7057 } 7058 7059 #endif 7060 static void 7061 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7062 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7063 { 7064 struct ip *iph; 7065 #ifdef INET6 7066 struct ip6_hdr *ip6; 7067 #endif 7068 struct mbuf *sp, *last; 7069 struct udphdr *uhdr; 7070 uint16_t port; 7071 7072 if ((m->m_flags & M_PKTHDR) == 0) { 7073 /* Can't handle one that is not a pkt hdr */ 7074 goto out; 7075 } 7076 /* Pull the src port */ 7077 iph = mtod(m, struct ip *); 7078 uhdr = (struct udphdr *)((caddr_t)iph + off); 7079 port = uhdr->uh_sport; 7080 /* 7081 * Split out the mbuf chain. Leave the IP header in m, place the 7082 * rest in the sp. 7083 */ 7084 sp = m_split(m, off, M_NOWAIT); 7085 if (sp == NULL) { 7086 /* Gak, drop packet, we can't do a split */ 7087 goto out; 7088 } 7089 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7090 /* Gak, packet can't have an SCTP header in it - too small */ 7091 m_freem(sp); 7092 goto out; 7093 } 7094 /* Now pull up the UDP header and SCTP header together */ 7095 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7096 if (sp == NULL) { 7097 /* Gak pullup failed */ 7098 goto out; 7099 } 7100 /* Trim out the UDP header */ 7101 m_adj(sp, sizeof(struct udphdr)); 7102 7103 /* Now reconstruct the mbuf chain */ 7104 for (last = m; last->m_next; last = last->m_next); 7105 last->m_next = sp; 7106 m->m_pkthdr.len += sp->m_pkthdr.len; 7107 /* 7108 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7109 * checksum and it was valid. Since CSUM_DATA_VALID == 7110 * CSUM_SCTP_VALID this would imply that the HW also verified the 7111 * SCTP checksum. Therefore, clear the bit. 
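 * Otherwise the SCTP input path would take the CRC32c as already verified
 * and skip its own check.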
7112 */ 7113 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7114 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7115 m->m_pkthdr.len, 7116 if_name(m->m_pkthdr.rcvif), 7117 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7118 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7119 iph = mtod(m, struct ip *); 7120 switch (iph->ip_v) { 7121 #ifdef INET 7122 case IPVERSION: 7123 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7124 sctp_input_with_port(m, off, port); 7125 break; 7126 #endif 7127 #ifdef INET6 7128 case IPV6_VERSION >> 4: 7129 ip6 = mtod(m, struct ip6_hdr *); 7130 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7131 sctp6_input_with_port(&m, &off, port); 7132 break; 7133 #endif 7134 default: 7135 goto out; 7136 break; 7137 } 7138 return; 7139 out: 7140 m_freem(m); 7141 } 7142 7143 #ifdef INET 7144 static void 7145 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7146 { 7147 struct ip *outer_ip, *inner_ip; 7148 struct sctphdr *sh; 7149 struct icmp *icmp; 7150 struct udphdr *udp; 7151 struct sctp_inpcb *inp; 7152 struct sctp_tcb *stcb; 7153 struct sctp_nets *net; 7154 struct sctp_init_chunk *ch; 7155 struct sockaddr_in src, dst; 7156 uint8_t type, code; 7157 7158 inner_ip = (struct ip *)vip; 7159 icmp = (struct icmp *)((caddr_t)inner_ip - 7160 (sizeof(struct icmp) - sizeof(struct ip))); 7161 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7162 if (ntohs(outer_ip->ip_len) < 7163 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7164 return; 7165 } 7166 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7167 sh = (struct sctphdr *)(udp + 1); 7168 memset(&src, 0, sizeof(struct sockaddr_in)); 7169 src.sin_family = AF_INET; 7170 src.sin_len = sizeof(struct sockaddr_in); 7171 src.sin_port = sh->src_port; 7172 src.sin_addr = inner_ip->ip_src; 7173 memset(&dst, 0, sizeof(struct sockaddr_in)); 7174 dst.sin_family = AF_INET; 7175 dst.sin_len = sizeof(struct sockaddr_in); 7176 dst.sin_port = sh->dest_port; 7177 dst.sin_addr = inner_ip->ip_dst; 7178 /* 7179 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7180 * holds our local endpoint address. Thus we reverse the dst and the 7181 * src in the lookup. 7182 */ 7183 inp = NULL; 7184 net = NULL; 7185 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7186 (struct sockaddr *)&src, 7187 &inp, &net, 1, 7188 SCTP_DEFAULT_VRFID); 7189 if ((stcb != NULL) && 7190 (net != NULL) && 7191 (inp != NULL)) { 7192 /* Check the UDP port numbers */ 7193 if ((udp->uh_dport != net->port) || 7194 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7195 SCTP_TCB_UNLOCK(stcb); 7196 return; 7197 } 7198 /* Check the verification tag */ 7199 if (ntohl(sh->v_tag) != 0) { 7200 /* 7201 * This must be the verification tag used for 7202 * sending out packets. We don't consider packets 7203 * reflecting the verification tag. 7204 */ 7205 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7206 SCTP_TCB_UNLOCK(stcb); 7207 return; 7208 } 7209 } else { 7210 if (ntohs(outer_ip->ip_len) >= 7211 sizeof(struct ip) + 7212 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7213 /* 7214 * In this case we can check if we got an 7215 * INIT chunk and if the initiate tag 7216 * matches. 
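 * (A packet carrying a zero verification tag is only legitimate for an
 * INIT chunk, so the embedded initiate_tag must match our local vtag
 * before the ICMP report is trusted.)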
7217 */ 7218 ch = (struct sctp_init_chunk *)(sh + 1); 7219 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7220 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7221 SCTP_TCB_UNLOCK(stcb); 7222 return; 7223 } 7224 } else { 7225 SCTP_TCB_UNLOCK(stcb); 7226 return; 7227 } 7228 } 7229 type = icmp->icmp_type; 7230 code = icmp->icmp_code; 7231 if ((type == ICMP_UNREACH) && 7232 (code == ICMP_UNREACH_PORT)) { 7233 code = ICMP_UNREACH_PROTOCOL; 7234 } 7235 sctp_notify(inp, stcb, net, type, code, 7236 ntohs(inner_ip->ip_len), 7237 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7238 } else { 7239 if ((stcb == NULL) && (inp != NULL)) { 7240 /* reduce ref-count */ 7241 SCTP_INP_WLOCK(inp); 7242 SCTP_INP_DECR_REF(inp); 7243 SCTP_INP_WUNLOCK(inp); 7244 } 7245 if (stcb) { 7246 SCTP_TCB_UNLOCK(stcb); 7247 } 7248 } 7249 return; 7250 } 7251 #endif 7252 7253 #ifdef INET6 7254 static void 7255 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7256 { 7257 struct ip6ctlparam *ip6cp; 7258 struct sctp_inpcb *inp; 7259 struct sctp_tcb *stcb; 7260 struct sctp_nets *net; 7261 struct sctphdr sh; 7262 struct udphdr udp; 7263 struct sockaddr_in6 src, dst; 7264 uint8_t type, code; 7265 7266 ip6cp = (struct ip6ctlparam *)d; 7267 /* 7268 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7269 */ 7270 if (ip6cp->ip6c_m == NULL) { 7271 return; 7272 } 7273 /* 7274 * Check if we can safely examine the ports and the verification tag 7275 * of the SCTP common header. 7276 */ 7277 if (ip6cp->ip6c_m->m_pkthdr.len < 7278 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7279 return; 7280 } 7281 /* Copy out the UDP header. */ 7282 memset(&udp, 0, sizeof(struct udphdr)); 7283 m_copydata(ip6cp->ip6c_m, 7284 ip6cp->ip6c_off, 7285 sizeof(struct udphdr), 7286 (caddr_t)&udp); 7287 /* Copy out the port numbers and the verification tag. */ 7288 memset(&sh, 0, sizeof(struct sctphdr)); 7289 m_copydata(ip6cp->ip6c_m, 7290 ip6cp->ip6c_off + sizeof(struct udphdr), 7291 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7292 (caddr_t)&sh); 7293 memset(&src, 0, sizeof(struct sockaddr_in6)); 7294 src.sin6_family = AF_INET6; 7295 src.sin6_len = sizeof(struct sockaddr_in6); 7296 src.sin6_port = sh.src_port; 7297 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7298 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7299 return; 7300 } 7301 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7302 dst.sin6_family = AF_INET6; 7303 dst.sin6_len = sizeof(struct sockaddr_in6); 7304 dst.sin6_port = sh.dest_port; 7305 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7306 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7307 return; 7308 } 7309 inp = NULL; 7310 net = NULL; 7311 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7312 (struct sockaddr *)&src, 7313 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7314 if ((stcb != NULL) && 7315 (net != NULL) && 7316 (inp != NULL)) { 7317 /* Check the UDP port numbers */ 7318 if ((udp.uh_dport != net->port) || 7319 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7320 SCTP_TCB_UNLOCK(stcb); 7321 return; 7322 } 7323 /* Check the verification tag */ 7324 if (ntohl(sh.v_tag) != 0) { 7325 /* 7326 * This must be the verification tag used for 7327 * sending out packets. We don't consider packets 7328 * reflecting the verification tag. 
7329 */ 7330 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7331 SCTP_TCB_UNLOCK(stcb); 7332 return; 7333 } 7334 } else { 7335 if (ip6cp->ip6c_m->m_pkthdr.len >= 7336 ip6cp->ip6c_off + sizeof(struct udphdr) + 7337 sizeof(struct sctphdr) + 7338 sizeof(struct sctp_chunkhdr) + 7339 offsetof(struct sctp_init, a_rwnd)) { 7340 /* 7341 * In this case we can check if we got an 7342 * INIT chunk and if the initiate tag 7343 * matches. 7344 */ 7345 uint32_t initiate_tag; 7346 uint8_t chunk_type; 7347 7348 m_copydata(ip6cp->ip6c_m, 7349 ip6cp->ip6c_off + 7350 sizeof(struct udphdr) + 7351 sizeof(struct sctphdr), 7352 sizeof(uint8_t), 7353 (caddr_t)&chunk_type); 7354 m_copydata(ip6cp->ip6c_m, 7355 ip6cp->ip6c_off + 7356 sizeof(struct udphdr) + 7357 sizeof(struct sctphdr) + 7358 sizeof(struct sctp_chunkhdr), 7359 sizeof(uint32_t), 7360 (caddr_t)&initiate_tag); 7361 if ((chunk_type != SCTP_INITIATION) || 7362 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7363 SCTP_TCB_UNLOCK(stcb); 7364 return; 7365 } 7366 } else { 7367 SCTP_TCB_UNLOCK(stcb); 7368 return; 7369 } 7370 } 7371 type = ip6cp->ip6c_icmp6->icmp6_type; 7372 code = ip6cp->ip6c_icmp6->icmp6_code; 7373 if ((type == ICMP6_DST_UNREACH) && 7374 (code == ICMP6_DST_UNREACH_NOPORT)) { 7375 type = ICMP6_PARAM_PROB; 7376 code = ICMP6_PARAMPROB_NEXTHEADER; 7377 } 7378 sctp6_notify(inp, stcb, net, type, code, 7379 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7380 } else { 7381 if ((stcb == NULL) && (inp != NULL)) { 7382 /* reduce inp's ref-count */ 7383 SCTP_INP_WLOCK(inp); 7384 SCTP_INP_DECR_REF(inp); 7385 SCTP_INP_WUNLOCK(inp); 7386 } 7387 if (stcb) { 7388 SCTP_TCB_UNLOCK(stcb); 7389 } 7390 } 7391 } 7392 #endif 7393 7394 void 7395 sctp_over_udp_stop(void) 7396 { 7397 /* 7398 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7399 * for writting! 7400 */ 7401 #ifdef INET 7402 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7403 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7404 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7405 } 7406 #endif 7407 #ifdef INET6 7408 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7409 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7410 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7411 } 7412 #endif 7413 } 7414 7415 int 7416 sctp_over_udp_start(void) 7417 { 7418 uint16_t port; 7419 int ret; 7420 #ifdef INET 7421 struct sockaddr_in sin; 7422 #endif 7423 #ifdef INET6 7424 struct sockaddr_in6 sin6; 7425 #endif 7426 /* 7427 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7428 * for writting! 7429 */ 7430 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7431 if (ntohs(port) == 0) { 7432 /* Must have a port set */ 7433 return (EINVAL); 7434 } 7435 #ifdef INET 7436 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7437 /* Already running -- must stop first */ 7438 return (EALREADY); 7439 } 7440 #endif 7441 #ifdef INET6 7442 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7443 /* Already running -- must stop first */ 7444 return (EALREADY); 7445 } 7446 #endif 7447 #ifdef INET 7448 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7449 SOCK_DGRAM, IPPROTO_UDP, 7450 curthread->td_ucred, curthread))) { 7451 sctp_over_udp_stop(); 7452 return (ret); 7453 } 7454 /* Call the special UDP hook. */ 7455 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7456 sctp_recv_udp_tunneled_packet, 7457 sctp_recv_icmp_tunneled_packet, 7458 NULL))) { 7459 sctp_over_udp_stop(); 7460 return (ret); 7461 } 7462 /* Ok, we have a socket, bind it to the port. 
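 * Only the tunneling port is filled in; the address is left as the
 * wildcard, so UDP-encapsulated SCTP is accepted on any local address.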

int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 * For example, sctp_min_mtu(1500, 0, 9000) is 1500 and
 * sctp_min_mtu(0, 0, 0) is 0.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}

void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}
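
/*
 * The two helpers below manage stcb->asoc.state, which packs a single base
 * state (the bits covered by SCTP_STATE_MASK) together with OR-able substate
 * flags such as SCTP_STATE_SHUTDOWN_PENDING or SCTP_STATE_ABOUT_TO_BE_FREED.
 * sctp_set_state() replaces only the base state (and clears the
 * SHUTDOWN_PENDING substate when entering one of the SHUTDOWN states), while
 * sctp_add_substate() only sets additional flags.  Both fire the
 * SCTP_PROBE6(state__change, ...) probe on relevant transitions when
 * KDTRACE_HOOKS is configured.
 */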

void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	    (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}