/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_kdtrace.h>
#if defined(INET6) || defined(INET)
#include <netinet/tcp_var.h>
#endif
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet/icmp6.h>
#endif

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

extern const struct sctp_cc_functions sctp_cc_functions[];
extern const struct sctp_ss_functions sctp_ss_functions[];
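
/*
 * Note on the tracing helpers below: each sctp_log_*() routine fills in one
 * member of the union inside struct sctp_cwnd_log and then hands the first
 * four 32-bit words to SCTP_CTR6() through the x.misc.log1..log4 view.  This
 * appears to rely on the union members overlaying the same storage; the
 * exact layout lives in the sctp_cwnd_log definition, not in this file.
 */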

void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.sb.stcb = stcb;
    sctp_clog.x.sb.so_sbcc = sb->sb_cc;
    if (stcb)
        sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
    else
        sctp_clog.x.sb.stcb_sbcc = 0;
    sctp_clog.x.sb.incr = incr;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_SB,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.close.inp = (void *)inp;
    sctp_clog.x.close.sctp_flags = inp->sctp_flags;
    if (stcb) {
        sctp_clog.x.close.stcb = (void *)stcb;
        sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
    } else {
        sctp_clog.x.close.stcb = 0;
        sctp_clog.x.close.state = 0;
    }
    sctp_clog.x.close.loc = loc;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_CLOSE,
        0,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.rto.net = (void *)net;
    sctp_clog.x.rto.rtt = net->rtt / 1000;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RTT,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.strlog.stcb = stcb;
    sctp_clog.x.strlog.n_tsn = tsn;
    sctp_clog.x.strlog.n_sseq = sseq;
    sctp_clog.x.strlog.e_tsn = 0;
    sctp_clog.x.strlog.e_sseq = 0;
    sctp_clog.x.strlog.strm = stream;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_STRM,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.nagle.stcb = (void *)stcb;
    sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
    sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
    sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
    sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_NAGLE,
        action,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.sack.cumack = cumack;
    sctp_clog.x.sack.oldcumack = old_cumack;
    sctp_clog.x.sack.tsn = tsn;
    sctp_clog.x.sack.numGaps = gaps;
    sctp_clog.x.sack.numDups = dups;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_SACK,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.map.base = map;
    sctp_clog.x.map.cum = cum;
    sctp_clog.x.map.high = high;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MAP,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.fr.largest_tsn = biggest_tsn;
    sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
    sctp_clog.x.fr.tsn = tsn;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_FR,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.mb.mp = m;
    sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
    sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
    sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
    if (SCTP_BUF_IS_EXTENDED(m)) {
        sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
        sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
    } else {
        sctp_clog.x.mb.ext = 0;
        sctp_clog.x.mb.refcnt = 0;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MBUF,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_mbc(struct mbuf *m, int from)
{
    struct mbuf *mat;

    for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
        sctp_log_mb(mat, from);
    }
}
#endif

void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    if (control == NULL) {
        SCTP_PRINTF("Gak log of NULL?\n");
        return;
    }
    sctp_clog.x.strlog.stcb = control->stcb;
    sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
    sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
    sctp_clog.x.strlog.strm = control->sinfo_stream;
    if (poschk != NULL) {
        sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
        sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
    } else {
        sctp_clog.x.strlog.e_tsn = 0;
        sctp_clog.x.strlog.e_sseq = 0;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_STRM,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.cwnd.net = net;
    if (stcb->asoc.send_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_send = 255;
    else
        sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
    if (stcb->asoc.stream_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_str = 255;
    else
        sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

    if (net) {
        sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
        sctp_clog.x.cwnd.inflight = net->flight_size;
        sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
        sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
        sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
    }
    if (SCTP_CWNDLOG_PRESEND == from) {
        sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
    }
    sctp_clog.x.cwnd.cwnd_augment = augment;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_CWND,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    if (inp) {
        sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
    } else {
        sctp_clog.x.lock.sock = (void *)NULL;
    }
    sctp_clog.x.lock.inp = (void *)inp;
    if (stcb) {
        sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
    } else {
        sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
    }
    if (inp) {
        sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
        sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
    } else {
        sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
    }
    sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
    if (inp && (inp->sctp_socket)) {
        sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
        sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
        sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
    } else {
        sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_LOCK_EVENT,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.cwnd.net = net;
    sctp_clog.x.cwnd.cwnd_new_value = error;
    sctp_clog.x.cwnd.inflight = net->flight_size;
    sctp_clog.x.cwnd.cwnd_augment = burst;
    if (stcb->asoc.send_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_send = 255;
    else
        sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
    if (stcb->asoc.stream_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_str = 255;
    else
        sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MAXBURST,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.rwnd.rwnd = peers_rwnd;
    sctp_clog.x.rwnd.send_size = snd_size;
    sctp_clog.x.rwnd.overhead = overhead;
    sctp_clog.x.rwnd.new_rwnd = 0;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RWND,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.rwnd.rwnd = peers_rwnd;
    sctp_clog.x.rwnd.send_size = flight_size;
    sctp_clog.x.rwnd.overhead = overhead;
    sctp_clog.x.rwnd.new_rwnd = a_rwndval;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RWND,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

#ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.mbcnt.total_queue_size = total_oq;
    sctp_clog.x.mbcnt.size_change = book;
    sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
    sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MBCNT,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}
#endif

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_MISC_EVENT,
        from,
        a, b, c, d);
#endif
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.wake.stcb = (void *)stcb;
    sctp_clog.x.wake.wake_cnt = wake_cnt;
    sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
    sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
    sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

    if (stcb->asoc.stream_queue_cnt < 0xff)
        sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
    else
        sctp_clog.x.wake.stream_qcnt = 0xff;

    if (stcb->asoc.chunks_on_out_queue < 0xff)
        sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
    else
        sctp_clog.x.wake.chunks_on_oque = 0xff;

    sctp_clog.x.wake.sctpflags = 0;
    /* set in the deferred mode stuff */
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
        sctp_clog.x.wake.sctpflags |= 1;
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
        sctp_clog.x.wake.sctpflags |= 2;
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
        sctp_clog.x.wake.sctpflags |= 4;
    /* what about the sb */
    if (stcb->sctp_socket) {
        struct socket *so = stcb->sctp_socket;

        sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
    } else {
        sctp_clog.x.wake.sbflags = 0xff;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_WAKE,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

void
sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
    sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
    sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
    sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
    sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
    sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
    sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_BLOCK,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
#endif
}

int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
    /* May need to fix this if ktrdump does not work */
    return (0);
}

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static void
sctp_print_audit_report(void)
{
    int i;
    int cnt;

    cnt = 0;
    for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
        if ((sctp_audit_data[i][0] == 0xe0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if (sctp_audit_data[i][0] == 0xf0) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if ((sctp_audit_data[i][0] == 0xc0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            SCTP_PRINTF("\n");
            cnt = 0;
        }
        SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
            (uint32_t)sctp_audit_data[i][1]);
        cnt++;
        if ((cnt % 14) == 0)
            SCTP_PRINTF("\n");
    }
    for (i = 0; i < sctp_audit_indx; i++) {
        if ((sctp_audit_data[i][0] == 0xe0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if (sctp_audit_data[i][0] == 0xf0) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if ((sctp_audit_data[i][0] == 0xc0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            SCTP_PRINTF("\n");
            cnt = 0;
        }
        SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
            (uint32_t)sctp_audit_data[i][1]);
        cnt++;
        if ((cnt % 14) == 0)
            SCTP_PRINTF("\n");
    }
    SCTP_PRINTF("\n");
}
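
/*
 * The audit trail above is a fixed-size ring of two-byte records: byte 0
 * holds an event/marker code (0xe0/0x01, 0xf0 and 0xc0/0x01 are treated as
 * report separators) and byte 1 carries event data.  sctp_audit_indx wraps
 * at SCTP_AUDIT_SIZE, so sctp_print_audit_report() walks the buffer in two
 * passes to print the entries oldest first.
 */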

void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    int resend_cnt, tot_out, rep, tot_book_cnt;
    struct sctp_nets *lnet;
    struct sctp_tmit_chunk *chk;

    sctp_audit_data[sctp_audit_indx][0] = 0xAA;
    sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
    if (inp == NULL) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0x01;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        return;
    }
    if (stcb == NULL) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0x02;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        return;
    }
    sctp_audit_data[sctp_audit_indx][0] = 0xA1;
    sctp_audit_data[sctp_audit_indx][1] =
        (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
    rep = 0;
    tot_book_cnt = 0;
    resend_cnt = tot_out = 0;
    TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
        if (chk->sent == SCTP_DATAGRAM_RESEND) {
            resend_cnt++;
        } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
            tot_out += chk->book_size;
            tot_book_cnt++;
        }
    }
    if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA1;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
            resend_cnt, stcb->asoc.sent_queue_retran_cnt);
        rep = 1;
        stcb->asoc.sent_queue_retran_cnt = resend_cnt;
        sctp_audit_data[sctp_audit_indx][0] = 0xA2;
        sctp_audit_data[sctp_audit_indx][1] =
            (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
    }
    if (tot_out != stcb->asoc.total_flight) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA2;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
            (int)stcb->asoc.total_flight);
        stcb->asoc.total_flight = tot_out;
    }
    if (tot_book_cnt != stcb->asoc.total_flight_count) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA5;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

        stcb->asoc.total_flight_count = tot_book_cnt;
    }
    tot_out = 0;
    TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
        tot_out += lnet->flight_size;
    }
    if (tot_out != stcb->asoc.total_flight) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA3;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("real flight:%d net total was %d\n",
            stcb->asoc.total_flight, tot_out);
        /* now corrective action */
        TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
            tot_out = 0;
            TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
                if ((chk->whoTo == lnet) &&
                    (chk->sent < SCTP_DATAGRAM_RESEND)) {
                    tot_out += chk->book_size;
                }
            }
            if (lnet->flight_size != tot_out) {
                SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
                    (void *)lnet, lnet->flight_size,
                    tot_out);
                lnet->flight_size = tot_out;
            }
        }
    }
    if (rep) {
        sctp_print_audit_report();
    }
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{
    sctp_audit_data[sctp_audit_indx][0] = ev;
    sctp_audit_data[sctp_audit_indx][1] = fd;
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
}

#endif

/*
 * The conversion from time to ticks and vice versa is done by rounding
 * upwards. This way, a time that tests as positive in the code is known to
 * correspond to a positive number of ticks.
 */

uint32_t
sctp_msecs_to_ticks(uint32_t msecs)
{
    uint64_t temp;
    uint32_t ticks;

    if (hz == 1000) {
        ticks = msecs;
    } else {
        temp = (((uint64_t)msecs * hz) + 999) / 1000;
        if (temp > UINT32_MAX) {
            ticks = UINT32_MAX;
        } else {
            ticks = (uint32_t)temp;
        }
    }
    return (ticks);
}

uint32_t
sctp_ticks_to_msecs(uint32_t ticks)
{
    uint64_t temp;
    uint32_t msecs;

    if (hz == 1000) {
        msecs = ticks;
    } else {
        temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
        if (temp > UINT32_MAX) {
            msecs = UINT32_MAX;
        } else {
            msecs = (uint32_t)temp;
        }
    }
    return (msecs);
}

uint32_t
sctp_secs_to_ticks(uint32_t secs)
{
    uint64_t temp;
    uint32_t ticks;

    temp = (uint64_t)secs * hz;
    if (temp > UINT32_MAX) {
        ticks = UINT32_MAX;
    } else {
        ticks = (uint32_t)temp;
    }
    return (ticks);
}

uint32_t
sctp_ticks_to_secs(uint32_t ticks)
{
    uint64_t temp;
    uint32_t secs;

    temp = ((uint64_t)ticks + (hz - 1)) / hz;
    if (temp > UINT32_MAX) {
        secs = UINT32_MAX;
    } else {
        secs = (uint32_t)temp;
    }
    return (secs);
}
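
/*
 * Worked example (illustrative only): with hz = 250, the rounding above gives
 * sctp_msecs_to_ticks(30) = (30 * 250 + 999) / 1000 = 8 ticks, and
 * sctp_ticks_to_msecs(8) = (8 * 1000 + 249) / 250 = 32 msecs.  Round-tripping
 * can therefore grow a value slightly, but it never turns a positive
 * duration into zero.
 */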

/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
    struct sctp_inpcb *inp;
    struct sctp_nets *net;

    inp = stcb->sctp_ep;

    sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
    sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
    sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
    sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
    TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
        sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
        sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
    }
}

void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
    struct sctp_inpcb *inp;
    struct sctp_nets *net;

    inp = stcb->sctp_ep;
    sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
    sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
    if (stop_assoc_kill_timer) {
        sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
    }
    sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
    sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
    sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
    /* Mobility adaptation */
    sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
        SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
    TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
        sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
        sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
        sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
        sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
        sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
        sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
        sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
            SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
    }
}

/*
 * A list of sizes based on typical mtu's, used only if next hop size not
 * returned. These values MUST be multiples of 4 and MUST be ordered.
 */
static uint32_t sctp_mtu_sizes[] = {
    68,
    296,
    508,
    512,
    544,
    576,
    1004,
    1492,
    1500,
    1536,
    2000,
    2048,
    4352,
    4464,
    8168,
    17912,
    32000,
    65532
};

/*
 * Return the largest MTU in sctp_mtu_sizes smaller than val.
 * If val is smaller than the minimum, just return the largest
 * multiple of 4 smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
    uint32_t i;

    val &= 0xfffffffc;
    if (val <= sctp_mtu_sizes[0]) {
        return (val);
    }
    for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
        if (val <= sctp_mtu_sizes[i]) {
            break;
        }
    }
    KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
        ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
    return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU in sctp_mtu_sizes larger than val.
 * If val is larger than the maximum, just return the largest multiple of 4
 * smaller or equal to val.
 * Ensure that the result is a multiple of 4.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
    /* select another MTU that is just bigger than this one */
    uint32_t i;

    val &= 0xfffffffc;
    for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
        if (val < sctp_mtu_sizes[i]) {
            KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
                ("sctp_mtu_sizes[%u] not a multiple of 4", i));
            return (sctp_mtu_sizes[i]);
        }
    }
    return (val);
}
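
/*
 * Worked example (illustrative only): sctp_get_prev_mtu(1500) scans the table
 * and returns 1492, while sctp_get_next_mtu(1500) returns 1536.  A value
 * below the smallest table entry is only rounded down to a multiple of 4,
 * e.g. sctp_get_prev_mtu(50) = 48, and a value above the largest entry is
 * handed back unchanged (rounded down to a multiple of 4) by
 * sctp_get_next_mtu().
 */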

void
sctp_fill_random_store(struct sctp_pcb *m)
{
    /*
     * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
     * our counter. The result becomes our good random numbers and we
     * then setup to give these out. Note that we do no locking to
     * protect this. This is ok, since if competing folks call this we
     * will get more gobbledygook in the random store, which is what we
     * want. There is a danger that two callers will use the same random
     * numbers, but that's ok too since that is random as well :->
     */
    m->store_at = 0;
    (void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
        sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
        sizeof(m->random_counter), (uint8_t *)m->random_store);
    m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
    /*
     * A true implementation should use a random selection process to get
     * the initial stream sequence number, using RFC1750 as a good
     * guideline
     */
    uint32_t x, *xp;
    uint8_t *p;
    int store_at, new_store;

    if (inp->initial_sequence_debug != 0) {
        uint32_t ret;

        ret = inp->initial_sequence_debug;
        inp->initial_sequence_debug++;
        return (ret);
    }
retry:
    store_at = inp->store_at;
    new_store = store_at + sizeof(uint32_t);
    if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
        new_store = 0;
    }
    if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
        goto retry;
    }
    if (new_store == 0) {
        /* Refill the random store */
        sctp_fill_random_store(inp);
    }
    p = &inp->random_store[store_at];
    xp = (uint32_t *)p;
    x = *xp;
    return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
    uint32_t x;
    struct timeval now;

    if (check) {
        (void)SCTP_GETTIME_TIMEVAL(&now);
    }
    for (;;) {
        x = sctp_select_initial_TSN(&inp->sctp_ep);
        if (x == 0) {
            /* we never use 0 */
            continue;
        }
        if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
            break;
        }
    }
    return (x);
}
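
/*
 * Note: initial TSNs and verification tags are both drawn from the same
 * random store.  sctp_select_a_tag() never returns 0, and when 'check' is
 * non-zero the candidate tag is also validated against the vtag time-wait
 * state via sctp_is_vtag_good().  sctp_init_asoc() below uses it that way:
 * check == 1 for the association's own vtag and check == 0 for the nonce
 * tags.
 */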

int32_t
sctp_map_assoc_state(int kernel_state)
{
    int32_t user_state;

    if (kernel_state & SCTP_STATE_WAS_ABORTED) {
        user_state = SCTP_CLOSED;
    } else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
        user_state = SCTP_SHUTDOWN_PENDING;
    } else {
        switch (kernel_state & SCTP_STATE_MASK) {
        case SCTP_STATE_EMPTY:
            user_state = SCTP_CLOSED;
            break;
        case SCTP_STATE_INUSE:
            user_state = SCTP_CLOSED;
            break;
        case SCTP_STATE_COOKIE_WAIT:
            user_state = SCTP_COOKIE_WAIT;
            break;
        case SCTP_STATE_COOKIE_ECHOED:
            user_state = SCTP_COOKIE_ECHOED;
            break;
        case SCTP_STATE_OPEN:
            user_state = SCTP_ESTABLISHED;
            break;
        case SCTP_STATE_SHUTDOWN_SENT:
            user_state = SCTP_SHUTDOWN_SENT;
            break;
        case SCTP_STATE_SHUTDOWN_RECEIVED:
            user_state = SCTP_SHUTDOWN_RECEIVED;
            break;
        case SCTP_STATE_SHUTDOWN_ACK_SENT:
            user_state = SCTP_SHUTDOWN_ACK_SENT;
            break;
        default:
            user_state = SCTP_CLOSED;
            break;
        }
    }
    return (user_state);
}

int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
{
    struct sctp_association *asoc;

    /*
     * Anything set to zero is taken care of by the allocation routine's
     * bzero
     */

    /*
     * Up front select what scoping to apply on addresses I tell my peer.
     * Not sure what to do with these right now, we will need to come up
     * with a way to set them. We may need to pass them through from the
     * caller in the sctp_aloc_assoc() function.
     */
    int i;
#if defined(SCTP_DETAILED_STR_STATS)
    int j;
#endif

    asoc = &stcb->asoc;
    /* init all variables to a known value. */
    SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
    asoc->max_burst = inp->sctp_ep.max_burst;
    asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
    asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
    asoc->cookie_life = inp->sctp_ep.def_cookie_life;
    asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
    asoc->ecn_supported = inp->ecn_supported;
    asoc->prsctp_supported = inp->prsctp_supported;
    asoc->auth_supported = inp->auth_supported;
    asoc->asconf_supported = inp->asconf_supported;
    asoc->reconfig_supported = inp->reconfig_supported;
    asoc->nrsack_supported = inp->nrsack_supported;
    asoc->pktdrop_supported = inp->pktdrop_supported;
    asoc->idata_supported = inp->idata_supported;
    asoc->sctp_cmt_pf = (uint8_t)0;
    asoc->sctp_frag_point = inp->sctp_frag_point;
    asoc->sctp_features = inp->sctp_features;
    asoc->default_dscp = inp->sctp_ep.default_dscp;
    asoc->max_cwnd = inp->max_cwnd;
#ifdef INET6
    if (inp->sctp_ep.default_flowlabel) {
        asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
    } else {
        if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
            asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
            asoc->default_flowlabel &= 0x000fffff;
            asoc->default_flowlabel |= 0x80000000;
        } else {
            asoc->default_flowlabel = 0;
        }
    }
#endif
    asoc->sb_send_resv = 0;
    if (override_tag) {
        asoc->my_vtag = override_tag;
    } else {
        asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
    }
    /* Get the nonce tags */
    asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
    asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
    asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
    asoc->tsn_in_at = 0;
    asoc->tsn_out_at = 0;
    asoc->tsn_in_wrapped = 0;
    asoc->tsn_out_wrapped = 0;
    asoc->cumack_log_at = 0;
    asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
    asoc->fs_index = 0;
#endif
    asoc->refcnt = 0;
    asoc->assoc_up_sent = 0;
    asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
        sctp_select_initial_TSN(&inp->sctp_ep);
    asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
    /* we are optimistic here */
    asoc->peer_supports_nat = 0;
    asoc->sent_queue_retran_cnt = 0;

    /* for CMT */
    asoc->last_net_cmt_send_started = NULL;

    /* This will need to be adjusted */
    asoc->last_acked_seq = asoc->init_seq_number - 1;
    asoc->advanced_peer_ack_point = asoc->last_acked_seq;
    asoc->asconf_seq_in = asoc->last_acked_seq;

    /* here we are different, we hold the next one we expect */
    asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

    asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
    asoc->initial_rto = inp->sctp_ep.initial_rto;

    asoc->default_mtu = inp->sctp_ep.default_mtu;
    asoc->max_init_times = inp->sctp_ep.max_init_times;
    asoc->max_send_times = inp->sctp_ep.max_send_times;
    asoc->def_net_failure = inp->sctp_ep.def_net_failure;
    asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
    asoc->free_chunk_cnt = 0;

    asoc->iam_blocking = 0;
    asoc->context = inp->sctp_context;
    asoc->local_strreset_support = inp->local_strreset_support;
    asoc->def_send = inp->def_send;
    asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
    asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
    asoc->pr_sctp_cnt = 0;
    asoc->total_output_queue_size = 0;

    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
        asoc->scope.ipv6_addr_legal = 1;
        if (SCTP_IPV6_V6ONLY(inp) == 0) {
            asoc->scope.ipv4_addr_legal = 1;
        } else {
            asoc->scope.ipv4_addr_legal = 0;
        }
    } else {
        asoc->scope.ipv6_addr_legal = 0;
        asoc->scope.ipv4_addr_legal = 1;
    }

    asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
    asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

    asoc->smallest_mtu = inp->sctp_frag_point;
    asoc->minrto = inp->sctp_ep.sctp_minrto;
    asoc->maxrto = inp->sctp_ep.sctp_maxrto;

    asoc->stream_locked_on = 0;
    asoc->ecn_echo_cnt_onq = 0;
    asoc->stream_locked = 0;

    asoc->send_sack = 1;

    LIST_INIT(&asoc->sctp_restricted_addrs);

    TAILQ_INIT(&asoc->nets);
    TAILQ_INIT(&asoc->pending_reply_queue);
    TAILQ_INIT(&asoc->asconf_ack_sent);
    /* Setup to fill the hb random cache at first HB */
    asoc->hb_random_idx = 4;

    asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

    stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
    stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

    stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
    stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

    /*
     * Now the stream parameters, here we allocate space for all streams
     * that we request by default.
     */
    asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
        o_strms;
    SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
        asoc->streamoutcnt * sizeof(struct sctp_stream_out),
        SCTP_M_STRMO);
    if (asoc->strmout == NULL) {
        /* big trouble no memory */
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    for (i = 0; i < asoc->streamoutcnt; i++) {
        /*
         * inbound side must be set to 0xffff, also NOTE when we get
         * the INIT-ACK back (for INIT sender) we MUST reduce the
         * count (streamoutcnt) but first check if we sent to any of
         * the upper streams that were dropped (if some were). Those
         * that were dropped must be notified to the upper layer as
         * failed to send.
         */
        asoc->strmout[i].next_mid_ordered = 0;
        asoc->strmout[i].next_mid_unordered = 0;
        TAILQ_INIT(&asoc->strmout[i].outqueue);
        asoc->strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
        for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
            asoc->strmout[i].abandoned_sent[j] = 0;
            asoc->strmout[i].abandoned_unsent[j] = 0;
        }
#else
        asoc->strmout[i].abandoned_sent[0] = 0;
        asoc->strmout[i].abandoned_unsent[0] = 0;
#endif
        asoc->strmout[i].sid = i;
        asoc->strmout[i].last_msg_incomplete = 0;
        asoc->strmout[i].state = SCTP_STREAM_OPENING;
        asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
    }
    asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

    /* Now the mapping array */
    asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
    SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
        SCTP_M_MAP);
    if (asoc->mapping_array == NULL) {
        SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    memset(asoc->mapping_array, 0, asoc->mapping_array_size);
    SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
        SCTP_M_MAP);
    if (asoc->nr_mapping_array == NULL) {
        SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
        SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

    /* Now the init of the other outqueues */
    TAILQ_INIT(&asoc->free_chunks);
    TAILQ_INIT(&asoc->control_send_queue);
    TAILQ_INIT(&asoc->asconf_send_queue);
    TAILQ_INIT(&asoc->send_queue);
    TAILQ_INIT(&asoc->sent_queue);
    TAILQ_INIT(&asoc->resetHead);
    asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
    TAILQ_INIT(&asoc->asconf_queue);
    /* authentication fields */
    asoc->authinfo.random = NULL;
    asoc->authinfo.active_keyid = 0;
    asoc->authinfo.assoc_key = NULL;
    asoc->authinfo.assoc_keyid = 0;
    asoc->authinfo.recv_key = NULL;
    asoc->authinfo.recv_keyid = 0;
    LIST_INIT(&asoc->shared_keys);
    asoc->marked_retrans = 0;
    asoc->port = inp->sctp_ep.port;
    asoc->timoinit = 0;
    asoc->timodata = 0;
    asoc->timosack = 0;
    asoc->timoshutdown = 0;
    asoc->timoheartbeat = 0;
    asoc->timocookie = 0;
    asoc->timoshutdownack = 0;
    (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
    asoc->discontinuity_time = asoc->start_time;
    for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
        asoc->abandoned_unsent[i] = 0;
        asoc->abandoned_sent[i] = 0;
    }
    /*
     * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
     * freed later when the association is freed.
     */
    return (0);
}

void
sctp_print_mapping_array(struct sctp_association *asoc)
{
    unsigned int i, limit;

    SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
        asoc->mapping_array_size,
        asoc->mapping_array_base_tsn,
        asoc->cumulative_tsn,
        asoc->highest_tsn_inside_map,
        asoc->highest_tsn_inside_nr_map);
    for (limit = asoc->mapping_array_size; limit > 1; limit--) {
        if (asoc->mapping_array[limit - 1] != 0) {
            break;
        }
    }
    SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
    for (i = 0; i < limit; i++) {
        SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
    }
    if (limit % 16)
        SCTP_PRINTF("\n");
    for (limit = asoc->mapping_array_size; limit > 1; limit--) {
        if (asoc->nr_mapping_array[limit - 1]) {
            break;
        }
    }
    SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
    for (i = 0; i < limit; i++) {
        SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
    }
    if (limit % 16)
        SCTP_PRINTF("\n");
}

int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
    /* mapping array needs to grow */
    uint8_t *new_array1, *new_array2;
    uint32_t new_size;

    new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
    SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
    SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
    if ((new_array1 == NULL) || (new_array2 == NULL)) {
        /* can't get more, forget it */
        SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
        if (new_array1) {
            SCTP_FREE(new_array1, SCTP_M_MAP);
        }
        if (new_array2) {
            SCTP_FREE(new_array2, SCTP_M_MAP);
        }
        return (-1);
    }
    memset(new_array1, 0, new_size);
    memset(new_array2, 0, new_size);
    memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
    memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
    SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
    SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
    asoc->mapping_array = new_array1;
    asoc->nr_mapping_array = new_array2;
    asoc->mapping_array_size = new_size;
    return (0);
}
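
/*
 * Sizing note (illustrative only): the mapping array holds one bit per TSN,
 * so a request for 'needed' additional TSNs adds (needed + 7) / 8 bytes,
 * rounded up, plus SCTP_MAPPING_ARRAY_INCR bytes of slack.  For example,
 * needed = 20 grows the array by 3 bytes plus the increment.  The renegable
 * and non-renegable arrays are always kept the same size.
 */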

static void
sctp_iterator_work(struct sctp_iterator *it)
{
    struct epoch_tracker et;
    struct sctp_inpcb *tinp;
    int iteration_count = 0;
    int inp_skip = 0;
    int first_in = 1;

    NET_EPOCH_ENTER(et);
    SCTP_INP_INFO_RLOCK();
    SCTP_ITERATOR_LOCK();
    sctp_it_ctl.cur_it = it;
    if (it->inp) {
        SCTP_INP_RLOCK(it->inp);
        SCTP_INP_DECR_REF(it->inp);
    }
    if (it->inp == NULL) {
        /* iterator is complete */
done_with_iterator:
        sctp_it_ctl.cur_it = NULL;
        SCTP_ITERATOR_UNLOCK();
        SCTP_INP_INFO_RUNLOCK();
        if (it->function_atend != NULL) {
            (*it->function_atend) (it->pointer, it->val);
        }
        SCTP_FREE(it, SCTP_M_ITER);
        NET_EPOCH_EXIT(et);
        return;
    }
select_a_new_ep:
    if (first_in) {
        first_in = 0;
    } else {
        SCTP_INP_RLOCK(it->inp);
    }
    while (((it->pcb_flags) &&
        ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
        ((it->pcb_features) &&
        ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
        /* endpoint flags or features don't match, so keep looking */
        if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
            SCTP_INP_RUNLOCK(it->inp);
            goto done_with_iterator;
        }
        tinp = it->inp;
        it->inp = LIST_NEXT(it->inp, sctp_list);
        it->stcb = NULL;
        SCTP_INP_RUNLOCK(tinp);
        if (it->inp == NULL) {
            goto done_with_iterator;
        }
        SCTP_INP_RLOCK(it->inp);
    }
    /* now go through each assoc which is in the desired state */
    if (it->done_current_ep == 0) {
        if (it->function_inp != NULL)
            inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
        it->done_current_ep = 1;
    }
    if (it->stcb == NULL) {
        /* run the per instance function */
        it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
    }
    if ((inp_skip) || it->stcb == NULL) {
        if (it->function_inp_end != NULL) {
            inp_skip = (*it->function_inp_end) (it->inp,
                it->pointer,
                it->val);
        }
        SCTP_INP_RUNLOCK(it->inp);
        goto no_stcb;
    }
    while (it->stcb) {
        SCTP_TCB_LOCK(it->stcb);
        if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
            /* not in the right state... keep looking */
            SCTP_TCB_UNLOCK(it->stcb);
            goto next_assoc;
        }
        /* see if we have limited out the iterator loop */
        iteration_count++;
        if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
            /* Pause to let others grab the lock */
            atomic_add_int(&it->stcb->asoc.refcnt, 1);
            SCTP_TCB_UNLOCK(it->stcb);
            SCTP_INP_INCR_REF(it->inp);
            SCTP_INP_RUNLOCK(it->inp);
            SCTP_ITERATOR_UNLOCK();
            SCTP_INP_INFO_RUNLOCK();
            SCTP_INP_INFO_RLOCK();
            SCTP_ITERATOR_LOCK();
            if (sctp_it_ctl.iterator_flags) {
                /* We won't be staying here */
                SCTP_INP_DECR_REF(it->inp);
                atomic_add_int(&it->stcb->asoc.refcnt, -1);
                if (sctp_it_ctl.iterator_flags &
                    SCTP_ITERATOR_STOP_CUR_IT) {
                    sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
                    goto done_with_iterator;
                }
                if (sctp_it_ctl.iterator_flags &
                    SCTP_ITERATOR_STOP_CUR_INP) {
                    sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
                    goto no_stcb;
                }
                /* If we reach here, huh? */
                SCTP_PRINTF("Unknown it ctl flag %x\n",
                    sctp_it_ctl.iterator_flags);
                sctp_it_ctl.iterator_flags = 0;
            }
            SCTP_INP_RLOCK(it->inp);
            SCTP_INP_DECR_REF(it->inp);
            SCTP_TCB_LOCK(it->stcb);
            atomic_add_int(&it->stcb->asoc.refcnt, -1);
            iteration_count = 0;
        }
        KASSERT(it->inp == it->stcb->sctp_ep,
            ("%s: stcb %p does not belong to inp %p, but inp %p",
            __func__, it->stcb, it->inp, it->stcb->sctp_ep));

        /* run function on this one */
        (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

        /*
         * we lie here, it really needs to have its own type but
         * first I must verify that this won't affect things :-0
         */
        if (it->no_chunk_output == 0)
            sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

        SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
        it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
        if (it->stcb == NULL) {
            /* Run last function */
            if (it->function_inp_end != NULL) {
                inp_skip = (*it->function_inp_end) (it->inp,
                    it->pointer,
                    it->val);
            }
        }
    }
    SCTP_INP_RUNLOCK(it->inp);
no_stcb:
    /* done with all assocs on this endpoint, move on to next endpoint */
    it->done_current_ep = 0;
    if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
        it->inp = NULL;
    } else {
        it->inp = LIST_NEXT(it->inp, sctp_list);
    }
    it->stcb = NULL;
    if (it->inp == NULL) {
        goto done_with_iterator;
    }
    goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
    struct sctp_iterator *it;

    /* This function is called with the WQ lock in place */
    sctp_it_ctl.iterator_running = 1;
    while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
        /* now lets work on this one */
        TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
        SCTP_IPI_ITERATOR_WQ_UNLOCK();
        CURVNET_SET(it->vn);
        sctp_iterator_work(it);
        CURVNET_RESTORE();
        SCTP_IPI_ITERATOR_WQ_LOCK();
        /* sa_ignore FREED_MEMORY */
    }
    sctp_it_ctl.iterator_running = 0;
    return;
}

static void
sctp_handle_addr_wq(void)
{
    /* deal with the ADDR wq from the rtsock calls */
    struct sctp_laddr *wi, *nwi;
    struct sctp_asconf_iterator *asc;

    SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
        sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
    if (asc == NULL) {
        /* Try later, no memory */
        sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
            (struct sctp_inpcb *)NULL,
            (struct sctp_tcb *)NULL,
            (struct sctp_nets *)NULL);
        return;
    }
    LIST_INIT(&asc->list_of_work);
    asc->cnt = 0;

    LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
        LIST_REMOVE(wi, sctp_nxt_addr);
        LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
        asc->cnt++;
    }

    if (asc->cnt == 0) {
        SCTP_FREE(asc, SCTP_M_ASC_IT);
    } else {
        int ret;

        ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
            sctp_asconf_iterator_stcb,
            NULL, /* No ep end for boundall */
            SCTP_PCB_FLAGS_BOUNDALL,
            SCTP_PCB_ANY_FEATURES,
            SCTP_ASOC_ANY_STATE,
            (void *)asc, 0,
            sctp_asconf_iterator_end, NULL, 0);
        if (ret) {
            SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
            /*
             * Free the list if we are stopping; otherwise put
             * the work items back on the addr_wq.
             */
            if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
                sctp_asconf_iterator_end(asc, 0);
            } else {
                LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
                    LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
                }
                SCTP_FREE(asc, SCTP_M_ASC_IT);
            }
        }
    }
}
1668 */ 1669 if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) { 1670 sctp_asconf_iterator_end(asc, 0); 1671 } else { 1672 LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) { 1673 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 1674 } 1675 SCTP_FREE(asc, SCTP_M_ASC_IT); 1676 } 1677 } 1678 } 1679 } 1680 1681 /*- 1682 * The following table shows which pointers for the inp, stcb, or net are 1683 * stored for each timer after it was started. 1684 * 1685 *|Name |Timer |inp |stcb|net | 1686 *|-----------------------------|-----------------------------|----|----|----| 1687 *|SCTP_TIMER_TYPE_SEND |net->rxt_timer |Yes |Yes |Yes | 1688 *|SCTP_TIMER_TYPE_INIT |net->rxt_timer |Yes |Yes |Yes | 1689 *|SCTP_TIMER_TYPE_RECV |stcb->asoc.dack_timer |Yes |Yes |No | 1690 *|SCTP_TIMER_TYPE_SHUTDOWN |net->rxt_timer |Yes |Yes |Yes | 1691 *|SCTP_TIMER_TYPE_HEARTBEAT |net->hb_timer |Yes |Yes |Yes | 1692 *|SCTP_TIMER_TYPE_COOKIE |net->rxt_timer |Yes |Yes |Yes | 1693 *|SCTP_TIMER_TYPE_NEWCOOKIE |inp->sctp_ep.signature_change|Yes |No |No | 1694 *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer |Yes |Yes |Yes | 1695 *|SCTP_TIMER_TYPE_SHUTDOWNACK |net->rxt_timer |Yes |Yes |Yes | 1696 *|SCTP_TIMER_TYPE_ASCONF |stcb->asoc.asconf_timer |Yes |Yes |Yes | 1697 *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer |Yes |Yes |No | 1698 *|SCTP_TIMER_TYPE_AUTOCLOSE |stcb->asoc.autoclose_timer |Yes |Yes |No | 1699 *|SCTP_TIMER_TYPE_STRRESET |stcb->asoc.strreset_timer |Yes |Yes |No | 1700 *|SCTP_TIMER_TYPE_INPKILL |inp->sctp_ep.signature_change|Yes |No |No | 1701 *|SCTP_TIMER_TYPE_ASOCKILL |stcb->asoc.strreset_timer |Yes |Yes |No | 1702 *|SCTP_TIMER_TYPE_ADDR_WQ |SCTP_BASE_INFO(addr_wq_timer)|No |No |No | 1703 *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No | 1704 */ 1705 1706 void 1707 sctp_timeout_handler(void *t) 1708 { 1709 struct epoch_tracker et; 1710 struct timeval tv; 1711 struct sctp_inpcb *inp; 1712 struct sctp_tcb *stcb; 1713 struct sctp_nets *net; 1714 struct sctp_timer *tmr; 1715 struct mbuf *op_err; 1716 int did_output; 1717 int type; 1718 int i, secret; 1719 1720 tmr = (struct sctp_timer *)t; 1721 inp = (struct sctp_inpcb *)tmr->ep; 1722 stcb = (struct sctp_tcb *)tmr->tcb; 1723 net = (struct sctp_nets *)tmr->net; 1724 CURVNET_SET((struct vnet *)tmr->vnet); 1725 did_output = 1; 1726 1727 #ifdef SCTP_AUDITING_ENABLED 1728 sctp_audit_log(0xF0, (uint8_t)tmr->type); 1729 sctp_auditing(3, inp, stcb, net); 1730 #endif 1731 1732 /* sanity checks... 

void
sctp_timeout_handler(void *t)
{
    struct epoch_tracker et;
    struct timeval tv;
    struct sctp_inpcb *inp;
    struct sctp_tcb *stcb;
    struct sctp_nets *net;
    struct sctp_timer *tmr;
    struct mbuf *op_err;
    int did_output;
    int type;
    int i, secret;

    tmr = (struct sctp_timer *)t;
    inp = (struct sctp_inpcb *)tmr->ep;
    stcb = (struct sctp_tcb *)tmr->tcb;
    net = (struct sctp_nets *)tmr->net;
    CURVNET_SET((struct vnet *)tmr->vnet);
    did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
    sctp_audit_log(0xF0, (uint8_t)tmr->type);
    sctp_auditing(3, inp, stcb, net);
#endif

    /* sanity checks... */
    KASSERT(tmr->self == NULL || tmr->self == tmr,
        ("sctp_timeout_handler: tmr->self corrupted"));
    KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
        ("sctp_timeout_handler: invalid timer type %d", tmr->type));
    type = tmr->type;
    KASSERT(stcb == NULL || stcb->sctp_ep == inp,
        ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
        type, stcb, stcb->sctp_ep));
    if (inp) {
        SCTP_INP_INCR_REF(inp);
    }
    tmr->stopped_from = 0xa001;
    if (stcb) {
        atomic_add_int(&stcb->asoc.refcnt, 1);
        if (stcb->asoc.state == 0) {
            atomic_add_int(&stcb->asoc.refcnt, -1);
            if (inp) {
                SCTP_INP_DECR_REF(inp);
            }
            SCTPDBG(SCTP_DEBUG_TIMER2,
                "Timer type %d handler exiting due to CLOSED association.\n",
                type);
            CURVNET_RESTORE();
            return;
        }
    }
    tmr->stopped_from = 0xa002;
    SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
    if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
        if (inp) {
            SCTP_INP_DECR_REF(inp);
        }
        if (stcb) {
            atomic_add_int(&stcb->asoc.refcnt, -1);
        }
        SCTPDBG(SCTP_DEBUG_TIMER2,
            "Timer type %d handler exiting due to not being active.\n",
            type);
        CURVNET_RESTORE();
        return;
    }

    tmr->stopped_from = 0xa003;
    if (stcb) {
        SCTP_TCB_LOCK(stcb);
        atomic_add_int(&stcb->asoc.refcnt, -1);
        if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
            ((stcb->asoc.state == 0) ||
            (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
            SCTP_TCB_UNLOCK(stcb);
            if (inp) {
                SCTP_INP_DECR_REF(inp);
            }
            SCTPDBG(SCTP_DEBUG_TIMER2,
                "Timer type %d handler exiting due to CLOSED association.\n",
                type);
            CURVNET_RESTORE();
            return;
        }
    } else if (inp != NULL) {
        SCTP_INP_WLOCK(inp);
    } else {
        SCTP_WQ_ADDR_LOCK();
    }

    /* Record in stopped_from which timeout occurred. */
    tmr->stopped_from = type;
    NET_EPOCH_ENTER(et);
    /* mark as being serviced now */
    if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
        /*
         * Callout has been rescheduled.
         */
        goto get_out;
    }
    if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
        /*
         * Not active, so no action.
         */
        goto get_out;
    }
    SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

    /* call the handler for the appropriate timer type */
    switch (type) {
    case SCTP_TIMER_TYPE_SEND:
        KASSERT(inp != NULL && stcb != NULL && net != NULL,
            ("timeout of type %d: inp = %p, stcb = %p, net = %p",
            type, inp, stcb, net));
        SCTP_STAT_INCR(sctps_timodata);
        stcb->asoc.timodata++;
        stcb->asoc.num_send_timers_up--;
        if (stcb->asoc.num_send_timers_up < 0) {
            stcb->asoc.num_send_timers_up = 0;
        }
        SCTP_TCB_LOCK_ASSERT(stcb);
        if (sctp_t3rxt_timer(inp, stcb, net)) {
            /* no need to unlock on tcb, it's gone */
            goto out_decr;
        }
        SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
        if ((stcb->asoc.num_send_timers_up == 0) &&
            (stcb->asoc.sent_queue_cnt > 0)) {
            struct sctp_tmit_chunk *chk;

            /*
             * Safeguard. If there are chunks on the sent queue
             * but no timers running, something is wrong... so we
             * start a timer on the first chunk on the send queue
             * on whatever net it is sent to.
             */
            TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
                if (chk->whoTo != NULL) {
                    break;
                }
            }
            if (chk != NULL) {
                sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
            }
        }
        break;
1848 */ 1849 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1850 if (chk->whoTo != NULL) { 1851 break; 1852 } 1853 } 1854 if (chk != NULL) { 1855 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 1856 } 1857 } 1858 break; 1859 case SCTP_TIMER_TYPE_INIT: 1860 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1861 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1862 type, inp, stcb, net)); 1863 SCTP_STAT_INCR(sctps_timoinit); 1864 stcb->asoc.timoinit++; 1865 if (sctp_t1init_timer(inp, stcb, net)) { 1866 /* no need to unlock on tcb its gone */ 1867 goto out_decr; 1868 } 1869 /* We do output but not here */ 1870 did_output = 0; 1871 break; 1872 case SCTP_TIMER_TYPE_RECV: 1873 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1874 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1875 type, inp, stcb, net)); 1876 SCTP_STAT_INCR(sctps_timosack); 1877 stcb->asoc.timosack++; 1878 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1879 #ifdef SCTP_AUDITING_ENABLED 1880 sctp_auditing(4, inp, stcb, NULL); 1881 #endif 1882 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1883 break; 1884 case SCTP_TIMER_TYPE_SHUTDOWN: 1885 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1886 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1887 type, inp, stcb, net)); 1888 SCTP_STAT_INCR(sctps_timoshutdown); 1889 stcb->asoc.timoshutdown++; 1890 if (sctp_shutdown_timer(inp, stcb, net)) { 1891 /* no need to unlock on tcb its gone */ 1892 goto out_decr; 1893 } 1894 #ifdef SCTP_AUDITING_ENABLED 1895 sctp_auditing(4, inp, stcb, net); 1896 #endif 1897 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1898 break; 1899 case SCTP_TIMER_TYPE_HEARTBEAT: 1900 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1901 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1902 type, inp, stcb, net)); 1903 SCTP_STAT_INCR(sctps_timoheartbeat); 1904 stcb->asoc.timoheartbeat++; 1905 if (sctp_heartbeat_timer(inp, stcb, net)) { 1906 /* no need to unlock on tcb its gone */ 1907 goto out_decr; 1908 } 1909 #ifdef SCTP_AUDITING_ENABLED 1910 sctp_auditing(4, inp, stcb, net); 1911 #endif 1912 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1913 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1914 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1915 } 1916 break; 1917 case SCTP_TIMER_TYPE_COOKIE: 1918 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1919 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1920 type, inp, stcb, net)); 1921 SCTP_STAT_INCR(sctps_timocookie); 1922 stcb->asoc.timocookie++; 1923 if (sctp_cookie_timer(inp, stcb, net)) { 1924 /* no need to unlock on tcb its gone */ 1925 goto out_decr; 1926 } 1927 #ifdef SCTP_AUDITING_ENABLED 1928 sctp_auditing(4, inp, stcb, net); 1929 #endif 1930 /* 1931 * We consider T3 and Cookie timer pretty much the same with 1932 * respect to where from in chunk_output. 
1933 */ 1934 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1935 break; 1936 case SCTP_TIMER_TYPE_NEWCOOKIE: 1937 KASSERT(inp != NULL && stcb == NULL && net == NULL, 1938 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1939 type, inp, stcb, net)); 1940 SCTP_STAT_INCR(sctps_timosecret); 1941 (void)SCTP_GETTIME_TIMEVAL(&tv); 1942 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1943 inp->sctp_ep.last_secret_number = 1944 inp->sctp_ep.current_secret_number; 1945 inp->sctp_ep.current_secret_number++; 1946 if (inp->sctp_ep.current_secret_number >= 1947 SCTP_HOW_MANY_SECRETS) { 1948 inp->sctp_ep.current_secret_number = 0; 1949 } 1950 secret = (int)inp->sctp_ep.current_secret_number; 1951 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1952 inp->sctp_ep.secret_key[secret][i] = 1953 sctp_select_initial_TSN(&inp->sctp_ep); 1954 } 1955 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1956 did_output = 0; 1957 break; 1958 case SCTP_TIMER_TYPE_PATHMTURAISE: 1959 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1960 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1961 type, inp, stcb, net)); 1962 SCTP_STAT_INCR(sctps_timopathmtu); 1963 sctp_pathmtu_timer(inp, stcb, net); 1964 did_output = 0; 1965 break; 1966 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1967 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1968 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1969 type, inp, stcb, net)); 1970 if (sctp_shutdownack_timer(inp, stcb, net)) { 1971 /* no need to unlock on tcb its gone */ 1972 goto out_decr; 1973 } 1974 SCTP_STAT_INCR(sctps_timoshutdownack); 1975 stcb->asoc.timoshutdownack++; 1976 #ifdef SCTP_AUDITING_ENABLED 1977 sctp_auditing(4, inp, stcb, net); 1978 #endif 1979 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1980 break; 1981 case SCTP_TIMER_TYPE_ASCONF: 1982 KASSERT(inp != NULL && stcb != NULL && net != NULL, 1983 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1984 type, inp, stcb, net)); 1985 SCTP_STAT_INCR(sctps_timoasconf); 1986 if (sctp_asconf_timer(inp, stcb, net)) { 1987 /* no need to unlock on tcb its gone */ 1988 goto out_decr; 1989 } 1990 #ifdef SCTP_AUDITING_ENABLED 1991 sctp_auditing(4, inp, stcb, net); 1992 #endif 1993 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 1994 break; 1995 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1996 KASSERT(inp != NULL && stcb != NULL && net == NULL, 1997 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 1998 type, inp, stcb, net)); 1999 SCTP_STAT_INCR(sctps_timoshutdownguard); 2000 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 2001 "Shutdown guard timer expired"); 2002 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2003 /* no need to unlock on tcb its gone */ 2004 goto out_decr; 2005 case SCTP_TIMER_TYPE_AUTOCLOSE: 2006 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2007 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2008 type, inp, stcb, net)); 2009 SCTP_STAT_INCR(sctps_timoautoclose); 2010 sctp_autoclose_timer(inp, stcb); 2011 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 2012 did_output = 0; 2013 break; 2014 case SCTP_TIMER_TYPE_STRRESET: 2015 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2016 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2017 type, inp, stcb, net)); 2018 SCTP_STAT_INCR(sctps_timostrmrst); 2019 if (sctp_strreset_timer(inp, stcb)) { 2020 /* no need to unlock on tcb its gone */ 2021 goto out_decr; 2022 } 2023 
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 2024 break; 2025 case SCTP_TIMER_TYPE_INPKILL: 2026 KASSERT(inp != NULL && stcb == NULL && net == NULL, 2027 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2028 type, inp, stcb, net)); 2029 SCTP_STAT_INCR(sctps_timoinpkill); 2030 /* 2031 * special case, take away our increment since WE are the 2032 * killer 2033 */ 2034 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, 2035 SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 2036 SCTP_INP_DECR_REF(inp); 2037 SCTP_INP_WUNLOCK(inp); 2038 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 2039 SCTP_CALLED_FROM_INPKILL_TIMER); 2040 inp = NULL; 2041 goto out_no_decr; 2042 case SCTP_TIMER_TYPE_ASOCKILL: 2043 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2044 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2045 type, inp, stcb, net)); 2046 SCTP_STAT_INCR(sctps_timoassockill); 2047 /* Can we free it yet? */ 2048 SCTP_INP_DECR_REF(inp); 2049 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, 2050 SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 2051 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2052 SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 2053 /* 2054 * free asoc, always unlocks (or destroy's) so prevent 2055 * duplicate unlock or unlock of a free mtx :-0 2056 */ 2057 stcb = NULL; 2058 goto out_no_decr; 2059 case SCTP_TIMER_TYPE_ADDR_WQ: 2060 KASSERT(inp == NULL && stcb == NULL && net == NULL, 2061 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2062 type, inp, stcb, net)); 2063 sctp_handle_addr_wq(); 2064 break; 2065 case SCTP_TIMER_TYPE_PRIM_DELETED: 2066 KASSERT(inp != NULL && stcb != NULL && net == NULL, 2067 ("timeout of type %d: inp = %p, stcb = %p, net = %p", 2068 type, inp, stcb, net)); 2069 SCTP_STAT_INCR(sctps_timodelprim); 2070 sctp_delete_prim_timer(inp, stcb); 2071 break; 2072 default: 2073 #ifdef INVARIANTS 2074 panic("Unknown timer type %d", type); 2075 #else 2076 goto get_out; 2077 #endif 2078 } 2079 #ifdef SCTP_AUDITING_ENABLED 2080 sctp_audit_log(0xF1, (uint8_t)type); 2081 if (inp) 2082 sctp_auditing(5, inp, stcb, net); 2083 #endif 2084 if ((did_output) && stcb) { 2085 /* 2086 * Now we need to clean up the control chunk chain if an 2087 * ECNE is on it. It must be marked as UNSENT again so next 2088 * call will continue to send it until such time that we get 2089 * a CWR, to remove it. It is, however, less likely that we 2090 * will find a ecn echo on the chain though. 2091 */ 2092 sctp_fix_ecn_echo(&stcb->asoc); 2093 } 2094 get_out: 2095 if (stcb) { 2096 SCTP_TCB_UNLOCK(stcb); 2097 } else if (inp != NULL) { 2098 SCTP_INP_WUNLOCK(inp); 2099 } else { 2100 SCTP_WQ_ADDR_UNLOCK(); 2101 } 2102 2103 out_decr: 2104 if (inp) { 2105 SCTP_INP_DECR_REF(inp); 2106 } 2107 2108 out_no_decr: 2109 SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); 2110 CURVNET_RESTORE(); 2111 NET_EPOCH_EXIT(et); 2112 } 2113 2114 /*- 2115 * The following table shows which parameters must be provided 2116 * when calling sctp_timer_start(). For parameters not being 2117 * provided, NULL must be used. 
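 *
 * For example (mirroring calls made elsewhere in this file and the
 * table below):
 *
 *     sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
 *     sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
 *     sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, NULL, NULL, NULL);
 *
 * The required parameters per timer type are: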
2118 * 2119 * |Name |inp |stcb|net | 2120 * |-----------------------------|----|----|----| 2121 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2122 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2123 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2124 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2125 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2126 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2127 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2128 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2129 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2130 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | 2131 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2132 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2133 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | 2134 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2135 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2136 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2137 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2138 * 2139 */ 2140 2141 void 2142 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2143 struct sctp_nets *net) 2144 { 2145 struct sctp_timer *tmr; 2146 uint32_t to_ticks; 2147 uint32_t rndval, jitter; 2148 2149 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2150 ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", 2151 t_type, stcb, stcb->sctp_ep)); 2152 tmr = NULL; 2153 to_ticks = 0; 2154 if (stcb != NULL) { 2155 SCTP_TCB_LOCK_ASSERT(stcb); 2156 } else if (inp != NULL) { 2157 SCTP_INP_WLOCK_ASSERT(inp); 2158 } else { 2159 SCTP_WQ_ADDR_LOCK_ASSERT(); 2160 } 2161 if (stcb != NULL) { 2162 /* 2163 * Don't restart timer on association that's about to be 2164 * killed. 2165 */ 2166 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 2167 (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { 2168 SCTPDBG(SCTP_DEBUG_TIMER2, 2169 "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", 2170 t_type, inp, stcb, net); 2171 return; 2172 } 2173 /* Don't restart timer on net that's been removed. */ 2174 if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { 2175 SCTPDBG(SCTP_DEBUG_TIMER2, 2176 "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", 2177 t_type, inp, stcb, net); 2178 return; 2179 } 2180 } 2181 switch (t_type) { 2182 case SCTP_TIMER_TYPE_SEND: 2183 /* Here we use the RTO timer. */ 2184 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2185 #ifdef INVARIANTS 2186 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2187 t_type, inp, stcb, net); 2188 #else 2189 return; 2190 #endif 2191 } 2192 tmr = &net->rxt_timer; 2193 if (net->RTO == 0) { 2194 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2195 } else { 2196 to_ticks = sctp_msecs_to_ticks(net->RTO); 2197 } 2198 break; 2199 case SCTP_TIMER_TYPE_INIT: 2200 /* 2201 * Here we use the INIT timer default usually about 1 2202 * second. 2203 */ 2204 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2205 #ifdef INVARIANTS 2206 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2207 t_type, inp, stcb, net); 2208 #else 2209 return; 2210 #endif 2211 } 2212 tmr = &net->rxt_timer; 2213 if (net->RTO == 0) { 2214 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2215 } else { 2216 to_ticks = sctp_msecs_to_ticks(net->RTO); 2217 } 2218 break; 2219 case SCTP_TIMER_TYPE_RECV: 2220 /* 2221 * Here we use the Delayed-Ack timer value from the inp, 2222 * ususually about 200ms. 
         */
        if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        tmr = &stcb->asoc.dack_timer;
        to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack);
        break;
    case SCTP_TIMER_TYPE_SHUTDOWN:
        /* Here we use the RTO of the destination. */
        if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        tmr = &net->rxt_timer;
        if (net->RTO == 0) {
            to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
        } else {
            to_ticks = sctp_msecs_to_ticks(net->RTO);
        }
        break;
    case SCTP_TIMER_TYPE_HEARTBEAT:
        /*
         * The net is used here so that we can add in the RTO, even
         * though we use a different timer. The timeout is the net's
         * RTO with a random jitter applied, plus the configured
         * heartbeat delay.
         */
        if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        if ((net->dest_state & SCTP_ADDR_NOHB) &&
            !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
            SCTPDBG(SCTP_DEBUG_TIMER2,
                "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
                t_type, inp, stcb, net);
            return;
        }
        tmr = &net->hb_timer;
        if (net->RTO == 0) {
            to_ticks = stcb->asoc.initial_rto;
        } else {
            to_ticks = net->RTO;
        }
        rndval = sctp_select_initial_TSN(&inp->sctp_ep);
        jitter = rndval % to_ticks;
        if (jitter >= (to_ticks >> 1)) {
            to_ticks = to_ticks + (jitter - (to_ticks >> 1));
        } else {
            to_ticks = to_ticks - jitter;
        }
        if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
            !(net->dest_state & SCTP_ADDR_PF)) {
            to_ticks += net->heart_beat_delay;
        }
        /*
         * Now we must convert to_ticks, which is currently in msecs,
         * to ticks.
         */
        to_ticks = sctp_msecs_to_ticks(to_ticks);
        break;
    case SCTP_TIMER_TYPE_COOKIE:
        /*
         * Here we can use the RTO timer from the network since one
         * RTT was complete. If a retransmission happened then we
         * will be using the RTO initial value.
         */
        if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        tmr = &net->rxt_timer;
        if (net->RTO == 0) {
            to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
        } else {
            to_ticks = sctp_msecs_to_ticks(net->RTO);
        }
        break;
    case SCTP_TIMER_TYPE_NEWCOOKIE:
        /*
         * Nothing needed but the endpoint here, usually about 60
         * minutes.
         */
        if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        tmr = &inp->sctp_ep.signature_change;
        to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
        break;
    case SCTP_TIMER_TYPE_PATHMTURAISE:
        /*
         * Here we use the value found in the EP for PMTUD,
         * usually about 10 minutes.
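         *
         * For example, a typical start call matching the parameter
         * checks below is
         *
         *     sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
         *
         * and when it expires, sctp_timeout_handler() calls
         * sctp_pathmtu_timer(inp, stcb, net) without producing output
         * directly.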
         */
        if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
            SCTPDBG(SCTP_DEBUG_TIMER2,
                "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
                t_type, inp, stcb, net);
            return;
        }
        tmr = &net->pmtu_timer;
        to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
        break;
    case SCTP_TIMER_TYPE_SHUTDOWNACK:
        /* Here we use the RTO of the destination. */
        if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        tmr = &net->rxt_timer;
        if (net->RTO == 0) {
            to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
        } else {
            to_ticks = sctp_msecs_to_ticks(net->RTO);
        }
        break;
    case SCTP_TIMER_TYPE_ASCONF:
        /*
         * Here the timer comes from the stcb but its value is from
         * the net's RTO.
         */
        if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        tmr = &stcb->asoc.asconf_timer;
        if (net->RTO == 0) {
            to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
        } else {
            to_ticks = sctp_msecs_to_ticks(net->RTO);
        }
        break;
    case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
        /*
         * Here we use the endpoint's shutdown guard timer, usually
         * about 3 minutes.
         */
        if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        tmr = &stcb->asoc.shut_guard_timer;
        if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
            if (stcb->asoc.maxrto < UINT32_MAX / 5) {
                to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto);
            } else {
                to_ticks = sctp_msecs_to_ticks(UINT32_MAX);
            }
        } else {
            to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
        }
        break;
    case SCTP_TIMER_TYPE_AUTOCLOSE:
        if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        tmr = &stcb->asoc.autoclose_timer;
        to_ticks = stcb->asoc.sctp_autoclose_ticks;
        break;
    case SCTP_TIMER_TYPE_STRRESET:
        /*
         * Here the timer comes from the stcb but its value is from
         * the net's RTO.
         */
        if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        tmr = &stcb->asoc.strreset_timer;
        if (net->RTO == 0) {
            to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
        } else {
            to_ticks = sctp_msecs_to_ticks(net->RTO);
        }
        break;
    case SCTP_TIMER_TYPE_INPKILL:
        /*
         * The inp is set up to die. We reuse the signature_change
         * timer since that has stopped and we are in the GONE
         * state.
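         *
         * Per the table above, this timer is started with only the
         * endpoint, for example
         *
         *     sctp_timer_start(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
         *
         * and it expires after SCTP_INP_KILL_TIMEOUT msecs, as set up
         * just below.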
2453 */ 2454 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2455 #ifdef INVARIANTS 2456 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2457 t_type, inp, stcb, net); 2458 #else 2459 return; 2460 #endif 2461 } 2462 tmr = &inp->sctp_ep.signature_change; 2463 to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); 2464 break; 2465 case SCTP_TIMER_TYPE_ASOCKILL: 2466 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2467 #ifdef INVARIANTS 2468 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2469 t_type, inp, stcb, net); 2470 #else 2471 return; 2472 #endif 2473 } 2474 tmr = &stcb->asoc.strreset_timer; 2475 to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); 2476 break; 2477 case SCTP_TIMER_TYPE_ADDR_WQ: 2478 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2479 #ifdef INVARIANTS 2480 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2481 t_type, inp, stcb, net); 2482 #else 2483 return; 2484 #endif 2485 } 2486 /* Only 1 tick away :-) */ 2487 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2488 to_ticks = SCTP_ADDRESS_TICK_DELAY; 2489 break; 2490 case SCTP_TIMER_TYPE_PRIM_DELETED: 2491 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2492 #ifdef INVARIANTS 2493 panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", 2494 t_type, inp, stcb, net); 2495 #else 2496 return; 2497 #endif 2498 } 2499 tmr = &stcb->asoc.delete_prim_timer; 2500 to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); 2501 break; 2502 default: 2503 #ifdef INVARIANTS 2504 panic("Unknown timer type %d", t_type); 2505 #else 2506 return; 2507 #endif 2508 } 2509 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2510 KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); 2511 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2512 /* 2513 * We do NOT allow you to have it already running. If it is, 2514 * we leave the current one up unchanged. 2515 */ 2516 SCTPDBG(SCTP_DEBUG_TIMER2, 2517 "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", 2518 t_type, inp, stcb, net); 2519 return; 2520 } 2521 /* At this point we can proceed. */ 2522 if (t_type == SCTP_TIMER_TYPE_SEND) { 2523 stcb->asoc.num_send_timers_up++; 2524 } 2525 tmr->stopped_from = 0; 2526 tmr->type = t_type; 2527 tmr->ep = (void *)inp; 2528 tmr->tcb = (void *)stcb; 2529 if (t_type == SCTP_TIMER_TYPE_STRRESET) { 2530 tmr->net = NULL; 2531 } else { 2532 tmr->net = (void *)net; 2533 } 2534 tmr->self = (void *)tmr; 2535 tmr->vnet = (void *)curvnet; 2536 tmr->ticks = sctp_get_tick_count(); 2537 if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { 2538 SCTPDBG(SCTP_DEBUG_TIMER2, 2539 "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2540 t_type, to_ticks, inp, stcb, net); 2541 } else { 2542 /* 2543 * This should not happen, since we checked for pending 2544 * above. 2545 */ 2546 SCTPDBG(SCTP_DEBUG_TIMER2, 2547 "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", 2548 t_type, to_ticks, inp, stcb, net); 2549 } 2550 return; 2551 } 2552 2553 /*- 2554 * The following table shows which parameters must be provided 2555 * when calling sctp_timer_stop(). For parameters not being 2556 * provided, NULL must be used. 
2557 * 2558 * |Name |inp |stcb|net | 2559 * |-----------------------------|----|----|----| 2560 * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | 2561 * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | 2562 * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | 2563 * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | 2564 * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | 2565 * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | 2566 * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | 2567 * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | 2568 * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | 2569 * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | 2570 * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | 2571 * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | 2572 * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | 2573 * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | 2574 * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | 2575 * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | 2576 * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | 2577 * 2578 */ 2579 2580 void 2581 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2582 struct sctp_nets *net, uint32_t from) 2583 { 2584 struct sctp_timer *tmr; 2585 2586 KASSERT(stcb == NULL || stcb->sctp_ep == inp, 2587 ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", 2588 t_type, stcb, stcb->sctp_ep)); 2589 if (stcb != NULL) { 2590 SCTP_TCB_LOCK_ASSERT(stcb); 2591 } else if (inp != NULL) { 2592 SCTP_INP_WLOCK_ASSERT(inp); 2593 } else { 2594 SCTP_WQ_ADDR_LOCK_ASSERT(); 2595 } 2596 tmr = NULL; 2597 switch (t_type) { 2598 case SCTP_TIMER_TYPE_SEND: 2599 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2600 #ifdef INVARIANTS 2601 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2602 t_type, inp, stcb, net); 2603 #else 2604 return; 2605 #endif 2606 } 2607 tmr = &net->rxt_timer; 2608 break; 2609 case SCTP_TIMER_TYPE_INIT: 2610 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2611 #ifdef INVARIANTS 2612 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2613 t_type, inp, stcb, net); 2614 #else 2615 return; 2616 #endif 2617 } 2618 tmr = &net->rxt_timer; 2619 break; 2620 case SCTP_TIMER_TYPE_RECV: 2621 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2622 #ifdef INVARIANTS 2623 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2624 t_type, inp, stcb, net); 2625 #else 2626 return; 2627 #endif 2628 } 2629 tmr = &stcb->asoc.dack_timer; 2630 break; 2631 case SCTP_TIMER_TYPE_SHUTDOWN: 2632 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2633 #ifdef INVARIANTS 2634 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2635 t_type, inp, stcb, net); 2636 #else 2637 return; 2638 #endif 2639 } 2640 tmr = &net->rxt_timer; 2641 break; 2642 case SCTP_TIMER_TYPE_HEARTBEAT: 2643 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2644 #ifdef INVARIANTS 2645 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2646 t_type, inp, stcb, net); 2647 #else 2648 return; 2649 #endif 2650 } 2651 tmr = &net->hb_timer; 2652 break; 2653 case SCTP_TIMER_TYPE_COOKIE: 2654 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 2655 #ifdef INVARIANTS 2656 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2657 t_type, inp, stcb, net); 2658 #else 2659 return; 2660 #endif 2661 } 2662 tmr = &net->rxt_timer; 2663 break; 2664 case SCTP_TIMER_TYPE_NEWCOOKIE: 2665 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2666 #ifdef INVARIANTS 2667 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2668 t_type, inp, stcb, net); 2669 #else 2670 
            return;
#endif
        }
        tmr = &inp->sctp_ep.signature_change;
        break;
    case SCTP_TIMER_TYPE_PATHMTURAISE:
        if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        tmr = &net->pmtu_timer;
        break;
    case SCTP_TIMER_TYPE_SHUTDOWNACK:
        if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        tmr = &net->rxt_timer;
        break;
    case SCTP_TIMER_TYPE_ASCONF:
        if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        tmr = &stcb->asoc.asconf_timer;
        break;
    case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
        if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        tmr = &stcb->asoc.shut_guard_timer;
        break;
    case SCTP_TIMER_TYPE_AUTOCLOSE:
        if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        tmr = &stcb->asoc.autoclose_timer;
        break;
    case SCTP_TIMER_TYPE_STRRESET:
        if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
            panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
                t_type, inp, stcb, net);
#else
            return;
#endif
        }
        tmr = &stcb->asoc.strreset_timer;
        break;
    case SCTP_TIMER_TYPE_INPKILL:
        /*
         * The inp is set up to die. We reuse the signature_change
         * timer since that has stopped and we are in the GONE
         * state.
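         *
         * The matching stop call, as issued by the INPKILL timeout
         * handler earlier in this file, is
         *
         *     sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
         *         SCTP_FROM_SCTPUTIL + SCTP_LOC_3);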
2746 */ 2747 if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { 2748 #ifdef INVARIANTS 2749 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2750 t_type, inp, stcb, net); 2751 #else 2752 return; 2753 #endif 2754 } 2755 tmr = &inp->sctp_ep.signature_change; 2756 break; 2757 case SCTP_TIMER_TYPE_ASOCKILL: 2758 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2759 #ifdef INVARIANTS 2760 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2761 t_type, inp, stcb, net); 2762 #else 2763 return; 2764 #endif 2765 } 2766 tmr = &stcb->asoc.strreset_timer; 2767 break; 2768 case SCTP_TIMER_TYPE_ADDR_WQ: 2769 if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { 2770 #ifdef INVARIANTS 2771 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2772 t_type, inp, stcb, net); 2773 #else 2774 return; 2775 #endif 2776 } 2777 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2778 break; 2779 case SCTP_TIMER_TYPE_PRIM_DELETED: 2780 if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { 2781 #ifdef INVARIANTS 2782 panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", 2783 t_type, inp, stcb, net); 2784 #else 2785 return; 2786 #endif 2787 } 2788 tmr = &stcb->asoc.delete_prim_timer; 2789 break; 2790 default: 2791 #ifdef INVARIANTS 2792 panic("Unknown timer type %d", t_type); 2793 #else 2794 return; 2795 #endif 2796 } 2797 KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); 2798 if ((tmr->type != SCTP_TIMER_TYPE_NONE) && 2799 (tmr->type != t_type)) { 2800 /* 2801 * Ok we have a timer that is under joint use. Cookie timer 2802 * per chance with the SEND timer. We therefore are NOT 2803 * running the timer that the caller wants stopped. So just 2804 * return. 2805 */ 2806 SCTPDBG(SCTP_DEBUG_TIMER2, 2807 "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", 2808 t_type, inp, stcb, net); 2809 return; 2810 } 2811 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2812 stcb->asoc.num_send_timers_up--; 2813 if (stcb->asoc.num_send_timers_up < 0) { 2814 stcb->asoc.num_send_timers_up = 0; 2815 } 2816 } 2817 tmr->self = NULL; 2818 tmr->stopped_from = from; 2819 if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { 2820 KASSERT(tmr->ep == inp, 2821 ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", 2822 t_type, inp, tmr->ep)); 2823 KASSERT(tmr->tcb == stcb, 2824 ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", 2825 t_type, stcb, tmr->tcb)); 2826 KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || 2827 ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), 2828 ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", 2829 t_type, net, tmr->net)); 2830 SCTPDBG(SCTP_DEBUG_TIMER2, 2831 "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", 2832 t_type, inp, stcb, net); 2833 tmr->ep = NULL; 2834 tmr->tcb = NULL; 2835 tmr->net = NULL; 2836 } else { 2837 SCTPDBG(SCTP_DEBUG_TIMER2, 2838 "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", 2839 t_type, inp, stcb, net); 2840 } 2841 return; 2842 } 2843 2844 uint32_t 2845 sctp_calculate_len(struct mbuf *m) 2846 { 2847 uint32_t tlen = 0; 2848 struct mbuf *at; 2849 2850 at = m; 2851 while (at) { 2852 tlen += SCTP_BUF_LEN(at); 2853 at = SCTP_BUF_NEXT(at); 2854 } 2855 return (tlen); 2856 } 2857 2858 void 2859 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2860 struct sctp_association *asoc, uint32_t mtu) 2861 { 2862 /* 2863 * Reset the P-MTU size on this association, this involves changing 2864 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2865 * allow the 
DF flag to be cleared. 2866 */ 2867 struct sctp_tmit_chunk *chk; 2868 unsigned int eff_mtu, ovh; 2869 2870 asoc->smallest_mtu = mtu; 2871 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2872 ovh = SCTP_MIN_OVERHEAD; 2873 } else { 2874 ovh = SCTP_MIN_V4_OVERHEAD; 2875 } 2876 eff_mtu = mtu - ovh; 2877 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2878 if (chk->send_size > eff_mtu) { 2879 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2880 } 2881 } 2882 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2883 if (chk->send_size > eff_mtu) { 2884 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2885 } 2886 } 2887 } 2888 2889 2890 /* 2891 * Given an association and starting time of the current RTT period, update 2892 * RTO in number of msecs. net should point to the current network. 2893 * Return 1, if an RTO update was performed, return 0 if no update was 2894 * performed due to invalid starting point. 2895 */ 2896 2897 int 2898 sctp_calculate_rto(struct sctp_tcb *stcb, 2899 struct sctp_association *asoc, 2900 struct sctp_nets *net, 2901 struct timeval *old, 2902 int rtt_from_sack) 2903 { 2904 struct timeval now; 2905 uint64_t rtt_us; /* RTT in us */ 2906 int32_t rtt; /* RTT in ms */ 2907 uint32_t new_rto; 2908 int first_measure = 0; 2909 2910 /************************/ 2911 /* 1. calculate new RTT */ 2912 /************************/ 2913 /* get the current time */ 2914 if (stcb->asoc.use_precise_time) { 2915 (void)SCTP_GETPTIME_TIMEVAL(&now); 2916 } else { 2917 (void)SCTP_GETTIME_TIMEVAL(&now); 2918 } 2919 if ((old->tv_sec > now.tv_sec) || 2920 ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { 2921 /* The starting point is in the future. */ 2922 return (0); 2923 } 2924 timevalsub(&now, old); 2925 rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec; 2926 if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) { 2927 /* The RTT is larger than a sane value. */ 2928 return (0); 2929 } 2930 /* store the current RTT in us */ 2931 net->rtt = rtt_us; 2932 /* compute rtt in ms */ 2933 rtt = (int32_t)(net->rtt / 1000); 2934 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2935 /* 2936 * Tell the CC module that a new update has just occurred 2937 * from a sack 2938 */ 2939 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2940 } 2941 /* 2942 * Do we need to determine the lan? We do this only on sacks i.e. 2943 * RTT being determined from data not non-data (HB/INIT->INITACK). 2944 */ 2945 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2946 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2947 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2948 net->lan_type = SCTP_LAN_INTERNET; 2949 } else { 2950 net->lan_type = SCTP_LAN_LOCAL; 2951 } 2952 } 2953 2954 /***************************/ 2955 /* 2. update RTTVAR & SRTT */ 2956 /***************************/ 2957 /*- 2958 * Compute the scaled average lastsa and the 2959 * scaled variance lastsv as described in van Jacobson 2960 * Paper "Congestion Avoidance and Control", Annex A. 
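     *
     * Assuming the usual definitions SCTP_RTT_SHIFT == 3 and
     * SCTP_RTT_VAR_SHIFT == 2, the scaled update below is equivalent to
     *
     *     SRTT   += (rtt - SRTT) / 8
     *     RTTVAR += (|rtt - SRTT| - RTTVAR) / 4
     *     RTO     = SRTT + 4 * RTTVAR
     *
     * with lastsa and lastsv holding the scaled SRTT and RTTVAR.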
2961 * 2962 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2963 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar 2964 */ 2965 if (net->RTO_measured) { 2966 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2967 net->lastsa += rtt; 2968 if (rtt < 0) { 2969 rtt = -rtt; 2970 } 2971 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 2972 net->lastsv += rtt; 2973 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2974 rto_logging(net, SCTP_LOG_RTTVAR); 2975 } 2976 } else { 2977 /* First RTO measurment */ 2978 net->RTO_measured = 1; 2979 first_measure = 1; 2980 net->lastsa = rtt << SCTP_RTT_SHIFT; 2981 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 2982 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2983 rto_logging(net, SCTP_LOG_INITIAL_RTT); 2984 } 2985 } 2986 if (net->lastsv == 0) { 2987 net->lastsv = SCTP_CLOCK_GRANULARITY; 2988 } 2989 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 2990 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 2991 (stcb->asoc.sat_network_lockout == 0)) { 2992 stcb->asoc.sat_network = 1; 2993 } else if ((!first_measure) && stcb->asoc.sat_network) { 2994 stcb->asoc.sat_network = 0; 2995 stcb->asoc.sat_network_lockout = 1; 2996 } 2997 /* bound it, per C6/C7 in Section 5.3.1 */ 2998 if (new_rto < stcb->asoc.minrto) { 2999 new_rto = stcb->asoc.minrto; 3000 } 3001 if (new_rto > stcb->asoc.maxrto) { 3002 new_rto = stcb->asoc.maxrto; 3003 } 3004 net->RTO = new_rto; 3005 return (1); 3006 } 3007 3008 /* 3009 * return a pointer to a contiguous piece of data from the given mbuf chain 3010 * starting at 'off' for 'len' bytes. If the desired piece spans more than 3011 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 3012 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 3013 */ 3014 caddr_t 3015 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr) 3016 { 3017 uint32_t count; 3018 uint8_t *ptr; 3019 3020 ptr = in_ptr; 3021 if ((off < 0) || (len <= 0)) 3022 return (NULL); 3023 3024 /* find the desired start location */ 3025 while ((m != NULL) && (off > 0)) { 3026 if (off < SCTP_BUF_LEN(m)) 3027 break; 3028 off -= SCTP_BUF_LEN(m); 3029 m = SCTP_BUF_NEXT(m); 3030 } 3031 if (m == NULL) 3032 return (NULL); 3033 3034 /* is the current mbuf large enough (eg. contiguous)? */ 3035 if ((SCTP_BUF_LEN(m) - off) >= len) { 3036 return (mtod(m, caddr_t)+off); 3037 } else { 3038 /* else, it spans more than one mbuf, so save a temp copy... */ 3039 while ((m != NULL) && (len > 0)) { 3040 count = min(SCTP_BUF_LEN(m) - off, len); 3041 memcpy(ptr, mtod(m, caddr_t)+off, count); 3042 len -= count; 3043 ptr += count; 3044 off = 0; 3045 m = SCTP_BUF_NEXT(m); 3046 } 3047 if ((m == NULL) && (len > 0)) 3048 return (NULL); 3049 else 3050 return ((caddr_t)in_ptr); 3051 } 3052 } 3053 3054 3055 3056 struct sctp_paramhdr * 3057 sctp_get_next_param(struct mbuf *m, 3058 int offset, 3059 struct sctp_paramhdr *pull, 3060 int pull_limit) 3061 { 3062 /* This just provides a typed signature to Peter's Pull routine */ 3063 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 3064 (uint8_t *)pull)); 3065 } 3066 3067 3068 struct mbuf * 3069 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 3070 { 3071 struct mbuf *m_last; 3072 caddr_t dp; 3073 3074 if (padlen > 3) { 3075 return (NULL); 3076 } 3077 if (padlen <= M_TRAILINGSPACE(m)) { 3078 /* 3079 * The easy way. 
We hope the majority of the time we hit 3080 * here :) 3081 */ 3082 m_last = m; 3083 } else { 3084 /* Hard way we must grow the mbuf chain */ 3085 m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 3086 if (m_last == NULL) { 3087 return (NULL); 3088 } 3089 SCTP_BUF_LEN(m_last) = 0; 3090 SCTP_BUF_NEXT(m_last) = NULL; 3091 SCTP_BUF_NEXT(m) = m_last; 3092 } 3093 dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last); 3094 SCTP_BUF_LEN(m_last) += padlen; 3095 memset(dp, 0, padlen); 3096 return (m_last); 3097 } 3098 3099 struct mbuf * 3100 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 3101 { 3102 /* find the last mbuf in chain and pad it */ 3103 struct mbuf *m_at; 3104 3105 if (last_mbuf != NULL) { 3106 return (sctp_add_pad_tombuf(last_mbuf, padval)); 3107 } else { 3108 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3109 if (SCTP_BUF_NEXT(m_at) == NULL) { 3110 return (sctp_add_pad_tombuf(m_at, padval)); 3111 } 3112 } 3113 } 3114 return (NULL); 3115 } 3116 3117 static void 3118 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 3119 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked) 3120 { 3121 struct mbuf *m_notify; 3122 struct sctp_assoc_change *sac; 3123 struct sctp_queued_to_read *control; 3124 unsigned int notif_len; 3125 uint16_t abort_len; 3126 unsigned int i; 3127 3128 if (stcb == NULL) { 3129 return; 3130 } 3131 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 3132 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3133 if (abort != NULL) { 3134 abort_len = ntohs(abort->ch.chunk_length); 3135 /* 3136 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3137 * contiguous. 3138 */ 3139 if (abort_len > SCTP_CHUNK_BUFFER_SIZE) { 3140 abort_len = SCTP_CHUNK_BUFFER_SIZE; 3141 } 3142 } else { 3143 abort_len = 0; 3144 } 3145 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3146 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 3147 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3148 notif_len += abort_len; 3149 } 3150 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3151 if (m_notify == NULL) { 3152 /* Retry with smaller value. 
*/ 3153 notif_len = (unsigned int)sizeof(struct sctp_assoc_change); 3154 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3155 if (m_notify == NULL) { 3156 goto set_error; 3157 } 3158 } 3159 SCTP_BUF_NEXT(m_notify) = NULL; 3160 sac = mtod(m_notify, struct sctp_assoc_change *); 3161 memset(sac, 0, notif_len); 3162 sac->sac_type = SCTP_ASSOC_CHANGE; 3163 sac->sac_flags = 0; 3164 sac->sac_length = sizeof(struct sctp_assoc_change); 3165 sac->sac_state = state; 3166 sac->sac_error = error; 3167 /* XXX verify these stream counts */ 3168 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 3169 sac->sac_inbound_streams = stcb->asoc.streamincnt; 3170 sac->sac_assoc_id = sctp_get_associd(stcb); 3171 if (notif_len > sizeof(struct sctp_assoc_change)) { 3172 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 3173 i = 0; 3174 if (stcb->asoc.prsctp_supported == 1) { 3175 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 3176 } 3177 if (stcb->asoc.auth_supported == 1) { 3178 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 3179 } 3180 if (stcb->asoc.asconf_supported == 1) { 3181 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 3182 } 3183 if (stcb->asoc.idata_supported == 1) { 3184 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING; 3185 } 3186 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 3187 if (stcb->asoc.reconfig_supported == 1) { 3188 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 3189 } 3190 sac->sac_length += i; 3191 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 3192 memcpy(sac->sac_info, abort, abort_len); 3193 sac->sac_length += abort_len; 3194 } 3195 } 3196 SCTP_BUF_LEN(m_notify) = sac->sac_length; 3197 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3198 0, 0, stcb->asoc.context, 0, 0, 0, 3199 m_notify); 3200 if (control != NULL) { 3201 control->length = SCTP_BUF_LEN(m_notify); 3202 control->spec_flags = M_NOTIFICATION; 3203 /* not that we need this */ 3204 control->tail_mbuf = m_notify; 3205 sctp_add_to_readq(stcb->sctp_ep, stcb, 3206 control, 3207 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 3208 so_locked); 3209 } else { 3210 sctp_m_freem(m_notify); 3211 } 3212 } 3213 /* 3214 * For 1-to-1 style sockets, we send up and error when an ABORT 3215 * comes in. 
3216 */ 3217 set_error: 3218 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3219 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3220 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3221 SOCK_LOCK(stcb->sctp_socket); 3222 if (from_peer) { 3223 if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { 3224 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 3225 stcb->sctp_socket->so_error = ECONNREFUSED; 3226 } else { 3227 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 3228 stcb->sctp_socket->so_error = ECONNRESET; 3229 } 3230 } else { 3231 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 3232 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 3233 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 3234 stcb->sctp_socket->so_error = ETIMEDOUT; 3235 } else { 3236 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 3237 stcb->sctp_socket->so_error = ECONNABORTED; 3238 } 3239 } 3240 SOCK_UNLOCK(stcb->sctp_socket); 3241 } 3242 /* Wake ANY sleepers */ 3243 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3244 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 3245 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 3246 socantrcvmore(stcb->sctp_socket); 3247 } 3248 sorwakeup(stcb->sctp_socket); 3249 sowwakeup(stcb->sctp_socket); 3250 } 3251 3252 static void 3253 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 3254 struct sockaddr *sa, uint32_t error, int so_locked) 3255 { 3256 struct mbuf *m_notify; 3257 struct sctp_paddr_change *spc; 3258 struct sctp_queued_to_read *control; 3259 3260 if ((stcb == NULL) || 3261 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 3262 /* event not enabled */ 3263 return; 3264 } 3265 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 3266 if (m_notify == NULL) 3267 return; 3268 SCTP_BUF_LEN(m_notify) = 0; 3269 spc = mtod(m_notify, struct sctp_paddr_change *); 3270 memset(spc, 0, sizeof(struct sctp_paddr_change)); 3271 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 3272 spc->spc_flags = 0; 3273 spc->spc_length = sizeof(struct sctp_paddr_change); 3274 switch (sa->sa_family) { 3275 #ifdef INET 3276 case AF_INET: 3277 #ifdef INET6 3278 if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 3279 in6_sin_2_v4mapsin6((struct sockaddr_in *)sa, 3280 (struct sockaddr_in6 *)&spc->spc_aaddr); 3281 } else { 3282 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3283 } 3284 #else 3285 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 3286 #endif 3287 break; 3288 #endif 3289 #ifdef INET6 3290 case AF_INET6: 3291 { 3292 struct sockaddr_in6 *sin6; 3293 3294 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 3295 3296 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 3297 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 3298 if (sin6->sin6_scope_id == 0) { 3299 /* recover scope_id for user */ 3300 (void)sa6_recoverscope(sin6); 3301 } else { 3302 /* clear embedded scope_id for user */ 3303 in6_clearscope(&sin6->sin6_addr); 3304 } 3305 } 3306 break; 3307 } 3308 #endif 3309 default: 3310 /* TSNH */ 3311 break; 3312 } 3313 spc->spc_state = state; 3314 spc->spc_error = error; 3315 spc->spc_assoc_id = sctp_get_associd(stcb); 3316 3317 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 3318 SCTP_BUF_NEXT(m_notify) = NULL; 3319 3320 /* append to socket */ 3321 control = sctp_build_readq_entry(stcb, 
stcb->asoc.primary_destination, 3322 0, 0, stcb->asoc.context, 0, 0, 0, 3323 m_notify); 3324 if (control == NULL) { 3325 /* no memory */ 3326 sctp_m_freem(m_notify); 3327 return; 3328 } 3329 control->length = SCTP_BUF_LEN(m_notify); 3330 control->spec_flags = M_NOTIFICATION; 3331 /* not that we need this */ 3332 control->tail_mbuf = m_notify; 3333 sctp_add_to_readq(stcb->sctp_ep, stcb, 3334 control, 3335 &stcb->sctp_socket->so_rcv, 1, 3336 SCTP_READ_LOCK_NOT_HELD, 3337 so_locked); 3338 } 3339 3340 3341 static void 3342 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 3343 struct sctp_tmit_chunk *chk, int so_locked) 3344 { 3345 struct mbuf *m_notify; 3346 struct sctp_send_failed *ssf; 3347 struct sctp_send_failed_event *ssfe; 3348 struct sctp_queued_to_read *control; 3349 struct sctp_chunkhdr *chkhdr; 3350 int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len; 3351 3352 if ((stcb == NULL) || 3353 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3354 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3355 /* event not enabled */ 3356 return; 3357 } 3358 3359 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3360 notifhdr_len = sizeof(struct sctp_send_failed_event); 3361 } else { 3362 notifhdr_len = sizeof(struct sctp_send_failed); 3363 } 3364 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3365 if (m_notify == NULL) 3366 /* no space left */ 3367 return; 3368 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3369 if (stcb->asoc.idata_supported) { 3370 chkhdr_len = sizeof(struct sctp_idata_chunk); 3371 } else { 3372 chkhdr_len = sizeof(struct sctp_data_chunk); 3373 } 3374 /* Use some defaults in case we can't access the chunk header */ 3375 if (chk->send_size >= chkhdr_len) { 3376 payload_len = chk->send_size - chkhdr_len; 3377 } else { 3378 payload_len = 0; 3379 } 3380 padding_len = 0; 3381 if (chk->data != NULL) { 3382 chkhdr = mtod(chk->data, struct sctp_chunkhdr *); 3383 if (chkhdr != NULL) { 3384 chk_len = ntohs(chkhdr->chunk_length); 3385 if ((chk_len >= chkhdr_len) && 3386 (chk->send_size >= chk_len) && 3387 (chk->send_size - chk_len < 4)) { 3388 padding_len = chk->send_size - chk_len; 3389 payload_len = chk->send_size - chkhdr_len - padding_len; 3390 } 3391 } 3392 } 3393 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3394 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3395 memset(ssfe, 0, notifhdr_len); 3396 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3397 if (sent) { 3398 ssfe->ssfe_flags = SCTP_DATA_SENT; 3399 } else { 3400 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3401 } 3402 ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len); 3403 ssfe->ssfe_error = error; 3404 /* not exactly what the user sent in, but should be close :) */ 3405 ssfe->ssfe_info.snd_sid = chk->rec.data.sid; 3406 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 3407 ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid; 3408 ssfe->ssfe_info.snd_context = chk->rec.data.context; 3409 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 3410 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3411 } else { 3412 ssf = mtod(m_notify, struct sctp_send_failed *); 3413 memset(ssf, 0, notifhdr_len); 3414 ssf->ssf_type = SCTP_SEND_FAILED; 3415 if (sent) { 3416 ssf->ssf_flags = SCTP_DATA_SENT; 3417 } else { 3418 ssf->ssf_flags = SCTP_DATA_UNSENT; 3419 } 3420 ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len); 3421 ssf->ssf_error 
= error; 3422 /* not exactly what the user sent in, but should be close :) */ 3423 ssf->ssf_info.sinfo_stream = chk->rec.data.sid; 3424 ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid; 3425 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 3426 ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid; 3427 ssf->ssf_info.sinfo_context = chk->rec.data.context; 3428 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3429 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3430 } 3431 if (chk->data != NULL) { 3432 /* Trim off the sctp chunk header (it should be there) */ 3433 if (chk->send_size == chkhdr_len + payload_len + padding_len) { 3434 m_adj(chk->data, chkhdr_len); 3435 m_adj(chk->data, -padding_len); 3436 sctp_mbuf_crush(chk->data); 3437 chk->send_size -= (chkhdr_len + padding_len); 3438 } 3439 } 3440 SCTP_BUF_NEXT(m_notify) = chk->data; 3441 /* Steal off the mbuf */ 3442 chk->data = NULL; 3443 /* 3444 * For this case, we check the actual socket buffer, since the assoc 3445 * is going away we don't want to overfill the socket buffer for a 3446 * non-reader 3447 */ 3448 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3449 sctp_m_freem(m_notify); 3450 return; 3451 } 3452 /* append to socket */ 3453 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3454 0, 0, stcb->asoc.context, 0, 0, 0, 3455 m_notify); 3456 if (control == NULL) { 3457 /* no memory */ 3458 sctp_m_freem(m_notify); 3459 return; 3460 } 3461 control->length = SCTP_BUF_LEN(m_notify); 3462 control->spec_flags = M_NOTIFICATION; 3463 /* not that we need this */ 3464 control->tail_mbuf = m_notify; 3465 sctp_add_to_readq(stcb->sctp_ep, stcb, 3466 control, 3467 &stcb->sctp_socket->so_rcv, 1, 3468 SCTP_READ_LOCK_NOT_HELD, 3469 so_locked); 3470 } 3471 3472 3473 static void 3474 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 3475 struct sctp_stream_queue_pending *sp, int so_locked) 3476 { 3477 struct mbuf *m_notify; 3478 struct sctp_send_failed *ssf; 3479 struct sctp_send_failed_event *ssfe; 3480 struct sctp_queued_to_read *control; 3481 int notifhdr_len; 3482 3483 if ((stcb == NULL) || 3484 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 3485 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 3486 /* event not enabled */ 3487 return; 3488 } 3489 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3490 notifhdr_len = sizeof(struct sctp_send_failed_event); 3491 } else { 3492 notifhdr_len = sizeof(struct sctp_send_failed); 3493 } 3494 m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA); 3495 if (m_notify == NULL) { 3496 /* no space left */ 3497 return; 3498 } 3499 SCTP_BUF_LEN(m_notify) = notifhdr_len; 3500 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 3501 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 3502 memset(ssfe, 0, notifhdr_len); 3503 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 3504 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 3505 ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length); 3506 ssfe->ssfe_error = error; 3507 /* not exactly what the user sent in, but should be close :) */ 3508 ssfe->ssfe_info.snd_sid = sp->sid; 3509 if (sp->some_taken) { 3510 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 3511 } else { 3512 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 3513 } 3514 ssfe->ssfe_info.snd_ppid = sp->ppid; 3515 ssfe->ssfe_info.snd_context = sp->context; 3516 ssfe->ssfe_info.snd_assoc_id = 
sctp_get_associd(stcb); 3517 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 3518 } else { 3519 ssf = mtod(m_notify, struct sctp_send_failed *); 3520 memset(ssf, 0, notifhdr_len); 3521 ssf->ssf_type = SCTP_SEND_FAILED; 3522 ssf->ssf_flags = SCTP_DATA_UNSENT; 3523 ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length); 3524 ssf->ssf_error = error; 3525 /* not exactly what the user sent in, but should be close :) */ 3526 ssf->ssf_info.sinfo_stream = sp->sid; 3527 ssf->ssf_info.sinfo_ssn = 0; 3528 if (sp->some_taken) { 3529 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 3530 } else { 3531 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 3532 } 3533 ssf->ssf_info.sinfo_ppid = sp->ppid; 3534 ssf->ssf_info.sinfo_context = sp->context; 3535 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 3536 ssf->ssf_assoc_id = sctp_get_associd(stcb); 3537 } 3538 SCTP_BUF_NEXT(m_notify) = sp->data; 3539 3540 /* Steal off the mbuf */ 3541 sp->data = NULL; 3542 /* 3543 * For this case, we check the actual socket buffer, since the assoc 3544 * is going away we don't want to overfill the socket buffer for a 3545 * non-reader 3546 */ 3547 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3548 sctp_m_freem(m_notify); 3549 return; 3550 } 3551 /* append to socket */ 3552 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3553 0, 0, stcb->asoc.context, 0, 0, 0, 3554 m_notify); 3555 if (control == NULL) { 3556 /* no memory */ 3557 sctp_m_freem(m_notify); 3558 return; 3559 } 3560 control->length = SCTP_BUF_LEN(m_notify); 3561 control->spec_flags = M_NOTIFICATION; 3562 /* not that we need this */ 3563 control->tail_mbuf = m_notify; 3564 sctp_add_to_readq(stcb->sctp_ep, stcb, 3565 control, 3566 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3567 } 3568 3569 3570 3571 static void 3572 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3573 { 3574 struct mbuf *m_notify; 3575 struct sctp_adaptation_event *sai; 3576 struct sctp_queued_to_read *control; 3577 3578 if ((stcb == NULL) || 3579 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3580 /* event not enabled */ 3581 return; 3582 } 3583 3584 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3585 if (m_notify == NULL) 3586 /* no space left */ 3587 return; 3588 SCTP_BUF_LEN(m_notify) = 0; 3589 sai = mtod(m_notify, struct sctp_adaptation_event *); 3590 memset(sai, 0, sizeof(struct sctp_adaptation_event)); 3591 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3592 sai->sai_flags = 0; 3593 sai->sai_length = sizeof(struct sctp_adaptation_event); 3594 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3595 sai->sai_assoc_id = sctp_get_associd(stcb); 3596 3597 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3598 SCTP_BUF_NEXT(m_notify) = NULL; 3599 3600 /* append to socket */ 3601 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3602 0, 0, stcb->asoc.context, 0, 0, 0, 3603 m_notify); 3604 if (control == NULL) { 3605 /* no memory */ 3606 sctp_m_freem(m_notify); 3607 return; 3608 } 3609 control->length = SCTP_BUF_LEN(m_notify); 3610 control->spec_flags = M_NOTIFICATION; 3611 /* not that we need this */ 3612 control->tail_mbuf = m_notify; 3613 sctp_add_to_readq(stcb->sctp_ep, stcb, 3614 control, 3615 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3616 } 3617 3618 /* This always must be called with the read-queue LOCKED in the INP */ 3619 static void 3620 
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3621 uint32_t val, int so_locked) 3622 { 3623 struct mbuf *m_notify; 3624 struct sctp_pdapi_event *pdapi; 3625 struct sctp_queued_to_read *control; 3626 struct sockbuf *sb; 3627 3628 if ((stcb == NULL) || 3629 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3630 /* event not enabled */ 3631 return; 3632 } 3633 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3634 return; 3635 } 3636 3637 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3638 if (m_notify == NULL) 3639 /* no space left */ 3640 return; 3641 SCTP_BUF_LEN(m_notify) = 0; 3642 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3643 memset(pdapi, 0, sizeof(struct sctp_pdapi_event)); 3644 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3645 pdapi->pdapi_flags = 0; 3646 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3647 pdapi->pdapi_indication = error; 3648 pdapi->pdapi_stream = (val >> 16); 3649 pdapi->pdapi_seq = (val & 0x0000ffff); 3650 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3651 3652 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3653 SCTP_BUF_NEXT(m_notify) = NULL; 3654 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3655 0, 0, stcb->asoc.context, 0, 0, 0, 3656 m_notify); 3657 if (control == NULL) { 3658 /* no memory */ 3659 sctp_m_freem(m_notify); 3660 return; 3661 } 3662 control->length = SCTP_BUF_LEN(m_notify); 3663 control->spec_flags = M_NOTIFICATION; 3664 /* not that we need this */ 3665 control->tail_mbuf = m_notify; 3666 sb = &stcb->sctp_socket->so_rcv; 3667 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3668 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3669 } 3670 sctp_sballoc(stcb, sb, m_notify); 3671 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3672 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3673 } 3674 control->end_added = 1; 3675 if (stcb->asoc.control_pdapi) 3676 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3677 else { 3678 /* we really should not see this case */ 3679 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3680 } 3681 if (stcb->sctp_ep && stcb->sctp_socket) { 3682 /* This should always be the case */ 3683 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3684 } 3685 } 3686 3687 static void 3688 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3689 { 3690 struct mbuf *m_notify; 3691 struct sctp_shutdown_event *sse; 3692 struct sctp_queued_to_read *control; 3693 3694 /* 3695 * For TCP model AND UDP connected sockets we will send an error up 3696 * when an SHUTDOWN completes 3697 */ 3698 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3699 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3700 /* mark socket closed for read/write and wakeup! 
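 * (in this path only the send side is actually closed, via the
 * socantsendmore() call that follows; the receive side is left open so the
 * SCTP_SHUTDOWN_EVENT queued below can still be read by the application)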
*/ 3701 socantsendmore(stcb->sctp_socket); 3702 } 3703 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3704 /* event not enabled */ 3705 return; 3706 } 3707 3708 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3709 if (m_notify == NULL) 3710 /* no space left */ 3711 return; 3712 sse = mtod(m_notify, struct sctp_shutdown_event *); 3713 memset(sse, 0, sizeof(struct sctp_shutdown_event)); 3714 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3715 sse->sse_flags = 0; 3716 sse->sse_length = sizeof(struct sctp_shutdown_event); 3717 sse->sse_assoc_id = sctp_get_associd(stcb); 3718 3719 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3720 SCTP_BUF_NEXT(m_notify) = NULL; 3721 3722 /* append to socket */ 3723 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3724 0, 0, stcb->asoc.context, 0, 0, 0, 3725 m_notify); 3726 if (control == NULL) { 3727 /* no memory */ 3728 sctp_m_freem(m_notify); 3729 return; 3730 } 3731 control->length = SCTP_BUF_LEN(m_notify); 3732 control->spec_flags = M_NOTIFICATION; 3733 /* not that we need this */ 3734 control->tail_mbuf = m_notify; 3735 sctp_add_to_readq(stcb->sctp_ep, stcb, 3736 control, 3737 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3738 } 3739 3740 static void 3741 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3742 int so_locked) 3743 { 3744 struct mbuf *m_notify; 3745 struct sctp_sender_dry_event *event; 3746 struct sctp_queued_to_read *control; 3747 3748 if ((stcb == NULL) || 3749 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3750 /* event not enabled */ 3751 return; 3752 } 3753 3754 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3755 if (m_notify == NULL) { 3756 /* no space left */ 3757 return; 3758 } 3759 SCTP_BUF_LEN(m_notify) = 0; 3760 event = mtod(m_notify, struct sctp_sender_dry_event *); 3761 memset(event, 0, sizeof(struct sctp_sender_dry_event)); 3762 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3763 event->sender_dry_flags = 0; 3764 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3765 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3766 3767 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3768 SCTP_BUF_NEXT(m_notify) = NULL; 3769 3770 /* append to socket */ 3771 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3772 0, 0, stcb->asoc.context, 0, 0, 0, 3773 m_notify); 3774 if (control == NULL) { 3775 /* no memory */ 3776 sctp_m_freem(m_notify); 3777 return; 3778 } 3779 control->length = SCTP_BUF_LEN(m_notify); 3780 control->spec_flags = M_NOTIFICATION; 3781 /* not that we need this */ 3782 control->tail_mbuf = m_notify; 3783 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3784 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3785 } 3786 3787 3788 void 3789 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3790 { 3791 struct mbuf *m_notify; 3792 struct sctp_queued_to_read *control; 3793 struct sctp_stream_change_event *stradd; 3794 3795 if ((stcb == NULL) || 3796 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3797 /* event not enabled */ 3798 return; 3799 } 3800 if ((stcb->asoc.peer_req_out) && flag) { 3801 /* Peer made the request, don't tell the local user */ 3802 stcb->asoc.peer_req_out = 0; 3803 return; 3804 } 3805 stcb->asoc.peer_req_out = 0; 3806 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA); 3807 if (m_notify == NULL) 3808 /* no space left */ 3809 return; 3810 SCTP_BUF_LEN(m_notify) = 0; 3811 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3812 memset(stradd, 0, sizeof(struct sctp_stream_change_event)); 3813 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3814 stradd->strchange_flags = flag; 3815 stradd->strchange_length = sizeof(struct sctp_stream_change_event); 3816 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3817 stradd->strchange_instrms = numberin; 3818 stradd->strchange_outstrms = numberout; 3819 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event); 3820 SCTP_BUF_NEXT(m_notify) = NULL; 3821 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3822 /* no space */ 3823 sctp_m_freem(m_notify); 3824 return; 3825 } 3826 /* append to socket */ 3827 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3828 0, 0, stcb->asoc.context, 0, 0, 0, 3829 m_notify); 3830 if (control == NULL) { 3831 /* no memory */ 3832 sctp_m_freem(m_notify); 3833 return; 3834 } 3835 control->length = SCTP_BUF_LEN(m_notify); 3836 control->spec_flags = M_NOTIFICATION; 3837 /* not that we need this */ 3838 control->tail_mbuf = m_notify; 3839 sctp_add_to_readq(stcb->sctp_ep, stcb, 3840 control, 3841 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3842 } 3843 3844 void 3845 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3846 { 3847 struct mbuf *m_notify; 3848 struct sctp_queued_to_read *control; 3849 struct sctp_assoc_reset_event *strasoc; 3850 3851 if ((stcb == NULL) || 3852 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3853 /* event not enabled */ 3854 return; 3855 } 3856 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA); 3857 if (m_notify == NULL) 3858 /* no space left */ 3859 return; 3860 SCTP_BUF_LEN(m_notify) = 0; 3861 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3862 memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event)); 3863 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3864 strasoc->assocreset_flags = flag; 3865 strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event); 3866 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3867 strasoc->assocreset_local_tsn = sending_tsn; 3868 strasoc->assocreset_remote_tsn = recv_tsn; 3869 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event); 3870 SCTP_BUF_NEXT(m_notify) = NULL; 3871 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3872 /* no space */ 3873 sctp_m_freem(m_notify); 3874 return; 3875 } 3876 /* append to socket */ 3877 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3878 0, 0, stcb->asoc.context, 0, 0, 0, 3879 m_notify); 3880 if (control == NULL) { 3881 /* no memory */ 3882 sctp_m_freem(m_notify); 3883 return; 3884 } 3885 control->length = SCTP_BUF_LEN(m_notify); 3886 control->spec_flags = M_NOTIFICATION; 3887 /* not that we need this */ 3888 control->tail_mbuf = m_notify; 3889 sctp_add_to_readq(stcb->sctp_ep, stcb, 3890 control, 3891 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3892 } 3893 3894 3895 3896 static void 3897 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3898 int number_entries, uint16_t *list, int flag) 3899 { 3900 struct mbuf *m_notify; 3901 struct 
sctp_queued_to_read *control; 3902 struct sctp_stream_reset_event *strreset; 3903 int len; 3904 3905 if ((stcb == NULL) || 3906 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3907 /* event not enabled */ 3908 return; 3909 } 3910 3911 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3912 if (m_notify == NULL) 3913 /* no space left */ 3914 return; 3915 SCTP_BUF_LEN(m_notify) = 0; 3916 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3917 if (len > M_TRAILINGSPACE(m_notify)) { 3918 /* never enough room */ 3919 sctp_m_freem(m_notify); 3920 return; 3921 } 3922 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3923 memset(strreset, 0, len); 3924 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3925 strreset->strreset_flags = flag; 3926 strreset->strreset_length = len; 3927 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3928 if (number_entries) { 3929 int i; 3930 3931 for (i = 0; i < number_entries; i++) { 3932 strreset->strreset_stream_list[i] = ntohs(list[i]); 3933 } 3934 } 3935 SCTP_BUF_LEN(m_notify) = len; 3936 SCTP_BUF_NEXT(m_notify) = NULL; 3937 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3938 /* no space */ 3939 sctp_m_freem(m_notify); 3940 return; 3941 } 3942 /* append to socket */ 3943 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3944 0, 0, stcb->asoc.context, 0, 0, 0, 3945 m_notify); 3946 if (control == NULL) { 3947 /* no memory */ 3948 sctp_m_freem(m_notify); 3949 return; 3950 } 3951 control->length = SCTP_BUF_LEN(m_notify); 3952 control->spec_flags = M_NOTIFICATION; 3953 /* not that we need this */ 3954 control->tail_mbuf = m_notify; 3955 sctp_add_to_readq(stcb->sctp_ep, stcb, 3956 control, 3957 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3958 } 3959 3960 3961 static void 3962 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3963 { 3964 struct mbuf *m_notify; 3965 struct sctp_remote_error *sre; 3966 struct sctp_queued_to_read *control; 3967 unsigned int notif_len; 3968 uint16_t chunk_len; 3969 3970 if ((stcb == NULL) || 3971 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3972 return; 3973 } 3974 if (chunk != NULL) { 3975 chunk_len = ntohs(chunk->ch.chunk_length); 3976 /* 3977 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be 3978 * contiguous. 3979 */ 3980 if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) { 3981 chunk_len = SCTP_CHUNK_BUFFER_SIZE; 3982 } 3983 } else { 3984 chunk_len = 0; 3985 } 3986 notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len); 3987 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3988 if (m_notify == NULL) { 3989 /* Retry with smaller value. 
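 * With the smaller allocation only the fixed-size sctp_remote_error header
 * is delivered; the notif_len check below then skips copying the offending
 * ERROR chunk into sre_data.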
*/ 3990 notif_len = (unsigned int)sizeof(struct sctp_remote_error); 3991 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3992 if (m_notify == NULL) { 3993 return; 3994 } 3995 } 3996 SCTP_BUF_NEXT(m_notify) = NULL; 3997 sre = mtod(m_notify, struct sctp_remote_error *); 3998 memset(sre, 0, notif_len); 3999 sre->sre_type = SCTP_REMOTE_ERROR; 4000 sre->sre_flags = 0; 4001 sre->sre_length = sizeof(struct sctp_remote_error); 4002 sre->sre_error = error; 4003 sre->sre_assoc_id = sctp_get_associd(stcb); 4004 if (notif_len > sizeof(struct sctp_remote_error)) { 4005 memcpy(sre->sre_data, chunk, chunk_len); 4006 sre->sre_length += chunk_len; 4007 } 4008 SCTP_BUF_LEN(m_notify) = sre->sre_length; 4009 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 4010 0, 0, stcb->asoc.context, 0, 0, 0, 4011 m_notify); 4012 if (control != NULL) { 4013 control->length = SCTP_BUF_LEN(m_notify); 4014 control->spec_flags = M_NOTIFICATION; 4015 /* not that we need this */ 4016 control->tail_mbuf = m_notify; 4017 sctp_add_to_readq(stcb->sctp_ep, stcb, 4018 control, 4019 &stcb->sctp_socket->so_rcv, 1, 4020 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 4021 } else { 4022 sctp_m_freem(m_notify); 4023 } 4024 } 4025 4026 4027 void 4028 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 4029 uint32_t error, void *data, int so_locked) 4030 { 4031 if ((stcb == NULL) || 4032 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4033 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4034 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4035 /* If the socket is gone we are out of here */ 4036 return; 4037 } 4038 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 4039 return; 4040 } 4041 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4042 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4043 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 4044 (notification == SCTP_NOTIFY_INTERFACE_UP) || 4045 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 4046 /* Don't report these in front states */ 4047 return; 4048 } 4049 } 4050 switch (notification) { 4051 case SCTP_NOTIFY_ASSOC_UP: 4052 if (stcb->asoc.assoc_up_sent == 0) { 4053 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 4054 stcb->asoc.assoc_up_sent = 1; 4055 } 4056 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 4057 sctp_notify_adaptation_layer(stcb); 4058 } 4059 if (stcb->asoc.auth_supported == 0) { 4060 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4061 NULL, so_locked); 4062 } 4063 break; 4064 case SCTP_NOTIFY_ASSOC_DOWN: 4065 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 4066 break; 4067 case SCTP_NOTIFY_INTERFACE_DOWN: 4068 { 4069 struct sctp_nets *net; 4070 4071 net = (struct sctp_nets *)data; 4072 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 4073 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4074 break; 4075 } 4076 case SCTP_NOTIFY_INTERFACE_UP: 4077 { 4078 struct sctp_nets *net; 4079 4080 net = (struct sctp_nets *)data; 4081 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 4082 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4083 break; 4084 } 4085 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 4086 { 4087 struct sctp_nets *net; 4088 4089 net = (struct sctp_nets *)data; 4090 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 4091 (struct sockaddr *)&net->ro._l_addr, error, so_locked); 4092 break; 4093 } 4094 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 4095 
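		/*
		 * SCTP_NOTIFY_SPECIAL_SP_FAIL covers data that was still
		 * sitting on a stream output queue and never made it into a
		 * DATA chunk, so sctp_notify_send_failed2() reports it as
		 * SCTP_DATA_UNSENT.  A hedged userland sketch of consuming
		 * such a report (assumes the application subscribed to
		 * SCTP_SEND_FAILED_EVENT; fd/buf are placeholders and
		 * handle_failed_send() is a made-up application hook):
		 *
		 *   char buf[4096];
		 *   struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
		 *   struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
		 *   ssize_t n = recvmsg(fd, &msg, 0);
		 *
		 *   if (n > 0 && (msg.msg_flags & MSG_NOTIFICATION)) {
		 *       union sctp_notification *snp = (union sctp_notification *)buf;
		 *
		 *       if (snp->sn_header.sn_type == SCTP_SEND_FAILED_EVENT) {
		 *           struct sctp_send_failed_event *e = &snp->sn_send_failed_event;
		 *           int never_sent = (e->ssfe_flags & SCTP_DATA_UNSENT) != 0;
		 *
		 *           handle_failed_send(e->ssfe_error, never_sent);
		 *       }
		 *   }
		 */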
sctp_notify_send_failed2(stcb, error, 4096 (struct sctp_stream_queue_pending *)data, so_locked); 4097 break; 4098 case SCTP_NOTIFY_SENT_DG_FAIL: 4099 sctp_notify_send_failed(stcb, 1, error, 4100 (struct sctp_tmit_chunk *)data, so_locked); 4101 break; 4102 case SCTP_NOTIFY_UNSENT_DG_FAIL: 4103 sctp_notify_send_failed(stcb, 0, error, 4104 (struct sctp_tmit_chunk *)data, so_locked); 4105 break; 4106 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 4107 { 4108 uint32_t val; 4109 4110 val = *((uint32_t *)data); 4111 4112 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 4113 break; 4114 } 4115 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 4116 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4117 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4118 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked); 4119 } else { 4120 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked); 4121 } 4122 break; 4123 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 4124 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 4125 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 4126 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked); 4127 } else { 4128 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked); 4129 } 4130 break; 4131 case SCTP_NOTIFY_ASSOC_RESTART: 4132 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked); 4133 if (stcb->asoc.auth_supported == 0) { 4134 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 4135 NULL, so_locked); 4136 } 4137 break; 4138 case SCTP_NOTIFY_STR_RESET_SEND: 4139 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN); 4140 break; 4141 case SCTP_NOTIFY_STR_RESET_RECV: 4142 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING); 4143 break; 4144 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 4145 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4146 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 4147 break; 4148 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 4149 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4150 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 4151 break; 4152 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 4153 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4154 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 4155 break; 4156 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 4157 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), 4158 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 4159 break; 4160 case SCTP_NOTIFY_ASCONF_ADD_IP: 4161 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 4162 error, so_locked); 4163 break; 4164 case SCTP_NOTIFY_ASCONF_DELETE_IP: 4165 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 4166 error, so_locked); 4167 break; 4168 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 4169 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 4170 error, so_locked); 4171 break; 4172 case SCTP_NOTIFY_PEER_SHUTDOWN: 4173 sctp_notify_shutdown_event(stcb); 4174 break; 4175 case SCTP_NOTIFY_AUTH_NEW_KEY: 4176 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 4177 (uint16_t)(uintptr_t)data, 4178 so_locked); 4179 break; 4180 case SCTP_NOTIFY_AUTH_FREE_KEY: 4181 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 4182 (uint16_t)(uintptr_t)data, 4183 so_locked); 4184 break; 4185 case SCTP_NOTIFY_NO_PEER_AUTH: 4186 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 4187 
(uint16_t)(uintptr_t)data, 4188 so_locked); 4189 break; 4190 case SCTP_NOTIFY_SENDER_DRY: 4191 sctp_notify_sender_dry_event(stcb, so_locked); 4192 break; 4193 case SCTP_NOTIFY_REMOTE_ERROR: 4194 sctp_notify_remote_error(stcb, error, data); 4195 break; 4196 default: 4197 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 4198 __func__, notification, notification); 4199 break; 4200 } /* end switch */ 4201 } 4202 4203 void 4204 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked) 4205 { 4206 struct sctp_association *asoc; 4207 struct sctp_stream_out *outs; 4208 struct sctp_tmit_chunk *chk, *nchk; 4209 struct sctp_stream_queue_pending *sp, *nsp; 4210 int i; 4211 4212 if (stcb == NULL) { 4213 return; 4214 } 4215 asoc = &stcb->asoc; 4216 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4217 /* already being freed */ 4218 return; 4219 } 4220 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4221 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4222 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 4223 return; 4224 } 4225 /* now through all the gunk freeing chunks */ 4226 if (holds_lock == 0) { 4227 SCTP_TCB_SEND_LOCK(stcb); 4228 } 4229 /* sent queue SHOULD be empty */ 4230 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 4231 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 4232 asoc->sent_queue_cnt--; 4233 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 4234 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4235 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4236 #ifdef INVARIANTS 4237 } else { 4238 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4239 #endif 4240 } 4241 } 4242 if (chk->data != NULL) { 4243 sctp_free_bufspace(stcb, asoc, chk, 1); 4244 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 4245 error, chk, so_locked); 4246 if (chk->data) { 4247 sctp_m_freem(chk->data); 4248 chk->data = NULL; 4249 } 4250 } 4251 sctp_free_a_chunk(stcb, chk, so_locked); 4252 /* sa_ignore FREED_MEMORY */ 4253 } 4254 /* pending send queue SHOULD be empty */ 4255 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 4256 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 4257 asoc->send_queue_cnt--; 4258 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 4259 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 4260 #ifdef INVARIANTS 4261 } else { 4262 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 4263 #endif 4264 } 4265 if (chk->data != NULL) { 4266 sctp_free_bufspace(stcb, asoc, chk, 1); 4267 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 4268 error, chk, so_locked); 4269 if (chk->data) { 4270 sctp_m_freem(chk->data); 4271 chk->data = NULL; 4272 } 4273 } 4274 sctp_free_a_chunk(stcb, chk, so_locked); 4275 /* sa_ignore FREED_MEMORY */ 4276 } 4277 for (i = 0; i < asoc->streamoutcnt; i++) { 4278 /* For each stream */ 4279 outs = &asoc->strmout[i]; 4280 /* clean up any sends there */ 4281 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 4282 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 4283 TAILQ_REMOVE(&outs->outqueue, sp, next); 4284 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1); 4285 sctp_free_spbufspace(stcb, asoc, sp); 4286 if (sp->data) { 4287 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 4288 error, (void *)sp, so_locked); 4289 if (sp->data) { 4290 sctp_m_freem(sp->data); 4291 sp->data = NULL; 4292 sp->tail_mbuf = NULL; 4293 sp->length = 0; 4294 } 4295 } 4296 if (sp->net) { 4297 
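				/*
				 * sp->net, when set, holds a reference on the
				 * destination that had been picked for this
				 * pending message; drop that reference before
				 * the stream-queue entry itself is returned
				 * below.
				 */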
sctp_free_remote_addr(sp->net); 4298 sp->net = NULL; 4299 } 4300 /* Free the chunk */ 4301 sctp_free_a_strmoq(stcb, sp, so_locked); 4302 /* sa_ignore FREED_MEMORY */ 4303 } 4304 } 4305 4306 if (holds_lock == 0) { 4307 SCTP_TCB_SEND_UNLOCK(stcb); 4308 } 4309 } 4310 4311 void 4312 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error, 4313 struct sctp_abort_chunk *abort, int so_locked) 4314 { 4315 if (stcb == NULL) { 4316 return; 4317 } 4318 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4319 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4320 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4321 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 4322 } 4323 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4324 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 4325 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 4326 return; 4327 } 4328 /* Tell them we lost the asoc */ 4329 sctp_report_all_outbound(stcb, error, 0, so_locked); 4330 if (from_peer) { 4331 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 4332 } else { 4333 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 4334 } 4335 } 4336 4337 void 4338 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4339 struct mbuf *m, int iphlen, 4340 struct sockaddr *src, struct sockaddr *dst, 4341 struct sctphdr *sh, struct mbuf *op_err, 4342 uint8_t mflowtype, uint32_t mflowid, 4343 uint32_t vrf_id, uint16_t port) 4344 { 4345 uint32_t vtag; 4346 4347 vtag = 0; 4348 if (stcb != NULL) { 4349 vtag = stcb->asoc.peer_vtag; 4350 vrf_id = stcb->asoc.vrf_id; 4351 } 4352 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 4353 mflowtype, mflowid, inp->fibnum, 4354 vrf_id, port); 4355 if (stcb != NULL) { 4356 /* We have a TCB to abort, send notification too */ 4357 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 4358 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4359 /* Ok, now lets free it */ 4360 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4361 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4362 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4363 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4364 } 4365 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4366 SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 4367 } 4368 } 4369 #ifdef SCTP_ASOCLOG_OF_TSNS 4370 void 4371 sctp_print_out_track_log(struct sctp_tcb *stcb) 4372 { 4373 #ifdef NOSIY_PRINTS 4374 int i; 4375 4376 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 4377 SCTP_PRINTF("IN bound TSN log-aaa\n"); 4378 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 4379 SCTP_PRINTF("None rcvd\n"); 4380 goto none_in; 4381 } 4382 if (stcb->asoc.tsn_in_wrapped) { 4383 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 4384 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4385 stcb->asoc.in_tsnlog[i].tsn, 4386 stcb->asoc.in_tsnlog[i].strm, 4387 stcb->asoc.in_tsnlog[i].seq, 4388 stcb->asoc.in_tsnlog[i].flgs, 4389 stcb->asoc.in_tsnlog[i].sz); 4390 } 4391 } 4392 if (stcb->asoc.tsn_in_at) { 4393 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 4394 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4395 stcb->asoc.in_tsnlog[i].tsn, 4396 stcb->asoc.in_tsnlog[i].strm, 4397 stcb->asoc.in_tsnlog[i].seq, 4398 stcb->asoc.in_tsnlog[i].flgs, 4399 stcb->asoc.in_tsnlog[i].sz); 4400 } 4401 } 4402 none_in: 4403 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 4404 if ((stcb->asoc.tsn_out_at == 0) && 4405 
(stcb->asoc.tsn_out_wrapped == 0)) { 4406 SCTP_PRINTF("None sent\n"); 4407 } 4408 if (stcb->asoc.tsn_out_wrapped) { 4409 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 4410 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4411 stcb->asoc.out_tsnlog[i].tsn, 4412 stcb->asoc.out_tsnlog[i].strm, 4413 stcb->asoc.out_tsnlog[i].seq, 4414 stcb->asoc.out_tsnlog[i].flgs, 4415 stcb->asoc.out_tsnlog[i].sz); 4416 } 4417 } 4418 if (stcb->asoc.tsn_out_at) { 4419 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 4420 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 4421 stcb->asoc.out_tsnlog[i].tsn, 4422 stcb->asoc.out_tsnlog[i].strm, 4423 stcb->asoc.out_tsnlog[i].seq, 4424 stcb->asoc.out_tsnlog[i].flgs, 4425 stcb->asoc.out_tsnlog[i].sz); 4426 } 4427 } 4428 #endif 4429 } 4430 #endif 4431 4432 void 4433 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4434 struct mbuf *op_err, 4435 int so_locked) 4436 { 4437 4438 if (stcb == NULL) { 4439 /* Got to have a TCB */ 4440 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4441 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4442 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4443 SCTP_CALLED_DIRECTLY_NOCMPSET); 4444 } 4445 } 4446 return; 4447 } else { 4448 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); 4449 } 4450 /* notify the peer */ 4451 sctp_send_abort_tcb(stcb, op_err, so_locked); 4452 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4453 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4454 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4455 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4456 } 4457 /* notify the ulp */ 4458 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 4459 sctp_abort_notification(stcb, 0, 0, NULL, so_locked); 4460 } 4461 /* now free the asoc */ 4462 #ifdef SCTP_ASOCLOG_OF_TSNS 4463 sctp_print_out_track_log(stcb); 4464 #endif 4465 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4466 SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4467 } 4468 4469 void 4470 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4471 struct sockaddr *src, struct sockaddr *dst, 4472 struct sctphdr *sh, struct sctp_inpcb *inp, 4473 struct mbuf *cause, 4474 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4475 uint32_t vrf_id, uint16_t port) 4476 { 4477 struct sctp_chunkhdr *ch, chunk_buf; 4478 unsigned int chk_length; 4479 int contains_init_chunk; 4480 4481 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4482 /* Generate a TO address for future reference */ 4483 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4484 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4485 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4486 SCTP_CALLED_DIRECTLY_NOCMPSET); 4487 } 4488 } 4489 contains_init_chunk = 0; 4490 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4491 sizeof(*ch), (uint8_t *)&chunk_buf); 4492 while (ch != NULL) { 4493 chk_length = ntohs(ch->chunk_length); 4494 if (chk_length < sizeof(*ch)) { 4495 /* break to abort land */ 4496 break; 4497 } 4498 switch (ch->chunk_type) { 4499 case SCTP_INIT: 4500 contains_init_chunk = 1; 4501 break; 4502 case SCTP_PACKET_DROPPED: 4503 /* we don't respond to pkt-dropped */ 4504 return; 4505 case SCTP_ABORT_ASSOCIATION: 4506 /* we don't respond with an ABORT to an ABORT */ 4507 return; 4508 case SCTP_SHUTDOWN_COMPLETE: 4509 /* 4510 * we ignore it since we are not waiting for it and 4511 * peer is gone 4512 */ 4513 return; 4514 case SCTP_SHUTDOWN_ACK: 4515 sctp_send_shutdown_complete2(src, dst, sh, 4516 mflowtype, mflowid, fibnum, 4517 vrf_id, port); 4518 return; 4519 
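		/*
		 * Any other chunk type is simply skipped here; once the scan
		 * finishes, the ABORT decision below is gated by the
		 * sctp_blackhole sysctl (net.inet.sctp.blackhole): 0 always
		 * answers an out-of-the-blue packet with an ABORT, 1
		 * suppresses the ABORT only when the packet carried an INIT,
		 * and any larger value suppresses it entirely.
		 */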
default: 4520 break; 4521 } 4522 offset += SCTP_SIZE32(chk_length); 4523 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4524 sizeof(*ch), (uint8_t *)&chunk_buf); 4525 } 4526 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4527 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4528 (contains_init_chunk == 0))) { 4529 sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, 4530 mflowtype, mflowid, fibnum, 4531 vrf_id, port); 4532 } 4533 } 4534 4535 /* 4536 * check the inbound datagram to make sure there is not an abort inside it, 4537 * if there is return 1, else return 0. 4538 */ 4539 int 4540 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill) 4541 { 4542 struct sctp_chunkhdr *ch; 4543 struct sctp_init_chunk *init_chk, chunk_buf; 4544 int offset; 4545 unsigned int chk_length; 4546 4547 offset = iphlen + sizeof(struct sctphdr); 4548 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4549 (uint8_t *)&chunk_buf); 4550 while (ch != NULL) { 4551 chk_length = ntohs(ch->chunk_length); 4552 if (chk_length < sizeof(*ch)) { 4553 /* packet is probably corrupt */ 4554 break; 4555 } 4556 /* we seem to be ok, is it an abort? */ 4557 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4558 /* yep, tell them */ 4559 return (1); 4560 } 4561 if (ch->chunk_type == SCTP_INITIATION) { 4562 /* need to update the Vtag */ 4563 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4564 offset, sizeof(*init_chk), (uint8_t *)&chunk_buf); 4565 if (init_chk != NULL) { 4566 *vtagfill = ntohl(init_chk->init.initiate_tag); 4567 } 4568 } 4569 /* Nope, move to the next chunk */ 4570 offset += SCTP_SIZE32(chk_length); 4571 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4572 sizeof(*ch), (uint8_t *)&chunk_buf); 4573 } 4574 return (0); 4575 } 4576 4577 /* 4578 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4579 * set (i.e. it's 0) so, create this function to compare link local scopes 4580 */ 4581 #ifdef INET6 4582 uint32_t 4583 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4584 { 4585 struct sockaddr_in6 a, b; 4586 4587 /* save copies */ 4588 a = *addr1; 4589 b = *addr2; 4590 4591 if (a.sin6_scope_id == 0) 4592 if (sa6_recoverscope(&a)) { 4593 /* can't get scope, so can't match */ 4594 return (0); 4595 } 4596 if (b.sin6_scope_id == 0) 4597 if (sa6_recoverscope(&b)) { 4598 /* can't get scope, so can't match */ 4599 return (0); 4600 } 4601 if (a.sin6_scope_id != b.sin6_scope_id) 4602 return (0); 4603 4604 return (1); 4605 } 4606 4607 /* 4608 * returns a sockaddr_in6 with embedded scope recovered and removed 4609 */ 4610 struct sockaddr_in6 * 4611 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4612 { 4613 /* check and strip embedded scope junk */ 4614 if (addr->sin6_family == AF_INET6) { 4615 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4616 if (addr->sin6_scope_id == 0) { 4617 *store = *addr; 4618 if (!sa6_recoverscope(store)) { 4619 /* use the recovered scope */ 4620 addr = store; 4621 } 4622 } else { 4623 /* else, return the original "to" addr */ 4624 in6_clearscope(&addr->sin6_addr); 4625 } 4626 } 4627 } 4628 return (addr); 4629 } 4630 #endif 4631 4632 /* 4633 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4634 * if same, 0 if not 4635 */ 4636 int 4637 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4638 { 4639 4640 /* must be valid */ 4641 if (sa1 == NULL || sa2 == NULL) 4642 return (0); 4643 4644 /* must be the same family */ 4645 if (sa1->sa_family != sa2->sa_family) 4646 return (0); 4647 4648 switch (sa1->sa_family) { 4649 #ifdef INET6 4650 case AF_INET6: 4651 { 4652 /* IPv6 addresses */ 4653 struct sockaddr_in6 *sin6_1, *sin6_2; 4654 4655 sin6_1 = (struct sockaddr_in6 *)sa1; 4656 sin6_2 = (struct sockaddr_in6 *)sa2; 4657 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4658 sin6_2)); 4659 } 4660 #endif 4661 #ifdef INET 4662 case AF_INET: 4663 { 4664 /* IPv4 addresses */ 4665 struct sockaddr_in *sin_1, *sin_2; 4666 4667 sin_1 = (struct sockaddr_in *)sa1; 4668 sin_2 = (struct sockaddr_in *)sa2; 4669 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4670 } 4671 #endif 4672 default: 4673 /* we don't do these... */ 4674 return (0); 4675 } 4676 } 4677 4678 void 4679 sctp_print_address(struct sockaddr *sa) 4680 { 4681 #ifdef INET6 4682 char ip6buf[INET6_ADDRSTRLEN]; 4683 #endif 4684 4685 switch (sa->sa_family) { 4686 #ifdef INET6 4687 case AF_INET6: 4688 { 4689 struct sockaddr_in6 *sin6; 4690 4691 sin6 = (struct sockaddr_in6 *)sa; 4692 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4693 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4694 ntohs(sin6->sin6_port), 4695 sin6->sin6_scope_id); 4696 break; 4697 } 4698 #endif 4699 #ifdef INET 4700 case AF_INET: 4701 { 4702 struct sockaddr_in *sin; 4703 unsigned char *p; 4704 4705 sin = (struct sockaddr_in *)sa; 4706 p = (unsigned char *)&sin->sin_addr; 4707 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4708 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4709 break; 4710 } 4711 #endif 4712 default: 4713 SCTP_PRINTF("?\n"); 4714 break; 4715 } 4716 } 4717 4718 void 4719 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4720 struct sctp_inpcb *new_inp, 4721 struct sctp_tcb *stcb, 4722 int waitflags) 4723 { 4724 /* 4725 * go through our old INP and pull off any control structures that 4726 * belong to stcb and move then to the new inp. 4727 */ 4728 struct socket *old_so, *new_so; 4729 struct sctp_queued_to_read *control, *nctl; 4730 struct sctp_readhead tmp_queue; 4731 struct mbuf *m; 4732 int error = 0; 4733 4734 old_so = old_inp->sctp_socket; 4735 new_so = new_inp->sctp_socket; 4736 TAILQ_INIT(&tmp_queue); 4737 error = sblock(&old_so->so_rcv, waitflags); 4738 if (error) { 4739 /* 4740 * Gak, can't get sblock, we have a problem. data will be 4741 * left stranded.. and we don't dare look at it since the 4742 * other thread may be reading something. Oh well, its a 4743 * screwed up app that does a peeloff OR a accept while 4744 * reading from the main socket... actually its only the 4745 * peeloff() case, since I think read will fail on a 4746 * listening socket.. 4747 */ 4748 return; 4749 } 4750 /* lock the socket buffers */ 4751 SCTP_INP_READ_LOCK(old_inp); 4752 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4753 /* Pull off all for out target stcb */ 4754 if (control->stcb == stcb) { 4755 /* remove it we want it */ 4756 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4757 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4758 m = control->data; 4759 while (m) { 4760 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4761 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4762 } 4763 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4764 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4765 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4766 } 4767 m = SCTP_BUF_NEXT(m); 4768 } 4769 } 4770 } 4771 SCTP_INP_READ_UNLOCK(old_inp); 4772 /* Remove the sb-lock on the old socket */ 4773 4774 sbunlock(&old_so->so_rcv); 4775 /* Now we move them over to the new socket buffer */ 4776 SCTP_INP_READ_LOCK(new_inp); 4777 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4778 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4779 m = control->data; 4780 while (m) { 4781 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4782 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4783 } 4784 sctp_sballoc(stcb, &new_so->so_rcv, m); 4785 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4786 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4787 } 4788 m = SCTP_BUF_NEXT(m); 4789 } 4790 } 4791 SCTP_INP_READ_UNLOCK(new_inp); 4792 } 4793 4794 void 4795 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, 4796 struct sctp_tcb *stcb, 4797 int so_locked 4798 SCTP_UNUSED 4799 ) 4800 { 4801 if ((inp != NULL) && (inp->sctp_socket != NULL)) { 4802 sctp_sorwakeup(inp, inp->sctp_socket); 4803 } 4804 } 4805 4806 void 4807 sctp_add_to_readq(struct sctp_inpcb *inp, 4808 struct sctp_tcb *stcb, 4809 struct sctp_queued_to_read *control, 4810 struct sockbuf *sb, 4811 int end, 4812 int inp_read_lock_held, 4813 int so_locked) 4814 { 4815 /* 4816 * Here we must place the control on the end of the socket read 4817 * queue AND increment sb_cc so that select will work properly on 4818 * read. 4819 */ 4820 struct mbuf *m, *prev = NULL; 4821 4822 if (inp == NULL) { 4823 /* Gak, TSNH!! */ 4824 #ifdef INVARIANTS 4825 panic("Gak, inp NULL on add_to_readq"); 4826 #endif 4827 return; 4828 } 4829 if (inp_read_lock_held == 0) 4830 SCTP_INP_READ_LOCK(inp); 4831 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4832 if (!control->on_strm_q) { 4833 sctp_free_remote_addr(control->whoFrom); 4834 if (control->data) { 4835 sctp_m_freem(control->data); 4836 control->data = NULL; 4837 } 4838 sctp_free_a_readq(stcb, control); 4839 } 4840 if (inp_read_lock_held == 0) 4841 SCTP_INP_READ_UNLOCK(inp); 4842 return; 4843 } 4844 if (!(control->spec_flags & M_NOTIFICATION)) { 4845 atomic_add_int(&inp->total_recvs, 1); 4846 if (!control->do_not_ref_stcb) { 4847 atomic_add_int(&stcb->total_recvs, 1); 4848 } 4849 } 4850 m = control->data; 4851 control->held_length = 0; 4852 control->length = 0; 4853 while (m) { 4854 if (SCTP_BUF_LEN(m) == 0) { 4855 /* Skip mbufs with NO length */ 4856 if (prev == NULL) { 4857 /* First one */ 4858 control->data = sctp_m_free(m); 4859 m = control->data; 4860 } else { 4861 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4862 m = SCTP_BUF_NEXT(prev); 4863 } 4864 if (m == NULL) { 4865 control->tail_mbuf = prev; 4866 } 4867 continue; 4868 } 4869 prev = m; 4870 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4871 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4872 } 4873 sctp_sballoc(stcb, sb, m); 4874 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4875 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 4876 } 4877 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4878 m = SCTP_BUF_NEXT(m); 4879 } 4880 if (prev != NULL) { 4881 control->tail_mbuf = prev; 4882 } else { 4883 /* Everything got collapsed out?? */ 4884 if (!control->on_strm_q) { 4885 sctp_free_remote_addr(control->whoFrom); 4886 sctp_free_a_readq(stcb, control); 4887 } 4888 if (inp_read_lock_held == 0) 4889 SCTP_INP_READ_UNLOCK(inp); 4890 return; 4891 } 4892 if (end) { 4893 control->end_added = 1; 4894 } 4895 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4896 control->on_read_q = 1; 4897 if (inp_read_lock_held == 0) 4898 SCTP_INP_READ_UNLOCK(inp); 4899 if (inp && inp->sctp_socket) { 4900 sctp_wakeup_the_read_socket(inp, stcb, so_locked); 4901 } 4902 } 4903 4904 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4905 *************ALTERNATE ROUTING CODE 4906 */ 4907 4908 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4909 *************ALTERNATE ROUTING CODE 4910 */ 4911 4912 struct mbuf * 4913 sctp_generate_cause(uint16_t code, char *info) 4914 { 4915 struct mbuf *m; 4916 struct sctp_gen_error_cause *cause; 4917 size_t info_len; 4918 uint16_t len; 4919 4920 if ((code == 0) || (info == NULL)) { 4921 return (NULL); 4922 } 4923 info_len = strlen(info); 4924 if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) { 4925 return (NULL); 4926 } 4927 len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len); 4928 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4929 if (m != NULL) { 4930 SCTP_BUF_LEN(m) = len; 4931 cause = mtod(m, struct sctp_gen_error_cause *); 4932 cause->code = htons(code); 4933 cause->length = htons(len); 4934 memcpy(cause->info, info, info_len); 4935 } 4936 return (m); 4937 } 4938 4939 struct mbuf * 4940 sctp_generate_no_user_data_cause(uint32_t tsn) 4941 { 4942 struct mbuf *m; 4943 struct sctp_error_no_user_data *no_user_data_cause; 4944 uint16_t len; 4945 4946 len = (uint16_t)sizeof(struct sctp_error_no_user_data); 4947 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4948 if (m != NULL) { 4949 SCTP_BUF_LEN(m) = len; 4950 no_user_data_cause = mtod(m, struct sctp_error_no_user_data *); 4951 no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA); 4952 no_user_data_cause->cause.length = htons(len); 4953 no_user_data_cause->tsn = htonl(tsn); 4954 } 4955 return (m); 4956 } 4957 4958 #ifdef SCTP_MBCNT_LOGGING 4959 void 4960 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4961 struct sctp_tmit_chunk *tp1, int chk_cnt) 4962 { 4963 if (tp1->data == NULL) { 4964 return; 4965 } 4966 asoc->chunks_on_out_queue -= chk_cnt; 4967 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 4968 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4969 asoc->total_output_queue_size, 4970 tp1->book_size, 4971 0, 4972 tp1->mbcnt); 4973 } 4974 if (asoc->total_output_queue_size >= tp1->book_size) { 4975 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4976 } else { 4977 asoc->total_output_queue_size = 0; 4978 } 4979 4980 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4981 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4982 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 4983 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 4984 } else { 4985 stcb->sctp_socket->so_snd.sb_cc = 0; 4986 4987 } 4988 } 4989 } 4990 4991 #endif 4992 4993 int 4994 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 4995 uint8_t sent, int 
so_locked) 4996 { 4997 struct sctp_stream_out *strq; 4998 struct sctp_tmit_chunk *chk = NULL, *tp2; 4999 struct sctp_stream_queue_pending *sp; 5000 uint32_t mid; 5001 uint16_t sid; 5002 uint8_t foundeom = 0; 5003 int ret_sz = 0; 5004 int notdone; 5005 int do_wakeup_routine = 0; 5006 5007 sid = tp1->rec.data.sid; 5008 mid = tp1->rec.data.mid; 5009 if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5010 stcb->asoc.abandoned_sent[0]++; 5011 stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5012 stcb->asoc.strmout[sid].abandoned_sent[0]++; 5013 #if defined(SCTP_DETAILED_STR_STATS) 5014 stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++; 5015 #endif 5016 } else { 5017 stcb->asoc.abandoned_unsent[0]++; 5018 stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5019 stcb->asoc.strmout[sid].abandoned_unsent[0]++; 5020 #if defined(SCTP_DETAILED_STR_STATS) 5021 stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++; 5022 #endif 5023 } 5024 do { 5025 ret_sz += tp1->book_size; 5026 if (tp1->data != NULL) { 5027 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5028 sctp_flight_size_decrease(tp1); 5029 sctp_total_flight_decrease(stcb, tp1); 5030 } 5031 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5032 stcb->asoc.peers_rwnd += tp1->send_size; 5033 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 5034 if (sent) { 5035 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5036 } else { 5037 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5038 } 5039 if (tp1->data) { 5040 sctp_m_freem(tp1->data); 5041 tp1->data = NULL; 5042 } 5043 do_wakeup_routine = 1; 5044 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 5045 stcb->asoc.sent_queue_cnt_removeable--; 5046 } 5047 } 5048 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5049 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 5050 SCTP_DATA_NOT_FRAG) { 5051 /* not frag'ed we ae done */ 5052 notdone = 0; 5053 foundeom = 1; 5054 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5055 /* end of frag, we are done */ 5056 notdone = 0; 5057 foundeom = 1; 5058 } else { 5059 /* 5060 * Its a begin or middle piece, we must mark all of 5061 * it 5062 */ 5063 notdone = 1; 5064 tp1 = TAILQ_NEXT(tp1, sctp_next); 5065 } 5066 } while (tp1 && notdone); 5067 if (foundeom == 0) { 5068 /* 5069 * The multi-part message was scattered across the send and 5070 * sent queue. 5071 */ 5072 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 5073 if ((tp1->rec.data.sid != sid) || 5074 (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) { 5075 break; 5076 } 5077 /* 5078 * save to chk in case we have some on stream out 5079 * queue. If so and we have an un-transmitted one we 5080 * don't have to fudge the TSN. 5081 */ 5082 chk = tp1; 5083 ret_sz += tp1->book_size; 5084 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 5085 if (sent) { 5086 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 5087 } else { 5088 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 5089 } 5090 if (tp1->data) { 5091 sctp_m_freem(tp1->data); 5092 tp1->data = NULL; 5093 } 5094 /* No flight involved here book the size to 0 */ 5095 tp1->book_size = 0; 5096 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 5097 foundeom = 1; 5098 } 5099 do_wakeup_routine = 1; 5100 tp1->sent = SCTP_FORWARD_TSN_SKIP; 5101 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 5102 /* 5103 * on to the sent queue so we can wait for it to be 5104 * passed by. 
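			 * (the abandoned fragments sit on the sent queue
			 * marked SCTP_FORWARD_TSN_SKIP, so the PR-SCTP
			 * FORWARD-TSN machinery can advance the cumulative
			 * TSN over them and they can be reclaimed later)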
5105 */ 5106 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 5107 sctp_next); 5108 stcb->asoc.send_queue_cnt--; 5109 stcb->asoc.sent_queue_cnt++; 5110 } 5111 } 5112 if (foundeom == 0) { 5113 /* 5114 * Still no eom found. That means there is stuff left on the 5115 * stream out queue.. yuck. 5116 */ 5117 SCTP_TCB_SEND_LOCK(stcb); 5118 strq = &stcb->asoc.strmout[sid]; 5119 sp = TAILQ_FIRST(&strq->outqueue); 5120 if (sp != NULL) { 5121 sp->discard_rest = 1; 5122 /* 5123 * We may need to put a chunk on the queue that 5124 * holds the TSN that would have been sent with the 5125 * LAST bit. 5126 */ 5127 if (chk == NULL) { 5128 /* Yep, we have to */ 5129 sctp_alloc_a_chunk(stcb, chk); 5130 if (chk == NULL) { 5131 /* 5132 * we are hosed. All we can do is 5133 * nothing.. which will cause an 5134 * abort if the peer is paying 5135 * attention. 5136 */ 5137 goto oh_well; 5138 } 5139 memset(chk, 0, sizeof(*chk)); 5140 chk->rec.data.rcv_flags = 0; 5141 chk->sent = SCTP_FORWARD_TSN_SKIP; 5142 chk->asoc = &stcb->asoc; 5143 if (stcb->asoc.idata_supported == 0) { 5144 if (sp->sinfo_flags & SCTP_UNORDERED) { 5145 chk->rec.data.mid = 0; 5146 } else { 5147 chk->rec.data.mid = strq->next_mid_ordered; 5148 } 5149 } else { 5150 if (sp->sinfo_flags & SCTP_UNORDERED) { 5151 chk->rec.data.mid = strq->next_mid_unordered; 5152 } else { 5153 chk->rec.data.mid = strq->next_mid_ordered; 5154 } 5155 } 5156 chk->rec.data.sid = sp->sid; 5157 chk->rec.data.ppid = sp->ppid; 5158 chk->rec.data.context = sp->context; 5159 chk->flags = sp->act_flags; 5160 chk->whoTo = NULL; 5161 chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 5162 strq->chunks_on_queues++; 5163 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 5164 stcb->asoc.sent_queue_cnt++; 5165 stcb->asoc.pr_sctp_cnt++; 5166 } 5167 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 5168 if (sp->sinfo_flags & SCTP_UNORDERED) { 5169 chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED; 5170 } 5171 if (stcb->asoc.idata_supported == 0) { 5172 if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) { 5173 strq->next_mid_ordered++; 5174 } 5175 } else { 5176 if (sp->sinfo_flags & SCTP_UNORDERED) { 5177 strq->next_mid_unordered++; 5178 } else { 5179 strq->next_mid_ordered++; 5180 } 5181 } 5182 oh_well: 5183 if (sp->data) { 5184 /* 5185 * Pull any data to free up the SB and allow 5186 * sender to "add more" while we will throw 5187 * away :-) 5188 */ 5189 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 5190 ret_sz += sp->length; 5191 do_wakeup_routine = 1; 5192 sp->some_taken = 1; 5193 sctp_m_freem(sp->data); 5194 sp->data = NULL; 5195 sp->tail_mbuf = NULL; 5196 sp->length = 0; 5197 } 5198 } 5199 SCTP_TCB_SEND_UNLOCK(stcb); 5200 } 5201 if (do_wakeup_routine) { 5202 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 5203 } 5204 return (ret_sz); 5205 } 5206 5207 /* 5208 * checks to see if the given address, sa, is one that is currently known by 5209 * the kernel note: can't distinguish the same address on multiple interfaces 5210 * and doesn't handle multiple addresses with different zone/scope id's note: 5211 * ifa_ifwithaddr() compares the entire sockaddr struct 5212 */ 5213 struct sctp_ifa * 5214 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 5215 int holds_lock) 5216 { 5217 struct sctp_laddr *laddr; 5218 5219 if (holds_lock == 0) { 5220 SCTP_INP_RLOCK(inp); 5221 } 5222 5223 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5224 if (laddr->ifa == NULL) 5225 continue; 5226 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 5227 continue; 5228 
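		/*
		 * Families match at this point; compare the actual address
		 * bits.  Breaking out of the loop leaves laddr non-NULL,
		 * which is how the found/not-found result is reported after
		 * the loop.
		 */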
#ifdef INET 5229 if (addr->sa_family == AF_INET) { 5230 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5231 laddr->ifa->address.sin.sin_addr.s_addr) { 5232 /* found him. */ 5233 break; 5234 } 5235 } 5236 #endif 5237 #ifdef INET6 5238 if (addr->sa_family == AF_INET6) { 5239 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5240 &laddr->ifa->address.sin6)) { 5241 /* found him. */ 5242 break; 5243 } 5244 } 5245 #endif 5246 } 5247 if (holds_lock == 0) { 5248 SCTP_INP_RUNLOCK(inp); 5249 } 5250 if (laddr != NULL) { 5251 return (laddr->ifa); 5252 } else { 5253 return (NULL); 5254 } 5255 } 5256 5257 uint32_t 5258 sctp_get_ifa_hash_val(struct sockaddr *addr) 5259 { 5260 switch (addr->sa_family) { 5261 #ifdef INET 5262 case AF_INET: 5263 { 5264 struct sockaddr_in *sin; 5265 5266 sin = (struct sockaddr_in *)addr; 5267 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 5268 } 5269 #endif 5270 #ifdef INET6 5271 case AF_INET6: 5272 { 5273 struct sockaddr_in6 *sin6; 5274 uint32_t hash_of_addr; 5275 5276 sin6 = (struct sockaddr_in6 *)addr; 5277 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 5278 sin6->sin6_addr.s6_addr32[1] + 5279 sin6->sin6_addr.s6_addr32[2] + 5280 sin6->sin6_addr.s6_addr32[3]); 5281 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 5282 return (hash_of_addr); 5283 } 5284 #endif 5285 default: 5286 break; 5287 } 5288 return (0); 5289 } 5290 5291 struct sctp_ifa * 5292 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 5293 { 5294 struct sctp_ifa *sctp_ifap; 5295 struct sctp_vrf *vrf; 5296 struct sctp_ifalist *hash_head; 5297 uint32_t hash_of_addr; 5298 5299 if (holds_lock == 0) 5300 SCTP_IPI_ADDR_RLOCK(); 5301 5302 vrf = sctp_find_vrf(vrf_id); 5303 if (vrf == NULL) { 5304 if (holds_lock == 0) 5305 SCTP_IPI_ADDR_RUNLOCK(); 5306 return (NULL); 5307 } 5308 5309 hash_of_addr = sctp_get_ifa_hash_val(addr); 5310 5311 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 5312 if (hash_head == NULL) { 5313 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5314 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark, 5315 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark)); 5316 sctp_print_address(addr); 5317 SCTP_PRINTF("No such bucket for address\n"); 5318 if (holds_lock == 0) 5319 SCTP_IPI_ADDR_RUNLOCK(); 5320 5321 return (NULL); 5322 } 5323 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5324 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5325 continue; 5326 #ifdef INET 5327 if (addr->sa_family == AF_INET) { 5328 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5329 sctp_ifap->address.sin.sin_addr.s_addr) { 5330 /* found him. */ 5331 break; 5332 } 5333 } 5334 #endif 5335 #ifdef INET6 5336 if (addr->sa_family == AF_INET6) { 5337 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5338 &sctp_ifap->address.sin6)) { 5339 /* found him. */ 5340 break; 5341 } 5342 } 5343 #endif 5344 } 5345 if (holds_lock == 0) 5346 SCTP_IPI_ADDR_RUNLOCK(); 5347 return (sctp_ifap); 5348 } 5349 5350 static void 5351 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, 5352 uint32_t rwnd_req) 5353 { 5354 /* User pulled some data, do we need a rwnd update? 
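	 *
	 * The caller hands in rwnd_req (SCTP_SB_LIMIT_RCV(so) >>
	 * SCTP_RWND_HIWAT_SHIFT, but never below SCTP_MIN_RWND); a
	 * window-update SACK is only worth the TCB lock once the freshly
	 * computed rwnd exceeds my_last_reported_rwnd by at least that much,
	 * otherwise the deficit is simply remembered in
	 * freed_by_sorcv_sincelast for a later call.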
*/ 5355 struct epoch_tracker et; 5356 int r_unlocked = 0; 5357 uint32_t dif, rwnd; 5358 struct socket *so = NULL; 5359 5360 if (stcb == NULL) 5361 return; 5362 5363 atomic_add_int(&stcb->asoc.refcnt, 1); 5364 5365 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5366 (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { 5367 /* Pre-check If we are freeing no update */ 5368 goto no_lock; 5369 } 5370 SCTP_INP_INCR_REF(stcb->sctp_ep); 5371 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5372 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5373 goto out; 5374 } 5375 so = stcb->sctp_socket; 5376 if (so == NULL) { 5377 goto out; 5378 } 5379 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5380 /* Have you have freed enough to look */ 5381 *freed_so_far = 0; 5382 /* Yep, its worth a look and the lock overhead */ 5383 5384 /* Figure out what the rwnd would be */ 5385 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5386 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5387 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5388 } else { 5389 dif = 0; 5390 } 5391 if (dif >= rwnd_req) { 5392 if (hold_rlock) { 5393 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5394 r_unlocked = 1; 5395 } 5396 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5397 /* 5398 * One last check before we allow the guy possibly 5399 * to get in. There is a race, where the guy has not 5400 * reached the gate. In that case 5401 */ 5402 goto out; 5403 } 5404 SCTP_TCB_LOCK(stcb); 5405 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5406 /* No reports here */ 5407 SCTP_TCB_UNLOCK(stcb); 5408 goto out; 5409 } 5410 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5411 NET_EPOCH_ENTER(et); 5412 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5413 5414 sctp_chunk_output(stcb->sctp_ep, stcb, 5415 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5416 /* make sure no timer is running */ 5417 NET_EPOCH_EXIT(et); 5418 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 5419 SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5420 SCTP_TCB_UNLOCK(stcb); 5421 } else { 5422 /* Update how much we have pending */ 5423 stcb->freed_by_sorcv_sincelast = dif; 5424 } 5425 out: 5426 if (so && r_unlocked && hold_rlock) { 5427 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5428 } 5429 5430 SCTP_INP_DECR_REF(stcb->sctp_ep); 5431 no_lock: 5432 atomic_add_int(&stcb->asoc.refcnt, -1); 5433 return; 5434 } 5435 5436 int 5437 sctp_sorecvmsg(struct socket *so, 5438 struct uio *uio, 5439 struct mbuf **mp, 5440 struct sockaddr *from, 5441 int fromlen, 5442 int *msg_flags, 5443 struct sctp_sndrcvinfo *sinfo, 5444 int filling_sinfo) 5445 { 5446 /* 5447 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5448 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5449 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
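	 *
	 * Illustrative userland counterpart (a sketch only, not part of this
	 * file; fd/buf are placeholders and append_to_message() is a made-up
	 * helper): a message larger than the buffer comes back in pieces and
	 * only the last piece carries MSG_EOR, so a careful reader loops:
	 *
	 *   int flags;
	 *   ssize_t n;
	 *
	 *   do {
	 *       flags = 0;
	 *       n = sctp_recvmsg(fd, buf, sizeof(buf), NULL, NULL, NULL, &flags);
	 *       if (n > 0 && !(flags & MSG_NOTIFICATION))
	 *           append_to_message(buf, (size_t)n);
	 *   } while (n > 0 && (flags & MSG_EOR) == 0);
	 *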
5450 * On the way out we may send out any combination of: 5451 * MSG_NOTIFICATION MSG_EOR 5452 * 5453 */ 5454 struct sctp_inpcb *inp = NULL; 5455 ssize_t my_len = 0; 5456 ssize_t cp_len = 0; 5457 int error = 0; 5458 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5459 struct mbuf *m = NULL; 5460 struct sctp_tcb *stcb = NULL; 5461 int wakeup_read_socket = 0; 5462 int freecnt_applied = 0; 5463 int out_flags = 0, in_flags = 0; 5464 int block_allowed = 1; 5465 uint32_t freed_so_far = 0; 5466 ssize_t copied_so_far = 0; 5467 int in_eeor_mode = 0; 5468 int no_rcv_needed = 0; 5469 uint32_t rwnd_req = 0; 5470 int hold_sblock = 0; 5471 int hold_rlock = 0; 5472 ssize_t slen = 0; 5473 uint32_t held_length = 0; 5474 int sockbuf_lock = 0; 5475 5476 if (uio == NULL) { 5477 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5478 return (EINVAL); 5479 } 5480 5481 if (msg_flags) { 5482 in_flags = *msg_flags; 5483 if (in_flags & MSG_PEEK) 5484 SCTP_STAT_INCR(sctps_read_peeks); 5485 } else { 5486 in_flags = 0; 5487 } 5488 slen = uio->uio_resid; 5489 5490 /* Pull in and set up our int flags */ 5491 if (in_flags & MSG_OOB) { 5492 /* Out of band's NOT supported */ 5493 return (EOPNOTSUPP); 5494 } 5495 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5496 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5497 return (EINVAL); 5498 } 5499 if ((in_flags & (MSG_DONTWAIT 5500 | MSG_NBIO 5501 )) || 5502 SCTP_SO_IS_NBIO(so)) { 5503 block_allowed = 0; 5504 } 5505 /* setup the endpoint */ 5506 inp = (struct sctp_inpcb *)so->so_pcb; 5507 if (inp == NULL) { 5508 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5509 return (EFAULT); 5510 } 5511 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5512 /* Must be at least a MTU's worth */ 5513 if (rwnd_req < SCTP_MIN_RWND) 5514 rwnd_req = SCTP_MIN_RWND; 5515 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5516 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5517 sctp_misc_ints(SCTP_SORECV_ENTER, 5518 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5519 } 5520 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5521 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5522 rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); 5523 } 5524 5525 5526 error = sblock(&so->so_rcv, (block_allowed ? 
SBL_WAIT : 0)); 5527 if (error) { 5528 goto release_unlocked; 5529 } 5530 sockbuf_lock = 1; 5531 restart: 5532 5533 restart_nosblocks: 5534 if (hold_sblock == 0) { 5535 SOCKBUF_LOCK(&so->so_rcv); 5536 hold_sblock = 1; 5537 } 5538 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5539 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5540 goto out; 5541 } 5542 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5543 if (so->so_error) { 5544 error = so->so_error; 5545 if ((in_flags & MSG_PEEK) == 0) 5546 so->so_error = 0; 5547 goto out; 5548 } else { 5549 if (so->so_rcv.sb_cc == 0) { 5550 /* indicate EOF */ 5551 error = 0; 5552 goto out; 5553 } 5554 } 5555 } 5556 if (so->so_rcv.sb_cc <= held_length) { 5557 if (so->so_error) { 5558 error = so->so_error; 5559 if ((in_flags & MSG_PEEK) == 0) { 5560 so->so_error = 0; 5561 } 5562 goto out; 5563 } 5564 if ((so->so_rcv.sb_cc == 0) && 5565 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5566 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5567 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5568 /* 5569 * For active open side clear flags for 5570 * re-use passive open is blocked by 5571 * connect. 5572 */ 5573 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5574 /* 5575 * You were aborted, passive side 5576 * always hits here 5577 */ 5578 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5579 error = ECONNRESET; 5580 } 5581 so->so_state &= ~(SS_ISCONNECTING | 5582 SS_ISDISCONNECTING | 5583 SS_ISCONFIRMING | 5584 SS_ISCONNECTED); 5585 if (error == 0) { 5586 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5587 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5588 error = ENOTCONN; 5589 } 5590 } 5591 goto out; 5592 } 5593 } 5594 if (block_allowed) { 5595 error = sbwait(&so->so_rcv); 5596 if (error) { 5597 goto out; 5598 } 5599 held_length = 0; 5600 goto restart_nosblocks; 5601 } else { 5602 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5603 error = EWOULDBLOCK; 5604 goto out; 5605 } 5606 } 5607 if (hold_sblock == 1) { 5608 SOCKBUF_UNLOCK(&so->so_rcv); 5609 hold_sblock = 0; 5610 } 5611 /* we possibly have data we can read */ 5612 /* sa_ignore FREED_MEMORY */ 5613 control = TAILQ_FIRST(&inp->read_queue); 5614 if (control == NULL) { 5615 /* 5616 * This could be happening since the appender did the 5617 * increment but as not yet did the tailq insert onto the 5618 * read_queue 5619 */ 5620 if (hold_rlock == 0) { 5621 SCTP_INP_READ_LOCK(inp); 5622 } 5623 control = TAILQ_FIRST(&inp->read_queue); 5624 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5625 #ifdef INVARIANTS 5626 panic("Huh, its non zero and nothing on control?"); 5627 #endif 5628 so->so_rcv.sb_cc = 0; 5629 } 5630 SCTP_INP_READ_UNLOCK(inp); 5631 hold_rlock = 0; 5632 goto restart; 5633 } 5634 5635 if ((control->length == 0) && 5636 (control->do_not_ref_stcb)) { 5637 /* 5638 * Clean up code for freeing assoc that left behind a 5639 * pdapi.. maybe a peer in EEOR that just closed after 5640 * sending and never indicated a EOR. 5641 */ 5642 if (hold_rlock == 0) { 5643 hold_rlock = 1; 5644 SCTP_INP_READ_LOCK(inp); 5645 } 5646 control->held_length = 0; 5647 if (control->data) { 5648 /* Hmm there is data here .. 
fix */ 5649 struct mbuf *m_tmp; 5650 int cnt = 0; 5651 5652 m_tmp = control->data; 5653 while (m_tmp) { 5654 cnt += SCTP_BUF_LEN(m_tmp); 5655 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5656 control->tail_mbuf = m_tmp; 5657 control->end_added = 1; 5658 } 5659 m_tmp = SCTP_BUF_NEXT(m_tmp); 5660 } 5661 control->length = cnt; 5662 } else { 5663 /* remove it */ 5664 TAILQ_REMOVE(&inp->read_queue, control, next); 5665 /* Add back any hidden data */ 5666 sctp_free_remote_addr(control->whoFrom); 5667 sctp_free_a_readq(stcb, control); 5668 } 5669 if (hold_rlock) { 5670 hold_rlock = 0; 5671 SCTP_INP_READ_UNLOCK(inp); 5672 } 5673 goto restart; 5674 } 5675 if ((control->length == 0) && 5676 (control->end_added == 1)) { 5677 /* 5678 * Do we also need to check for (control->pdapi_aborted == 5679 * 1)? 5680 */ 5681 if (hold_rlock == 0) { 5682 hold_rlock = 1; 5683 SCTP_INP_READ_LOCK(inp); 5684 } 5685 TAILQ_REMOVE(&inp->read_queue, control, next); 5686 if (control->data) { 5687 #ifdef INVARIANTS 5688 panic("control->data not null but control->length == 0"); 5689 #else 5690 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n"); 5691 sctp_m_freem(control->data); 5692 control->data = NULL; 5693 #endif 5694 } 5695 if (control->aux_data) { 5696 sctp_m_free(control->aux_data); 5697 control->aux_data = NULL; 5698 } 5699 #ifdef INVARIANTS 5700 if (control->on_strm_q) { 5701 panic("About to free ctl:%p so:%p and its in %d", 5702 control, so, control->on_strm_q); 5703 } 5704 #endif 5705 sctp_free_remote_addr(control->whoFrom); 5706 sctp_free_a_readq(stcb, control); 5707 if (hold_rlock) { 5708 hold_rlock = 0; 5709 SCTP_INP_READ_UNLOCK(inp); 5710 } 5711 goto restart; 5712 } 5713 if (control->length == 0) { 5714 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5715 (filling_sinfo)) { 5716 /* find a more suitable one than this */ 5717 ctl = TAILQ_NEXT(control, next); 5718 while (ctl) { 5719 if ((ctl->stcb != control->stcb) && (ctl->length) && 5720 (ctl->some_taken || 5721 (ctl->spec_flags & M_NOTIFICATION) || 5722 ((ctl->do_not_ref_stcb == 0) && 5723 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5724 ) { 5725 /*- 5726 * If we have a different TCB next, and there is data 5727 * present. If we have already taken some (pdapi), OR we can 5728 * ref the tcb and no delivery has started on this stream, we 5729 * take it. Note we allow a notification on a different 5730 * assoc to be delivered. 5731 */ 5732 control = ctl; 5733 goto found_one; 5734 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5735 (ctl->length) && 5736 ((ctl->some_taken) || 5737 ((ctl->do_not_ref_stcb == 0) && 5738 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5739 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5740 /*- 5741 * If we have the same tcb, and there is data present, and we 5742 * have the strm interleave feature present. Then if we have 5743 * taken some (pdapi) or we can refer to that tcb AND we have 5744 * not started a delivery for this stream, we can take it. 5745 * Note we do NOT allow a notification on the same assoc to 5746 * be delivered. 5747 */ 5748 control = ctl; 5749 goto found_one; 5750 } 5751 ctl = TAILQ_NEXT(ctl, next); 5752 } 5753 } 5754 /* 5755 * If we reach here, no suitable replacement is available 5756 * <or> fragment interleave is NOT on. So stuff the sb_cc 5757 * into our held count, and it's time to sleep again.
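 * We stash the current sb_cc in held_length; back at restart we go to sleep
 * unless the socket buffer has grown past that amount.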
5758 */ 5759 held_length = so->so_rcv.sb_cc; 5760 control->held_length = so->so_rcv.sb_cc; 5761 goto restart; 5762 } 5763 /* Clear the held length since there is something to read */ 5764 control->held_length = 0; 5765 found_one: 5766 /* 5767 * If we reach here, control has a some data for us to read off. 5768 * Note that stcb COULD be NULL. 5769 */ 5770 if (hold_rlock == 0) { 5771 hold_rlock = 1; 5772 SCTP_INP_READ_LOCK(inp); 5773 } 5774 control->some_taken++; 5775 stcb = control->stcb; 5776 if (stcb) { 5777 if ((control->do_not_ref_stcb == 0) && 5778 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5779 if (freecnt_applied == 0) 5780 stcb = NULL; 5781 } else if (control->do_not_ref_stcb == 0) { 5782 /* you can't free it on me please */ 5783 /* 5784 * The lock on the socket buffer protects us so the 5785 * free code will stop. But since we used the 5786 * socketbuf lock and the sender uses the tcb_lock 5787 * to increment, we need to use the atomic add to 5788 * the refcnt 5789 */ 5790 if (freecnt_applied) { 5791 #ifdef INVARIANTS 5792 panic("refcnt already incremented"); 5793 #else 5794 SCTP_PRINTF("refcnt already incremented?\n"); 5795 #endif 5796 } else { 5797 atomic_add_int(&stcb->asoc.refcnt, 1); 5798 freecnt_applied = 1; 5799 } 5800 /* 5801 * Setup to remember how much we have not yet told 5802 * the peer our rwnd has opened up. Note we grab the 5803 * value from the tcb from last time. Note too that 5804 * sack sending clears this when a sack is sent, 5805 * which is fine. Once we hit the rwnd_req, we then 5806 * will go to the sctp_user_rcvd() that will not 5807 * lock until it KNOWs it MUST send a WUP-SACK. 5808 */ 5809 freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast; 5810 stcb->freed_by_sorcv_sincelast = 0; 5811 } 5812 } 5813 if (stcb && 5814 ((control->spec_flags & M_NOTIFICATION) == 0) && 5815 control->do_not_ref_stcb == 0) { 5816 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5817 } 5818 5819 /* First lets get off the sinfo and sockaddr info */ 5820 if ((sinfo != NULL) && (filling_sinfo != 0)) { 5821 sinfo->sinfo_stream = control->sinfo_stream; 5822 sinfo->sinfo_ssn = (uint16_t)control->mid; 5823 sinfo->sinfo_flags = control->sinfo_flags; 5824 sinfo->sinfo_ppid = control->sinfo_ppid; 5825 sinfo->sinfo_context = control->sinfo_context; 5826 sinfo->sinfo_timetolive = control->sinfo_timetolive; 5827 sinfo->sinfo_tsn = control->sinfo_tsn; 5828 sinfo->sinfo_cumtsn = control->sinfo_cumtsn; 5829 sinfo->sinfo_assoc_id = control->sinfo_assoc_id; 5830 nxt = TAILQ_NEXT(control, next); 5831 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5832 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5833 struct sctp_extrcvinfo *s_extra; 5834 5835 s_extra = (struct sctp_extrcvinfo *)sinfo; 5836 if ((nxt) && 5837 (nxt->length)) { 5838 s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5839 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5840 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5841 } 5842 if (nxt->spec_flags & M_NOTIFICATION) { 5843 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5844 } 5845 s_extra->serinfo_next_aid = nxt->sinfo_assoc_id; 5846 s_extra->serinfo_next_length = nxt->length; 5847 s_extra->serinfo_next_ppid = nxt->sinfo_ppid; 5848 s_extra->serinfo_next_stream = nxt->sinfo_stream; 5849 if (nxt->tail_mbuf != NULL) { 5850 if (nxt->end_added) { 5851 s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5852 } 5853 } 5854 } else { 5855 /* 5856 * we explicitly 0 this, since the memcpy 5857 * got some other things 
beyond the older 5858 * sinfo_ that is on the control's structure 5859 * :-D 5860 */ 5861 nxt = NULL; 5862 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 5863 s_extra->serinfo_next_aid = 0; 5864 s_extra->serinfo_next_length = 0; 5865 s_extra->serinfo_next_ppid = 0; 5866 s_extra->serinfo_next_stream = 0; 5867 } 5868 } 5869 /* 5870 * update off the real current cum-ack, if we have an stcb. 5871 */ 5872 if ((control->do_not_ref_stcb == 0) && stcb) 5873 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5874 /* 5875 * mask off the high bits, we keep the actual chunk bits in 5876 * there. 5877 */ 5878 sinfo->sinfo_flags &= 0x00ff; 5879 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5880 sinfo->sinfo_flags |= SCTP_UNORDERED; 5881 } 5882 } 5883 #ifdef SCTP_ASOCLOG_OF_TSNS 5884 { 5885 int index, newindex; 5886 struct sctp_pcbtsn_rlog *entry; 5887 5888 do { 5889 index = inp->readlog_index; 5890 newindex = index + 1; 5891 if (newindex >= SCTP_READ_LOG_SIZE) { 5892 newindex = 0; 5893 } 5894 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5895 entry = &inp->readlog[index]; 5896 entry->vtag = control->sinfo_assoc_id; 5897 entry->strm = control->sinfo_stream; 5898 entry->seq = (uint16_t)control->mid; 5899 entry->sz = control->length; 5900 entry->flgs = control->sinfo_flags; 5901 } 5902 #endif 5903 if ((fromlen > 0) && (from != NULL)) { 5904 union sctp_sockstore store; 5905 size_t len; 5906 5907 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5908 #ifdef INET6 5909 case AF_INET6: 5910 len = sizeof(struct sockaddr_in6); 5911 store.sin6 = control->whoFrom->ro._l_addr.sin6; 5912 store.sin6.sin6_port = control->port_from; 5913 break; 5914 #endif 5915 #ifdef INET 5916 case AF_INET: 5917 #ifdef INET6 5918 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) { 5919 len = sizeof(struct sockaddr_in6); 5920 in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin, 5921 &store.sin6); 5922 store.sin6.sin6_port = control->port_from; 5923 } else { 5924 len = sizeof(struct sockaddr_in); 5925 store.sin = control->whoFrom->ro._l_addr.sin; 5926 store.sin.sin_port = control->port_from; 5927 } 5928 #else 5929 len = sizeof(struct sockaddr_in); 5930 store.sin = control->whoFrom->ro._l_addr.sin; 5931 store.sin.sin_port = control->port_from; 5932 #endif 5933 break; 5934 #endif 5935 default: 5936 len = 0; 5937 break; 5938 } 5939 memcpy(from, &store, min((size_t)fromlen, len)); 5940 #ifdef INET6 5941 { 5942 struct sockaddr_in6 lsa6, *from6; 5943 5944 from6 = (struct sockaddr_in6 *)from; 5945 sctp_recover_scope_mac(from6, (&lsa6)); 5946 } 5947 #endif 5948 } 5949 if (hold_rlock) { 5950 SCTP_INP_READ_UNLOCK(inp); 5951 hold_rlock = 0; 5952 } 5953 if (hold_sblock) { 5954 SOCKBUF_UNLOCK(&so->so_rcv); 5955 hold_sblock = 0; 5956 } 5957 /* now copy out what data we can */ 5958 if (mp == NULL) { 5959 /* copy out each mbuf in the chain up to length */ 5960 get_more_data: 5961 m = control->data; 5962 while (m) { 5963 /* Move out all we can */ 5964 cp_len = uio->uio_resid; 5965 my_len = SCTP_BUF_LEN(m); 5966 if (cp_len > my_len) { 5967 /* not enough in this buf */ 5968 cp_len = my_len; 5969 } 5970 if (hold_rlock) { 5971 SCTP_INP_READ_UNLOCK(inp); 5972 hold_rlock = 0; 5973 } 5974 if (cp_len > 0) 5975 error = uiomove(mtod(m, char *), (int)cp_len, uio); 5976 /* re-read */ 5977 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5978 goto release; 5979 } 5980 5981 if ((control->do_not_ref_stcb == 0) && stcb && 5982 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5983 no_rcv_needed = 1; 5984 } 5985 if 
(error) { 5986 /* error we are out of here */ 5987 goto release; 5988 } 5989 SCTP_INP_READ_LOCK(inp); 5990 hold_rlock = 1; 5991 if (cp_len == SCTP_BUF_LEN(m)) { 5992 if ((SCTP_BUF_NEXT(m) == NULL) && 5993 (control->end_added)) { 5994 out_flags |= MSG_EOR; 5995 if ((control->do_not_ref_stcb == 0) && 5996 (control->stcb != NULL) && 5997 ((control->spec_flags & M_NOTIFICATION) == 0)) 5998 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5999 } 6000 if (control->spec_flags & M_NOTIFICATION) { 6001 out_flags |= MSG_NOTIFICATION; 6002 } 6003 /* we ate up the mbuf */ 6004 if (in_flags & MSG_PEEK) { 6005 /* just looking */ 6006 m = SCTP_BUF_NEXT(m); 6007 copied_so_far += cp_len; 6008 } else { 6009 /* dispose of the mbuf */ 6010 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6011 sctp_sblog(&so->so_rcv, 6012 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6013 } 6014 sctp_sbfree(control, stcb, &so->so_rcv, m); 6015 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6016 sctp_sblog(&so->so_rcv, 6017 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6018 } 6019 copied_so_far += cp_len; 6020 freed_so_far += (uint32_t)cp_len; 6021 freed_so_far += MSIZE; 6022 atomic_subtract_int(&control->length, cp_len); 6023 control->data = sctp_m_free(m); 6024 m = control->data; 6025 /* 6026 * been through it all, must hold sb 6027 * lock ok to null tail 6028 */ 6029 if (control->data == NULL) { 6030 #ifdef INVARIANTS 6031 if ((control->end_added == 0) || 6032 (TAILQ_NEXT(control, next) == NULL)) { 6033 /* 6034 * If the end is not 6035 * added, OR the 6036 * next is NOT null 6037 * we MUST have the 6038 * lock. 6039 */ 6040 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 6041 panic("Hmm we don't own the lock?"); 6042 } 6043 } 6044 #endif 6045 control->tail_mbuf = NULL; 6046 #ifdef INVARIANTS 6047 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 6048 panic("end_added, nothing left and no MSG_EOR"); 6049 } 6050 #endif 6051 } 6052 } 6053 } else { 6054 /* Do we need to trim the mbuf? */ 6055 if (control->spec_flags & M_NOTIFICATION) { 6056 out_flags |= MSG_NOTIFICATION; 6057 } 6058 if ((in_flags & MSG_PEEK) == 0) { 6059 SCTP_BUF_RESV_UF(m, cp_len); 6060 SCTP_BUF_LEN(m) -= (int)cp_len; 6061 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6062 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len); 6063 } 6064 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 6065 if ((control->do_not_ref_stcb == 0) && 6066 stcb) { 6067 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 6068 } 6069 copied_so_far += cp_len; 6070 freed_so_far += (uint32_t)cp_len; 6071 freed_so_far += MSIZE; 6072 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6073 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 6074 SCTP_LOG_SBRESULT, 0); 6075 } 6076 atomic_subtract_int(&control->length, cp_len); 6077 } else { 6078 copied_so_far += cp_len; 6079 } 6080 } 6081 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 6082 break; 6083 } 6084 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6085 (control->do_not_ref_stcb == 0) && 6086 (freed_so_far >= rwnd_req)) { 6087 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6088 } 6089 } /* end while(m) */ 6090 /* 6091 * At this point we have looked at it all and we either have 6092 * a MSG_EOR/or read all the user wants... <OR> 6093 * control->length == 0. 
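 * If MSG_EOR is set and we are not peeking, the control below gets torn down
 * and freed; if the user stopped short, MSG_EOR is cleared since more of the
 * message remains on the control.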
6094 */ 6095 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 6096 /* we are done with this control */ 6097 if (control->length == 0) { 6098 if (control->data) { 6099 #ifdef INVARIANTS 6100 panic("control->data not null at read eor?"); 6101 #else 6102 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n"); 6103 sctp_m_freem(control->data); 6104 control->data = NULL; 6105 #endif 6106 } 6107 done_with_control: 6108 if (hold_rlock == 0) { 6109 SCTP_INP_READ_LOCK(inp); 6110 hold_rlock = 1; 6111 } 6112 TAILQ_REMOVE(&inp->read_queue, control, next); 6113 /* Add back any hiddend data */ 6114 if (control->held_length) { 6115 held_length = 0; 6116 control->held_length = 0; 6117 wakeup_read_socket = 1; 6118 } 6119 if (control->aux_data) { 6120 sctp_m_free(control->aux_data); 6121 control->aux_data = NULL; 6122 } 6123 no_rcv_needed = control->do_not_ref_stcb; 6124 sctp_free_remote_addr(control->whoFrom); 6125 control->data = NULL; 6126 #ifdef INVARIANTS 6127 if (control->on_strm_q) { 6128 panic("About to free ctl:%p so:%p and its in %d", 6129 control, so, control->on_strm_q); 6130 } 6131 #endif 6132 sctp_free_a_readq(stcb, control); 6133 control = NULL; 6134 if ((freed_so_far >= rwnd_req) && 6135 (no_rcv_needed == 0)) 6136 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6137 6138 } else { 6139 /* 6140 * The user did not read all of this 6141 * message, turn off the returned MSG_EOR 6142 * since we are leaving more behind on the 6143 * control to read. 6144 */ 6145 #ifdef INVARIANTS 6146 if (control->end_added && 6147 (control->data == NULL) && 6148 (control->tail_mbuf == NULL)) { 6149 panic("Gak, control->length is corrupt?"); 6150 } 6151 #endif 6152 no_rcv_needed = control->do_not_ref_stcb; 6153 out_flags &= ~MSG_EOR; 6154 } 6155 } 6156 if (out_flags & MSG_EOR) { 6157 goto release; 6158 } 6159 if ((uio->uio_resid == 0) || 6160 ((in_eeor_mode) && 6161 (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) { 6162 goto release; 6163 } 6164 /* 6165 * If I hit here the receiver wants more and this message is 6166 * NOT done (pd-api). So two questions. Can we block? if not 6167 * we are done. Did the user NOT set MSG_WAITALL? 6168 */ 6169 if (block_allowed == 0) { 6170 goto release; 6171 } 6172 /* 6173 * We need to wait for more data a few things: - We don't 6174 * sbunlock() so we don't get someone else reading. - We 6175 * must be sure to account for the case where what is added 6176 * is NOT to our control when we wakeup. 6177 */ 6178 6179 /* 6180 * Do we need to tell the transport a rwnd update might be 6181 * needed before we go to sleep? 
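 * (Only when we hold a tcb reference, are not peeking, and have already
 * freed at least rwnd_req bytes; see the condition just below.)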
6182 */ 6183 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 6184 ((freed_so_far >= rwnd_req) && 6185 (control->do_not_ref_stcb == 0) && 6186 (no_rcv_needed == 0))) { 6187 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6188 } 6189 wait_some_more: 6190 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 6191 goto release; 6192 } 6193 6194 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 6195 goto release; 6196 6197 if (hold_rlock == 1) { 6198 SCTP_INP_READ_UNLOCK(inp); 6199 hold_rlock = 0; 6200 } 6201 if (hold_sblock == 0) { 6202 SOCKBUF_LOCK(&so->so_rcv); 6203 hold_sblock = 1; 6204 } 6205 if ((copied_so_far) && (control->length == 0) && 6206 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 6207 goto release; 6208 } 6209 if (so->so_rcv.sb_cc <= control->held_length) { 6210 error = sbwait(&so->so_rcv); 6211 if (error) { 6212 goto release; 6213 } 6214 control->held_length = 0; 6215 } 6216 if (hold_sblock) { 6217 SOCKBUF_UNLOCK(&so->so_rcv); 6218 hold_sblock = 0; 6219 } 6220 if (control->length == 0) { 6221 /* still nothing here */ 6222 if (control->end_added == 1) { 6223 /* he aborted, or is done i.e.did a shutdown */ 6224 out_flags |= MSG_EOR; 6225 if (control->pdapi_aborted) { 6226 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6227 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6228 6229 out_flags |= MSG_TRUNC; 6230 } else { 6231 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 6232 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6233 } 6234 goto done_with_control; 6235 } 6236 if (so->so_rcv.sb_cc > held_length) { 6237 control->held_length = so->so_rcv.sb_cc; 6238 held_length = 0; 6239 } 6240 goto wait_some_more; 6241 } else if (control->data == NULL) { 6242 /* 6243 * we must re-sync since data is probably being 6244 * added 6245 */ 6246 SCTP_INP_READ_LOCK(inp); 6247 if ((control->length > 0) && (control->data == NULL)) { 6248 /* 6249 * big trouble.. we have the lock and its 6250 * corrupt? 6251 */ 6252 #ifdef INVARIANTS 6253 panic("Impossible data==NULL length !=0"); 6254 #endif 6255 out_flags |= MSG_EOR; 6256 out_flags |= MSG_TRUNC; 6257 control->length = 0; 6258 SCTP_INP_READ_UNLOCK(inp); 6259 goto done_with_control; 6260 } 6261 SCTP_INP_READ_UNLOCK(inp); 6262 /* We will fall around to get more data */ 6263 } 6264 goto get_more_data; 6265 } else { 6266 /*- 6267 * Give caller back the mbuf chain, 6268 * store in uio_resid the length 6269 */ 6270 wakeup_read_socket = 0; 6271 if ((control->end_added == 0) || 6272 (TAILQ_NEXT(control, next) == NULL)) { 6273 /* Need to get rlock */ 6274 if (hold_rlock == 0) { 6275 SCTP_INP_READ_LOCK(inp); 6276 hold_rlock = 1; 6277 } 6278 } 6279 if (control->end_added) { 6280 out_flags |= MSG_EOR; 6281 if ((control->do_not_ref_stcb == 0) && 6282 (control->stcb != NULL) && 6283 ((control->spec_flags & M_NOTIFICATION) == 0)) 6284 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6285 } 6286 if (control->spec_flags & M_NOTIFICATION) { 6287 out_flags |= MSG_NOTIFICATION; 6288 } 6289 uio->uio_resid = control->length; 6290 *mp = control->data; 6291 m = control->data; 6292 while (m) { 6293 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6294 sctp_sblog(&so->so_rcv, 6295 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6296 } 6297 sctp_sbfree(control, stcb, &so->so_rcv, m); 6298 freed_so_far += (uint32_t)SCTP_BUF_LEN(m); 6299 freed_so_far += MSIZE; 6300 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6301 sctp_sblog(&so->so_rcv, 6302 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 6303 } 6304 m = SCTP_BUF_NEXT(m); 6305 } 6306 control->data = control->tail_mbuf = NULL; 6307 control->length = 0; 6308 if (out_flags & MSG_EOR) { 6309 /* Done with this control */ 6310 goto done_with_control; 6311 } 6312 } 6313 release: 6314 if (hold_rlock == 1) { 6315 SCTP_INP_READ_UNLOCK(inp); 6316 hold_rlock = 0; 6317 } 6318 if (hold_sblock == 1) { 6319 SOCKBUF_UNLOCK(&so->so_rcv); 6320 hold_sblock = 0; 6321 } 6322 6323 sbunlock(&so->so_rcv); 6324 sockbuf_lock = 0; 6325 6326 release_unlocked: 6327 if (hold_sblock) { 6328 SOCKBUF_UNLOCK(&so->so_rcv); 6329 hold_sblock = 0; 6330 } 6331 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6332 if ((freed_so_far >= rwnd_req) && 6333 (control && (control->do_not_ref_stcb == 0)) && 6334 (no_rcv_needed == 0)) 6335 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6336 } 6337 out: 6338 if (msg_flags) { 6339 *msg_flags = out_flags; 6340 } 6341 if (((out_flags & MSG_EOR) == 0) && 6342 ((in_flags & MSG_PEEK) == 0) && 6343 (sinfo) && 6344 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6345 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6346 struct sctp_extrcvinfo *s_extra; 6347 6348 s_extra = (struct sctp_extrcvinfo *)sinfo; 6349 s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG; 6350 } 6351 if (hold_rlock == 1) { 6352 SCTP_INP_READ_UNLOCK(inp); 6353 } 6354 if (hold_sblock) { 6355 SOCKBUF_UNLOCK(&so->so_rcv); 6356 } 6357 if (sockbuf_lock) { 6358 sbunlock(&so->so_rcv); 6359 } 6360 6361 if (freecnt_applied) { 6362 /* 6363 * The lock on the socket buffer protects us so the free 6364 * code will stop. But since we used the socketbuf lock and 6365 * the sender uses the tcb_lock to increment, we need to use 6366 * the atomic add to the refcnt. 6367 */ 6368 if (stcb == NULL) { 6369 #ifdef INVARIANTS 6370 panic("stcb for refcnt has gone NULL?"); 6371 goto stage_left; 6372 #else 6373 goto stage_left; 6374 #endif 6375 } 6376 /* Save the value back for next time */ 6377 stcb->freed_by_sorcv_sincelast = freed_so_far; 6378 atomic_add_int(&stcb->asoc.refcnt, -1); 6379 } 6380 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 6381 if (stcb) { 6382 sctp_misc_ints(SCTP_SORECV_DONE, 6383 freed_so_far, 6384 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6385 stcb->asoc.my_rwnd, 6386 so->so_rcv.sb_cc); 6387 } else { 6388 sctp_misc_ints(SCTP_SORECV_DONE, 6389 freed_so_far, 6390 (uint32_t)((uio) ? (slen - uio->uio_resid) : slen), 6391 0, 6392 so->so_rcv.sb_cc); 6393 } 6394 } 6395 stage_left: 6396 if (wakeup_read_socket) { 6397 sctp_sorwakeup(inp, so); 6398 } 6399 return (error); 6400 } 6401 6402 6403 #ifdef SCTP_MBUF_LOGGING 6404 struct mbuf * 6405 sctp_m_free(struct mbuf *m) 6406 { 6407 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6408 sctp_log_mb(m, SCTP_MBUF_IFREE); 6409 } 6410 return (m_free(m)); 6411 } 6412 6413 void 6414 sctp_m_freem(struct mbuf *mb) 6415 { 6416 while (mb != NULL) 6417 mb = sctp_m_free(mb); 6418 } 6419 6420 #endif 6421 6422 int 6423 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 6424 { 6425 /* 6426 * Given a local address. For all associations that holds the 6427 * address, request a peer-set-primary. 
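 * The request itself is queued on the address work queue and picked up by
 * the iterator once the ADDR_WQ timer fires; this function only looks up the
 * ifa and schedules the work.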
6428 */ 6429 struct sctp_ifa *ifa; 6430 struct sctp_laddr *wi; 6431 6432 ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0); 6433 if (ifa == NULL) { 6434 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); 6435 return (EADDRNOTAVAIL); 6436 } 6437 /* 6438 * Now that we have the ifa we must awaken the iterator with this 6439 * message. 6440 */ 6441 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6442 if (wi == NULL) { 6443 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6444 return (ENOMEM); 6445 } 6446 /* Now incr the count and int wi structure */ 6447 SCTP_INCR_LADDR_COUNT(); 6448 memset(wi, 0, sizeof(*wi)); 6449 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6450 wi->ifa = ifa; 6451 wi->action = SCTP_SET_PRIM_ADDR; 6452 atomic_add_int(&ifa->refcount, 1); 6453 6454 /* Now add it to the work queue */ 6455 SCTP_WQ_ADDR_LOCK(); 6456 /* 6457 * Should this really be a tailq? As it is we will process the 6458 * newest first :-0 6459 */ 6460 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6461 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6462 (struct sctp_inpcb *)NULL, 6463 (struct sctp_tcb *)NULL, 6464 (struct sctp_nets *)NULL); 6465 SCTP_WQ_ADDR_UNLOCK(); 6466 return (0); 6467 } 6468 6469 6470 int 6471 sctp_soreceive(struct socket *so, 6472 struct sockaddr **psa, 6473 struct uio *uio, 6474 struct mbuf **mp0, 6475 struct mbuf **controlp, 6476 int *flagsp) 6477 { 6478 int error, fromlen; 6479 uint8_t sockbuf[256]; 6480 struct sockaddr *from; 6481 struct sctp_extrcvinfo sinfo; 6482 int filling_sinfo = 1; 6483 int flags; 6484 struct sctp_inpcb *inp; 6485 6486 inp = (struct sctp_inpcb *)so->so_pcb; 6487 /* pickup the assoc we are reading from */ 6488 if (inp == NULL) { 6489 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6490 return (EINVAL); 6491 } 6492 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6493 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6494 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6495 (controlp == NULL)) { 6496 /* user does not want the sndrcv ctl */ 6497 filling_sinfo = 0; 6498 } 6499 if (psa) { 6500 from = (struct sockaddr *)sockbuf; 6501 fromlen = sizeof(sockbuf); 6502 from->sa_len = 0; 6503 } else { 6504 from = NULL; 6505 fromlen = 0; 6506 } 6507 6508 if (filling_sinfo) { 6509 memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo)); 6510 } 6511 if (flagsp != NULL) { 6512 flags = *flagsp; 6513 } else { 6514 flags = 0; 6515 } 6516 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags, 6517 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6518 if (flagsp != NULL) { 6519 *flagsp = flags; 6520 } 6521 if (controlp != NULL) { 6522 /* copy back the sinfo in a CMSG format */ 6523 if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) { 6524 *controlp = sctp_build_ctl_nchunk(inp, 6525 (struct sctp_sndrcvinfo *)&sinfo); 6526 } else { 6527 *controlp = NULL; 6528 } 6529 } 6530 if (psa) { 6531 /* copy back the address info */ 6532 if (from && from->sa_len) { 6533 *psa = sodupsockaddr(from, M_NOWAIT); 6534 } else { 6535 *psa = NULL; 6536 } 6537 } 6538 return (error); 6539 } 6540 6541 6542 6543 6544 6545 int 6546 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6547 int totaddr, int *error) 6548 { 6549 int added = 0; 6550 int i; 6551 struct sctp_inpcb *inp; 6552 struct sockaddr *sa; 6553 size_t incr = 0; 6554 #ifdef INET 6555 struct sockaddr_in *sin; 6556 #endif 6557 #ifdef INET6 6558 struct sockaddr_in6 *sin6; 6559 #endif 6560 6561 sa = addr; 6562 
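	/*
	 * Walk the caller-packed array of sockaddrs and add each usable
	 * unicast address to the association.  Any invalid address tears
	 * down the whole association and reports the failure via *error.
	 */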
inp = stcb->sctp_ep; 6563 *error = 0; 6564 for (i = 0; i < totaddr; i++) { 6565 switch (sa->sa_family) { 6566 #ifdef INET 6567 case AF_INET: 6568 incr = sizeof(struct sockaddr_in); 6569 sin = (struct sockaddr_in *)sa; 6570 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6571 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6572 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6573 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6574 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6575 SCTP_FROM_SCTPUTIL + SCTP_LOC_7); 6576 *error = EINVAL; 6577 goto out_now; 6578 } 6579 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6580 SCTP_DONOT_SETSCOPE, 6581 SCTP_ADDR_IS_CONFIRMED)) { 6582 /* assoc gone no un-lock */ 6583 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6584 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6585 SCTP_FROM_SCTPUTIL + SCTP_LOC_8); 6586 *error = ENOBUFS; 6587 goto out_now; 6588 } 6589 added++; 6590 break; 6591 #endif 6592 #ifdef INET6 6593 case AF_INET6: 6594 incr = sizeof(struct sockaddr_in6); 6595 sin6 = (struct sockaddr_in6 *)sa; 6596 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6597 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6598 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6599 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6600 SCTP_FROM_SCTPUTIL + SCTP_LOC_9); 6601 *error = EINVAL; 6602 goto out_now; 6603 } 6604 if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port, 6605 SCTP_DONOT_SETSCOPE, 6606 SCTP_ADDR_IS_CONFIRMED)) { 6607 /* assoc gone no un-lock */ 6608 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6609 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 6610 SCTP_FROM_SCTPUTIL + SCTP_LOC_10); 6611 *error = ENOBUFS; 6612 goto out_now; 6613 } 6614 added++; 6615 break; 6616 #endif 6617 default: 6618 break; 6619 } 6620 sa = (struct sockaddr *)((caddr_t)sa + incr); 6621 } 6622 out_now: 6623 return (added); 6624 } 6625 6626 int 6627 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6628 unsigned int totaddr, 6629 unsigned int *num_v4, unsigned int *num_v6, 6630 unsigned int limit) 6631 { 6632 struct sockaddr *sa; 6633 struct sctp_tcb *stcb; 6634 unsigned int incr, at, i; 6635 6636 at = 0; 6637 sa = addr; 6638 *num_v6 = *num_v4 = 0; 6639 /* account and validate addresses */ 6640 if (totaddr == 0) { 6641 return (EINVAL); 6642 } 6643 for (i = 0; i < totaddr; i++) { 6644 if (at + sizeof(struct sockaddr) > limit) { 6645 return (EINVAL); 6646 } 6647 switch (sa->sa_family) { 6648 #ifdef INET 6649 case AF_INET: 6650 incr = (unsigned int)sizeof(struct sockaddr_in); 6651 if (sa->sa_len != incr) { 6652 return (EINVAL); 6653 } 6654 (*num_v4) += 1; 6655 break; 6656 #endif 6657 #ifdef INET6 6658 case AF_INET6: 6659 { 6660 struct sockaddr_in6 *sin6; 6661 6662 sin6 = (struct sockaddr_in6 *)sa; 6663 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6664 /* Must be non-mapped for connectx */ 6665 return (EINVAL); 6666 } 6667 incr = (unsigned int)sizeof(struct sockaddr_in6); 6668 if (sa->sa_len != incr) { 6669 return (EINVAL); 6670 } 6671 (*num_v6) += 1; 6672 break; 6673 } 6674 #endif 6675 default: 6676 return (EINVAL); 6677 } 6678 if ((at + incr) > limit) { 6679 return (EINVAL); 6680 } 6681 SCTP_INP_INCR_REF(inp); 6682 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6683 if (stcb != NULL) { 6684 SCTP_TCB_UNLOCK(stcb); 6685 return (EALREADY); 6686 } else { 6687 SCTP_INP_DECR_REF(inp); 6688 } 6689 at += incr; 6690 sa = (struct sockaddr *)((caddr_t)sa + 
incr); 6691 } 6692 return (0); 6693 } 6694 6695 /* 6696 * sctp_bindx(ADD) for one address. 6697 * assumes all arguments are valid/checked by caller. 6698 */ 6699 void 6700 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6701 struct sockaddr *sa, uint32_t vrf_id, int *error, 6702 void *p) 6703 { 6704 #if defined(INET) && defined(INET6) 6705 struct sockaddr_in sin; 6706 #endif 6707 #ifdef INET6 6708 struct sockaddr_in6 *sin6; 6709 #endif 6710 #ifdef INET 6711 struct sockaddr_in *sinp; 6712 #endif 6713 struct sockaddr *addr_to_use; 6714 struct sctp_inpcb *lep; 6715 uint16_t port; 6716 6717 /* see if we're bound all already! */ 6718 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6719 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6720 *error = EINVAL; 6721 return; 6722 } 6723 switch (sa->sa_family) { 6724 #ifdef INET6 6725 case AF_INET6: 6726 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6727 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6728 *error = EINVAL; 6729 return; 6730 } 6731 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6732 /* can only bind v6 on PF_INET6 sockets */ 6733 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6734 *error = EINVAL; 6735 return; 6736 } 6737 sin6 = (struct sockaddr_in6 *)sa; 6738 port = sin6->sin6_port; 6739 #ifdef INET 6740 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6741 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6742 SCTP_IPV6_V6ONLY(inp)) { 6743 /* can't bind v4-mapped on PF_INET sockets */ 6744 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6745 *error = EINVAL; 6746 return; 6747 } 6748 in6_sin6_2_sin(&sin, sin6); 6749 addr_to_use = (struct sockaddr *)&sin; 6750 } else { 6751 addr_to_use = sa; 6752 } 6753 #else 6754 addr_to_use = sa; 6755 #endif 6756 break; 6757 #endif 6758 #ifdef INET 6759 case AF_INET: 6760 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6761 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6762 *error = EINVAL; 6763 return; 6764 } 6765 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6766 SCTP_IPV6_V6ONLY(inp)) { 6767 /* can't bind v4 on PF_INET sockets */ 6768 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6769 *error = EINVAL; 6770 return; 6771 } 6772 sinp = (struct sockaddr_in *)sa; 6773 port = sinp->sin_port; 6774 addr_to_use = sa; 6775 break; 6776 #endif 6777 default: 6778 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6779 *error = EINVAL; 6780 return; 6781 } 6782 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 6783 if (p == NULL) { 6784 /* Can't get proc for Net/Open BSD */ 6785 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6786 *error = EINVAL; 6787 return; 6788 } 6789 *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); 6790 return; 6791 } 6792 /* Validate the incoming port. */ 6793 if ((port != 0) && (port != inp->sctp_lport)) { 6794 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6795 *error = EINVAL; 6796 return; 6797 } 6798 lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); 6799 if (lep == NULL) { 6800 /* add the address */ 6801 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, 6802 SCTP_ADD_IP_ADDRESS, vrf_id); 6803 } else { 6804 if (lep != inp) { 6805 *error = EADDRINUSE; 6806 } 6807 SCTP_INP_DECR_REF(lep); 6808 } 6809 } 6810 6811 /* 6812 * sctp_bindx(DELETE) for one address. 6813 * assumes all arguments are valid/checked by caller. 
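 * Mirrors sctp_bindx_add_address(): validate the family and length, undo any
 * v4-mapped IPv6 form, then hand the delete off to sctp_addr_mgmt_ep_sa().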
6814 */ 6815 void 6816 sctp_bindx_delete_address(struct sctp_inpcb *inp, 6817 struct sockaddr *sa, uint32_t vrf_id, int *error) 6818 { 6819 struct sockaddr *addr_to_use; 6820 #if defined(INET) && defined(INET6) 6821 struct sockaddr_in6 *sin6; 6822 struct sockaddr_in sin; 6823 #endif 6824 6825 /* see if we're bound all already! */ 6826 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6827 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6828 *error = EINVAL; 6829 return; 6830 } 6831 switch (sa->sa_family) { 6832 #ifdef INET6 6833 case AF_INET6: 6834 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6835 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6836 *error = EINVAL; 6837 return; 6838 } 6839 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6840 /* can only bind v6 on PF_INET6 sockets */ 6841 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6842 *error = EINVAL; 6843 return; 6844 } 6845 #ifdef INET 6846 sin6 = (struct sockaddr_in6 *)sa; 6847 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6848 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6849 SCTP_IPV6_V6ONLY(inp)) { 6850 /* can't bind mapped-v4 on PF_INET sockets */ 6851 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6852 *error = EINVAL; 6853 return; 6854 } 6855 in6_sin6_2_sin(&sin, sin6); 6856 addr_to_use = (struct sockaddr *)&sin; 6857 } else { 6858 addr_to_use = sa; 6859 } 6860 #else 6861 addr_to_use = sa; 6862 #endif 6863 break; 6864 #endif 6865 #ifdef INET 6866 case AF_INET: 6867 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6868 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6869 *error = EINVAL; 6870 return; 6871 } 6872 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6873 SCTP_IPV6_V6ONLY(inp)) { 6874 /* can't bind v4 on PF_INET sockets */ 6875 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6876 *error = EINVAL; 6877 return; 6878 } 6879 addr_to_use = sa; 6880 break; 6881 #endif 6882 default: 6883 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6884 *error = EINVAL; 6885 return; 6886 } 6887 /* No lock required mgmt_ep_sa does its own locking. 
*/ 6888 *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, 6889 vrf_id); 6890 } 6891 6892 /* 6893 * returns the valid local address count for an assoc, taking into account 6894 * all scoping rules 6895 */ 6896 int 6897 sctp_local_addr_count(struct sctp_tcb *stcb) 6898 { 6899 int loopback_scope; 6900 #if defined(INET) 6901 int ipv4_local_scope, ipv4_addr_legal; 6902 #endif 6903 #if defined(INET6) 6904 int local_scope, site_scope, ipv6_addr_legal; 6905 #endif 6906 struct sctp_vrf *vrf; 6907 struct sctp_ifn *sctp_ifn; 6908 struct sctp_ifa *sctp_ifa; 6909 int count = 0; 6910 6911 /* Turn on all the appropriate scopes */ 6912 loopback_scope = stcb->asoc.scope.loopback_scope; 6913 #if defined(INET) 6914 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6915 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6916 #endif 6917 #if defined(INET6) 6918 local_scope = stcb->asoc.scope.local_scope; 6919 site_scope = stcb->asoc.scope.site_scope; 6920 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6921 #endif 6922 SCTP_IPI_ADDR_RLOCK(); 6923 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6924 if (vrf == NULL) { 6925 /* no vrf, no addresses */ 6926 SCTP_IPI_ADDR_RUNLOCK(); 6927 return (0); 6928 } 6929 6930 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6931 /* 6932 * bound all case: go through all ifns on the vrf 6933 */ 6934 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6935 if ((loopback_scope == 0) && 6936 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6937 continue; 6938 } 6939 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6940 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6941 continue; 6942 switch (sctp_ifa->address.sa.sa_family) { 6943 #ifdef INET 6944 case AF_INET: 6945 if (ipv4_addr_legal) { 6946 struct sockaddr_in *sin; 6947 6948 sin = &sctp_ifa->address.sin; 6949 if (sin->sin_addr.s_addr == 0) { 6950 /* 6951 * skip unspecified 6952 * addrs 6953 */ 6954 continue; 6955 } 6956 if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, 6957 &sin->sin_addr) != 0) { 6958 continue; 6959 } 6960 if ((ipv4_local_scope == 0) && 6961 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 6962 continue; 6963 } 6964 /* count this one */ 6965 count++; 6966 } else { 6967 continue; 6968 } 6969 break; 6970 #endif 6971 #ifdef INET6 6972 case AF_INET6: 6973 if (ipv6_addr_legal) { 6974 struct sockaddr_in6 *sin6; 6975 6976 sin6 = &sctp_ifa->address.sin6; 6977 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 6978 continue; 6979 } 6980 if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, 6981 &sin6->sin6_addr) != 0) { 6982 continue; 6983 } 6984 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 6985 if (local_scope == 0) 6986 continue; 6987 if (sin6->sin6_scope_id == 0) { 6988 if (sa6_recoverscope(sin6) != 0) 6989 /* 6990 * 6991 * bad 6992 * link 6993 * 6994 * local 6995 * 6996 * address 6997 */ 6998 continue; 6999 } 7000 } 7001 if ((site_scope == 0) && 7002 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 7003 continue; 7004 } 7005 /* count this one */ 7006 count++; 7007 } 7008 break; 7009 #endif 7010 default: 7011 /* TSNH */ 7012 break; 7013 } 7014 } 7015 } 7016 } else { 7017 /* 7018 * subset bound case 7019 */ 7020 struct sctp_laddr *laddr; 7021 7022 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 7023 sctp_nxt_addr) { 7024 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 7025 continue; 7026 } 7027 /* count this one */ 7028 count++; 7029 } 7030 } 7031 SCTP_IPI_ADDR_RUNLOCK(); 7032 return (count); 7033 } 7034 7035 #if defined(SCTP_LOCAL_TRACE_BUF) 7036 7037 void 7038 sctp_log_trace(uint32_t subsys, 
const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 7039 { 7040 uint32_t saveindex, newindex; 7041 7042 do { 7043 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 7044 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7045 newindex = 1; 7046 } else { 7047 newindex = saveindex + 1; 7048 } 7049 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 7050 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 7051 saveindex = 0; 7052 } 7053 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 7054 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 7055 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 7056 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 7057 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 7058 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 7059 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 7060 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 7061 } 7062 7063 #endif 7064 static void 7065 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, 7066 const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) 7067 { 7068 struct ip *iph; 7069 #ifdef INET6 7070 struct ip6_hdr *ip6; 7071 #endif 7072 struct mbuf *sp, *last; 7073 struct udphdr *uhdr; 7074 uint16_t port; 7075 7076 if ((m->m_flags & M_PKTHDR) == 0) { 7077 /* Can't handle one that is not a pkt hdr */ 7078 goto out; 7079 } 7080 /* Pull the src port */ 7081 iph = mtod(m, struct ip *); 7082 uhdr = (struct udphdr *)((caddr_t)iph + off); 7083 port = uhdr->uh_sport; 7084 /* 7085 * Split out the mbuf chain. Leave the IP header in m, place the 7086 * rest in the sp. 7087 */ 7088 sp = m_split(m, off, M_NOWAIT); 7089 if (sp == NULL) { 7090 /* Gak, drop packet, we can't do a split */ 7091 goto out; 7092 } 7093 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 7094 /* Gak, packet can't have an SCTP header in it - too small */ 7095 m_freem(sp); 7096 goto out; 7097 } 7098 /* Now pull up the UDP header and SCTP header together */ 7099 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 7100 if (sp == NULL) { 7101 /* Gak pullup failed */ 7102 goto out; 7103 } 7104 /* Trim out the UDP header */ 7105 m_adj(sp, sizeof(struct udphdr)); 7106 7107 /* Now reconstruct the mbuf chain */ 7108 for (last = m; last->m_next; last = last->m_next); 7109 last->m_next = sp; 7110 m->m_pkthdr.len += sp->m_pkthdr.len; 7111 /* 7112 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP 7113 * checksum and it was valid. Since CSUM_DATA_VALID == 7114 * CSUM_SCTP_VALID this would imply that the HW also verified the 7115 * SCTP checksum. Therefore, clear the bit. 
7116 */ 7117 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 7118 "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", 7119 m->m_pkthdr.len, 7120 if_name(m->m_pkthdr.rcvif), 7121 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 7122 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; 7123 iph = mtod(m, struct ip *); 7124 switch (iph->ip_v) { 7125 #ifdef INET 7126 case IPVERSION: 7127 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 7128 sctp_input_with_port(m, off, port); 7129 break; 7130 #endif 7131 #ifdef INET6 7132 case IPV6_VERSION >> 4: 7133 ip6 = mtod(m, struct ip6_hdr *); 7134 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 7135 sctp6_input_with_port(&m, &off, port); 7136 break; 7137 #endif 7138 default: 7139 goto out; 7140 break; 7141 } 7142 return; 7143 out: 7144 m_freem(m); 7145 } 7146 7147 #ifdef INET 7148 static void 7149 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) 7150 { 7151 struct ip *outer_ip, *inner_ip; 7152 struct sctphdr *sh; 7153 struct icmp *icmp; 7154 struct udphdr *udp; 7155 struct sctp_inpcb *inp; 7156 struct sctp_tcb *stcb; 7157 struct sctp_nets *net; 7158 struct sctp_init_chunk *ch; 7159 struct sockaddr_in src, dst; 7160 uint8_t type, code; 7161 7162 inner_ip = (struct ip *)vip; 7163 icmp = (struct icmp *)((caddr_t)inner_ip - 7164 (sizeof(struct icmp) - sizeof(struct ip))); 7165 outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); 7166 if (ntohs(outer_ip->ip_len) < 7167 sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) { 7168 return; 7169 } 7170 udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); 7171 sh = (struct sctphdr *)(udp + 1); 7172 memset(&src, 0, sizeof(struct sockaddr_in)); 7173 src.sin_family = AF_INET; 7174 src.sin_len = sizeof(struct sockaddr_in); 7175 src.sin_port = sh->src_port; 7176 src.sin_addr = inner_ip->ip_src; 7177 memset(&dst, 0, sizeof(struct sockaddr_in)); 7178 dst.sin_family = AF_INET; 7179 dst.sin_len = sizeof(struct sockaddr_in); 7180 dst.sin_port = sh->dest_port; 7181 dst.sin_addr = inner_ip->ip_dst; 7182 /* 7183 * 'dst' holds the dest of the packet that failed to be sent. 'src' 7184 * holds our local endpoint address. Thus we reverse the dst and the 7185 * src in the lookup. 7186 */ 7187 inp = NULL; 7188 net = NULL; 7189 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7190 (struct sockaddr *)&src, 7191 &inp, &net, 1, 7192 SCTP_DEFAULT_VRFID); 7193 if ((stcb != NULL) && 7194 (net != NULL) && 7195 (inp != NULL)) { 7196 /* Check the UDP port numbers */ 7197 if ((udp->uh_dport != net->port) || 7198 (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7199 SCTP_TCB_UNLOCK(stcb); 7200 return; 7201 } 7202 /* Check the verification tag */ 7203 if (ntohl(sh->v_tag) != 0) { 7204 /* 7205 * This must be the verification tag used for 7206 * sending out packets. We don't consider packets 7207 * reflecting the verification tag. 7208 */ 7209 if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) { 7210 SCTP_TCB_UNLOCK(stcb); 7211 return; 7212 } 7213 } else { 7214 if (ntohs(outer_ip->ip_len) >= 7215 sizeof(struct ip) + 7216 8 + (inner_ip->ip_hl << 2) + 8 + 20) { 7217 /* 7218 * In this case we can check if we got an 7219 * INIT chunk and if the initiate tag 7220 * matches. 
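 * (A packet carrying an INIT chunk is sent with a zero verification tag in
 * the common header, so the initiate tag inside the chunk is what ties the
 * ICMP payload to this association.)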
7221 */ 7222 ch = (struct sctp_init_chunk *)(sh + 1); 7223 if ((ch->ch.chunk_type != SCTP_INITIATION) || 7224 (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) { 7225 SCTP_TCB_UNLOCK(stcb); 7226 return; 7227 } 7228 } else { 7229 SCTP_TCB_UNLOCK(stcb); 7230 return; 7231 } 7232 } 7233 type = icmp->icmp_type; 7234 code = icmp->icmp_code; 7235 if ((type == ICMP_UNREACH) && 7236 (code == ICMP_UNREACH_PORT)) { 7237 code = ICMP_UNREACH_PROTOCOL; 7238 } 7239 sctp_notify(inp, stcb, net, type, code, 7240 ntohs(inner_ip->ip_len), 7241 (uint32_t)ntohs(icmp->icmp_nextmtu)); 7242 } else { 7243 if ((stcb == NULL) && (inp != NULL)) { 7244 /* reduce ref-count */ 7245 SCTP_INP_WLOCK(inp); 7246 SCTP_INP_DECR_REF(inp); 7247 SCTP_INP_WUNLOCK(inp); 7248 } 7249 if (stcb) { 7250 SCTP_TCB_UNLOCK(stcb); 7251 } 7252 } 7253 return; 7254 } 7255 #endif 7256 7257 #ifdef INET6 7258 static void 7259 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED) 7260 { 7261 struct ip6ctlparam *ip6cp; 7262 struct sctp_inpcb *inp; 7263 struct sctp_tcb *stcb; 7264 struct sctp_nets *net; 7265 struct sctphdr sh; 7266 struct udphdr udp; 7267 struct sockaddr_in6 src, dst; 7268 uint8_t type, code; 7269 7270 ip6cp = (struct ip6ctlparam *)d; 7271 /* 7272 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid. 7273 */ 7274 if (ip6cp->ip6c_m == NULL) { 7275 return; 7276 } 7277 /* 7278 * Check if we can safely examine the ports and the verification tag 7279 * of the SCTP common header. 7280 */ 7281 if (ip6cp->ip6c_m->m_pkthdr.len < 7282 ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) { 7283 return; 7284 } 7285 /* Copy out the UDP header. */ 7286 memset(&udp, 0, sizeof(struct udphdr)); 7287 m_copydata(ip6cp->ip6c_m, 7288 ip6cp->ip6c_off, 7289 sizeof(struct udphdr), 7290 (caddr_t)&udp); 7291 /* Copy out the port numbers and the verification tag. */ 7292 memset(&sh, 0, sizeof(struct sctphdr)); 7293 m_copydata(ip6cp->ip6c_m, 7294 ip6cp->ip6c_off + sizeof(struct udphdr), 7295 sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t), 7296 (caddr_t)&sh); 7297 memset(&src, 0, sizeof(struct sockaddr_in6)); 7298 src.sin6_family = AF_INET6; 7299 src.sin6_len = sizeof(struct sockaddr_in6); 7300 src.sin6_port = sh.src_port; 7301 src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; 7302 if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7303 return; 7304 } 7305 memset(&dst, 0, sizeof(struct sockaddr_in6)); 7306 dst.sin6_family = AF_INET6; 7307 dst.sin6_len = sizeof(struct sockaddr_in6); 7308 dst.sin6_port = sh.dest_port; 7309 dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; 7310 if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { 7311 return; 7312 } 7313 inp = NULL; 7314 net = NULL; 7315 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst, 7316 (struct sockaddr *)&src, 7317 &inp, &net, 1, SCTP_DEFAULT_VRFID); 7318 if ((stcb != NULL) && 7319 (net != NULL) && 7320 (inp != NULL)) { 7321 /* Check the UDP port numbers */ 7322 if ((udp.uh_dport != net->port) || 7323 (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) { 7324 SCTP_TCB_UNLOCK(stcb); 7325 return; 7326 } 7327 /* Check the verification tag */ 7328 if (ntohl(sh.v_tag) != 0) { 7329 /* 7330 * This must be the verification tag used for 7331 * sending out packets. We don't consider packets 7332 * reflecting the verification tag. 
7333 */ 7334 if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) { 7335 SCTP_TCB_UNLOCK(stcb); 7336 return; 7337 } 7338 } else { 7339 if (ip6cp->ip6c_m->m_pkthdr.len >= 7340 ip6cp->ip6c_off + sizeof(struct udphdr) + 7341 sizeof(struct sctphdr) + 7342 sizeof(struct sctp_chunkhdr) + 7343 offsetof(struct sctp_init, a_rwnd)) { 7344 /* 7345 * In this case we can check if we got an 7346 * INIT chunk and if the initiate tag 7347 * matches. 7348 */ 7349 uint32_t initiate_tag; 7350 uint8_t chunk_type; 7351 7352 m_copydata(ip6cp->ip6c_m, 7353 ip6cp->ip6c_off + 7354 sizeof(struct udphdr) + 7355 sizeof(struct sctphdr), 7356 sizeof(uint8_t), 7357 (caddr_t)&chunk_type); 7358 m_copydata(ip6cp->ip6c_m, 7359 ip6cp->ip6c_off + 7360 sizeof(struct udphdr) + 7361 sizeof(struct sctphdr) + 7362 sizeof(struct sctp_chunkhdr), 7363 sizeof(uint32_t), 7364 (caddr_t)&initiate_tag); 7365 if ((chunk_type != SCTP_INITIATION) || 7366 (ntohl(initiate_tag) != stcb->asoc.my_vtag)) { 7367 SCTP_TCB_UNLOCK(stcb); 7368 return; 7369 } 7370 } else { 7371 SCTP_TCB_UNLOCK(stcb); 7372 return; 7373 } 7374 } 7375 type = ip6cp->ip6c_icmp6->icmp6_type; 7376 code = ip6cp->ip6c_icmp6->icmp6_code; 7377 if ((type == ICMP6_DST_UNREACH) && 7378 (code == ICMP6_DST_UNREACH_NOPORT)) { 7379 type = ICMP6_PARAM_PROB; 7380 code = ICMP6_PARAMPROB_NEXTHEADER; 7381 } 7382 sctp6_notify(inp, stcb, net, type, code, 7383 ntohl(ip6cp->ip6c_icmp6->icmp6_mtu)); 7384 } else { 7385 if ((stcb == NULL) && (inp != NULL)) { 7386 /* reduce inp's ref-count */ 7387 SCTP_INP_WLOCK(inp); 7388 SCTP_INP_DECR_REF(inp); 7389 SCTP_INP_WUNLOCK(inp); 7390 } 7391 if (stcb) { 7392 SCTP_TCB_UNLOCK(stcb); 7393 } 7394 } 7395 } 7396 #endif 7397 7398 void 7399 sctp_over_udp_stop(void) 7400 { 7401 /* 7402 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7403 * for writting! 7404 */ 7405 #ifdef INET 7406 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7407 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 7408 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 7409 } 7410 #endif 7411 #ifdef INET6 7412 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7413 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 7414 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 7415 } 7416 #endif 7417 } 7418 7419 int 7420 sctp_over_udp_start(void) 7421 { 7422 uint16_t port; 7423 int ret; 7424 #ifdef INET 7425 struct sockaddr_in sin; 7426 #endif 7427 #ifdef INET6 7428 struct sockaddr_in6 sin6; 7429 #endif 7430 /* 7431 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 7432 * for writting! 7433 */ 7434 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 7435 if (ntohs(port) == 0) { 7436 /* Must have a port set */ 7437 return (EINVAL); 7438 } 7439 #ifdef INET 7440 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 7441 /* Already running -- must stop first */ 7442 return (EALREADY); 7443 } 7444 #endif 7445 #ifdef INET6 7446 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 7447 /* Already running -- must stop first */ 7448 return (EALREADY); 7449 } 7450 #endif 7451 #ifdef INET 7452 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 7453 SOCK_DGRAM, IPPROTO_UDP, 7454 curthread->td_ucred, curthread))) { 7455 sctp_over_udp_stop(); 7456 return (ret); 7457 } 7458 /* Call the special UDP hook. */ 7459 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 7460 sctp_recv_udp_tunneled_packet, 7461 sctp_recv_icmp_tunneled_packet, 7462 NULL))) { 7463 sctp_over_udp_stop(); 7464 return (ret); 7465 } 7466 /* Ok, we have a socket, bind it to the port. 
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing.
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}

/*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
 * If all arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	if (mtu1 > 0) {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu1, min(mtu2, mtu3)));
			} else {
				return (min(mtu1, mtu2));
			}
		} else {
			if (mtu3 > 0) {
				return (min(mtu1, mtu3));
			} else {
				return (mtu1);
			}
		}
	} else {
		if (mtu2 > 0) {
			if (mtu3 > 0) {
				return (min(mtu2, mtu3));
			} else {
				return (mtu2);
			}
		} else {
			return (mtu3);
		}
	}
}

/* Record the path MTU for addr/fibnum in the host cache shared with TCP. */
void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}

/* Look up the cached path MTU for addr/fibnum; returns 0 if nothing is cached. */
uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}

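/*
 * Replace the base state of the association while preserving its substate
 * flags. Moving into one of the shutdown states also clears the
 * SHUTDOWN_PENDING substate.
 */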
void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	      (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}

/* OR a substate flag into the association state; the base state is unchanged. */
void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	     ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	     ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}