1 /*- 2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * a) Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 11 * 12 * b) Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in 14 * the documentation and/or other materials provided with the distribution. 15 * 16 * c) Neither the name of Cisco Systems, Inc. nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <netinet/sctp_pcb.h> 38 #include <netinet/sctputil.h> 39 #include <netinet/sctp_var.h> 40 #include <netinet/sctp_sysctl.h> 41 #ifdef INET6 42 #include <netinet6/sctp6_var.h> 43 #endif 44 #include <netinet/sctp_header.h> 45 #include <netinet/sctp_output.h> 46 #include <netinet/sctp_uio.h> 47 #include <netinet/sctp_timer.h> 48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */ 49 #include <netinet/sctp_auth.h> 50 #include <netinet/sctp_asconf.h> 51 #include <netinet/sctp_bsd_addr.h> 52 #include <netinet/udp.h> 53 #include <netinet/udp_var.h> 54 #include <sys/proc.h> 55 56 57 #ifndef KTR_SCTP 58 #define KTR_SCTP KTR_SUBSYS 59 #endif 60 61 extern struct sctp_cc_functions sctp_cc_functions[]; 62 extern struct sctp_ss_functions sctp_ss_functions[]; 63 64 void 65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr) 66 { 67 struct sctp_cwnd_log sctp_clog; 68 69 sctp_clog.x.sb.stcb = stcb; 70 sctp_clog.x.sb.so_sbcc = sb->sb_cc; 71 if (stcb) 72 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc; 73 else 74 sctp_clog.x.sb.stcb_sbcc = 0; 75 sctp_clog.x.sb.incr = incr; 76 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 77 SCTP_LOG_EVENT_SB, 78 from, 79 sctp_clog.x.misc.log1, 80 sctp_clog.x.misc.log2, 81 sctp_clog.x.misc.log3, 82 sctp_clog.x.misc.log4); 83 } 84 85 void 86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc) 87 { 88 struct sctp_cwnd_log sctp_clog; 89 90 sctp_clog.x.close.inp = (void *)inp; 91 sctp_clog.x.close.sctp_flags = inp->sctp_flags; 92 if (stcb) { 93 sctp_clog.x.close.stcb = (void *)stcb; 94 sctp_clog.x.close.state = (uint16_t) stcb->asoc.state; 95 } else { 96 sctp_clog.x.close.stcb = 0; 97 sctp_clog.x.close.state = 0; 98 } 99 sctp_clog.x.close.loc = loc; 100 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 101 SCTP_LOG_EVENT_CLOSE, 102 0, 103 sctp_clog.x.misc.log1, 104 sctp_clog.x.misc.log2, 105 
sctp_clog.x.misc.log3, 106 sctp_clog.x.misc.log4); 107 } 108 109 void 110 rto_logging(struct sctp_nets *net, int from) 111 { 112 struct sctp_cwnd_log sctp_clog; 113 114 memset(&sctp_clog, 0, sizeof(sctp_clog)); 115 sctp_clog.x.rto.net = (void *)net; 116 sctp_clog.x.rto.rtt = net->rtt / 1000; 117 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 118 SCTP_LOG_EVENT_RTT, 119 from, 120 sctp_clog.x.misc.log1, 121 sctp_clog.x.misc.log2, 122 sctp_clog.x.misc.log3, 123 sctp_clog.x.misc.log4); 124 } 125 126 void 127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from) 128 { 129 struct sctp_cwnd_log sctp_clog; 130 131 sctp_clog.x.strlog.stcb = stcb; 132 sctp_clog.x.strlog.n_tsn = tsn; 133 sctp_clog.x.strlog.n_sseq = sseq; 134 sctp_clog.x.strlog.e_tsn = 0; 135 sctp_clog.x.strlog.e_sseq = 0; 136 sctp_clog.x.strlog.strm = stream; 137 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 138 SCTP_LOG_EVENT_STRM, 139 from, 140 sctp_clog.x.misc.log1, 141 sctp_clog.x.misc.log2, 142 sctp_clog.x.misc.log3, 143 sctp_clog.x.misc.log4); 144 } 145 146 void 147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action) 148 { 149 struct sctp_cwnd_log sctp_clog; 150 151 sctp_clog.x.nagle.stcb = (void *)stcb; 152 sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight; 153 sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size; 154 sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue; 155 sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count; 156 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 157 SCTP_LOG_EVENT_NAGLE, 158 action, 159 sctp_clog.x.misc.log1, 160 sctp_clog.x.misc.log2, 161 sctp_clog.x.misc.log3, 162 sctp_clog.x.misc.log4); 163 } 164 165 void 166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from) 167 { 168 struct sctp_cwnd_log sctp_clog; 169 170 sctp_clog.x.sack.cumack = cumack; 171 sctp_clog.x.sack.oldcumack = old_cumack; 172 sctp_clog.x.sack.tsn = 
tsn; 173 sctp_clog.x.sack.numGaps = gaps; 174 sctp_clog.x.sack.numDups = dups; 175 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 176 SCTP_LOG_EVENT_SACK, 177 from, 178 sctp_clog.x.misc.log1, 179 sctp_clog.x.misc.log2, 180 sctp_clog.x.misc.log3, 181 sctp_clog.x.misc.log4); 182 } 183 184 void 185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from) 186 { 187 struct sctp_cwnd_log sctp_clog; 188 189 memset(&sctp_clog, 0, sizeof(sctp_clog)); 190 sctp_clog.x.map.base = map; 191 sctp_clog.x.map.cum = cum; 192 sctp_clog.x.map.high = high; 193 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 194 SCTP_LOG_EVENT_MAP, 195 from, 196 sctp_clog.x.misc.log1, 197 sctp_clog.x.misc.log2, 198 sctp_clog.x.misc.log3, 199 sctp_clog.x.misc.log4); 200 } 201 202 void 203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from) 204 { 205 struct sctp_cwnd_log sctp_clog; 206 207 memset(&sctp_clog, 0, sizeof(sctp_clog)); 208 sctp_clog.x.fr.largest_tsn = biggest_tsn; 209 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn; 210 sctp_clog.x.fr.tsn = tsn; 211 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 212 SCTP_LOG_EVENT_FR, 213 from, 214 sctp_clog.x.misc.log1, 215 sctp_clog.x.misc.log2, 216 sctp_clog.x.misc.log3, 217 sctp_clog.x.misc.log4); 218 } 219 220 void 221 sctp_log_mb(struct mbuf *m, int from) 222 { 223 struct sctp_cwnd_log sctp_clog; 224 225 sctp_clog.x.mb.mp = m; 226 sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m)); 227 sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m)); 228 sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0); 229 if (SCTP_BUF_IS_EXTENDED(m)) { 230 sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m); 231 sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m)); 232 } else { 233 sctp_clog.x.mb.ext = 0; 234 sctp_clog.x.mb.refcnt = 0; 235 } 236 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 237 SCTP_LOG_EVENT_MBUF, 238 from, 239 sctp_clog.x.misc.log1, 240 sctp_clog.x.misc.log2, 241 sctp_clog.x.misc.log3, 242 sctp_clog.x.misc.log4); 243 } 
244 245 void 246 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from) 247 { 248 struct sctp_cwnd_log sctp_clog; 249 250 if (control == NULL) { 251 SCTP_PRINTF("Gak log of NULL?\n"); 252 return; 253 } 254 sctp_clog.x.strlog.stcb = control->stcb; 255 sctp_clog.x.strlog.n_tsn = control->sinfo_tsn; 256 sctp_clog.x.strlog.n_sseq = control->sinfo_ssn; 257 sctp_clog.x.strlog.strm = control->sinfo_stream; 258 if (poschk != NULL) { 259 sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn; 260 sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn; 261 } else { 262 sctp_clog.x.strlog.e_tsn = 0; 263 sctp_clog.x.strlog.e_sseq = 0; 264 } 265 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 266 SCTP_LOG_EVENT_STRM, 267 from, 268 sctp_clog.x.misc.log1, 269 sctp_clog.x.misc.log2, 270 sctp_clog.x.misc.log3, 271 sctp_clog.x.misc.log4); 272 } 273 274 void 275 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from) 276 { 277 struct sctp_cwnd_log sctp_clog; 278 279 sctp_clog.x.cwnd.net = net; 280 if (stcb->asoc.send_queue_cnt > 255) 281 sctp_clog.x.cwnd.cnt_in_send = 255; 282 else 283 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 284 if (stcb->asoc.stream_queue_cnt > 255) 285 sctp_clog.x.cwnd.cnt_in_str = 255; 286 else 287 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 288 289 if (net) { 290 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd; 291 sctp_clog.x.cwnd.inflight = net->flight_size; 292 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack; 293 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack; 294 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack; 295 } 296 if (SCTP_CWNDLOG_PRESEND == from) { 297 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd; 298 } 299 sctp_clog.x.cwnd.cwnd_augment = augment; 300 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 301 SCTP_LOG_EVENT_CWND, 302 from, 303 sctp_clog.x.misc.log1, 304 sctp_clog.x.misc.log2, 305 sctp_clog.x.misc.log3, 306 
sctp_clog.x.misc.log4); 307 } 308 309 void 310 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from) 311 { 312 struct sctp_cwnd_log sctp_clog; 313 314 memset(&sctp_clog, 0, sizeof(sctp_clog)); 315 if (inp) { 316 sctp_clog.x.lock.sock = (void *)inp->sctp_socket; 317 318 } else { 319 sctp_clog.x.lock.sock = (void *)NULL; 320 } 321 sctp_clog.x.lock.inp = (void *)inp; 322 if (stcb) { 323 sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx); 324 } else { 325 sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN; 326 } 327 if (inp) { 328 sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx); 329 sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx); 330 } else { 331 sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN; 332 sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN; 333 } 334 sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx)); 335 if (inp && (inp->sctp_socket)) { 336 sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 337 sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 338 sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx)); 339 } else { 340 sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN; 341 sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN; 342 sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN; 343 } 344 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 345 SCTP_LOG_LOCK_EVENT, 346 from, 347 sctp_clog.x.misc.log1, 348 sctp_clog.x.misc.log2, 349 sctp_clog.x.misc.log3, 350 sctp_clog.x.misc.log4); 351 } 352 353 void 354 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from) 355 { 356 struct sctp_cwnd_log sctp_clog; 357 358 memset(&sctp_clog, 0, sizeof(sctp_clog)); 359 sctp_clog.x.cwnd.net = net; 360 sctp_clog.x.cwnd.cwnd_new_value = error; 361 sctp_clog.x.cwnd.inflight = net->flight_size; 362 sctp_clog.x.cwnd.cwnd_augment = burst; 363 if (stcb->asoc.send_queue_cnt > 255) 364 
sctp_clog.x.cwnd.cnt_in_send = 255; 365 else 366 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 367 if (stcb->asoc.stream_queue_cnt > 255) 368 sctp_clog.x.cwnd.cnt_in_str = 255; 369 else 370 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 371 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 372 SCTP_LOG_EVENT_MAXBURST, 373 from, 374 sctp_clog.x.misc.log1, 375 sctp_clog.x.misc.log2, 376 sctp_clog.x.misc.log3, 377 sctp_clog.x.misc.log4); 378 } 379 380 void 381 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead) 382 { 383 struct sctp_cwnd_log sctp_clog; 384 385 sctp_clog.x.rwnd.rwnd = peers_rwnd; 386 sctp_clog.x.rwnd.send_size = snd_size; 387 sctp_clog.x.rwnd.overhead = overhead; 388 sctp_clog.x.rwnd.new_rwnd = 0; 389 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 390 SCTP_LOG_EVENT_RWND, 391 from, 392 sctp_clog.x.misc.log1, 393 sctp_clog.x.misc.log2, 394 sctp_clog.x.misc.log3, 395 sctp_clog.x.misc.log4); 396 } 397 398 void 399 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval) 400 { 401 struct sctp_cwnd_log sctp_clog; 402 403 sctp_clog.x.rwnd.rwnd = peers_rwnd; 404 sctp_clog.x.rwnd.send_size = flight_size; 405 sctp_clog.x.rwnd.overhead = overhead; 406 sctp_clog.x.rwnd.new_rwnd = a_rwndval; 407 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 408 SCTP_LOG_EVENT_RWND, 409 from, 410 sctp_clog.x.misc.log1, 411 sctp_clog.x.misc.log2, 412 sctp_clog.x.misc.log3, 413 sctp_clog.x.misc.log4); 414 } 415 416 void 417 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt) 418 { 419 struct sctp_cwnd_log sctp_clog; 420 421 sctp_clog.x.mbcnt.total_queue_size = total_oq; 422 sctp_clog.x.mbcnt.size_change = book; 423 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q; 424 sctp_clog.x.mbcnt.mbcnt_change = mbcnt; 425 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 426 SCTP_LOG_EVENT_MBCNT, 427 from, 428 
sctp_clog.x.misc.log1, 429 sctp_clog.x.misc.log2, 430 sctp_clog.x.misc.log3, 431 sctp_clog.x.misc.log4); 432 } 433 434 void 435 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d) 436 { 437 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 438 SCTP_LOG_MISC_EVENT, 439 from, 440 a, b, c, d); 441 } 442 443 void 444 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from) 445 { 446 struct sctp_cwnd_log sctp_clog; 447 448 sctp_clog.x.wake.stcb = (void *)stcb; 449 sctp_clog.x.wake.wake_cnt = wake_cnt; 450 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count; 451 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt; 452 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt; 453 454 if (stcb->asoc.stream_queue_cnt < 0xff) 455 sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt; 456 else 457 sctp_clog.x.wake.stream_qcnt = 0xff; 458 459 if (stcb->asoc.chunks_on_out_queue < 0xff) 460 sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue; 461 else 462 sctp_clog.x.wake.chunks_on_oque = 0xff; 463 464 sctp_clog.x.wake.sctpflags = 0; 465 /* set in the defered mode stuff */ 466 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) 467 sctp_clog.x.wake.sctpflags |= 1; 468 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) 469 sctp_clog.x.wake.sctpflags |= 2; 470 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) 471 sctp_clog.x.wake.sctpflags |= 4; 472 /* what about the sb */ 473 if (stcb->sctp_socket) { 474 struct socket *so = stcb->sctp_socket; 475 476 sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff)); 477 } else { 478 sctp_clog.x.wake.sbflags = 0xff; 479 } 480 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 481 SCTP_LOG_EVENT_WAKE, 482 from, 483 sctp_clog.x.misc.log1, 484 sctp_clog.x.misc.log2, 485 sctp_clog.x.misc.log3, 486 sctp_clog.x.misc.log4); 487 } 488 489 void 490 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen) 491 { 492 struct 
sctp_cwnd_log sctp_clog; 493 494 sctp_clog.x.blk.onsb = asoc->total_output_queue_size; 495 sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt); 496 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd; 497 sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt; 498 sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue; 499 sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024); 500 sctp_clog.x.blk.sndlen = sendlen; 501 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 502 SCTP_LOG_EVENT_BLOCK, 503 from, 504 sctp_clog.x.misc.log1, 505 sctp_clog.x.misc.log2, 506 sctp_clog.x.misc.log3, 507 sctp_clog.x.misc.log4); 508 } 509 510 int 511 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED) 512 { 513 /* May need to fix this if ktrdump does not work */ 514 return (0); 515 } 516 517 #ifdef SCTP_AUDITING_ENABLED 518 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2]; 519 static int sctp_audit_indx = 0; 520 521 static 522 void 523 sctp_print_audit_report(void) 524 { 525 int i; 526 int cnt; 527 528 cnt = 0; 529 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) { 530 if ((sctp_audit_data[i][0] == 0xe0) && 531 (sctp_audit_data[i][1] == 0x01)) { 532 cnt = 0; 533 SCTP_PRINTF("\n"); 534 } else if (sctp_audit_data[i][0] == 0xf0) { 535 cnt = 0; 536 SCTP_PRINTF("\n"); 537 } else if ((sctp_audit_data[i][0] == 0xc0) && 538 (sctp_audit_data[i][1] == 0x01)) { 539 SCTP_PRINTF("\n"); 540 cnt = 0; 541 } 542 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 543 (uint32_t) sctp_audit_data[i][1]); 544 cnt++; 545 if ((cnt % 14) == 0) 546 SCTP_PRINTF("\n"); 547 } 548 for (i = 0; i < sctp_audit_indx; i++) { 549 if ((sctp_audit_data[i][0] == 0xe0) && 550 (sctp_audit_data[i][1] == 0x01)) { 551 cnt = 0; 552 SCTP_PRINTF("\n"); 553 } else if (sctp_audit_data[i][0] == 0xf0) { 554 cnt = 0; 555 SCTP_PRINTF("\n"); 556 } else if ((sctp_audit_data[i][0] == 0xc0) && 557 (sctp_audit_data[i][1] == 0x01)) { 558 
SCTP_PRINTF("\n"); 559 cnt = 0; 560 } 561 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 562 (uint32_t) sctp_audit_data[i][1]); 563 cnt++; 564 if ((cnt % 14) == 0) 565 SCTP_PRINTF("\n"); 566 } 567 SCTP_PRINTF("\n"); 568 } 569 570 void 571 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 572 struct sctp_nets *net) 573 { 574 int resend_cnt, tot_out, rep, tot_book_cnt; 575 struct sctp_nets *lnet; 576 struct sctp_tmit_chunk *chk; 577 578 sctp_audit_data[sctp_audit_indx][0] = 0xAA; 579 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from; 580 sctp_audit_indx++; 581 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 582 sctp_audit_indx = 0; 583 } 584 if (inp == NULL) { 585 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 586 sctp_audit_data[sctp_audit_indx][1] = 0x01; 587 sctp_audit_indx++; 588 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 589 sctp_audit_indx = 0; 590 } 591 return; 592 } 593 if (stcb == NULL) { 594 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 595 sctp_audit_data[sctp_audit_indx][1] = 0x02; 596 sctp_audit_indx++; 597 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 598 sctp_audit_indx = 0; 599 } 600 return; 601 } 602 sctp_audit_data[sctp_audit_indx][0] = 0xA1; 603 sctp_audit_data[sctp_audit_indx][1] = 604 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 605 sctp_audit_indx++; 606 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 607 sctp_audit_indx = 0; 608 } 609 rep = 0; 610 tot_book_cnt = 0; 611 resend_cnt = tot_out = 0; 612 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 613 if (chk->sent == SCTP_DATAGRAM_RESEND) { 614 resend_cnt++; 615 } else if (chk->sent < SCTP_DATAGRAM_RESEND) { 616 tot_out += chk->book_size; 617 tot_book_cnt++; 618 } 619 } 620 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) { 621 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 622 sctp_audit_data[sctp_audit_indx][1] = 0xA1; 623 sctp_audit_indx++; 624 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 625 sctp_audit_indx = 0; 626 } 627 SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n", 
628 resend_cnt, stcb->asoc.sent_queue_retran_cnt); 629 rep = 1; 630 stcb->asoc.sent_queue_retran_cnt = resend_cnt; 631 sctp_audit_data[sctp_audit_indx][0] = 0xA2; 632 sctp_audit_data[sctp_audit_indx][1] = 633 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 634 sctp_audit_indx++; 635 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 636 sctp_audit_indx = 0; 637 } 638 } 639 if (tot_out != stcb->asoc.total_flight) { 640 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 641 sctp_audit_data[sctp_audit_indx][1] = 0xA2; 642 sctp_audit_indx++; 643 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 644 sctp_audit_indx = 0; 645 } 646 rep = 1; 647 SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out, 648 (int)stcb->asoc.total_flight); 649 stcb->asoc.total_flight = tot_out; 650 } 651 if (tot_book_cnt != stcb->asoc.total_flight_count) { 652 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 653 sctp_audit_data[sctp_audit_indx][1] = 0xA5; 654 sctp_audit_indx++; 655 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 656 sctp_audit_indx = 0; 657 } 658 rep = 1; 659 SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt); 660 661 stcb->asoc.total_flight_count = tot_book_cnt; 662 } 663 tot_out = 0; 664 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 665 tot_out += lnet->flight_size; 666 } 667 if (tot_out != stcb->asoc.total_flight) { 668 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 669 sctp_audit_data[sctp_audit_indx][1] = 0xA3; 670 sctp_audit_indx++; 671 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 672 sctp_audit_indx = 0; 673 } 674 rep = 1; 675 SCTP_PRINTF("real flight:%d net total was %d\n", 676 stcb->asoc.total_flight, tot_out); 677 /* now corrective action */ 678 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 679 680 tot_out = 0; 681 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 682 if ((chk->whoTo == lnet) && 683 (chk->sent < SCTP_DATAGRAM_RESEND)) { 684 tot_out += chk->book_size; 685 } 686 } 687 if (lnet->flight_size != tot_out) { 688 SCTP_PRINTF("net:%p flight was %d corrected to %d\n", 689 (void *)lnet, 
lnet->flight_size, 690 tot_out); 691 lnet->flight_size = tot_out; 692 } 693 } 694 } 695 if (rep) { 696 sctp_print_audit_report(); 697 } 698 } 699 700 void 701 sctp_audit_log(uint8_t ev, uint8_t fd) 702 { 703 704 sctp_audit_data[sctp_audit_indx][0] = ev; 705 sctp_audit_data[sctp_audit_indx][1] = fd; 706 sctp_audit_indx++; 707 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 708 sctp_audit_indx = 0; 709 } 710 } 711 712 #endif 713 714 /* 715 * sctp_stop_timers_for_shutdown() should be called 716 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT 717 * state to make sure that all timers are stopped. 718 */ 719 void 720 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb) 721 { 722 struct sctp_association *asoc; 723 struct sctp_nets *net; 724 725 asoc = &stcb->asoc; 726 727 (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer); 728 (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); 729 (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer); 730 (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer); 731 (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer); 732 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 733 (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer); 734 (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer); 735 } 736 } 737 738 /* 739 * a list of sizes based on typical mtu's, used only if next hop size not 740 * returned. 741 */ 742 static uint32_t sctp_mtu_sizes[] = { 743 68, 744 296, 745 508, 746 512, 747 544, 748 576, 749 1006, 750 1492, 751 1500, 752 1536, 753 2002, 754 2048, 755 4352, 756 4464, 757 8166, 758 17914, 759 32000, 760 65535 761 }; 762 763 /* 764 * Return the largest MTU smaller than val. If there is no 765 * entry, just return val. 
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	/* Smaller than (or equal to) the smallest table entry: no previous. */
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	/* Find the first entry >= val; the one before it is the answer. */
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	/*
	 * If val exceeds every entry, i is one past the end and the
	 * largest table entry is returned.
	 */
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU larger than val. If there is no
 * entry, just return val.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}

/*
 * Refill inp's random_store by HMAC-ing its random_numbers with a
 * monotonically increasing counter, and reset the read offset.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}

/*
 * Draw the next 32-bit value from the endpoint's random store,
 * claiming a 4-byte slot lock-free via compare-and-swap and refilling
 * the store when the offset wraps.  A non-zero
 * initial_sequence_debug overrides this with a simple counter (debug
 * aid).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* Another CPU claimed the slot first: re-read and try again. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}

/*
 * Pick a verification tag for the given port pair.  Zero is never
 * returned; if check is set, the tag is also validated against the
 * time-wait vtag records and redrawn until it is usable.
 */
uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}

/*
 * Initialize a freshly allocated association from the endpoint's
 * defaults: vtags, initial TSNs, timers/limits, scope flags, the
 * per-stream output array, the TSN mapping arrays, all queues, and
 * the authentication state.  Returns 0 on success or ENOMEM (with
 * prior allocations released) on allocation failure.
 */
int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_allowed = inp->sctp_ecn_enable;
	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
	asoc->sctp_cmt_pf = (uint8_t) 0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			/* Random 20-bit label, high bit marks it as set. */
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	/* All outgoing sequence spaces start from one random initial TSN. */
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&inp->sctp_ep);
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimisitic here */
	asoc->peer_supports_pktdrop = 1;
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	/* This will need to be adjusted */
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	/* Address scope: a v6 endpoint may also carry v4 unless v6-only. */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = inp->sctp_frag_point;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->locked_on_sending = NULL;
	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	/* Plug in the endpoint's default cc and ss module vtables. */
	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    inp->sctp_ep.pre_open_stream_count;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		asoc->strmout[i].next_sequence_send = 0x0;
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->strmout[i].chunks_on_queues = 0;
		asoc->strmout[i].stream_no = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		/* Unwind the stream array allocated above. */
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		/* Unwind both earlier allocations. */
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->reasmqueue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}

/*
 * Dump both TSN mapping arrays (renegable and non-renegable) of an
 * association to the console.  Trailing all-zero bytes are trimmed so
 * the output stays short.  Debug/diagnostic aid only.
 */
void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	/* Find the last non-zero byte; entry 0 is always printed. */
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		/* 16 bytes per output row. */
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	/* Same trimming for the non-renegable array. */
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

/*
 * Grow both mapping arrays so that at least 'needed' additional bits fit.
 * Both new arrays are allocated before the old ones are freed, so on
 * failure the association's existing state is left untouched.
 * Returns 0 on success, -1 if memory could not be allocated.
 */
int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	/* 'needed' is in bits; round up to bytes and add slack. */
	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	/* Copy old contents, then swap the new arrays in. */
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}


/*
 * Walk every endpoint/association selected by the iterator 'it', invoking
 * the iterator's callbacks.  Runs with the INP-info read lock and the
 * iterator lock held; both are periodically dropped (see the
 * SCTP_ITERATOR_MAX_AT_ONCE pause below) to let other threads in.
 * Frees 'it' when the walk completes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* On the first pass the inp was already locked above. */
	if (first_in) {
		first_in =
0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/* Skip endpoints whose flags/features do not match the filter. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * The stcb refcount keeps the assoc alive while all
			 * locks are dropped and reacquired below.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

/*
 * Drain the global iterator work queue.  Entered with the iterator WQ
 * lock held; the lock is dropped around each individual iterator run and
 * reacquired afterwards.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		/* sctp_iterator_work() frees 'it' when done. */
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
1353 } 1354 sctp_it_ctl.iterator_running = 0; 1355 return; 1356 } 1357 1358 1359 static void 1360 sctp_handle_addr_wq(void) 1361 { 1362 /* deal with the ADDR wq from the rtsock calls */ 1363 struct sctp_laddr *wi, *nwi; 1364 struct sctp_asconf_iterator *asc; 1365 1366 SCTP_MALLOC(asc, struct sctp_asconf_iterator *, 1367 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT); 1368 if (asc == NULL) { 1369 /* Try later, no memory */ 1370 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 1371 (struct sctp_inpcb *)NULL, 1372 (struct sctp_tcb *)NULL, 1373 (struct sctp_nets *)NULL); 1374 return; 1375 } 1376 LIST_INIT(&asc->list_of_work); 1377 asc->cnt = 0; 1378 1379 SCTP_WQ_ADDR_LOCK(); 1380 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) { 1381 LIST_REMOVE(wi, sctp_nxt_addr); 1382 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr); 1383 asc->cnt++; 1384 } 1385 SCTP_WQ_ADDR_UNLOCK(); 1386 1387 if (asc->cnt == 0) { 1388 SCTP_FREE(asc, SCTP_M_ASC_IT); 1389 } else { 1390 (void)sctp_initiate_iterator(sctp_asconf_iterator_ep, 1391 sctp_asconf_iterator_stcb, 1392 NULL, /* No ep end for boundall */ 1393 SCTP_PCB_FLAGS_BOUNDALL, 1394 SCTP_PCB_ANY_FEATURES, 1395 SCTP_ASOC_ANY_STATE, 1396 (void *)asc, 0, 1397 sctp_asconf_iterator_end, NULL, 0); 1398 } 1399 } 1400 1401 void 1402 sctp_timeout_handler(void *t) 1403 { 1404 struct sctp_inpcb *inp; 1405 struct sctp_tcb *stcb; 1406 struct sctp_nets *net; 1407 struct sctp_timer *tmr; 1408 1409 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1410 struct socket *so; 1411 1412 #endif 1413 int did_output, type; 1414 1415 tmr = (struct sctp_timer *)t; 1416 inp = (struct sctp_inpcb *)tmr->ep; 1417 stcb = (struct sctp_tcb *)tmr->tcb; 1418 net = (struct sctp_nets *)tmr->net; 1419 CURVNET_SET((struct vnet *)tmr->vnet); 1420 did_output = 1; 1421 1422 #ifdef SCTP_AUDITING_ENABLED 1423 sctp_audit_log(0xF0, (uint8_t) tmr->type); 1424 sctp_auditing(3, inp, stcb, net); 1425 #endif 1426 1427 /* sanity checks... 
 */
	/* tmr->self is cleared by sctp_timer_stop(); a mismatch means stale. */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * A closed socket only matters for timer types that may
		 * still legitimately run during endpoint teardown.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Hold a reference while we validate the association. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* Lock the TCB, then drop the temporary reference. */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it.
 It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}

/*
 * Arm the timer of the given type for the endpoint/association/destination.
 * Selects the per-type timer structure and timeout value; if the timer is
 * already pending it is left unchanged.  The TCB lock must be held when
 * stcb is non-NULL (asserted below).
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t to_ticks;
	struct sctp_timer *tmr;

	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			/* RTO of 0 means no measurement yet; use the initial RTO. */
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp
		 * ususually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint32_t jitter;

			if ((net->dest_state & SCTP_ADDR_NOHB) &&
			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				return;
			}
			if (net->RTO == 0) {
				to_ticks = stcb->asoc.initial_rto;
			} else {
				to_ticks = net->RTO;
			}
			/* Jitter the delay by +/- 50% of the RTO. */
			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
			jitter = rndval % to_ticks;
			if (jitter >= (to_ticks >> 1)) {
				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
			} else {
				to_ticks = to_ticks - jitter;
			}
			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    !(net->dest_state & SCTP_ADDR_PF)) {
				to_ticks += net->heart_beat_delay;
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &net->hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was compelete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here ususually about 60
		 * minutes.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU ususually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (inp == NULL)) {
			return;
		}
		if (net == NULL) {
			return;
		}
		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		return;
		break;
	}
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running.
 if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	/* 'self' validates the timer in sctp_timeout_handler(). */
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}

/*
 * Stop the timer of the given type.  Resolves the per-type timer
 * structure exactly as sctp_timer_start() does; if the structure is
 * shared between timer types and currently runs as a different type,
 * the timer is left running.  'from' records the caller's location for
 * debugging.  The TCB lock must be held when stcb is non-NULL.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use.
Cookie timer 2274 * per chance with the SEND timer. We therefore are NOT 2275 * running the timer that the caller wants stopped. So just 2276 * return. 2277 */ 2278 return; 2279 } 2280 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2281 stcb->asoc.num_send_timers_up--; 2282 if (stcb->asoc.num_send_timers_up < 0) { 2283 stcb->asoc.num_send_timers_up = 0; 2284 } 2285 } 2286 tmr->self = NULL; 2287 tmr->stopped_from = from; 2288 (void)SCTP_OS_TIMER_STOP(&tmr->timer); 2289 return; 2290 } 2291 2292 uint32_t 2293 sctp_calculate_len(struct mbuf *m) 2294 { 2295 uint32_t tlen = 0; 2296 struct mbuf *at; 2297 2298 at = m; 2299 while (at) { 2300 tlen += SCTP_BUF_LEN(at); 2301 at = SCTP_BUF_NEXT(at); 2302 } 2303 return (tlen); 2304 } 2305 2306 void 2307 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2308 struct sctp_association *asoc, uint32_t mtu) 2309 { 2310 /* 2311 * Reset the P-MTU size on this association, this involves changing 2312 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2313 * allow the DF flag to be cleared. 
2314 */ 2315 struct sctp_tmit_chunk *chk; 2316 unsigned int eff_mtu, ovh; 2317 2318 asoc->smallest_mtu = mtu; 2319 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2320 ovh = SCTP_MIN_OVERHEAD; 2321 } else { 2322 ovh = SCTP_MIN_V4_OVERHEAD; 2323 } 2324 eff_mtu = mtu - ovh; 2325 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2326 if (chk->send_size > eff_mtu) { 2327 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2328 } 2329 } 2330 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2331 if (chk->send_size > eff_mtu) { 2332 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2333 } 2334 } 2335 } 2336 2337 2338 /* 2339 * given an association and starting time of the current RTT period return 2340 * RTO in number of msecs net should point to the current network 2341 */ 2342 2343 uint32_t 2344 sctp_calculate_rto(struct sctp_tcb *stcb, 2345 struct sctp_association *asoc, 2346 struct sctp_nets *net, 2347 struct timeval *told, 2348 int safe, int rtt_from_sack) 2349 { 2350 /*- 2351 * given an association and the starting time of the current RTT 2352 * period (in value1/value2) return RTO in number of msecs. 2353 */ 2354 int32_t rtt; /* RTT in ms */ 2355 uint32_t new_rto; 2356 int first_measure = 0; 2357 struct timeval now, then, *old; 2358 2359 /* Copy it out for sparc64 */ 2360 if (safe == sctp_align_unsafe_makecopy) { 2361 old = &then; 2362 memcpy(&then, told, sizeof(struct timeval)); 2363 } else if (safe == sctp_align_safe_nocopy) { 2364 old = told; 2365 } else { 2366 /* error */ 2367 SCTP_PRINTF("Huh, bad rto calc call\n"); 2368 return (0); 2369 } 2370 /************************/ 2371 /* 1. 
calculate new RTT */ 2372 /************************/ 2373 /* get the current time */ 2374 if (stcb->asoc.use_precise_time) { 2375 (void)SCTP_GETPTIME_TIMEVAL(&now); 2376 } else { 2377 (void)SCTP_GETTIME_TIMEVAL(&now); 2378 } 2379 timevalsub(&now, old); 2380 /* store the current RTT in us */ 2381 net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec + 2382 (uint64_t) now.tv_usec; 2383 2384 /* computer rtt in ms */ 2385 rtt = net->rtt / 1000; 2386 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2387 /* 2388 * Tell the CC module that a new update has just occurred 2389 * from a sack 2390 */ 2391 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2392 } 2393 /* 2394 * Do we need to determine the lan? We do this only on sacks i.e. 2395 * RTT being determined from data not non-data (HB/INIT->INITACK). 2396 */ 2397 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2398 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2399 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2400 net->lan_type = SCTP_LAN_INTERNET; 2401 } else { 2402 net->lan_type = SCTP_LAN_LOCAL; 2403 } 2404 } 2405 /***************************/ 2406 /* 2. update RTTVAR & SRTT */ 2407 /***************************/ 2408 /*- 2409 * Compute the scaled average lastsa and the 2410 * scaled variance lastsv as described in van Jacobson 2411 * Paper "Congestion Avoidance and Control", Annex A. 
2412 * 2413 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2414 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar 2415 */ 2416 if (net->RTO_measured) { 2417 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2418 net->lastsa += rtt; 2419 if (rtt < 0) { 2420 rtt = -rtt; 2421 } 2422 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 2423 net->lastsv += rtt; 2424 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2425 rto_logging(net, SCTP_LOG_RTTVAR); 2426 } 2427 } else { 2428 /* First RTO measurment */ 2429 net->RTO_measured = 1; 2430 first_measure = 1; 2431 net->lastsa = rtt << SCTP_RTT_SHIFT; 2432 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 2433 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2434 rto_logging(net, SCTP_LOG_INITIAL_RTT); 2435 } 2436 } 2437 if (net->lastsv == 0) { 2438 net->lastsv = SCTP_CLOCK_GRANULARITY; 2439 } 2440 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 2441 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 2442 (stcb->asoc.sat_network_lockout == 0)) { 2443 stcb->asoc.sat_network = 1; 2444 } else if ((!first_measure) && stcb->asoc.sat_network) { 2445 stcb->asoc.sat_network = 0; 2446 stcb->asoc.sat_network_lockout = 1; 2447 } 2448 /* bound it, per C6/C7 in Section 5.3.1 */ 2449 if (new_rto < stcb->asoc.minrto) { 2450 new_rto = stcb->asoc.minrto; 2451 } 2452 if (new_rto > stcb->asoc.maxrto) { 2453 new_rto = stcb->asoc.maxrto; 2454 } 2455 /* we are now returning the RTO */ 2456 return (new_rto); 2457 } 2458 2459 /* 2460 * return a pointer to a contiguous piece of data from the given mbuf chain 2461 * starting at 'off' for 'len' bytes. If the desired piece spans more than 2462 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 2463 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 
2464 */ 2465 caddr_t 2466 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr) 2467 { 2468 uint32_t count; 2469 uint8_t *ptr; 2470 2471 ptr = in_ptr; 2472 if ((off < 0) || (len <= 0)) 2473 return (NULL); 2474 2475 /* find the desired start location */ 2476 while ((m != NULL) && (off > 0)) { 2477 if (off < SCTP_BUF_LEN(m)) 2478 break; 2479 off -= SCTP_BUF_LEN(m); 2480 m = SCTP_BUF_NEXT(m); 2481 } 2482 if (m == NULL) 2483 return (NULL); 2484 2485 /* is the current mbuf large enough (eg. contiguous)? */ 2486 if ((SCTP_BUF_LEN(m) - off) >= len) { 2487 return (mtod(m, caddr_t)+off); 2488 } else { 2489 /* else, it spans more than one mbuf, so save a temp copy... */ 2490 while ((m != NULL) && (len > 0)) { 2491 count = min(SCTP_BUF_LEN(m) - off, len); 2492 bcopy(mtod(m, caddr_t)+off, ptr, count); 2493 len -= count; 2494 ptr += count; 2495 off = 0; 2496 m = SCTP_BUF_NEXT(m); 2497 } 2498 if ((m == NULL) && (len > 0)) 2499 return (NULL); 2500 else 2501 return ((caddr_t)in_ptr); 2502 } 2503 } 2504 2505 2506 2507 struct sctp_paramhdr * 2508 sctp_get_next_param(struct mbuf *m, 2509 int offset, 2510 struct sctp_paramhdr *pull, 2511 int pull_limit) 2512 { 2513 /* This just provides a typed signature to Peter's Pull routine */ 2514 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 2515 (uint8_t *) pull)); 2516 } 2517 2518 2519 int 2520 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 2521 { 2522 /* 2523 * add padlen bytes of 0 filled padding to the end of the mbuf. If 2524 * padlen is > 3 this routine will fail. 2525 */ 2526 uint8_t *dp; 2527 int i; 2528 2529 if (padlen > 3) { 2530 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 2531 return (ENOBUFS); 2532 } 2533 if (padlen <= M_TRAILINGSPACE(m)) { 2534 /* 2535 * The easy way. 
We hope the majority of the time we hit 2536 * here :) 2537 */ 2538 dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 2539 SCTP_BUF_LEN(m) += padlen; 2540 } else { 2541 /* Hard way we must grow the mbuf */ 2542 struct mbuf *tmp; 2543 2544 tmp = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 2545 if (tmp == NULL) { 2546 /* Out of space GAK! we are in big trouble. */ 2547 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 2548 return (ENOBUFS); 2549 } 2550 /* setup and insert in middle */ 2551 SCTP_BUF_LEN(tmp) = padlen; 2552 SCTP_BUF_NEXT(tmp) = NULL; 2553 SCTP_BUF_NEXT(m) = tmp; 2554 dp = mtod(tmp, uint8_t *); 2555 } 2556 /* zero out the pad */ 2557 for (i = 0; i < padlen; i++) { 2558 *dp = 0; 2559 dp++; 2560 } 2561 return (0); 2562 } 2563 2564 int 2565 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 2566 { 2567 /* find the last mbuf in chain and pad it */ 2568 struct mbuf *m_at; 2569 2570 if (last_mbuf) { 2571 return (sctp_add_pad_tombuf(last_mbuf, padval)); 2572 } else { 2573 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 2574 if (SCTP_BUF_NEXT(m_at) == NULL) { 2575 return (sctp_add_pad_tombuf(m_at, padval)); 2576 } 2577 } 2578 } 2579 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 2580 return (EFAULT); 2581 } 2582 2583 static void 2584 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 2585 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked 2586 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 2587 SCTP_UNUSED 2588 #endif 2589 ) 2590 { 2591 struct mbuf *m_notify; 2592 struct sctp_assoc_change *sac; 2593 struct sctp_queued_to_read *control; 2594 size_t notif_len, abort_len; 2595 unsigned int i; 2596 2597 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2598 struct socket *so; 2599 2600 #endif 2601 2602 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 2603 notif_len = sizeof(struct 
sctp_assoc_change); 2604 if (abort != NULL) { 2605 abort_len = ntohs(abort->ch.chunk_length); 2606 } else { 2607 abort_len = 0; 2608 } 2609 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 2610 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 2611 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 2612 notif_len += abort_len; 2613 } 2614 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 2615 if (m_notify == NULL) { 2616 /* Retry with smaller value. */ 2617 notif_len = sizeof(struct sctp_assoc_change); 2618 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 2619 if (m_notify == NULL) { 2620 goto set_error; 2621 } 2622 } 2623 SCTP_BUF_NEXT(m_notify) = NULL; 2624 sac = mtod(m_notify, struct sctp_assoc_change *); 2625 sac->sac_type = SCTP_ASSOC_CHANGE; 2626 sac->sac_flags = 0; 2627 sac->sac_length = sizeof(struct sctp_assoc_change); 2628 sac->sac_state = state; 2629 sac->sac_error = error; 2630 /* XXX verify these stream counts */ 2631 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 2632 sac->sac_inbound_streams = stcb->asoc.streamincnt; 2633 sac->sac_assoc_id = sctp_get_associd(stcb); 2634 if (notif_len > sizeof(struct sctp_assoc_change)) { 2635 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 2636 i = 0; 2637 if (stcb->asoc.peer_supports_prsctp) { 2638 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 2639 } 2640 if (stcb->asoc.peer_supports_auth) { 2641 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 2642 } 2643 if (stcb->asoc.peer_supports_asconf) { 2644 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 2645 } 2646 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 2647 if (stcb->asoc.peer_supports_strreset) { 2648 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 2649 } 2650 sac->sac_length += i; 2651 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 2652 memcpy(sac->sac_info, abort, abort_len); 2653 sac->sac_length += abort_len; 2654 } 2655 } 2656 SCTP_BUF_LEN(m_notify) = 
sac->sac_length; 2657 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2658 0, 0, stcb->asoc.context, 0, 0, 0, 2659 m_notify); 2660 if (control != NULL) { 2661 control->length = SCTP_BUF_LEN(m_notify); 2662 /* not that we need this */ 2663 control->tail_mbuf = m_notify; 2664 control->spec_flags = M_NOTIFICATION; 2665 sctp_add_to_readq(stcb->sctp_ep, stcb, 2666 control, 2667 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 2668 so_locked); 2669 } else { 2670 sctp_m_freem(m_notify); 2671 } 2672 } 2673 /* 2674 * For 1-to-1 style sockets, we send up and error when an ABORT 2675 * comes in. 2676 */ 2677 set_error: 2678 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2679 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2680 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 2681 SOCK_LOCK(stcb->sctp_socket); 2682 if (from_peer) { 2683 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) { 2684 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 2685 stcb->sctp_socket->so_error = ECONNREFUSED; 2686 } else { 2687 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 2688 stcb->sctp_socket->so_error = ECONNRESET; 2689 } 2690 } else { 2691 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) || 2692 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) { 2693 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); 2694 stcb->sctp_socket->so_error = ETIMEDOUT; 2695 } else { 2696 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 2697 stcb->sctp_socket->so_error = ECONNABORTED; 2698 } 2699 } 2700 } 2701 /* Wake ANY sleepers */ 2702 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2703 so = SCTP_INP_SO(stcb->sctp_ep); 2704 if (!so_locked) { 2705 atomic_add_int(&stcb->asoc.refcnt, 1); 2706 SCTP_TCB_UNLOCK(stcb); 2707 SCTP_SOCKET_LOCK(so, 1); 2708 SCTP_TCB_LOCK(stcb); 2709 atomic_subtract_int(&stcb->asoc.refcnt, 1); 
2710 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 2711 SCTP_SOCKET_UNLOCK(so, 1); 2712 return; 2713 } 2714 } 2715 #endif 2716 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2717 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2718 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 2719 socantrcvmore_locked(stcb->sctp_socket); 2720 } 2721 sorwakeup(stcb->sctp_socket); 2722 sowwakeup(stcb->sctp_socket); 2723 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2724 if (!so_locked) { 2725 SCTP_SOCKET_UNLOCK(so, 1); 2726 } 2727 #endif 2728 } 2729 2730 static void 2731 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 2732 struct sockaddr *sa, uint32_t error) 2733 { 2734 struct mbuf *m_notify; 2735 struct sctp_paddr_change *spc; 2736 struct sctp_queued_to_read *control; 2737 2738 if ((stcb == NULL) || 2739 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 2740 /* event not enabled */ 2741 return; 2742 } 2743 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 2744 if (m_notify == NULL) 2745 return; 2746 SCTP_BUF_LEN(m_notify) = 0; 2747 spc = mtod(m_notify, struct sctp_paddr_change *); 2748 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 2749 spc->spc_flags = 0; 2750 spc->spc_length = sizeof(struct sctp_paddr_change); 2751 switch (sa->sa_family) { 2752 #ifdef INET 2753 case AF_INET: 2754 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 2755 break; 2756 #endif 2757 #ifdef INET6 2758 case AF_INET6: 2759 { 2760 struct sockaddr_in6 *sin6; 2761 2762 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 2763 2764 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 2765 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 2766 if (sin6->sin6_scope_id == 0) { 2767 /* recover scope_id for user */ 2768 (void)sa6_recoverscope(sin6); 2769 } else { 2770 /* clear embedded scope_id for user */ 2771 in6_clearscope(&sin6->sin6_addr); 2772 } 2773 } 2774 break; 
2775 } 2776 #endif 2777 default: 2778 /* TSNH */ 2779 break; 2780 } 2781 spc->spc_state = state; 2782 spc->spc_error = error; 2783 spc->spc_assoc_id = sctp_get_associd(stcb); 2784 2785 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 2786 SCTP_BUF_NEXT(m_notify) = NULL; 2787 2788 /* append to socket */ 2789 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2790 0, 0, stcb->asoc.context, 0, 0, 0, 2791 m_notify); 2792 if (control == NULL) { 2793 /* no memory */ 2794 sctp_m_freem(m_notify); 2795 return; 2796 } 2797 control->length = SCTP_BUF_LEN(m_notify); 2798 control->spec_flags = M_NOTIFICATION; 2799 /* not that we need this */ 2800 control->tail_mbuf = m_notify; 2801 sctp_add_to_readq(stcb->sctp_ep, stcb, 2802 control, 2803 &stcb->sctp_socket->so_rcv, 1, 2804 SCTP_READ_LOCK_NOT_HELD, 2805 SCTP_SO_NOT_LOCKED); 2806 } 2807 2808 2809 static void 2810 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 2811 struct sctp_tmit_chunk *chk, int so_locked 2812 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 2813 SCTP_UNUSED 2814 #endif 2815 ) 2816 { 2817 struct mbuf *m_notify; 2818 struct sctp_send_failed *ssf; 2819 struct sctp_send_failed_event *ssfe; 2820 struct sctp_queued_to_read *control; 2821 int length; 2822 2823 if ((stcb == NULL) || 2824 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 2825 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 2826 /* event not enabled */ 2827 return; 2828 } 2829 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 2830 length = sizeof(struct sctp_send_failed_event); 2831 } else { 2832 length = sizeof(struct sctp_send_failed); 2833 } 2834 m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA); 2835 if (m_notify == NULL) 2836 /* no space left */ 2837 return; 2838 length += chk->send_size; 2839 length -= sizeof(struct sctp_data_chunk); 2840 SCTP_BUF_LEN(m_notify) 
= 0; 2841 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 2842 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 2843 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 2844 if (sent) { 2845 ssfe->ssfe_flags = SCTP_DATA_SENT; 2846 } else { 2847 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 2848 } 2849 ssfe->ssfe_length = length; 2850 ssfe->ssfe_error = error; 2851 /* not exactly what the user sent in, but should be close :) */ 2852 bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info)); 2853 ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number; 2854 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 2855 ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype; 2856 ssfe->ssfe_info.snd_context = chk->rec.data.context; 2857 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 2858 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 2859 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event); 2860 } else { 2861 ssf = mtod(m_notify, struct sctp_send_failed *); 2862 ssf->ssf_type = SCTP_SEND_FAILED; 2863 if (sent) { 2864 ssf->ssf_flags = SCTP_DATA_SENT; 2865 } else { 2866 ssf->ssf_flags = SCTP_DATA_UNSENT; 2867 } 2868 ssf->ssf_length = length; 2869 ssf->ssf_error = error; 2870 /* not exactly what the user sent in, but should be close :) */ 2871 bzero(&ssf->ssf_info, sizeof(ssf->ssf_info)); 2872 ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number; 2873 ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq; 2874 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 2875 ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype; 2876 ssf->ssf_info.sinfo_context = chk->rec.data.context; 2877 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2878 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2879 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed); 2880 } 2881 if (chk->data) { 2882 /* 2883 * trim off the sctp chunk header(it should be there) 2884 */ 2885 if (chk->send_size >= sizeof(struct sctp_data_chunk)) { 2886 m_adj(chk->data, sizeof(struct 
sctp_data_chunk)); 2887 sctp_mbuf_crush(chk->data); 2888 chk->send_size -= sizeof(struct sctp_data_chunk); 2889 } 2890 } 2891 SCTP_BUF_NEXT(m_notify) = chk->data; 2892 /* Steal off the mbuf */ 2893 chk->data = NULL; 2894 /* 2895 * For this case, we check the actual socket buffer, since the assoc 2896 * is going away we don't want to overfill the socket buffer for a 2897 * non-reader 2898 */ 2899 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 2900 sctp_m_freem(m_notify); 2901 return; 2902 } 2903 /* append to socket */ 2904 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2905 0, 0, stcb->asoc.context, 0, 0, 0, 2906 m_notify); 2907 if (control == NULL) { 2908 /* no memory */ 2909 sctp_m_freem(m_notify); 2910 return; 2911 } 2912 control->spec_flags = M_NOTIFICATION; 2913 sctp_add_to_readq(stcb->sctp_ep, stcb, 2914 control, 2915 &stcb->sctp_socket->so_rcv, 1, 2916 SCTP_READ_LOCK_NOT_HELD, 2917 so_locked); 2918 } 2919 2920 2921 static void 2922 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 2923 struct sctp_stream_queue_pending *sp, int so_locked 2924 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 2925 SCTP_UNUSED 2926 #endif 2927 ) 2928 { 2929 struct mbuf *m_notify; 2930 struct sctp_send_failed *ssf; 2931 struct sctp_send_failed_event *ssfe; 2932 struct sctp_queued_to_read *control; 2933 int length; 2934 2935 if ((stcb == NULL) || 2936 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 2937 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 2938 /* event not enabled */ 2939 return; 2940 } 2941 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 2942 length = sizeof(struct sctp_send_failed_event); 2943 } else { 2944 length = sizeof(struct sctp_send_failed); 2945 } 2946 m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA); 2947 if (m_notify == NULL) { 2948 /* no space left */ 
2949 return; 2950 } 2951 length += sp->length; 2952 SCTP_BUF_LEN(m_notify) = 0; 2953 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 2954 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 2955 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 2956 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 2957 ssfe->ssfe_length = length; 2958 ssfe->ssfe_error = error; 2959 /* not exactly what the user sent in, but should be close :) */ 2960 bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info)); 2961 ssfe->ssfe_info.snd_sid = sp->stream; 2962 if (sp->some_taken) { 2963 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 2964 } else { 2965 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 2966 } 2967 ssfe->ssfe_info.snd_ppid = sp->ppid; 2968 ssfe->ssfe_info.snd_context = sp->context; 2969 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 2970 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 2971 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event); 2972 } else { 2973 ssf = mtod(m_notify, struct sctp_send_failed *); 2974 ssf->ssf_type = SCTP_SEND_FAILED; 2975 ssf->ssf_flags = SCTP_DATA_UNSENT; 2976 ssf->ssf_length = length; 2977 ssf->ssf_error = error; 2978 /* not exactly what the user sent in, but should be close :) */ 2979 bzero(&ssf->ssf_info, sizeof(ssf->ssf_info)); 2980 ssf->ssf_info.sinfo_stream = sp->stream; 2981 ssf->ssf_info.sinfo_ssn = 0; 2982 if (sp->some_taken) { 2983 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 2984 } else { 2985 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 2986 } 2987 ssf->ssf_info.sinfo_ppid = sp->ppid; 2988 ssf->ssf_info.sinfo_context = sp->context; 2989 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2990 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2991 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed); 2992 } 2993 SCTP_BUF_NEXT(m_notify) = sp->data; 2994 2995 /* Steal off the mbuf */ 2996 sp->data = NULL; 2997 /* 2998 * For this case, we check the actual socket buffer, since the assoc 2999 * is 
going away we don't want to overfill the socket buffer for a 3000 * non-reader 3001 */ 3002 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3003 sctp_m_freem(m_notify); 3004 return; 3005 } 3006 /* append to socket */ 3007 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3008 0, 0, stcb->asoc.context, 0, 0, 0, 3009 m_notify); 3010 if (control == NULL) { 3011 /* no memory */ 3012 sctp_m_freem(m_notify); 3013 return; 3014 } 3015 control->spec_flags = M_NOTIFICATION; 3016 sctp_add_to_readq(stcb->sctp_ep, stcb, 3017 control, 3018 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3019 } 3020 3021 3022 3023 static void 3024 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3025 { 3026 struct mbuf *m_notify; 3027 struct sctp_adaptation_event *sai; 3028 struct sctp_queued_to_read *control; 3029 3030 if ((stcb == NULL) || 3031 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3032 /* event not enabled */ 3033 return; 3034 } 3035 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3036 if (m_notify == NULL) 3037 /* no space left */ 3038 return; 3039 SCTP_BUF_LEN(m_notify) = 0; 3040 sai = mtod(m_notify, struct sctp_adaptation_event *); 3041 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3042 sai->sai_flags = 0; 3043 sai->sai_length = sizeof(struct sctp_adaptation_event); 3044 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3045 sai->sai_assoc_id = sctp_get_associd(stcb); 3046 3047 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3048 SCTP_BUF_NEXT(m_notify) = NULL; 3049 3050 /* append to socket */ 3051 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3052 0, 0, stcb->asoc.context, 0, 0, 0, 3053 m_notify); 3054 if (control == NULL) { 3055 /* no memory */ 3056 sctp_m_freem(m_notify); 3057 return; 3058 } 3059 control->length = SCTP_BUF_LEN(m_notify); 3060 control->spec_flags = M_NOTIFICATION; 
3061 /* not that we need this */ 3062 control->tail_mbuf = m_notify; 3063 sctp_add_to_readq(stcb->sctp_ep, stcb, 3064 control, 3065 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3066 } 3067 3068 /* This always must be called with the read-queue LOCKED in the INP */ 3069 static void 3070 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3071 uint32_t val, int so_locked 3072 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3073 SCTP_UNUSED 3074 #endif 3075 ) 3076 { 3077 struct mbuf *m_notify; 3078 struct sctp_pdapi_event *pdapi; 3079 struct sctp_queued_to_read *control; 3080 struct sockbuf *sb; 3081 3082 if ((stcb == NULL) || 3083 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3084 /* event not enabled */ 3085 return; 3086 } 3087 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3088 return; 3089 } 3090 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3091 if (m_notify == NULL) 3092 /* no space left */ 3093 return; 3094 SCTP_BUF_LEN(m_notify) = 0; 3095 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3096 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3097 pdapi->pdapi_flags = 0; 3098 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3099 pdapi->pdapi_indication = error; 3100 pdapi->pdapi_stream = (val >> 16); 3101 pdapi->pdapi_seq = (val & 0x0000ffff); 3102 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3103 3104 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3105 SCTP_BUF_NEXT(m_notify) = NULL; 3106 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3107 0, 0, stcb->asoc.context, 0, 0, 0, 3108 m_notify); 3109 if (control == NULL) { 3110 /* no memory */ 3111 sctp_m_freem(m_notify); 3112 return; 3113 } 3114 control->spec_flags = M_NOTIFICATION; 3115 control->length = SCTP_BUF_LEN(m_notify); 3116 /* not that we need this */ 3117 control->tail_mbuf = m_notify; 3118 
	/*
	 * NOTE(review): this is the tail of a notification-append helper whose
	 * head lies above this chunk; it links 'control' onto the endpoint
	 * read queue and wakes any reader.
	 */
	control->held_length = 0;
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* charge the notification mbuf against the socket receive buffer */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	if (stcb->asoc.control_pdapi)
		/* keep ordering relative to an in-progress partial delivery */
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock-order dance: pin the TCB with a refcount,
			 * drop the TCB lock, take the socket lock, then
			 * re-take the TCB lock.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}

/*
 * Queue an SCTP_SHUTDOWN_EVENT notification on the owning socket's receive
 * queue.  For one-to-one style (TCP model) sockets and connected sockets in
 * the TCP pool the socket is also marked unable to send more data.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/* pin the TCB, swap TCB lock for socket lock (lock order) */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* fill in the sctp_shutdown_event payload */
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}

/*
 * Queue an SCTP_SENDER_DRY_EVENT notification if the application enabled
 * it.  'so_locked' tells sctp_add_to_readq() whether the caller already
 * holds the socket lock (used on Apple / SO-lock-testing builds).
 */
static void
sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_sender_dry_event *event;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	/* fill in the sctp_sender_dry_event payload */
	event = mtod(m_notify, struct sctp_sender_dry_event *);
	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
	event->sender_dry_flags = 0;
	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
	event->sender_dry_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}


/*
 * Queue an SCTP_STREAM_CHANGE_EVENT notification carrying the new inbound
 * ('numberin') and outbound ('numberout') stream counts; 'flag' supplies
 * the strchange_flags bits.
 */
void
sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_change_event *stradd;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
		/* event not enabled */
		return;
	}
	if
 ((stcb->asoc.peer_req_out) && flag) {
		/* Peer made the request, don't tell the local user */
		stcb->asoc.peer_req_out = 0;
		return;
	}
	stcb->asoc.peer_req_out = 0;
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	len = sizeof(struct sctp_stream_change_event);
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	/* fill in the sctp_stream_change_event payload */
	stradd = mtod(m_notify, struct sctp_stream_change_event *);
	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
	stradd->strchange_flags = flag;
	stradd->strchange_length = len;
	stradd->strchange_assoc_id = sctp_get_associd(stcb);
	stradd->strchange_instrms = numberin;
	stradd->strchange_outstrms = numberout;
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}

/*
 * Queue an SCTP_ASSOC_RESET_EVENT notification reporting the local
 * ('sending_tsn') and remote ('recv_tsn') TSNs after an association
 * (TSN) reset; 'flag' supplies the assocreset_flags bits.
 */
void
sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_assoc_reset_event *strasoc;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	len = sizeof(struct sctp_assoc_reset_event);
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	/* fill in the sctp_assoc_reset_event payload */
	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
	strasoc->assocreset_flags = flag;
	strasoc->assocreset_length = len;
	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
	strasoc->assocreset_local_tsn = sending_tsn;
	strasoc->assocreset_remote_tsn = recv_tsn;
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}



/*
 * Queue an SCTP_STREAM_RESET_EVENT notification listing the affected
 * stream ids.  'list' entries arrive in network byte order and are
 * converted with ntohs() before being placed in the event.
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	/* fill in the sctp_stream_reset_event payload */
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	strreset->strreset_flags = flag;
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		for (i = 0; i < number_entries; i++) {
			strreset->strreset_stream_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}


/*
 * Queue an SCTP_REMOTE_ERROR notification for a received ERROR chunk,
 * copying the offending chunk into the event payload when an mbuf large
 * enough could be allocated (otherwise only the fixed header is sent).
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	size_t notif_len, chunk_len;

	if ((stcb == NULL) ||
sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
	} else {
		chunk_len = 0;
	}
	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* fill in the sctp_remote_error payload */
	sre = mtod(m_notify, struct sctp_remote_error *);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	if (notif_len > sizeof(struct sctp_remote_error)) {
		/* big allocation succeeded: append the raw ERROR chunk */
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		/* not that we need this */
		control->tail_mbuf = m_notify;
		control->spec_flags = M_NOTIFICATION;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		sctp_m_freem(m_notify);
	}
}


/*
 * Central dispatcher for notifications to the ULP: maps a SCTP_NOTIFY_*
 * code to the matching sctp_notify_*() helper.  Drops everything when the
 * socket is gone/closed or can no longer receive, and suppresses interface
 * events while the association is still in COOKIE_WAIT/COOKIE_ECHOED.
 * 'data' is a per-notification payload pointer (net, chunk, sp, stream
 * list, key id, ... depending on 'notification').
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		return;
	}
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		if (stcb->asoc.assoc_up_sent == 0) {
			/* COMM_UP only once per association */
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* handshake states map to CANT_STR_ASSOC, otherwise COMM_LOST */
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* 'data' carries the key id squeezed into a pointer */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}

/*
 * Drop every chunk still queued for transmission (sent queue, send queue
 * and each stream's output queue), notifying the ULP of each failed
 * datagram.  'holds_lock' is nonzero when the caller already owns the TCB
 * send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if
 (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}

/*
 * Notify the ULP that the association was aborted, after flushing all
 * outbound data.  'from_peer' selects the remote (REM) vs. local (LOC)
 * abort notification; 'abort' optionally carries the ABORT chunk.
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 1, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}

/*
 * Abort an association in response to a received packet: notify the ULP
 * (when a TCB exists), send an ABORT built from the offending packet using
 * the peer's vtag, then free the TCB.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    use_mflowid, mflowid,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* pin the TCB, swap TCB lock for socket lock (lock order) */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}

#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug dump of the per-association inbound/outbound TSN logs.  Compiles
 * to an empty function unless NOSIY_PRINTS (sic) is also defined.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* when wrapped, the oldest entries live above tsn_in_at */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif

/*
 * Abort an existing association from the local side: notify the ULP
 * (unless the socket is already gone), send an ABORT chunk to the peer,
 * update statistics and free the TCB.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		/* pin the TCB, swap TCB lock for socket lock (lock order) */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}

/*
 * Handle an out-of-the-blue packet (one with no matching association).
 * Scans the chunks to decide whether to stay silent (COOKIE-ECHO,
 * PACKET-DROPPED, ABORT, SHUTDOWN-COMPLETE), reply with
 * SHUTDOWN-COMPLETE (for SHUTDOWN-ACK), or fall through to sending an
 * ABORT, subject to the sctp_blackhole sysctl.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			contains_init_chunk = 1;
			break;
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    use_mflowid, mflowid,
			    vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/* blackhole=1 suppresses ABORTs for packets containing an INIT */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    use_mflowid, mflowid,
		    vrf_id, port);
	}
}

/*
 * check the inbound datagram to make sure there is not an abort inside it,
 * if there is return 1, else return 0.
 */
int
sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
{
	struct sctp_chunkhdr *ch;
	struct sctp_init_chunk *init_chk, chunk_buf;
	int offset;
	unsigned int chk_length;

	offset = iphlen + sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
	    (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* packet is probably corrupt */
			break;
		}
		/* we seem to be ok, is it an abort? */
		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
			/* yep, tell them */
			return (1);
		}
		if (ch->chunk_type == SCTP_INITIATION) {
			/* need to update the Vtag */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
			if (init_chk != NULL) {
				*vtagfill = ntohl(init_chk->init.initiate_tag);
			}
		}
		/* Nope, move to the next chunk */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	return (0);
}

/*
 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
 * set (i.e.
 it's 0) so, create this function to compare link local scopes
 */
#ifdef INET6
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	struct sockaddr_in6 a, b;

	/* save copies */
	a = *addr1;
	b = *addr2;

	/* recover scope ids for copies that lack them, then compare */
	if (a.sin6_scope_id == 0)
		if (sa6_recoverscope(&a)) {
			/* can't get scope, so can't match */
			return (0);
		}
	if (b.sin6_scope_id == 0)
		if (sa6_recoverscope(&b)) {
			/* can't get scope, so can't match */
			return (0);
		}
	if (a.sin6_scope_id != b.sin6_scope_id)
		return (0);

	return (1);
}

/*
 * returns a sockaddr_in6 with embedded scope recovered and removed
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}

#endif

/*
 * are the two addresses the same?  currently a "scopeless" check returns: 1
 * if same, 0 if not
 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{

	/* must be valid */
	if (sa1 == NULL || sa2 == NULL)
		return (0);

	/* must be the same family */
	if (sa1->sa_family != sa2->sa_family)
		return (0);

	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			/* IPv6 addresses */
			struct sockaddr_in6 *sin6_1, *sin6_2;

			sin6_1 = (struct sockaddr_in6 *)sa1;
			sin6_2 = (struct sockaddr_in6 *)sa2;
			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
			    sin6_2));
		}
#endif
#ifdef INET
	case AF_INET:
		{
			/* IPv4 addresses */
			struct sockaddr_in *sin_1, *sin_2;

			sin_1 = (struct sockaddr_in *)sa1;
			sin_2 = (struct sockaddr_in *)sa2;
			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
		}
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}

/*
 * Debug helper: print a sockaddr (IPv4 or IPv6) in human-readable form.
 */
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];

#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
			break;
		}
#endif
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;
			unsigned char *p;

			sin = (struct sockaddr_in *)sa;
			p = (unsigned char *)&sin->sin_addr;
			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
			break;
		}
#endif
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}

/*
 * Move all read-queue entries belonging to 'stcb' from old_inp's socket to
 * new_inp's socket (used by peeloff/accept), keeping sockbuf accounting
 * consistent on both sides.  'waitflags' is passed through to sblock().
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* uncharge every mbuf from the old socket's rcv buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge every mbuf against the new socket's rcv buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}

void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!!
*/ 4369 #ifdef INVARIANTS 4370 panic("Gak, inp NULL on add_to_readq"); 4371 #endif 4372 return; 4373 } 4374 if (inp_read_lock_held == 0) 4375 SCTP_INP_READ_LOCK(inp); 4376 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4377 sctp_free_remote_addr(control->whoFrom); 4378 if (control->data) { 4379 sctp_m_freem(control->data); 4380 control->data = NULL; 4381 } 4382 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control); 4383 if (inp_read_lock_held == 0) 4384 SCTP_INP_READ_UNLOCK(inp); 4385 return; 4386 } 4387 if (!(control->spec_flags & M_NOTIFICATION)) { 4388 atomic_add_int(&inp->total_recvs, 1); 4389 if (!control->do_not_ref_stcb) { 4390 atomic_add_int(&stcb->total_recvs, 1); 4391 } 4392 } 4393 m = control->data; 4394 control->held_length = 0; 4395 control->length = 0; 4396 while (m) { 4397 if (SCTP_BUF_LEN(m) == 0) { 4398 /* Skip mbufs with NO length */ 4399 if (prev == NULL) { 4400 /* First one */ 4401 control->data = sctp_m_free(m); 4402 m = control->data; 4403 } else { 4404 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4405 m = SCTP_BUF_NEXT(prev); 4406 } 4407 if (m == NULL) { 4408 control->tail_mbuf = prev; 4409 } 4410 continue; 4411 } 4412 prev = m; 4413 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4414 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4415 } 4416 sctp_sballoc(stcb, sb, m); 4417 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4418 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4419 } 4420 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4421 m = SCTP_BUF_NEXT(m); 4422 } 4423 if (prev != NULL) { 4424 control->tail_mbuf = prev; 4425 } else { 4426 /* Everything got collapsed out?? 
*/ 4427 sctp_free_remote_addr(control->whoFrom); 4428 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control); 4429 if (inp_read_lock_held == 0) 4430 SCTP_INP_READ_UNLOCK(inp); 4431 return; 4432 } 4433 if (end) { 4434 control->end_added = 1; 4435 } 4436 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4437 if (inp_read_lock_held == 0) 4438 SCTP_INP_READ_UNLOCK(inp); 4439 if (inp && inp->sctp_socket) { 4440 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) { 4441 SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket); 4442 } else { 4443 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4444 struct socket *so; 4445 4446 so = SCTP_INP_SO(inp); 4447 if (!so_locked) { 4448 if (stcb) { 4449 atomic_add_int(&stcb->asoc.refcnt, 1); 4450 SCTP_TCB_UNLOCK(stcb); 4451 } 4452 SCTP_SOCKET_LOCK(so, 1); 4453 if (stcb) { 4454 SCTP_TCB_LOCK(stcb); 4455 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4456 } 4457 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4458 SCTP_SOCKET_UNLOCK(so, 1); 4459 return; 4460 } 4461 } 4462 #endif 4463 sctp_sorwakeup(inp, inp->sctp_socket); 4464 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4465 if (!so_locked) { 4466 SCTP_SOCKET_UNLOCK(so, 1); 4467 } 4468 #endif 4469 } 4470 } 4471 } 4472 4473 4474 int 4475 sctp_append_to_readq(struct sctp_inpcb *inp, 4476 struct sctp_tcb *stcb, 4477 struct sctp_queued_to_read *control, 4478 struct mbuf *m, 4479 int end, 4480 int ctls_cumack, 4481 struct sockbuf *sb) 4482 { 4483 /* 4484 * A partial delivery API event is underway. OR we are appending on 4485 * the reassembly queue. 4486 * 4487 * If PDAPI this means we need to add m to the end of the data. 4488 * Increase the length in the control AND increment the sb_cc. 4489 * Otherwise sb is NULL and all we need to do is put it at the end 4490 * of the mbuf chain. 
4491 */ 4492 int len = 0; 4493 struct mbuf *mm, *tail = NULL, *prev = NULL; 4494 4495 if (inp) { 4496 SCTP_INP_READ_LOCK(inp); 4497 } 4498 if (control == NULL) { 4499 get_out: 4500 if (inp) { 4501 SCTP_INP_READ_UNLOCK(inp); 4502 } 4503 return (-1); 4504 } 4505 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) { 4506 SCTP_INP_READ_UNLOCK(inp); 4507 return (0); 4508 } 4509 if (control->end_added) { 4510 /* huh this one is complete? */ 4511 goto get_out; 4512 } 4513 mm = m; 4514 if (mm == NULL) { 4515 goto get_out; 4516 } 4517 while (mm) { 4518 if (SCTP_BUF_LEN(mm) == 0) { 4519 /* Skip mbufs with NO lenght */ 4520 if (prev == NULL) { 4521 /* First one */ 4522 m = sctp_m_free(mm); 4523 mm = m; 4524 } else { 4525 SCTP_BUF_NEXT(prev) = sctp_m_free(mm); 4526 mm = SCTP_BUF_NEXT(prev); 4527 } 4528 continue; 4529 } 4530 prev = mm; 4531 len += SCTP_BUF_LEN(mm); 4532 if (sb) { 4533 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4534 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm)); 4535 } 4536 sctp_sballoc(stcb, sb, mm); 4537 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4538 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4539 } 4540 } 4541 mm = SCTP_BUF_NEXT(mm); 4542 } 4543 if (prev) { 4544 tail = prev; 4545 } else { 4546 /* Really there should always be a prev */ 4547 if (m == NULL) { 4548 /* Huh nothing left? 
*/ 4549 #ifdef INVARIANTS 4550 panic("Nothing left to add?"); 4551 #else 4552 goto get_out; 4553 #endif 4554 } 4555 tail = m; 4556 } 4557 if (control->tail_mbuf) { 4558 /* append */ 4559 SCTP_BUF_NEXT(control->tail_mbuf) = m; 4560 control->tail_mbuf = tail; 4561 } else { 4562 /* nothing there */ 4563 #ifdef INVARIANTS 4564 if (control->data != NULL) { 4565 panic("This should NOT happen"); 4566 } 4567 #endif 4568 control->data = m; 4569 control->tail_mbuf = tail; 4570 } 4571 atomic_add_int(&control->length, len); 4572 if (end) { 4573 /* message is complete */ 4574 if (stcb && (control == stcb->asoc.control_pdapi)) { 4575 stcb->asoc.control_pdapi = NULL; 4576 } 4577 control->held_length = 0; 4578 control->end_added = 1; 4579 } 4580 if (stcb == NULL) { 4581 control->do_not_ref_stcb = 1; 4582 } 4583 /* 4584 * When we are appending in partial delivery, the cum-ack is used 4585 * for the actual pd-api highest tsn on this mbuf. The true cum-ack 4586 * is populated in the outbound sinfo structure from the true cumack 4587 * if the association exists... 
4588 */ 4589 control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack; 4590 if (inp) { 4591 SCTP_INP_READ_UNLOCK(inp); 4592 } 4593 if (inp && inp->sctp_socket) { 4594 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) { 4595 SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket); 4596 } else { 4597 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4598 struct socket *so; 4599 4600 so = SCTP_INP_SO(inp); 4601 if (stcb) { 4602 atomic_add_int(&stcb->asoc.refcnt, 1); 4603 SCTP_TCB_UNLOCK(stcb); 4604 } 4605 SCTP_SOCKET_LOCK(so, 1); 4606 if (stcb) { 4607 SCTP_TCB_LOCK(stcb); 4608 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4609 } 4610 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4611 SCTP_SOCKET_UNLOCK(so, 1); 4612 return (0); 4613 } 4614 #endif 4615 sctp_sorwakeup(inp, inp->sctp_socket); 4616 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4617 SCTP_SOCKET_UNLOCK(so, 1); 4618 #endif 4619 } 4620 } 4621 return (0); 4622 } 4623 4624 4625 4626 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4627 *************ALTERNATE ROUTING CODE 4628 */ 4629 4630 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4631 *************ALTERNATE ROUTING CODE 4632 */ 4633 4634 struct mbuf * 4635 sctp_generate_cause(uint16_t code, char *info) 4636 { 4637 struct mbuf *m; 4638 struct sctp_gen_error_cause *cause; 4639 size_t info_len, len; 4640 4641 if ((code == 0) || (info == NULL)) { 4642 return (NULL); 4643 } 4644 info_len = strlen(info); 4645 len = sizeof(struct sctp_paramhdr) + info_len; 4646 m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 4647 if (m != NULL) { 4648 SCTP_BUF_LEN(m) = len; 4649 cause = mtod(m, struct sctp_gen_error_cause *); 4650 cause->code = htons(code); 4651 cause->length = htons((uint16_t) len); 4652 memcpy(cause->info, info, info_len); 4653 } 4654 return (m); 4655 } 4656 4657 #ifdef SCTP_MBCNT_LOGGING 4658 void 4659 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4660 struct sctp_tmit_chunk *tp1, int chk_cnt) 
4661 { 4662 if (tp1->data == NULL) { 4663 return; 4664 } 4665 asoc->chunks_on_out_queue -= chk_cnt; 4666 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 4667 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4668 asoc->total_output_queue_size, 4669 tp1->book_size, 4670 0, 4671 tp1->mbcnt); 4672 } 4673 if (asoc->total_output_queue_size >= tp1->book_size) { 4674 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4675 } else { 4676 asoc->total_output_queue_size = 0; 4677 } 4678 4679 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4680 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4681 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 4682 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 4683 } else { 4684 stcb->sctp_socket->so_snd.sb_cc = 0; 4685 4686 } 4687 } 4688 } 4689 4690 #endif 4691 4692 int 4693 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 4694 uint8_t sent, int so_locked 4695 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 4696 SCTP_UNUSED 4697 #endif 4698 ) 4699 { 4700 struct sctp_stream_out *strq; 4701 struct sctp_tmit_chunk *chk = NULL, *tp2; 4702 struct sctp_stream_queue_pending *sp; 4703 uint16_t stream = 0, seq = 0; 4704 uint8_t foundeom = 0; 4705 int ret_sz = 0; 4706 int notdone; 4707 int do_wakeup_routine = 0; 4708 4709 stream = tp1->rec.data.stream_number; 4710 seq = tp1->rec.data.stream_seq; 4711 do { 4712 ret_sz += tp1->book_size; 4713 if (tp1->data != NULL) { 4714 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4715 sctp_flight_size_decrease(tp1); 4716 sctp_total_flight_decrease(stcb, tp1); 4717 } 4718 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 4719 stcb->asoc.peers_rwnd += tp1->send_size; 4720 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 4721 if (sent) { 4722 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 4723 } else { 4724 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, 
so_locked); 4725 } 4726 if (tp1->data) { 4727 sctp_m_freem(tp1->data); 4728 tp1->data = NULL; 4729 } 4730 do_wakeup_routine = 1; 4731 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4732 stcb->asoc.sent_queue_cnt_removeable--; 4733 } 4734 } 4735 tp1->sent = SCTP_FORWARD_TSN_SKIP; 4736 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 4737 SCTP_DATA_NOT_FRAG) { 4738 /* not frag'ed we ae done */ 4739 notdone = 0; 4740 foundeom = 1; 4741 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 4742 /* end of frag, we are done */ 4743 notdone = 0; 4744 foundeom = 1; 4745 } else { 4746 /* 4747 * Its a begin or middle piece, we must mark all of 4748 * it 4749 */ 4750 notdone = 1; 4751 tp1 = TAILQ_NEXT(tp1, sctp_next); 4752 } 4753 } while (tp1 && notdone); 4754 if (foundeom == 0) { 4755 /* 4756 * The multi-part message was scattered across the send and 4757 * sent queue. 4758 */ 4759 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 4760 if ((tp1->rec.data.stream_number != stream) || 4761 (tp1->rec.data.stream_seq != seq)) { 4762 break; 4763 } 4764 /* 4765 * save to chk in case we have some on stream out 4766 * queue. If so and we have an un-transmitted one we 4767 * don't have to fudge the TSN. 4768 */ 4769 chk = tp1; 4770 ret_sz += tp1->book_size; 4771 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 4772 if (sent) { 4773 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 4774 } else { 4775 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 4776 } 4777 if (tp1->data) { 4778 sctp_m_freem(tp1->data); 4779 tp1->data = NULL; 4780 } 4781 /* No flight involved here book the size to 0 */ 4782 tp1->book_size = 0; 4783 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 4784 foundeom = 1; 4785 } 4786 do_wakeup_routine = 1; 4787 tp1->sent = SCTP_FORWARD_TSN_SKIP; 4788 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 4789 /* 4790 * on to the sent queue so we can wait for it to be 4791 * passed by. 
4792 */ 4793 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 4794 sctp_next); 4795 stcb->asoc.send_queue_cnt--; 4796 stcb->asoc.sent_queue_cnt++; 4797 } 4798 } 4799 if (foundeom == 0) { 4800 /* 4801 * Still no eom found. That means there is stuff left on the 4802 * stream out queue.. yuck. 4803 */ 4804 SCTP_TCB_SEND_LOCK(stcb); 4805 strq = &stcb->asoc.strmout[stream]; 4806 sp = TAILQ_FIRST(&strq->outqueue); 4807 if (sp != NULL) { 4808 sp->discard_rest = 1; 4809 /* 4810 * We may need to put a chunk on the queue that 4811 * holds the TSN that would have been sent with the 4812 * LAST bit. 4813 */ 4814 if (chk == NULL) { 4815 /* Yep, we have to */ 4816 sctp_alloc_a_chunk(stcb, chk); 4817 if (chk == NULL) { 4818 /* 4819 * we are hosed. All we can do is 4820 * nothing.. which will cause an 4821 * abort if the peer is paying 4822 * attention. 4823 */ 4824 goto oh_well; 4825 } 4826 memset(chk, 0, sizeof(*chk)); 4827 chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG; 4828 chk->sent = SCTP_FORWARD_TSN_SKIP; 4829 chk->asoc = &stcb->asoc; 4830 chk->rec.data.stream_seq = strq->next_sequence_send; 4831 chk->rec.data.stream_number = sp->stream; 4832 chk->rec.data.payloadtype = sp->ppid; 4833 chk->rec.data.context = sp->context; 4834 chk->flags = sp->act_flags; 4835 if (sp->net) 4836 chk->whoTo = sp->net; 4837 else 4838 chk->whoTo = stcb->asoc.primary_destination; 4839 atomic_add_int(&chk->whoTo->ref_count, 1); 4840 chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 4841 stcb->asoc.pr_sctp_cnt++; 4842 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 4843 stcb->asoc.sent_queue_cnt++; 4844 stcb->asoc.pr_sctp_cnt++; 4845 } else { 4846 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 4847 } 4848 strq->next_sequence_send++; 4849 oh_well: 4850 if (sp->data) { 4851 /* 4852 * Pull any data to free up the SB and allow 4853 * sender to "add more" while we will throw 4854 * away :-) 4855 */ 4856 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 4857 ret_sz += 
sp->length; 4858 do_wakeup_routine = 1; 4859 sp->some_taken = 1; 4860 sctp_m_freem(sp->data); 4861 sp->data = NULL; 4862 sp->tail_mbuf = NULL; 4863 sp->length = 0; 4864 } 4865 } 4866 SCTP_TCB_SEND_UNLOCK(stcb); 4867 } 4868 if (do_wakeup_routine) { 4869 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4870 struct socket *so; 4871 4872 so = SCTP_INP_SO(stcb->sctp_ep); 4873 if (!so_locked) { 4874 atomic_add_int(&stcb->asoc.refcnt, 1); 4875 SCTP_TCB_UNLOCK(stcb); 4876 SCTP_SOCKET_LOCK(so, 1); 4877 SCTP_TCB_LOCK(stcb); 4878 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4879 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4880 /* assoc was freed while we were unlocked */ 4881 SCTP_SOCKET_UNLOCK(so, 1); 4882 return (ret_sz); 4883 } 4884 } 4885 #endif 4886 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 4887 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4888 if (!so_locked) { 4889 SCTP_SOCKET_UNLOCK(so, 1); 4890 } 4891 #endif 4892 } 4893 return (ret_sz); 4894 } 4895 4896 /* 4897 * checks to see if the given address, sa, is one that is currently known by 4898 * the kernel note: can't distinguish the same address on multiple interfaces 4899 * and doesn't handle multiple addresses with different zone/scope id's note: 4900 * ifa_ifwithaddr() compares the entire sockaddr struct 4901 */ 4902 struct sctp_ifa * 4903 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 4904 int holds_lock) 4905 { 4906 struct sctp_laddr *laddr; 4907 4908 if (holds_lock == 0) { 4909 SCTP_INP_RLOCK(inp); 4910 } 4911 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4912 if (laddr->ifa == NULL) 4913 continue; 4914 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 4915 continue; 4916 #ifdef INET 4917 if (addr->sa_family == AF_INET) { 4918 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 4919 laddr->ifa->address.sin.sin_addr.s_addr) { 4920 /* found him. 
 */
				if (holds_lock == 0) {
					SCTP_INP_RUNLOCK(inp);
				}
				return (laddr->ifa);
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &laddr->ifa->address.sin6)) {
				/* found him. */
				if (holds_lock == 0) {
					SCTP_INP_RUNLOCK(inp);
				}
				return (laddr->ifa);
				break;
			}
		}
#endif
	}
	if (holds_lock == 0) {
		SCTP_INP_RUNLOCK(inp);
	}
	return (NULL);
}

/*
 * Hash a socket address into a 32-bit value used to select a bucket in
 * the per-VRF address hash table.  IPv4 folds the address with its upper
 * 16 bits; IPv6 sums the four 32-bit words and folds.  Unknown families
 * hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t hash_of_addr;

			sin6 = (struct sockaddr_in6 *)addr;
			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3]);
			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
			return (hash_of_addr);
		}
#endif
	default:
		break;
	}
	return (0);
}

/*
 * Look up 'addr' in the per-VRF global address hash table.  Returns the
 * matching sctp_ifa or NULL.  Takes the global address read lock unless
 * the caller indicates it already holds it.
 */
struct sctp_ifa *
sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
{
	struct sctp_ifa *sctp_ifap;
	struct sctp_vrf *vrf;
	struct sctp_ifalist *hash_head;
	uint32_t hash_of_addr;

	if (holds_lock == 0)
		SCTP_IPI_ADDR_RLOCK();

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
stage_right:
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();
		return (NULL);
	}
	hash_of_addr = sctp_get_ifa_hash_val(addr);

	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
	/* defensive: complain loudly if the bucket pointer is bogus */
	if (hash_head == NULL) {
		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
		sctp_print_address(addr);
		SCTP_PRINTF("No such bucket for address\n");
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();

		return (NULL);
	}
	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
		if (sctp_ifap == NULL) {
#ifdef INVARIANTS
			panic("Huh LIST_FOREACH corrupt");
			goto stage_right;
#else
			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
			goto stage_right;
#endif
		}
		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    sctp_ifap->address.sin.sin_addr.s_addr) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &sctp_ifap->address.sin6)) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				break;
			}
		}
#endif
	}
	if (holds_lock == 0)
		SCTP_IPI_ADDR_RUNLOCK();
	return (NULL);
}

/*
 * Called after the user has consumed *freed_so_far bytes: decide whether
 * the receive window has opened by at least rwnd_req and, if so, send an
 * immediate window-update SACK (and kick chunk output); otherwise just
 * remember the pending amount.  Temporarily drops the INP read lock (when
 * held) around the SACK machinery.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}

int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5153 * On the way out we may send out any combination of: 5154 * MSG_NOTIFICATION MSG_EOR 5155 * 5156 */ 5157 struct sctp_inpcb *inp = NULL; 5158 int my_len = 0; 5159 int cp_len = 0, error = 0; 5160 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5161 struct mbuf *m = NULL; 5162 struct sctp_tcb *stcb = NULL; 5163 int wakeup_read_socket = 0; 5164 int freecnt_applied = 0; 5165 int out_flags = 0, in_flags = 0; 5166 int block_allowed = 1; 5167 uint32_t freed_so_far = 0; 5168 uint32_t copied_so_far = 0; 5169 int in_eeor_mode = 0; 5170 int no_rcv_needed = 0; 5171 uint32_t rwnd_req = 0; 5172 int hold_sblock = 0; 5173 int hold_rlock = 0; 5174 int slen = 0; 5175 uint32_t held_length = 0; 5176 int sockbuf_lock = 0; 5177 5178 if (uio == NULL) { 5179 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5180 return (EINVAL); 5181 } 5182 if (msg_flags) { 5183 in_flags = *msg_flags; 5184 if (in_flags & MSG_PEEK) 5185 SCTP_STAT_INCR(sctps_read_peeks); 5186 } else { 5187 in_flags = 0; 5188 } 5189 slen = uio->uio_resid; 5190 5191 /* Pull in and set up our int flags */ 5192 if (in_flags & MSG_OOB) { 5193 /* Out of band's NOT supported */ 5194 return (EOPNOTSUPP); 5195 } 5196 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5197 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5198 return (EINVAL); 5199 } 5200 if ((in_flags & (MSG_DONTWAIT 5201 | MSG_NBIO 5202 )) || 5203 SCTP_SO_IS_NBIO(so)) { 5204 block_allowed = 0; 5205 } 5206 /* setup the endpoint */ 5207 inp = (struct sctp_inpcb *)so->so_pcb; 5208 if (inp == NULL) { 5209 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5210 return (EFAULT); 5211 } 5212 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5213 /* Must be at least a MTU's worth */ 5214 if (rwnd_req < SCTP_MIN_RWND) 5215 rwnd_req = SCTP_MIN_RWND; 5216 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5217 if (SCTP_BASE_SYSCTL(sctp_logging_level) & 
SCTP_RECV_RWND_LOGGING_ENABLE) { 5218 sctp_misc_ints(SCTP_SORECV_ENTER, 5219 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid); 5220 } 5221 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5222 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5223 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid); 5224 } 5225 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0)); 5226 if (error) { 5227 goto release_unlocked; 5228 } 5229 sockbuf_lock = 1; 5230 restart: 5231 5232 5233 restart_nosblocks: 5234 if (hold_sblock == 0) { 5235 SOCKBUF_LOCK(&so->so_rcv); 5236 hold_sblock = 1; 5237 } 5238 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5239 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5240 goto out; 5241 } 5242 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5243 if (so->so_error) { 5244 error = so->so_error; 5245 if ((in_flags & MSG_PEEK) == 0) 5246 so->so_error = 0; 5247 goto out; 5248 } else { 5249 if (so->so_rcv.sb_cc == 0) { 5250 /* indicate EOF */ 5251 error = 0; 5252 goto out; 5253 } 5254 } 5255 } 5256 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) { 5257 /* we need to wait for data */ 5258 if ((so->so_rcv.sb_cc == 0) && 5259 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5260 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5261 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5262 /* 5263 * For active open side clear flags for 5264 * re-use passive open is blocked by 5265 * connect. 
5266 */ 5267 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5268 /* 5269 * You were aborted, passive side 5270 * always hits here 5271 */ 5272 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5273 error = ECONNRESET; 5274 } 5275 so->so_state &= ~(SS_ISCONNECTING | 5276 SS_ISDISCONNECTING | 5277 SS_ISCONFIRMING | 5278 SS_ISCONNECTED); 5279 if (error == 0) { 5280 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5281 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5282 error = ENOTCONN; 5283 } 5284 } 5285 goto out; 5286 } 5287 } 5288 error = sbwait(&so->so_rcv); 5289 if (error) { 5290 goto out; 5291 } 5292 held_length = 0; 5293 goto restart_nosblocks; 5294 } else if (so->so_rcv.sb_cc == 0) { 5295 if (so->so_error) { 5296 error = so->so_error; 5297 if ((in_flags & MSG_PEEK) == 0) 5298 so->so_error = 0; 5299 } else { 5300 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5301 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 5302 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5303 /* 5304 * For active open side clear flags 5305 * for re-use passive open is 5306 * blocked by connect. 
5307 */ 5308 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5309 /* 5310 * You were aborted, passive 5311 * side always hits here 5312 */ 5313 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5314 error = ECONNRESET; 5315 } 5316 so->so_state &= ~(SS_ISCONNECTING | 5317 SS_ISDISCONNECTING | 5318 SS_ISCONFIRMING | 5319 SS_ISCONNECTED); 5320 if (error == 0) { 5321 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5322 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5323 error = ENOTCONN; 5324 } 5325 } 5326 goto out; 5327 } 5328 } 5329 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5330 error = EWOULDBLOCK; 5331 } 5332 goto out; 5333 } 5334 if (hold_sblock == 1) { 5335 SOCKBUF_UNLOCK(&so->so_rcv); 5336 hold_sblock = 0; 5337 } 5338 /* we possibly have data we can read */ 5339 /* sa_ignore FREED_MEMORY */ 5340 control = TAILQ_FIRST(&inp->read_queue); 5341 if (control == NULL) { 5342 /* 5343 * This could be happening since the appender did the 5344 * increment but as not yet did the tailq insert onto the 5345 * read_queue 5346 */ 5347 if (hold_rlock == 0) { 5348 SCTP_INP_READ_LOCK(inp); 5349 } 5350 control = TAILQ_FIRST(&inp->read_queue); 5351 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5352 #ifdef INVARIANTS 5353 panic("Huh, its non zero and nothing on control?"); 5354 #endif 5355 so->so_rcv.sb_cc = 0; 5356 } 5357 SCTP_INP_READ_UNLOCK(inp); 5358 hold_rlock = 0; 5359 goto restart; 5360 } 5361 if ((control->length == 0) && 5362 (control->do_not_ref_stcb)) { 5363 /* 5364 * Clean up code for freeing assoc that left behind a 5365 * pdapi.. maybe a peer in EEOR that just closed after 5366 * sending and never indicated a EOR. 5367 */ 5368 if (hold_rlock == 0) { 5369 hold_rlock = 1; 5370 SCTP_INP_READ_LOCK(inp); 5371 } 5372 control->held_length = 0; 5373 if (control->data) { 5374 /* Hmm there is data here .. 
fix */ 5375 struct mbuf *m_tmp; 5376 int cnt = 0; 5377 5378 m_tmp = control->data; 5379 while (m_tmp) { 5380 cnt += SCTP_BUF_LEN(m_tmp); 5381 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5382 control->tail_mbuf = m_tmp; 5383 control->end_added = 1; 5384 } 5385 m_tmp = SCTP_BUF_NEXT(m_tmp); 5386 } 5387 control->length = cnt; 5388 } else { 5389 /* remove it */ 5390 TAILQ_REMOVE(&inp->read_queue, control, next); 5391 /* Add back any hiddend data */ 5392 sctp_free_remote_addr(control->whoFrom); 5393 sctp_free_a_readq(stcb, control); 5394 } 5395 if (hold_rlock) { 5396 hold_rlock = 0; 5397 SCTP_INP_READ_UNLOCK(inp); 5398 } 5399 goto restart; 5400 } 5401 if ((control->length == 0) && 5402 (control->end_added == 1)) { 5403 /* 5404 * Do we also need to check for (control->pdapi_aborted == 5405 * 1)? 5406 */ 5407 if (hold_rlock == 0) { 5408 hold_rlock = 1; 5409 SCTP_INP_READ_LOCK(inp); 5410 } 5411 TAILQ_REMOVE(&inp->read_queue, control, next); 5412 if (control->data) { 5413 #ifdef INVARIANTS 5414 panic("control->data not null but control->length == 0"); 5415 #else 5416 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5417 sctp_m_freem(control->data); 5418 control->data = NULL; 5419 #endif 5420 } 5421 if (control->aux_data) { 5422 sctp_m_free(control->aux_data); 5423 control->aux_data = NULL; 5424 } 5425 sctp_free_remote_addr(control->whoFrom); 5426 sctp_free_a_readq(stcb, control); 5427 if (hold_rlock) { 5428 hold_rlock = 0; 5429 SCTP_INP_READ_UNLOCK(inp); 5430 } 5431 goto restart; 5432 } 5433 if (control->length == 0) { 5434 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5435 (filling_sinfo)) { 5436 /* find a more suitable one then this */ 5437 ctl = TAILQ_NEXT(control, next); 5438 while (ctl) { 5439 if ((ctl->stcb != control->stcb) && (ctl->length) && 5440 (ctl->some_taken || 5441 (ctl->spec_flags & M_NOTIFICATION) || 5442 ((ctl->do_not_ref_stcb == 0) && 5443 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5444 ) { 5445 /*- 5446 * If we have a different TCB next, and there is data 5447 * present. If we have already taken some (pdapi), OR we can 5448 * ref the tcb and no delivery as started on this stream, we 5449 * take it. Note we allow a notification on a different 5450 * assoc to be delivered.. 5451 */ 5452 control = ctl; 5453 goto found_one; 5454 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5455 (ctl->length) && 5456 ((ctl->some_taken) || 5457 ((ctl->do_not_ref_stcb == 0) && 5458 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5459 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5460 /*- 5461 * If we have the same tcb, and there is data present, and we 5462 * have the strm interleave feature present. Then if we have 5463 * taken some (pdapi) or we can refer to tht tcb AND we have 5464 * not started a delivery for this stream, we can take it. 5465 * Note we do NOT allow a notificaiton on the same assoc to 5466 * be delivered. 
5467 */ 5468 control = ctl; 5469 goto found_one; 5470 } 5471 ctl = TAILQ_NEXT(ctl, next); 5472 } 5473 } 5474 /* 5475 * if we reach here, not suitable replacement is available 5476 * <or> fragment interleave is NOT on. So stuff the sb_cc 5477 * into the our held count, and its time to sleep again. 5478 */ 5479 held_length = so->so_rcv.sb_cc; 5480 control->held_length = so->so_rcv.sb_cc; 5481 goto restart; 5482 } 5483 /* Clear the held length since there is something to read */ 5484 control->held_length = 0; 5485 if (hold_rlock) { 5486 SCTP_INP_READ_UNLOCK(inp); 5487 hold_rlock = 0; 5488 } 5489 found_one: 5490 /* 5491 * If we reach here, control has a some data for us to read off. 5492 * Note that stcb COULD be NULL. 5493 */ 5494 control->some_taken++; 5495 if (hold_sblock) { 5496 SOCKBUF_UNLOCK(&so->so_rcv); 5497 hold_sblock = 0; 5498 } 5499 stcb = control->stcb; 5500 if (stcb) { 5501 if ((control->do_not_ref_stcb == 0) && 5502 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5503 if (freecnt_applied == 0) 5504 stcb = NULL; 5505 } else if (control->do_not_ref_stcb == 0) { 5506 /* you can't free it on me please */ 5507 /* 5508 * The lock on the socket buffer protects us so the 5509 * free code will stop. But since we used the 5510 * socketbuf lock and the sender uses the tcb_lock 5511 * to increment, we need to use the atomic add to 5512 * the refcnt 5513 */ 5514 if (freecnt_applied) { 5515 #ifdef INVARIANTS 5516 panic("refcnt already incremented"); 5517 #else 5518 SCTP_PRINTF("refcnt already incremented?\n"); 5519 #endif 5520 } else { 5521 atomic_add_int(&stcb->asoc.refcnt, 1); 5522 freecnt_applied = 1; 5523 } 5524 /* 5525 * Setup to remember how much we have not yet told 5526 * the peer our rwnd has opened up. Note we grab the 5527 * value from the tcb from last time. Note too that 5528 * sack sending clears this when a sack is sent, 5529 * which is fine. 
Once we hit the rwnd_req, we then 5530 * will go to the sctp_user_rcvd() that will not 5531 * lock until it KNOWs it MUST send a WUP-SACK. 5532 */ 5533 freed_so_far = stcb->freed_by_sorcv_sincelast; 5534 stcb->freed_by_sorcv_sincelast = 0; 5535 } 5536 } 5537 if (stcb && 5538 ((control->spec_flags & M_NOTIFICATION) == 0) && 5539 control->do_not_ref_stcb == 0) { 5540 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5541 } 5542 /* First lets get off the sinfo and sockaddr info */ 5543 if ((sinfo) && filling_sinfo) { 5544 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo)); 5545 nxt = TAILQ_NEXT(control, next); 5546 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5547 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5548 struct sctp_extrcvinfo *s_extra; 5549 5550 s_extra = (struct sctp_extrcvinfo *)sinfo; 5551 if ((nxt) && 5552 (nxt->length)) { 5553 s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5554 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5555 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5556 } 5557 if (nxt->spec_flags & M_NOTIFICATION) { 5558 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5559 } 5560 s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id; 5561 s_extra->sreinfo_next_length = nxt->length; 5562 s_extra->sreinfo_next_ppid = nxt->sinfo_ppid; 5563 s_extra->sreinfo_next_stream = nxt->sinfo_stream; 5564 if (nxt->tail_mbuf != NULL) { 5565 if (nxt->end_added) { 5566 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5567 } 5568 } 5569 } else { 5570 /* 5571 * we explicitly 0 this, since the memcpy 5572 * got some other things beyond the older 5573 * sinfo_ that is on the control's structure 5574 * :-D 5575 */ 5576 nxt = NULL; 5577 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG; 5578 s_extra->sreinfo_next_aid = 0; 5579 s_extra->sreinfo_next_length = 0; 5580 s_extra->sreinfo_next_ppid = 0; 5581 s_extra->sreinfo_next_stream = 0; 5582 } 5583 } 5584 /* 5585 * update off the real current 
cum-ack, if we have an stcb. 5586 */ 5587 if ((control->do_not_ref_stcb == 0) && stcb) 5588 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5589 /* 5590 * mask off the high bits, we keep the actual chunk bits in 5591 * there. 5592 */ 5593 sinfo->sinfo_flags &= 0x00ff; 5594 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5595 sinfo->sinfo_flags |= SCTP_UNORDERED; 5596 } 5597 } 5598 #ifdef SCTP_ASOCLOG_OF_TSNS 5599 { 5600 int index, newindex; 5601 struct sctp_pcbtsn_rlog *entry; 5602 5603 do { 5604 index = inp->readlog_index; 5605 newindex = index + 1; 5606 if (newindex >= SCTP_READ_LOG_SIZE) { 5607 newindex = 0; 5608 } 5609 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5610 entry = &inp->readlog[index]; 5611 entry->vtag = control->sinfo_assoc_id; 5612 entry->strm = control->sinfo_stream; 5613 entry->seq = control->sinfo_ssn; 5614 entry->sz = control->length; 5615 entry->flgs = control->sinfo_flags; 5616 } 5617 #endif 5618 if (fromlen && from) { 5619 cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len); 5620 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5621 #ifdef INET6 5622 case AF_INET6: 5623 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from; 5624 break; 5625 #endif 5626 #ifdef INET 5627 case AF_INET: 5628 ((struct sockaddr_in *)from)->sin_port = control->port_from; 5629 break; 5630 #endif 5631 default: 5632 break; 5633 } 5634 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 5635 5636 #if defined(INET) && defined(INET6) 5637 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) && 5638 (from->sa_family == AF_INET) && 5639 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) { 5640 struct sockaddr_in *sin; 5641 struct sockaddr_in6 sin6; 5642 5643 sin = (struct sockaddr_in *)from; 5644 bzero(&sin6, sizeof(sin6)); 5645 sin6.sin6_family = AF_INET6; 5646 sin6.sin6_len = sizeof(struct sockaddr_in6); 5647 sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); 5648 bcopy(&sin->sin_addr, 5649 
&sin6.sin6_addr.s6_addr32[3], 5650 sizeof(sin6.sin6_addr.s6_addr32[3])); 5651 sin6.sin6_port = sin->sin_port; 5652 memcpy(from, &sin6, sizeof(struct sockaddr_in6)); 5653 } 5654 #endif 5655 #ifdef INET6 5656 { 5657 struct sockaddr_in6 lsa6, *from6; 5658 5659 from6 = (struct sockaddr_in6 *)from; 5660 sctp_recover_scope_mac(from6, (&lsa6)); 5661 } 5662 #endif 5663 } 5664 /* now copy out what data we can */ 5665 if (mp == NULL) { 5666 /* copy out each mbuf in the chain up to length */ 5667 get_more_data: 5668 m = control->data; 5669 while (m) { 5670 /* Move out all we can */ 5671 cp_len = (int)uio->uio_resid; 5672 my_len = (int)SCTP_BUF_LEN(m); 5673 if (cp_len > my_len) { 5674 /* not enough in this buf */ 5675 cp_len = my_len; 5676 } 5677 if (hold_rlock) { 5678 SCTP_INP_READ_UNLOCK(inp); 5679 hold_rlock = 0; 5680 } 5681 if (cp_len > 0) 5682 error = uiomove(mtod(m, char *), cp_len, uio); 5683 /* re-read */ 5684 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5685 goto release; 5686 } 5687 if ((control->do_not_ref_stcb == 0) && stcb && 5688 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5689 no_rcv_needed = 1; 5690 } 5691 if (error) { 5692 /* error we are out of here */ 5693 goto release; 5694 } 5695 if ((SCTP_BUF_NEXT(m) == NULL) && 5696 (cp_len >= SCTP_BUF_LEN(m)) && 5697 ((control->end_added == 0) || 5698 (control->end_added && 5699 (TAILQ_NEXT(control, next) == NULL))) 5700 ) { 5701 SCTP_INP_READ_LOCK(inp); 5702 hold_rlock = 1; 5703 } 5704 if (cp_len == SCTP_BUF_LEN(m)) { 5705 if ((SCTP_BUF_NEXT(m) == NULL) && 5706 (control->end_added)) { 5707 out_flags |= MSG_EOR; 5708 if ((control->do_not_ref_stcb == 0) && 5709 (control->stcb != NULL) && 5710 ((control->spec_flags & M_NOTIFICATION) == 0)) 5711 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5712 } 5713 if (control->spec_flags & M_NOTIFICATION) { 5714 out_flags |= MSG_NOTIFICATION; 5715 } 5716 /* we ate up the mbuf */ 5717 if (in_flags & MSG_PEEK) { 5718 /* just looking */ 5719 m = 
SCTP_BUF_NEXT(m); 5720 copied_so_far += cp_len; 5721 } else { 5722 /* dispose of the mbuf */ 5723 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5724 sctp_sblog(&so->so_rcv, 5725 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5726 } 5727 sctp_sbfree(control, stcb, &so->so_rcv, m); 5728 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5729 sctp_sblog(&so->so_rcv, 5730 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5731 } 5732 copied_so_far += cp_len; 5733 freed_so_far += cp_len; 5734 freed_so_far += MSIZE; 5735 atomic_subtract_int(&control->length, cp_len); 5736 control->data = sctp_m_free(m); 5737 m = control->data; 5738 /* 5739 * been through it all, must hold sb 5740 * lock ok to null tail 5741 */ 5742 if (control->data == NULL) { 5743 #ifdef INVARIANTS 5744 if ((control->end_added == 0) || 5745 (TAILQ_NEXT(control, next) == NULL)) { 5746 /* 5747 * If the end is not 5748 * added, OR the 5749 * next is NOT null 5750 * we MUST have the 5751 * lock. 5752 */ 5753 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 5754 panic("Hmm we don't own the lock?"); 5755 } 5756 } 5757 #endif 5758 control->tail_mbuf = NULL; 5759 #ifdef INVARIANTS 5760 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 5761 panic("end_added, nothing left and no MSG_EOR"); 5762 } 5763 #endif 5764 } 5765 } 5766 } else { 5767 /* Do we need to trim the mbuf? */ 5768 if (control->spec_flags & M_NOTIFICATION) { 5769 out_flags |= MSG_NOTIFICATION; 5770 } 5771 if ((in_flags & MSG_PEEK) == 0) { 5772 SCTP_BUF_RESV_UF(m, cp_len); 5773 SCTP_BUF_LEN(m) -= cp_len; 5774 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5775 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5776 } 5777 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5778 if ((control->do_not_ref_stcb == 0) && 5779 stcb) { 5780 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5781 } 5782 copied_so_far += cp_len; 5783 freed_so_far += cp_len; 5784 freed_so_far += MSIZE; 5785 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5786 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5787 SCTP_LOG_SBRESULT, 0); 5788 } 5789 atomic_subtract_int(&control->length, cp_len); 5790 } else { 5791 copied_so_far += cp_len; 5792 } 5793 } 5794 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 5795 break; 5796 } 5797 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5798 (control->do_not_ref_stcb == 0) && 5799 (freed_so_far >= rwnd_req)) { 5800 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5801 } 5802 } /* end while(m) */ 5803 /* 5804 * At this point we have looked at it all and we either have 5805 * a MSG_EOR/or read all the user wants... <OR> 5806 * control->length == 0. 5807 */ 5808 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 5809 /* we are done with this control */ 5810 if (control->length == 0) { 5811 if (control->data) { 5812 #ifdef INVARIANTS 5813 panic("control->data not null at read eor?"); 5814 #else 5815 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n"); 5816 sctp_m_freem(control->data); 5817 control->data = NULL; 5818 #endif 5819 } 5820 done_with_control: 5821 if (TAILQ_NEXT(control, next) == NULL) { 5822 /* 5823 * If we don't have a next we need a 5824 * lock, if there is a next 5825 * interrupt is filling ahead of us 5826 * and we don't need a lock to 5827 * remove this guy (which is the 5828 * head of the queue). 
5829 */ 5830 if (hold_rlock == 0) { 5831 SCTP_INP_READ_LOCK(inp); 5832 hold_rlock = 1; 5833 } 5834 } 5835 TAILQ_REMOVE(&inp->read_queue, control, next); 5836 /* Add back any hiddend data */ 5837 if (control->held_length) { 5838 held_length = 0; 5839 control->held_length = 0; 5840 wakeup_read_socket = 1; 5841 } 5842 if (control->aux_data) { 5843 sctp_m_free(control->aux_data); 5844 control->aux_data = NULL; 5845 } 5846 no_rcv_needed = control->do_not_ref_stcb; 5847 sctp_free_remote_addr(control->whoFrom); 5848 control->data = NULL; 5849 sctp_free_a_readq(stcb, control); 5850 control = NULL; 5851 if ((freed_so_far >= rwnd_req) && 5852 (no_rcv_needed == 0)) 5853 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5854 5855 } else { 5856 /* 5857 * The user did not read all of this 5858 * message, turn off the returned MSG_EOR 5859 * since we are leaving more behind on the 5860 * control to read. 5861 */ 5862 #ifdef INVARIANTS 5863 if (control->end_added && 5864 (control->data == NULL) && 5865 (control->tail_mbuf == NULL)) { 5866 panic("Gak, control->length is corrupt?"); 5867 } 5868 #endif 5869 no_rcv_needed = control->do_not_ref_stcb; 5870 out_flags &= ~MSG_EOR; 5871 } 5872 } 5873 if (out_flags & MSG_EOR) { 5874 goto release; 5875 } 5876 if ((uio->uio_resid == 0) || 5877 ((in_eeor_mode) && 5878 (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) { 5879 goto release; 5880 } 5881 /* 5882 * If I hit here the receiver wants more and this message is 5883 * NOT done (pd-api). So two questions. Can we block? if not 5884 * we are done. Did the user NOT set MSG_WAITALL? 5885 */ 5886 if (block_allowed == 0) { 5887 goto release; 5888 } 5889 /* 5890 * We need to wait for more data a few things: - We don't 5891 * sbunlock() so we don't get someone else reading. - We 5892 * must be sure to account for the case where what is added 5893 * is NOT to our control when we wakeup. 
5894 */ 5895 5896 /* 5897 * Do we need to tell the transport a rwnd update might be 5898 * needed before we go to sleep? 5899 */ 5900 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5901 ((freed_so_far >= rwnd_req) && 5902 (control->do_not_ref_stcb == 0) && 5903 (no_rcv_needed == 0))) { 5904 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5905 } 5906 wait_some_more: 5907 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5908 goto release; 5909 } 5910 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5911 goto release; 5912 5913 if (hold_rlock == 1) { 5914 SCTP_INP_READ_UNLOCK(inp); 5915 hold_rlock = 0; 5916 } 5917 if (hold_sblock == 0) { 5918 SOCKBUF_LOCK(&so->so_rcv); 5919 hold_sblock = 1; 5920 } 5921 if ((copied_so_far) && (control->length == 0) && 5922 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 5923 goto release; 5924 } 5925 if (so->so_rcv.sb_cc <= control->held_length) { 5926 error = sbwait(&so->so_rcv); 5927 if (error) { 5928 goto release; 5929 } 5930 control->held_length = 0; 5931 } 5932 if (hold_sblock) { 5933 SOCKBUF_UNLOCK(&so->so_rcv); 5934 hold_sblock = 0; 5935 } 5936 if (control->length == 0) { 5937 /* still nothing here */ 5938 if (control->end_added == 1) { 5939 /* he aborted, or is done i.e.did a shutdown */ 5940 out_flags |= MSG_EOR; 5941 if (control->pdapi_aborted) { 5942 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5943 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5944 5945 out_flags |= MSG_TRUNC; 5946 } else { 5947 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5948 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5949 } 5950 goto done_with_control; 5951 } 5952 if (so->so_rcv.sb_cc > held_length) { 5953 control->held_length = so->so_rcv.sb_cc; 5954 held_length = 0; 5955 } 5956 goto wait_some_more; 5957 } else if (control->data == NULL) { 5958 /* 5959 * we must re-sync since data is probably being 
5960 * added 5961 */ 5962 SCTP_INP_READ_LOCK(inp); 5963 if ((control->length > 0) && (control->data == NULL)) { 5964 /* 5965 * big trouble.. we have the lock and its 5966 * corrupt? 5967 */ 5968 #ifdef INVARIANTS 5969 panic("Impossible data==NULL length !=0"); 5970 #endif 5971 out_flags |= MSG_EOR; 5972 out_flags |= MSG_TRUNC; 5973 control->length = 0; 5974 SCTP_INP_READ_UNLOCK(inp); 5975 goto done_with_control; 5976 } 5977 SCTP_INP_READ_UNLOCK(inp); 5978 /* We will fall around to get more data */ 5979 } 5980 goto get_more_data; 5981 } else { 5982 /*- 5983 * Give caller back the mbuf chain, 5984 * store in uio_resid the length 5985 */ 5986 wakeup_read_socket = 0; 5987 if ((control->end_added == 0) || 5988 (TAILQ_NEXT(control, next) == NULL)) { 5989 /* Need to get rlock */ 5990 if (hold_rlock == 0) { 5991 SCTP_INP_READ_LOCK(inp); 5992 hold_rlock = 1; 5993 } 5994 } 5995 if (control->end_added) { 5996 out_flags |= MSG_EOR; 5997 if ((control->do_not_ref_stcb == 0) && 5998 (control->stcb != NULL) && 5999 ((control->spec_flags & M_NOTIFICATION) == 0)) 6000 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 6001 } 6002 if (control->spec_flags & M_NOTIFICATION) { 6003 out_flags |= MSG_NOTIFICATION; 6004 } 6005 uio->uio_resid = control->length; 6006 *mp = control->data; 6007 m = control->data; 6008 while (m) { 6009 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6010 sctp_sblog(&so->so_rcv, 6011 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6012 } 6013 sctp_sbfree(control, stcb, &so->so_rcv, m); 6014 freed_so_far += SCTP_BUF_LEN(m); 6015 freed_so_far += MSIZE; 6016 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6017 sctp_sblog(&so->so_rcv, 6018 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 6019 } 6020 m = SCTP_BUF_NEXT(m); 6021 } 6022 control->data = control->tail_mbuf = NULL; 6023 control->length = 0; 6024 if (out_flags & MSG_EOR) { 6025 /* Done with this control */ 6026 goto done_with_control; 6027 } 6028 } 6029 release: 6030 if (hold_rlock == 1) { 6031 SCTP_INP_READ_UNLOCK(inp); 6032 hold_rlock = 0; 6033 } 6034 if (hold_sblock == 1) { 6035 SOCKBUF_UNLOCK(&so->so_rcv); 6036 hold_sblock = 0; 6037 } 6038 sbunlock(&so->so_rcv); 6039 sockbuf_lock = 0; 6040 6041 release_unlocked: 6042 if (hold_sblock) { 6043 SOCKBUF_UNLOCK(&so->so_rcv); 6044 hold_sblock = 0; 6045 } 6046 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6047 if ((freed_so_far >= rwnd_req) && 6048 (control && (control->do_not_ref_stcb == 0)) && 6049 (no_rcv_needed == 0)) 6050 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6051 } 6052 out: 6053 if (msg_flags) { 6054 *msg_flags = out_flags; 6055 } 6056 if (((out_flags & MSG_EOR) == 0) && 6057 ((in_flags & MSG_PEEK) == 0) && 6058 (sinfo) && 6059 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6060 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6061 struct sctp_extrcvinfo *s_extra; 6062 6063 s_extra = (struct sctp_extrcvinfo *)sinfo; 6064 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG; 6065 } 6066 if (hold_rlock == 1) { 6067 SCTP_INP_READ_UNLOCK(inp); 6068 } 6069 if (hold_sblock) { 6070 SOCKBUF_UNLOCK(&so->so_rcv); 6071 } 6072 if (sockbuf_lock) { 6073 sbunlock(&so->so_rcv); 6074 } 6075 if (freecnt_applied) { 6076 /* 6077 * The lock on the socket buffer protects us so the free 6078 * code will stop. But since we used the socketbuf lock and 6079 * the sender uses the tcb_lock to increment, we need to use 6080 * the atomic add to the refcnt. 
		 */
		if (stcb == NULL) {
#ifdef INVARIANTS
			panic("stcb for refcnt has gone NULL?");
			goto stage_left;
#else
			goto stage_left;
#endif
		}
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
stage_left:
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}


#ifdef SCTP_MBUF_LOGGING
/*
 * Free a single mbuf, logging the free first when mbuf logging is
 * enabled and the mbuf has external storage. Returns the next mbuf in
 * the chain (the result of m_free()).
 */
struct mbuf *
sctp_m_free(struct mbuf *m)
{
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		if (SCTP_BUF_IS_EXTENDED(m)) {
			sctp_log_mb(m, SCTP_MBUF_IFREE);
		}
	}
	return (m_free(m));
}

/* Free an entire mbuf chain, one mbuf at a time, via sctp_m_free(). */
void
sctp_m_freem(struct mbuf *mb)
{
	while (mb != NULL)
		mb = sctp_m_free(mb);
}

#endif

/*
 * Queue a peer-set-primary request for the given local address by
 * handing a work item to the address work-queue iterator.
 *
 * Returns 0 on success, EADDRNOTAVAIL if 'sa' is not a known local
 * address in 'vrf_id', or ENOMEM if the work item cannot be allocated.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* hold a reference on the ifa for the work item */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}


/*
 * Protocol-specific soreceive() entry point: wraps sctp_sorecvmsg() and
 * copies the resulting sinfo (as a control mbuf) and source address back
 * to the caller in socket-layer form.
 */
int
sctp_soreceive(struct socket *so,
    struct sockaddr **psa,
    struct uio *uio,
    struct mbuf **mp0,
    struct mbuf **controlp,
    int *flagsp)
{
	int error, fromlen;
	uint8_t sockbuf[256];
	struct sockaddr *from;
	struct sctp_extrcvinfo sinfo;
	int filling_sinfo = 1;
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pickup the assoc we are reading from */
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
	    (controlp == NULL)) {
		/* user does not want the sndrcv ctl */
		filling_sinfo = 0;
	}
	if (psa) {
		from = (struct sockaddr *)sockbuf;
		fromlen = sizeof(sockbuf);
		from->sa_len = 0;
	} else {
		from = NULL;
		fromlen = 0;
	}

	error = sctp_sorecvmsg(so, uio, mp0, from,
fromlen, flagsp, 6224 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6225 if ((controlp) && (filling_sinfo)) { 6226 /* copy back the sinfo in a CMSG format */ 6227 if (filling_sinfo) 6228 *controlp = sctp_build_ctl_nchunk(inp, 6229 (struct sctp_sndrcvinfo *)&sinfo); 6230 else 6231 *controlp = NULL; 6232 } 6233 if (psa) { 6234 /* copy back the address info */ 6235 if (from && from->sa_len) { 6236 *psa = sodupsockaddr(from, M_NOWAIT); 6237 } else { 6238 *psa = NULL; 6239 } 6240 } 6241 return (error); 6242 } 6243 6244 6245 6246 6247 6248 int 6249 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6250 int totaddr, int *error) 6251 { 6252 int added = 0; 6253 int i; 6254 struct sctp_inpcb *inp; 6255 struct sockaddr *sa; 6256 size_t incr = 0; 6257 6258 #ifdef INET 6259 struct sockaddr_in *sin; 6260 6261 #endif 6262 #ifdef INET6 6263 struct sockaddr_in6 *sin6; 6264 6265 #endif 6266 6267 sa = addr; 6268 inp = stcb->sctp_ep; 6269 *error = 0; 6270 for (i = 0; i < totaddr; i++) { 6271 switch (sa->sa_family) { 6272 #ifdef INET 6273 case AF_INET: 6274 incr = sizeof(struct sockaddr_in); 6275 sin = (struct sockaddr_in *)sa; 6276 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6277 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6278 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6279 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6280 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7); 6281 *error = EINVAL; 6282 goto out_now; 6283 } 6284 if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 6285 /* assoc gone no un-lock */ 6286 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6287 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7); 6288 *error = ENOBUFS; 6289 goto out_now; 6290 } 6291 added++; 6292 break; 6293 #endif 6294 #ifdef INET6 6295 case AF_INET6: 6296 incr = sizeof(struct sockaddr_in6); 6297 sin6 = (struct 
sockaddr_in6 *)sa;
			/* reject unspecified and multicast v6 peers */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			break;
		}
		/* advance to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}

/*
 * Scan the 'totaddr' packed addresses starting at 'addr', validating
 * each one and counting v4/v6 entries into *num_v4/*num_v6. Returns
 * the first existing association found for any of the addresses, or
 * NULL if none exists. On a malformed address, *error and *bad_addr
 * are set and NULL is returned. *totaddr is clamped to the number of
 * addresses actually examined when the scan stops early.
 */
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    int *totaddr, int *num_v4, int *num_v6, int *error,
    int limit, int *bad_addr)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	size_t incr, at, i;

	at = incr = 0;
	sa = addr;

	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < (size_t)*totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			(*num_v4) += 1;
			incr = sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			break;
#endif
#ifdef INET6
		case AF_INET6:
			{
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)sa;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					/* Must be non-mapped for connectx */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				(*num_v6) += 1;
				incr = sizeof(struct sockaddr_in6);
				if (sa->sa_len != incr) {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				break;
			}
#endif
		default:
			/* unknown family terminates the scan */
			*totaddr = i;
			/* we are done */
			break;
		}
		if (i == (size_t)*totaddr) {
			break;
		}
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* Already have or am bring up an association */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		/*
		 * NOTE(review): 'at' is never advanced in this loop, so this
		 * check effectively compares only 'incr' against 'limit' --
		 * confirm whether an 'at += incr' was intended here.
		 */
		if ((at + incr) > (size_t)limit) {
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}

/*
 * sctp_bindx(ADD) for one address.
 * assumes all arguments are valid/checked by caller.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	/* scratch space for converting a v4-mapped v6 address to plain v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already!
 */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* bound-all endpoints cannot add individual addresses */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* strip the mapping: operate on the embedded v4 address */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* endpoint not yet bound: take the normal bind path */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* port cleared before the add-management call */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* another endpoint owns this address/port */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}

/*
 * sctp_bindx(DELETE) for one address.
 * assumes all arguments are valid/checked by caller.
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	/* scratch space for converting a v4-mapped v6 address to plain v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already!
*/ 6546 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6547 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6548 *error = EINVAL; 6549 return; 6550 } 6551 addr_touse = sa; 6552 #ifdef INET6 6553 if (sa->sa_family == AF_INET6) { 6554 struct sockaddr_in6 *sin6; 6555 6556 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6557 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6558 *error = EINVAL; 6559 return; 6560 } 6561 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6562 /* can only bind v6 on PF_INET6 sockets */ 6563 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6564 *error = EINVAL; 6565 return; 6566 } 6567 sin6 = (struct sockaddr_in6 *)addr_touse; 6568 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6569 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6570 SCTP_IPV6_V6ONLY(inp)) { 6571 /* can't bind mapped-v4 on PF_INET sockets */ 6572 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6573 *error = EINVAL; 6574 return; 6575 } 6576 in6_sin6_2_sin(&sin, sin6); 6577 addr_touse = (struct sockaddr *)&sin; 6578 } 6579 } 6580 #endif 6581 #ifdef INET 6582 if (sa->sa_family == AF_INET) { 6583 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6584 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6585 *error = EINVAL; 6586 return; 6587 } 6588 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6589 SCTP_IPV6_V6ONLY(inp)) { 6590 /* can't bind v4 on PF_INET sockets */ 6591 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6592 *error = EINVAL; 6593 return; 6594 } 6595 } 6596 #endif 6597 /* 6598 * No lock required mgmt_ep_sa does its own locking. If the FIX: 6599 * below is ever changed we may need to lock before calling 6600 * association level binding. 
6601 */ 6602 if (assoc_id == 0) { 6603 /* delete the address */ 6604 *error = sctp_addr_mgmt_ep_sa(inp, addr_touse, 6605 SCTP_DEL_IP_ADDRESS, 6606 vrf_id, NULL); 6607 } else { 6608 /* 6609 * FIX: decide whether we allow assoc based bindx 6610 */ 6611 } 6612 } 6613 6614 /* 6615 * returns the valid local address count for an assoc, taking into account 6616 * all scoping rules 6617 */ 6618 int 6619 sctp_local_addr_count(struct sctp_tcb *stcb) 6620 { 6621 int loopback_scope; 6622 6623 #if defined(INET) 6624 int ipv4_local_scope, ipv4_addr_legal; 6625 6626 #endif 6627 #if defined (INET6) 6628 int local_scope, site_scope, ipv6_addr_legal; 6629 6630 #endif 6631 struct sctp_vrf *vrf; 6632 struct sctp_ifn *sctp_ifn; 6633 struct sctp_ifa *sctp_ifa; 6634 int count = 0; 6635 6636 /* Turn on all the appropriate scopes */ 6637 loopback_scope = stcb->asoc.scope.loopback_scope; 6638 #if defined(INET) 6639 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 6640 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 6641 #endif 6642 #if defined(INET6) 6643 local_scope = stcb->asoc.scope.local_scope; 6644 site_scope = stcb->asoc.scope.site_scope; 6645 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 6646 #endif 6647 SCTP_IPI_ADDR_RLOCK(); 6648 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6649 if (vrf == NULL) { 6650 /* no vrf, no addresses */ 6651 SCTP_IPI_ADDR_RUNLOCK(); 6652 return (0); 6653 } 6654 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6655 /* 6656 * bound all case: go through all ifns on the vrf 6657 */ 6658 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6659 if ((loopback_scope == 0) && 6660 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6661 continue; 6662 } 6663 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6664 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6665 continue; 6666 switch (sctp_ifa->address.sa.sa_family) { 6667 #ifdef INET 6668 case AF_INET: 6669 if (ipv4_addr_legal) { 6670 struct sockaddr_in *sin; 6671 6672 sin = (struct sockaddr_in 
					    *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}

#if defined(SCTP_LOCAL_TRACE_BUF)

/*
 * Append one entry to the in-kernel SCTP trace buffer.  A slot is reserved
 * lock-free by advancing the shared index with an atomic compare-and-swap
 * loop; when the index reaches SCTP_MAX_LOGGING_SIZE it restarts at 1 and
 * the entry is written at slot 0 (circular buffer).
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* Reserve the next slot; retry if another CPU raced us. */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index,
	    saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		/* Wrapped: write this entry at the start of the buffer. */
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}

#endif
/*
 * Receive hook for SCTP-over-UDP tunneling: strip the UDP encapsulation
 * header from the packet and hand the remaining SCTP packet to the SCTP
 * input path.  The mbuf chain is freed here on all error paths.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
{
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the IP total length by the stripped UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		/* Shrink the payload length by the stripped UDP header. */
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}

/*
 * Close the SCTP-over-UDP tunneling socket(s), if open, and clear the
 * stored pointers.
 */
void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}

/*
 * Create, hook up, and bind the UDP tunneling socket(s) for SCTP over UDP,
 * using the port configured via the sctp_udp_tunneling_port sysctl.
 * Returns 0 on success or an errno value; any sockets created before a
 * failure are closed again via sctp_over_udp_stop().
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;

#ifdef INET
	struct sockaddr_in sin;

#endif
#ifdef INET6
	struct sockaddr_in6 sin6;

#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}