1 /*- 2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * a) Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 11 * 12 * b) Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in 14 * the documentation and/or other materials provided with the distribution. 15 * 16 * c) Neither the name of Cisco Systems, Inc. nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <netinet/sctp_pcb.h> 38 #include <netinet/sctputil.h> 39 #include <netinet/sctp_var.h> 40 #include <netinet/sctp_sysctl.h> 41 #ifdef INET6 42 #include <netinet6/sctp6_var.h> 43 #endif 44 #include <netinet/sctp_header.h> 45 #include <netinet/sctp_output.h> 46 #include <netinet/sctp_uio.h> 47 #include <netinet/sctp_timer.h> 48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */ 49 #include <netinet/sctp_auth.h> 50 #include <netinet/sctp_asconf.h> 51 #include <netinet/sctp_bsd_addr.h> 52 #include <netinet/udp.h> 53 #include <netinet/udp_var.h> 54 #include <sys/proc.h> 55 56 57 #ifndef KTR_SCTP 58 #define KTR_SCTP KTR_SUBSYS 59 #endif 60 61 extern struct sctp_cc_functions sctp_cc_functions[]; 62 extern struct sctp_ss_functions sctp_ss_functions[]; 63 64 void 65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr) 66 { 67 struct sctp_cwnd_log sctp_clog; 68 69 sctp_clog.x.sb.stcb = stcb; 70 sctp_clog.x.sb.so_sbcc = sb->sb_cc; 71 if (stcb) 72 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc; 73 else 74 sctp_clog.x.sb.stcb_sbcc = 0; 75 sctp_clog.x.sb.incr = incr; 76 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 77 SCTP_LOG_EVENT_SB, 78 from, 79 sctp_clog.x.misc.log1, 80 sctp_clog.x.misc.log2, 81 sctp_clog.x.misc.log3, 82 sctp_clog.x.misc.log4); 83 } 84 85 void 86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc) 87 { 88 struct sctp_cwnd_log sctp_clog; 89 90 sctp_clog.x.close.inp = (void *)inp; 91 sctp_clog.x.close.sctp_flags = inp->sctp_flags; 92 if (stcb) { 93 sctp_clog.x.close.stcb = (void *)stcb; 94 sctp_clog.x.close.state = (uint16_t) stcb->asoc.state; 95 } else { 96 sctp_clog.x.close.stcb = 0; 97 sctp_clog.x.close.state = 0; 98 } 99 sctp_clog.x.close.loc = loc; 100 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 101 SCTP_LOG_EVENT_CLOSE, 102 0, 103 sctp_clog.x.misc.log1, 104 sctp_clog.x.misc.log2, 105 
sctp_clog.x.misc.log3, 106 sctp_clog.x.misc.log4); 107 } 108 109 void 110 rto_logging(struct sctp_nets *net, int from) 111 { 112 struct sctp_cwnd_log sctp_clog; 113 114 memset(&sctp_clog, 0, sizeof(sctp_clog)); 115 sctp_clog.x.rto.net = (void *)net; 116 sctp_clog.x.rto.rtt = net->rtt / 1000; 117 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 118 SCTP_LOG_EVENT_RTT, 119 from, 120 sctp_clog.x.misc.log1, 121 sctp_clog.x.misc.log2, 122 sctp_clog.x.misc.log3, 123 sctp_clog.x.misc.log4); 124 } 125 126 void 127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from) 128 { 129 struct sctp_cwnd_log sctp_clog; 130 131 sctp_clog.x.strlog.stcb = stcb; 132 sctp_clog.x.strlog.n_tsn = tsn; 133 sctp_clog.x.strlog.n_sseq = sseq; 134 sctp_clog.x.strlog.e_tsn = 0; 135 sctp_clog.x.strlog.e_sseq = 0; 136 sctp_clog.x.strlog.strm = stream; 137 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 138 SCTP_LOG_EVENT_STRM, 139 from, 140 sctp_clog.x.misc.log1, 141 sctp_clog.x.misc.log2, 142 sctp_clog.x.misc.log3, 143 sctp_clog.x.misc.log4); 144 } 145 146 void 147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action) 148 { 149 struct sctp_cwnd_log sctp_clog; 150 151 sctp_clog.x.nagle.stcb = (void *)stcb; 152 sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight; 153 sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size; 154 sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue; 155 sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count; 156 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 157 SCTP_LOG_EVENT_NAGLE, 158 action, 159 sctp_clog.x.misc.log1, 160 sctp_clog.x.misc.log2, 161 sctp_clog.x.misc.log3, 162 sctp_clog.x.misc.log4); 163 } 164 165 void 166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from) 167 { 168 struct sctp_cwnd_log sctp_clog; 169 170 sctp_clog.x.sack.cumack = cumack; 171 sctp_clog.x.sack.oldcumack = old_cumack; 172 sctp_clog.x.sack.tsn = 
tsn; 173 sctp_clog.x.sack.numGaps = gaps; 174 sctp_clog.x.sack.numDups = dups; 175 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 176 SCTP_LOG_EVENT_SACK, 177 from, 178 sctp_clog.x.misc.log1, 179 sctp_clog.x.misc.log2, 180 sctp_clog.x.misc.log3, 181 sctp_clog.x.misc.log4); 182 } 183 184 void 185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from) 186 { 187 struct sctp_cwnd_log sctp_clog; 188 189 memset(&sctp_clog, 0, sizeof(sctp_clog)); 190 sctp_clog.x.map.base = map; 191 sctp_clog.x.map.cum = cum; 192 sctp_clog.x.map.high = high; 193 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 194 SCTP_LOG_EVENT_MAP, 195 from, 196 sctp_clog.x.misc.log1, 197 sctp_clog.x.misc.log2, 198 sctp_clog.x.misc.log3, 199 sctp_clog.x.misc.log4); 200 } 201 202 void 203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from) 204 { 205 struct sctp_cwnd_log sctp_clog; 206 207 memset(&sctp_clog, 0, sizeof(sctp_clog)); 208 sctp_clog.x.fr.largest_tsn = biggest_tsn; 209 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn; 210 sctp_clog.x.fr.tsn = tsn; 211 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 212 SCTP_LOG_EVENT_FR, 213 from, 214 sctp_clog.x.misc.log1, 215 sctp_clog.x.misc.log2, 216 sctp_clog.x.misc.log3, 217 sctp_clog.x.misc.log4); 218 } 219 220 void 221 sctp_log_mb(struct mbuf *m, int from) 222 { 223 struct sctp_cwnd_log sctp_clog; 224 225 sctp_clog.x.mb.mp = m; 226 sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m)); 227 sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m)); 228 sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0); 229 if (SCTP_BUF_IS_EXTENDED(m)) { 230 sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m); 231 sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m)); 232 } else { 233 sctp_clog.x.mb.ext = 0; 234 sctp_clog.x.mb.refcnt = 0; 235 } 236 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 237 SCTP_LOG_EVENT_MBUF, 238 from, 239 sctp_clog.x.misc.log1, 240 sctp_clog.x.misc.log2, 241 sctp_clog.x.misc.log3, 242 sctp_clog.x.misc.log4); 243 } 
244 245 void 246 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from) 247 { 248 struct sctp_cwnd_log sctp_clog; 249 250 if (control == NULL) { 251 SCTP_PRINTF("Gak log of NULL?\n"); 252 return; 253 } 254 sctp_clog.x.strlog.stcb = control->stcb; 255 sctp_clog.x.strlog.n_tsn = control->sinfo_tsn; 256 sctp_clog.x.strlog.n_sseq = control->sinfo_ssn; 257 sctp_clog.x.strlog.strm = control->sinfo_stream; 258 if (poschk != NULL) { 259 sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn; 260 sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn; 261 } else { 262 sctp_clog.x.strlog.e_tsn = 0; 263 sctp_clog.x.strlog.e_sseq = 0; 264 } 265 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 266 SCTP_LOG_EVENT_STRM, 267 from, 268 sctp_clog.x.misc.log1, 269 sctp_clog.x.misc.log2, 270 sctp_clog.x.misc.log3, 271 sctp_clog.x.misc.log4); 272 } 273 274 void 275 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from) 276 { 277 struct sctp_cwnd_log sctp_clog; 278 279 sctp_clog.x.cwnd.net = net; 280 if (stcb->asoc.send_queue_cnt > 255) 281 sctp_clog.x.cwnd.cnt_in_send = 255; 282 else 283 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 284 if (stcb->asoc.stream_queue_cnt > 255) 285 sctp_clog.x.cwnd.cnt_in_str = 255; 286 else 287 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 288 289 if (net) { 290 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd; 291 sctp_clog.x.cwnd.inflight = net->flight_size; 292 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack; 293 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack; 294 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack; 295 } 296 if (SCTP_CWNDLOG_PRESEND == from) { 297 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd; 298 } 299 sctp_clog.x.cwnd.cwnd_augment = augment; 300 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 301 SCTP_LOG_EVENT_CWND, 302 from, 303 sctp_clog.x.misc.log1, 304 sctp_clog.x.misc.log2, 305 sctp_clog.x.misc.log3, 306 
sctp_clog.x.misc.log4); 307 } 308 309 void 310 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from) 311 { 312 struct sctp_cwnd_log sctp_clog; 313 314 memset(&sctp_clog, 0, sizeof(sctp_clog)); 315 if (inp) { 316 sctp_clog.x.lock.sock = (void *)inp->sctp_socket; 317 318 } else { 319 sctp_clog.x.lock.sock = (void *)NULL; 320 } 321 sctp_clog.x.lock.inp = (void *)inp; 322 if (stcb) { 323 sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx); 324 } else { 325 sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN; 326 } 327 if (inp) { 328 sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx); 329 sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx); 330 } else { 331 sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN; 332 sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN; 333 } 334 sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx)); 335 if (inp && (inp->sctp_socket)) { 336 sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 337 sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 338 sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx)); 339 } else { 340 sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN; 341 sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN; 342 sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN; 343 } 344 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 345 SCTP_LOG_LOCK_EVENT, 346 from, 347 sctp_clog.x.misc.log1, 348 sctp_clog.x.misc.log2, 349 sctp_clog.x.misc.log3, 350 sctp_clog.x.misc.log4); 351 } 352 353 void 354 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from) 355 { 356 struct sctp_cwnd_log sctp_clog; 357 358 memset(&sctp_clog, 0, sizeof(sctp_clog)); 359 sctp_clog.x.cwnd.net = net; 360 sctp_clog.x.cwnd.cwnd_new_value = error; 361 sctp_clog.x.cwnd.inflight = net->flight_size; 362 sctp_clog.x.cwnd.cwnd_augment = burst; 363 if (stcb->asoc.send_queue_cnt > 255) 364 
sctp_clog.x.cwnd.cnt_in_send = 255; 365 else 366 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 367 if (stcb->asoc.stream_queue_cnt > 255) 368 sctp_clog.x.cwnd.cnt_in_str = 255; 369 else 370 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 371 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 372 SCTP_LOG_EVENT_MAXBURST, 373 from, 374 sctp_clog.x.misc.log1, 375 sctp_clog.x.misc.log2, 376 sctp_clog.x.misc.log3, 377 sctp_clog.x.misc.log4); 378 } 379 380 void 381 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead) 382 { 383 struct sctp_cwnd_log sctp_clog; 384 385 sctp_clog.x.rwnd.rwnd = peers_rwnd; 386 sctp_clog.x.rwnd.send_size = snd_size; 387 sctp_clog.x.rwnd.overhead = overhead; 388 sctp_clog.x.rwnd.new_rwnd = 0; 389 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 390 SCTP_LOG_EVENT_RWND, 391 from, 392 sctp_clog.x.misc.log1, 393 sctp_clog.x.misc.log2, 394 sctp_clog.x.misc.log3, 395 sctp_clog.x.misc.log4); 396 } 397 398 void 399 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval) 400 { 401 struct sctp_cwnd_log sctp_clog; 402 403 sctp_clog.x.rwnd.rwnd = peers_rwnd; 404 sctp_clog.x.rwnd.send_size = flight_size; 405 sctp_clog.x.rwnd.overhead = overhead; 406 sctp_clog.x.rwnd.new_rwnd = a_rwndval; 407 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 408 SCTP_LOG_EVENT_RWND, 409 from, 410 sctp_clog.x.misc.log1, 411 sctp_clog.x.misc.log2, 412 sctp_clog.x.misc.log3, 413 sctp_clog.x.misc.log4); 414 } 415 416 void 417 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt) 418 { 419 struct sctp_cwnd_log sctp_clog; 420 421 sctp_clog.x.mbcnt.total_queue_size = total_oq; 422 sctp_clog.x.mbcnt.size_change = book; 423 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q; 424 sctp_clog.x.mbcnt.mbcnt_change = mbcnt; 425 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 426 SCTP_LOG_EVENT_MBCNT, 427 from, 428 
sctp_clog.x.misc.log1, 429 sctp_clog.x.misc.log2, 430 sctp_clog.x.misc.log3, 431 sctp_clog.x.misc.log4); 432 } 433 434 void 435 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d) 436 { 437 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 438 SCTP_LOG_MISC_EVENT, 439 from, 440 a, b, c, d); 441 } 442 443 void 444 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from) 445 { 446 struct sctp_cwnd_log sctp_clog; 447 448 sctp_clog.x.wake.stcb = (void *)stcb; 449 sctp_clog.x.wake.wake_cnt = wake_cnt; 450 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count; 451 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt; 452 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt; 453 454 if (stcb->asoc.stream_queue_cnt < 0xff) 455 sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt; 456 else 457 sctp_clog.x.wake.stream_qcnt = 0xff; 458 459 if (stcb->asoc.chunks_on_out_queue < 0xff) 460 sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue; 461 else 462 sctp_clog.x.wake.chunks_on_oque = 0xff; 463 464 sctp_clog.x.wake.sctpflags = 0; 465 /* set in the defered mode stuff */ 466 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) 467 sctp_clog.x.wake.sctpflags |= 1; 468 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) 469 sctp_clog.x.wake.sctpflags |= 2; 470 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) 471 sctp_clog.x.wake.sctpflags |= 4; 472 /* what about the sb */ 473 if (stcb->sctp_socket) { 474 struct socket *so = stcb->sctp_socket; 475 476 sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff)); 477 } else { 478 sctp_clog.x.wake.sbflags = 0xff; 479 } 480 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 481 SCTP_LOG_EVENT_WAKE, 482 from, 483 sctp_clog.x.misc.log1, 484 sctp_clog.x.misc.log2, 485 sctp_clog.x.misc.log3, 486 sctp_clog.x.misc.log4); 487 } 488 489 void 490 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen) 491 { 492 struct 
sctp_cwnd_log sctp_clog; 493 494 sctp_clog.x.blk.onsb = asoc->total_output_queue_size; 495 sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt); 496 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd; 497 sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt; 498 sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue; 499 sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024); 500 sctp_clog.x.blk.sndlen = sendlen; 501 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 502 SCTP_LOG_EVENT_BLOCK, 503 from, 504 sctp_clog.x.misc.log1, 505 sctp_clog.x.misc.log2, 506 sctp_clog.x.misc.log3, 507 sctp_clog.x.misc.log4); 508 } 509 510 int 511 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED) 512 { 513 /* May need to fix this if ktrdump does not work */ 514 return (0); 515 } 516 517 #ifdef SCTP_AUDITING_ENABLED 518 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2]; 519 static int sctp_audit_indx = 0; 520 521 static 522 void 523 sctp_print_audit_report(void) 524 { 525 int i; 526 int cnt; 527 528 cnt = 0; 529 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) { 530 if ((sctp_audit_data[i][0] == 0xe0) && 531 (sctp_audit_data[i][1] == 0x01)) { 532 cnt = 0; 533 SCTP_PRINTF("\n"); 534 } else if (sctp_audit_data[i][0] == 0xf0) { 535 cnt = 0; 536 SCTP_PRINTF("\n"); 537 } else if ((sctp_audit_data[i][0] == 0xc0) && 538 (sctp_audit_data[i][1] == 0x01)) { 539 SCTP_PRINTF("\n"); 540 cnt = 0; 541 } 542 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 543 (uint32_t) sctp_audit_data[i][1]); 544 cnt++; 545 if ((cnt % 14) == 0) 546 SCTP_PRINTF("\n"); 547 } 548 for (i = 0; i < sctp_audit_indx; i++) { 549 if ((sctp_audit_data[i][0] == 0xe0) && 550 (sctp_audit_data[i][1] == 0x01)) { 551 cnt = 0; 552 SCTP_PRINTF("\n"); 553 } else if (sctp_audit_data[i][0] == 0xf0) { 554 cnt = 0; 555 SCTP_PRINTF("\n"); 556 } else if ((sctp_audit_data[i][0] == 0xc0) && 557 (sctp_audit_data[i][1] == 0x01)) { 558 
SCTP_PRINTF("\n"); 559 cnt = 0; 560 } 561 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 562 (uint32_t) sctp_audit_data[i][1]); 563 cnt++; 564 if ((cnt % 14) == 0) 565 SCTP_PRINTF("\n"); 566 } 567 SCTP_PRINTF("\n"); 568 } 569 570 void 571 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 572 struct sctp_nets *net) 573 { 574 int resend_cnt, tot_out, rep, tot_book_cnt; 575 struct sctp_nets *lnet; 576 struct sctp_tmit_chunk *chk; 577 578 sctp_audit_data[sctp_audit_indx][0] = 0xAA; 579 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from; 580 sctp_audit_indx++; 581 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 582 sctp_audit_indx = 0; 583 } 584 if (inp == NULL) { 585 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 586 sctp_audit_data[sctp_audit_indx][1] = 0x01; 587 sctp_audit_indx++; 588 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 589 sctp_audit_indx = 0; 590 } 591 return; 592 } 593 if (stcb == NULL) { 594 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 595 sctp_audit_data[sctp_audit_indx][1] = 0x02; 596 sctp_audit_indx++; 597 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 598 sctp_audit_indx = 0; 599 } 600 return; 601 } 602 sctp_audit_data[sctp_audit_indx][0] = 0xA1; 603 sctp_audit_data[sctp_audit_indx][1] = 604 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 605 sctp_audit_indx++; 606 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 607 sctp_audit_indx = 0; 608 } 609 rep = 0; 610 tot_book_cnt = 0; 611 resend_cnt = tot_out = 0; 612 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 613 if (chk->sent == SCTP_DATAGRAM_RESEND) { 614 resend_cnt++; 615 } else if (chk->sent < SCTP_DATAGRAM_RESEND) { 616 tot_out += chk->book_size; 617 tot_book_cnt++; 618 } 619 } 620 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) { 621 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 622 sctp_audit_data[sctp_audit_indx][1] = 0xA1; 623 sctp_audit_indx++; 624 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 625 sctp_audit_indx = 0; 626 } 627 SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n", 
628 resend_cnt, stcb->asoc.sent_queue_retran_cnt); 629 rep = 1; 630 stcb->asoc.sent_queue_retran_cnt = resend_cnt; 631 sctp_audit_data[sctp_audit_indx][0] = 0xA2; 632 sctp_audit_data[sctp_audit_indx][1] = 633 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 634 sctp_audit_indx++; 635 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 636 sctp_audit_indx = 0; 637 } 638 } 639 if (tot_out != stcb->asoc.total_flight) { 640 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 641 sctp_audit_data[sctp_audit_indx][1] = 0xA2; 642 sctp_audit_indx++; 643 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 644 sctp_audit_indx = 0; 645 } 646 rep = 1; 647 SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out, 648 (int)stcb->asoc.total_flight); 649 stcb->asoc.total_flight = tot_out; 650 } 651 if (tot_book_cnt != stcb->asoc.total_flight_count) { 652 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 653 sctp_audit_data[sctp_audit_indx][1] = 0xA5; 654 sctp_audit_indx++; 655 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 656 sctp_audit_indx = 0; 657 } 658 rep = 1; 659 SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt); 660 661 stcb->asoc.total_flight_count = tot_book_cnt; 662 } 663 tot_out = 0; 664 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 665 tot_out += lnet->flight_size; 666 } 667 if (tot_out != stcb->asoc.total_flight) { 668 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 669 sctp_audit_data[sctp_audit_indx][1] = 0xA3; 670 sctp_audit_indx++; 671 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 672 sctp_audit_indx = 0; 673 } 674 rep = 1; 675 SCTP_PRINTF("real flight:%d net total was %d\n", 676 stcb->asoc.total_flight, tot_out); 677 /* now corrective action */ 678 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 679 680 tot_out = 0; 681 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 682 if ((chk->whoTo == lnet) && 683 (chk->sent < SCTP_DATAGRAM_RESEND)) { 684 tot_out += chk->book_size; 685 } 686 } 687 if (lnet->flight_size != tot_out) { 688 SCTP_PRINTF("net:%p flight was %d corrected to %d\n", 689 (void *)lnet, 
lnet->flight_size, 690 tot_out); 691 lnet->flight_size = tot_out; 692 } 693 } 694 } 695 if (rep) { 696 sctp_print_audit_report(); 697 } 698 } 699 700 void 701 sctp_audit_log(uint8_t ev, uint8_t fd) 702 { 703 704 sctp_audit_data[sctp_audit_indx][0] = ev; 705 sctp_audit_data[sctp_audit_indx][1] = fd; 706 sctp_audit_indx++; 707 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 708 sctp_audit_indx = 0; 709 } 710 } 711 712 #endif 713 714 /* 715 * sctp_stop_timers_for_shutdown() should be called 716 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT 717 * state to make sure that all timers are stopped. 718 */ 719 void 720 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb) 721 { 722 struct sctp_association *asoc; 723 struct sctp_nets *net; 724 725 asoc = &stcb->asoc; 726 727 (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer); 728 (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); 729 (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer); 730 (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer); 731 (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer); 732 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 733 (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer); 734 (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer); 735 } 736 } 737 738 /* 739 * a list of sizes based on typical mtu's, used only if next hop size not 740 * returned. 741 */ 742 static uint32_t sctp_mtu_sizes[] = { 743 68, 744 296, 745 508, 746 512, 747 544, 748 576, 749 1006, 750 1492, 751 1500, 752 1536, 753 2002, 754 2048, 755 4352, 756 4464, 757 8166, 758 17914, 759 32000, 760 65535 761 }; 762 763 /* 764 * Return the largest MTU smaller than val. If there is no 765 * entry, just return val. 
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	/* val at or below the smallest table entry: nothing smaller exists */
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	/* find the first entry >= val; the one before it is the answer */
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU larger than val. If there is no
 * entry, just return val.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			return (sctp_mtu_sizes[i]);
		}
	}
	/* val already exceeds the largest table entry */
	return (val);
}

/*
 * Refill the endpoint's random store by hashing the endpoint's random
 * seed together with a monotonically increasing counter, then reset
 * the store cursor to the beginning.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}

/*
 * Draw a 32-bit value from the endpoint's random store, lock-free via
 * compare-and-set on the store cursor; the store is refilled when the
 * cursor wraps.  A fixed debug sequence is returned instead when
 * initial_sequence_debug is set.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* claim the slot atomically; retry if another thread raced us */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}

/*
 * Select a non-zero verification tag.  When check is set, candidates
 * are rejected until sctp_is_vtag_good() accepts one for the given
 * port pair at the current time.
 */
uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}

/*
 * Initialize a freshly allocated association from the endpoint's
 * defaults: tags, sequence numbers, timers, scoping, congestion
 * control / stream scheduling modules, the outgoing stream array and
 * the TSN mapping arrays.  Returns 0 on success or ENOMEM (with any
 * partial allocations freed) on allocation failure.
 */
int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_allowed = inp->sctp_ecn_enable;
	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
	asoc->sctp_cmt_pf = (uint8_t) 0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			/* random 20-bit label, high bit marks it as set */
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	/* all four outgoing sequence spaces start from one random TSN */
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&inp->sctp_ep);
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimisitic here */
	asoc->peer_supports_pktdrop = 1;
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	/* This will need to be adjusted */
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	/* address scoping derived from how the endpoint was bound */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = inp->sctp_frag_point;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->locked_on_sending = NULL;
	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	/* pluggable congestion control and stream scheduling */
	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    inp->sctp_ep.pre_open_stream_count;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		asoc->strmout[i].next_sequence_send = 0x0;
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->strmout[i].chunks_on_queues = 0;
		asoc->strmout[i].stream_no = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->reasmqueue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}

/*
 * Print both TSN mapping arrays (renegable and non-renegable) as hex,
 * trimming trailing all-zero entries.
 */
void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ?
' ' : '\n'); 1147 } 1148 if (limit % 16) 1149 SCTP_PRINTF("\n"); 1150 } 1151 1152 int 1153 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed) 1154 { 1155 /* mapping array needs to grow */ 1156 uint8_t *new_array1, *new_array2; 1157 uint32_t new_size; 1158 1159 new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR); 1160 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP); 1161 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP); 1162 if ((new_array1 == NULL) || (new_array2 == NULL)) { 1163 /* can't get more, forget it */ 1164 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size); 1165 if (new_array1) { 1166 SCTP_FREE(new_array1, SCTP_M_MAP); 1167 } 1168 if (new_array2) { 1169 SCTP_FREE(new_array2, SCTP_M_MAP); 1170 } 1171 return (-1); 1172 } 1173 memset(new_array1, 0, new_size); 1174 memset(new_array2, 0, new_size); 1175 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size); 1176 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size); 1177 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1178 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP); 1179 asoc->mapping_array = new_array1; 1180 asoc->nr_mapping_array = new_array2; 1181 asoc->mapping_array_size = new_size; 1182 return (0); 1183 } 1184 1185 1186 static void 1187 sctp_iterator_work(struct sctp_iterator *it) 1188 { 1189 int iteration_count = 0; 1190 int inp_skip = 0; 1191 int first_in = 1; 1192 struct sctp_inpcb *tinp; 1193 1194 SCTP_INP_INFO_RLOCK(); 1195 SCTP_ITERATOR_LOCK(); 1196 if (it->inp) { 1197 SCTP_INP_RLOCK(it->inp); 1198 SCTP_INP_DECR_REF(it->inp); 1199 } 1200 if (it->inp == NULL) { 1201 /* iterator is complete */ 1202 done_with_iterator: 1203 SCTP_ITERATOR_UNLOCK(); 1204 SCTP_INP_INFO_RUNLOCK(); 1205 if (it->function_atend != NULL) { 1206 (*it->function_atend) (it->pointer, it->val); 1207 } 1208 SCTP_FREE(it, SCTP_M_ITER); 1209 return; 1210 } 1211 select_a_new_ep: 1212 if (first_in) { 1213 first_in = 
/*
 * Core of the PCB/association iterator.  Walks endpoints on the global
 * list (or just one, when SCTP_ITERATOR_DO_SINGLE_INP is set) and, for
 * each endpoint whose flags/features match, every association whose
 * state matches it->asoc_state, invoking the caller-supplied callbacks
 * (function_inp, function_assoc, function_inp_end, function_atend).
 * Runs with the INP-info read lock and the iterator lock held; both are
 * dropped and re-taken every SCTP_ITERATOR_MAX_AT_ONCE associations so
 * other threads can make progress.  Frees 'it' when the walk completes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* drop the reference taken when the iterator was queued */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* the first endpoint was already locked above */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* hold the TCB via refcount while all locks are down */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/* another thread may have asked us to stop while unlocked */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
*/ 1287 SCTP_PRINTF("Unknown it ctl flag %x\n", 1288 sctp_it_ctl.iterator_flags); 1289 sctp_it_ctl.iterator_flags = 0; 1290 } 1291 SCTP_INP_RLOCK(it->inp); 1292 SCTP_INP_DECR_REF(it->inp); 1293 SCTP_TCB_LOCK(it->stcb); 1294 atomic_add_int(&it->stcb->asoc.refcnt, -1); 1295 iteration_count = 0; 1296 } 1297 /* run function on this one */ 1298 (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val); 1299 1300 /* 1301 * we lie here, it really needs to have its own type but 1302 * first I must verify that this won't effect things :-0 1303 */ 1304 if (it->no_chunk_output == 0) 1305 sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1306 1307 SCTP_TCB_UNLOCK(it->stcb); 1308 next_assoc: 1309 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist); 1310 if (it->stcb == NULL) { 1311 /* Run last function */ 1312 if (it->function_inp_end != NULL) { 1313 inp_skip = (*it->function_inp_end) (it->inp, 1314 it->pointer, 1315 it->val); 1316 } 1317 } 1318 } 1319 SCTP_INP_RUNLOCK(it->inp); 1320 no_stcb: 1321 /* done with all assocs on this endpoint, move on to next endpoint */ 1322 it->done_current_ep = 0; 1323 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1324 it->inp = NULL; 1325 } else { 1326 it->inp = LIST_NEXT(it->inp, sctp_list); 1327 } 1328 if (it->inp == NULL) { 1329 goto done_with_iterator; 1330 } 1331 goto select_a_new_ep; 1332 } 1333 1334 void 1335 sctp_iterator_worker(void) 1336 { 1337 struct sctp_iterator *it, *nit; 1338 1339 /* This function is called with the WQ lock in place */ 1340 1341 sctp_it_ctl.iterator_running = 1; 1342 TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) { 1343 sctp_it_ctl.cur_it = it; 1344 /* now lets work on this one */ 1345 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr); 1346 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1347 CURVNET_SET(it->vn); 1348 sctp_iterator_work(it); 1349 sctp_it_ctl.cur_it = NULL; 1350 CURVNET_RESTORE(); 1351 SCTP_IPI_ITERATOR_WQ_LOCK(); 1352 /* sa_ignore FREED_MEMORY */ 
1353 } 1354 sctp_it_ctl.iterator_running = 0; 1355 return; 1356 } 1357 1358 1359 static void 1360 sctp_handle_addr_wq(void) 1361 { 1362 /* deal with the ADDR wq from the rtsock calls */ 1363 struct sctp_laddr *wi, *nwi; 1364 struct sctp_asconf_iterator *asc; 1365 1366 SCTP_MALLOC(asc, struct sctp_asconf_iterator *, 1367 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT); 1368 if (asc == NULL) { 1369 /* Try later, no memory */ 1370 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 1371 (struct sctp_inpcb *)NULL, 1372 (struct sctp_tcb *)NULL, 1373 (struct sctp_nets *)NULL); 1374 return; 1375 } 1376 LIST_INIT(&asc->list_of_work); 1377 asc->cnt = 0; 1378 1379 SCTP_WQ_ADDR_LOCK(); 1380 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) { 1381 LIST_REMOVE(wi, sctp_nxt_addr); 1382 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr); 1383 asc->cnt++; 1384 } 1385 SCTP_WQ_ADDR_UNLOCK(); 1386 1387 if (asc->cnt == 0) { 1388 SCTP_FREE(asc, SCTP_M_ASC_IT); 1389 } else { 1390 (void)sctp_initiate_iterator(sctp_asconf_iterator_ep, 1391 sctp_asconf_iterator_stcb, 1392 NULL, /* No ep end for boundall */ 1393 SCTP_PCB_FLAGS_BOUNDALL, 1394 SCTP_PCB_ANY_FEATURES, 1395 SCTP_ASOC_ANY_STATE, 1396 (void *)asc, 0, 1397 sctp_asconf_iterator_end, NULL, 0); 1398 } 1399 } 1400 1401 void 1402 sctp_timeout_handler(void *t) 1403 { 1404 struct sctp_inpcb *inp; 1405 struct sctp_tcb *stcb; 1406 struct sctp_nets *net; 1407 struct sctp_timer *tmr; 1408 1409 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1410 struct socket *so; 1411 1412 #endif 1413 int did_output, type; 1414 1415 tmr = (struct sctp_timer *)t; 1416 inp = (struct sctp_inpcb *)tmr->ep; 1417 stcb = (struct sctp_tcb *)tmr->tcb; 1418 net = (struct sctp_nets *)tmr->net; 1419 CURVNET_SET((struct vnet *)tmr->vnet); 1420 did_output = 1; 1421 1422 #ifdef SCTP_AUDITING_ENABLED 1423 sctp_audit_log(0xF0, (uint8_t) tmr->type); 1424 sctp_auditing(3, inp, stcb, net); 1425 #endif 1426 1427 /* sanity checks... 
/*
 * Common callout handler for every SCTP timer type.  't' is the
 * struct sctp_timer armed by sctp_timer_start(); its ep/tcb/net
 * pointers identify the endpoint, association and destination address
 * the timeout applies to.  The function first runs a series of sanity
 * checks (stale timer, invalid type, dead endpoint/association,
 * rescheduled or inactive callout), taking an inp reference and a tcb
 * refcount/lock as needed, then dispatches on the timer type and
 * finally releases whatever it acquired via the goto-cleanup labels.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	/* self-pointer is cleared by sctp_timer_stop(); mismatch = stale */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	/* 0xa00x values below are breadcrumbs marking how far we got */
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/*
	 * Cache the type now: the ASOCKILL/INPKILL paths may free the
	 * objects before the final debug trace below uses it.
	 */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/* socketless endpoints only service the teardown-ish timers */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* lock the TCB, then the temporary refcount can go */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what timeout occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timeout */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timeout */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed-SACK timeout */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			/* re-arm only while HB is still enabled on this path */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* rotate the endpoint's cookie secret keys */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* shutdown took too long: abort the association */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
/*
 * Arm the SCTP timer of the given type.  Selects which sctp_timer
 * structure to use (endpoint-, association- or destination-based,
 * depending on the type) and computes the duration in ticks, then
 * starts the callout with sctp_timeout_handler() as the handler.
 * If the chosen callout is already pending, the running instance is
 * left untouched and this call is a no-op.  Callers holding an stcb
 * must hold its TCB lock.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t to_ticks;
	struct sctp_timer *tmr;

	/* only the ADDR_WQ timer runs without an endpoint */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			/* RTO == 0 means no RTT measurement yet; use the default */
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * usually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint32_t jitter;

			if ((net->dest_state & SCTP_ADDR_NOHB) &&
			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				return;
			}
			if (net->RTO == 0) {
				to_ticks = stcb->asoc.initial_rto;
			} else {
				to_ticks = net->RTO;
			}
			/* jitter the interval by up to +/- 50% of the RTO */
			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
			jitter = rndval % to_ticks;
			if (jitter >= (to_ticks >> 1)) {
				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
			} else {
				to_ticks = to_ticks - jitter;
			}
			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    !(net->dest_state & SCTP_ADDR_PF)) {
				to_ticks += net->heart_beat_delay;
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &net->hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was complete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here, usually about 60
		 * minutes.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		/* NOTE: shares the strreset_timer structure */
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU, usually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (inp == NULL)) {
			return;
		}
		if (net == NULL) {
			return;
		}
		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		return;
		break;		/* NOTREACHED */
	}
	/*
	 * NOTE(review): to_ticks is unsigned, so "<= 0" is effectively a
	 * zero check here; consider "== 0" to silence tautology warnings.
	 */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	/* self marks the timer live; cleared again by sctp_timer_stop() */
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
if it is 2115 * we leave the current one up unchanged 2116 */ 2117 return; 2118 } 2119 /* At this point we can proceed */ 2120 if (t_type == SCTP_TIMER_TYPE_SEND) { 2121 stcb->asoc.num_send_timers_up++; 2122 } 2123 tmr->stopped_from = 0; 2124 tmr->type = t_type; 2125 tmr->ep = (void *)inp; 2126 tmr->tcb = (void *)stcb; 2127 tmr->net = (void *)net; 2128 tmr->self = (void *)tmr; 2129 tmr->vnet = (void *)curvnet; 2130 tmr->ticks = sctp_get_tick_count(); 2131 (void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr); 2132 return; 2133 } 2134 2135 void 2136 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2137 struct sctp_nets *net, uint32_t from) 2138 { 2139 struct sctp_timer *tmr; 2140 2141 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && 2142 (inp == NULL)) 2143 return; 2144 2145 tmr = NULL; 2146 if (stcb) { 2147 SCTP_TCB_LOCK_ASSERT(stcb); 2148 } 2149 switch (t_type) { 2150 case SCTP_TIMER_TYPE_ZERO_COPY: 2151 tmr = &inp->sctp_ep.zero_copy_timer; 2152 break; 2153 case SCTP_TIMER_TYPE_ZCOPY_SENDQ: 2154 tmr = &inp->sctp_ep.zero_copy_sendq_timer; 2155 break; 2156 case SCTP_TIMER_TYPE_ADDR_WQ: 2157 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2158 break; 2159 case SCTP_TIMER_TYPE_SEND: 2160 if ((stcb == NULL) || (net == NULL)) { 2161 return; 2162 } 2163 tmr = &net->rxt_timer; 2164 break; 2165 case SCTP_TIMER_TYPE_INIT: 2166 if ((stcb == NULL) || (net == NULL)) { 2167 return; 2168 } 2169 tmr = &net->rxt_timer; 2170 break; 2171 case SCTP_TIMER_TYPE_RECV: 2172 if (stcb == NULL) { 2173 return; 2174 } 2175 tmr = &stcb->asoc.dack_timer; 2176 break; 2177 case SCTP_TIMER_TYPE_SHUTDOWN: 2178 if ((stcb == NULL) || (net == NULL)) { 2179 return; 2180 } 2181 tmr = &net->rxt_timer; 2182 break; 2183 case SCTP_TIMER_TYPE_HEARTBEAT: 2184 if ((stcb == NULL) || (net == NULL)) { 2185 return; 2186 } 2187 tmr = &net->hb_timer; 2188 break; 2189 case SCTP_TIMER_TYPE_COOKIE: 2190 if ((stcb == NULL) || (net == NULL)) { 2191 return; 2192 } 2193 tmr = 
&net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		/* shares strreset_timer storage with STRRESET */
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped. So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* keep the outstanding-send-timer count in sync */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}

/*
 * Return the total number of data bytes in the given mbuf chain.
 */
uint32_t
sctp_calculate_len(struct mbuf *m)
{
	uint32_t tlen = 0;
	struct mbuf *at;

	at = m;
	while (at) {
		tlen += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}
	return (tlen);
}

void
sctp_mtu_size_reset(struct sctp_inpcb *inp,
    struct sctp_association *asoc, uint32_t mtu)
{
	/*
	 * Reset the P-MTU size on this association, this involves changing
	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
	 * allow the DF flag to be cleared.
	 */
	struct sctp_tmit_chunk *chk;
	unsigned int eff_mtu, ovh;

	asoc->smallest_mtu = mtu;
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ovh = SCTP_MIN_OVERHEAD;
	} else {
		ovh = SCTP_MIN_V4_OVERHEAD;
	}
	/* Flag every queued chunk that no longer fits as fragmentable. */
	eff_mtu = mtu - ovh;
	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
		if (chk->send_size > eff_mtu) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->send_size > eff_mtu) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
}


/*
 * given an association and starting time of the current RTT period return
 * RTO in number of msecs. net should point to the current network.
 */

uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 (alignment-unsafe callers pass a copy) */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	    (uint64_t) now.tv_usec;

	/* compute RTT in ms */
	rtt = net->rtt / 1000;
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}

/*
 * return a pointer to a contiguous piece of data from the given mbuf chain
 * starting at 'off' for 'len' bytes. If the desired piece spans more than
 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
 * is >= 'len'. returns NULL if there isn't 'len' bytes in the chain.
 */
caddr_t
sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
{
	uint32_t count;
	uint8_t *ptr;

	ptr = in_ptr;
	if ((off < 0) || (len <= 0))
		return (NULL);

	/* find the desired start location */
	while ((m != NULL) && (off > 0)) {
		if (off < SCTP_BUF_LEN(m))
			break;
		off -= SCTP_BUF_LEN(m);
		m = SCTP_BUF_NEXT(m);
	}
	if (m == NULL)
		return (NULL);

	/* is the current mbuf large enough (eg. contiguous)? */
	if ((SCTP_BUF_LEN(m) - off) >= len) {
		return (mtod(m, caddr_t)+off);
	} else {
		/* else, it spans more than one mbuf, so save a temp copy... */
		while ((m != NULL) && (len > 0)) {
			count = min(SCTP_BUF_LEN(m) - off, len);
			bcopy(mtod(m, caddr_t)+off, ptr, count);
			len -= count;
			ptr += count;
			off = 0;
			m = SCTP_BUF_NEXT(m);
		}
		if ((m == NULL) && (len > 0))
			return (NULL);
		else
			return ((caddr_t)in_ptr);
	}
}



/*
 * Typed wrapper around sctp_m_getptr() for pulling a parameter header.
 */
struct sctp_paramhdr *
sctp_get_next_param(struct mbuf *m,
    int offset,
    struct sctp_paramhdr *pull,
    int pull_limit)
{
	/* This just provides a typed signature to Peter's Pull routine */
	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
	    (uint8_t *) pull));
}


int
sctp_add_pad_tombuf(struct mbuf *m, int padlen)
{
	/*
	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
	 * padlen is > 3 this routine will fail.
	 */
	uint8_t *dp;
	int i;

	if (padlen > 3) {
		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
		return (ENOBUFS);
	}
	if (padlen <= M_TRAILINGSPACE(m)) {
		/*
		 * The easy way. We hope the majority of the time we hit
		 * here :)
		 */
		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
		SCTP_BUF_LEN(m) += padlen;
	} else {
		/* Hard way we must grow the mbuf */
		struct mbuf *tmp;

		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
		if (tmp == NULL) {
			/* Out of space GAK! we are in big trouble. */
			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
			return (ENOBUFS);
		}
		/* setup and insert in middle */
		SCTP_BUF_LEN(tmp) = padlen;
		SCTP_BUF_NEXT(tmp) = NULL;
		SCTP_BUF_NEXT(m) = tmp;
		dp = mtod(tmp, uint8_t *);
	}
	/* zero out the pad */
	for (i = 0; i < padlen; i++) {
		*dp = 0;
		dp++;
	}
	return (0);
}

/*
 * Pad the last mbuf of the chain 'm' (or 'last_mbuf' when the caller
 * already knows it) with 'padval' bytes of zeros.
 */
int
sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
{
	/* find the last mbuf in chain and pad it */
	struct mbuf *m_at;

	if (last_mbuf) {
		return (sctp_add_pad_tombuf(last_mbuf, padval));
	} else {
		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
			if (SCTP_BUF_NEXT(m_at) == NULL) {
				return (sctp_add_pad_tombuf(m_at, padval));
			}
		}
	}
	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
	return (EFAULT);
}

/*
 * Queue an SCTP_ASSOC_CHANGE notification (if enabled) and, for 1-to-1
 * style sockets, set so_error and wake sleepers on COMM_LOST/CANT_STR_ASSOC.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	size_t notif_len, abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = sizeof(struct
sctp_assoc_change); 2604 if (abort != NULL) { 2605 abort_len = htons(abort->ch.chunk_length); 2606 } else { 2607 abort_len = 0; 2608 } 2609 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 2610 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 2611 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 2612 notif_len += abort_len; 2613 } 2614 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 2615 if (m_notify == NULL) { 2616 /* Retry with smaller value. */ 2617 notif_len = sizeof(struct sctp_assoc_change); 2618 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 2619 if (m_notify == NULL) { 2620 goto set_error; 2621 } 2622 } 2623 SCTP_BUF_NEXT(m_notify) = NULL; 2624 sac = mtod(m_notify, struct sctp_assoc_change *); 2625 sac->sac_type = SCTP_ASSOC_CHANGE; 2626 sac->sac_flags = 0; 2627 sac->sac_length = sizeof(struct sctp_assoc_change); 2628 sac->sac_state = state; 2629 sac->sac_error = error; 2630 /* XXX verify these stream counts */ 2631 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 2632 sac->sac_inbound_streams = stcb->asoc.streamincnt; 2633 sac->sac_assoc_id = sctp_get_associd(stcb); 2634 if (notif_len > sizeof(struct sctp_assoc_change)) { 2635 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 2636 i = 0; 2637 if (stcb->asoc.peer_supports_prsctp) { 2638 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 2639 } 2640 if (stcb->asoc.peer_supports_auth) { 2641 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 2642 } 2643 if (stcb->asoc.peer_supports_asconf) { 2644 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 2645 } 2646 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 2647 if (stcb->asoc.peer_supports_strreset) { 2648 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 2649 } 2650 sac->sac_length += i; 2651 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 2652 memcpy(sac->sac_info, abort, abort_len); 2653 sac->sac_length += abort_len; 2654 } 2655 } 2656 SCTP_BUF_LEN(m_notify) = 
sac->sac_length; 2657 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2658 0, 0, stcb->asoc.context, 0, 0, 0, 2659 m_notify); 2660 if (control != NULL) { 2661 control->length = SCTP_BUF_LEN(m_notify); 2662 /* not that we need this */ 2663 control->tail_mbuf = m_notify; 2664 control->spec_flags = M_NOTIFICATION; 2665 sctp_add_to_readq(stcb->sctp_ep, stcb, 2666 control, 2667 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 2668 so_locked); 2669 } else { 2670 sctp_m_freem(m_notify); 2671 } 2672 } 2673 /* 2674 * For 1-to-1 style sockets, we send up and error when an ABORT 2675 * comes in. 2676 */ 2677 set_error: 2678 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2679 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2680 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 2681 SOCK_LOCK(stcb->sctp_socket); 2682 if (from_peer) { 2683 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) { 2684 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 2685 stcb->sctp_socket->so_error = ECONNREFUSED; 2686 } else { 2687 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 2688 stcb->sctp_socket->so_error = ECONNRESET; 2689 } 2690 } else { 2691 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 2692 stcb->sctp_socket->so_error = ECONNABORTED; 2693 } 2694 } 2695 /* Wake ANY sleepers */ 2696 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2697 so = SCTP_INP_SO(stcb->sctp_ep); 2698 if (!so_locked) { 2699 atomic_add_int(&stcb->asoc.refcnt, 1); 2700 SCTP_TCB_UNLOCK(stcb); 2701 SCTP_SOCKET_LOCK(so, 1); 2702 SCTP_TCB_LOCK(stcb); 2703 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2704 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 2705 SCTP_SOCKET_UNLOCK(so, 1); 2706 return; 2707 } 2708 } 2709 #endif 2710 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2711 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2712 ((state == 
SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 2713 socantrcvmore_locked(stcb->sctp_socket); 2714 } 2715 sorwakeup(stcb->sctp_socket); 2716 sowwakeup(stcb->sctp_socket); 2717 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2718 if (!so_locked) { 2719 SCTP_SOCKET_UNLOCK(so, 1); 2720 } 2721 #endif 2722 } 2723 2724 static void 2725 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 2726 struct sockaddr *sa, uint32_t error) 2727 { 2728 struct mbuf *m_notify; 2729 struct sctp_paddr_change *spc; 2730 struct sctp_queued_to_read *control; 2731 2732 if ((stcb == NULL) || 2733 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 2734 /* event not enabled */ 2735 return; 2736 } 2737 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 2738 if (m_notify == NULL) 2739 return; 2740 SCTP_BUF_LEN(m_notify) = 0; 2741 spc = mtod(m_notify, struct sctp_paddr_change *); 2742 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 2743 spc->spc_flags = 0; 2744 spc->spc_length = sizeof(struct sctp_paddr_change); 2745 switch (sa->sa_family) { 2746 #ifdef INET 2747 case AF_INET: 2748 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 2749 break; 2750 #endif 2751 #ifdef INET6 2752 case AF_INET6: 2753 { 2754 struct sockaddr_in6 *sin6; 2755 2756 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 2757 2758 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 2759 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 2760 if (sin6->sin6_scope_id == 0) { 2761 /* recover scope_id for user */ 2762 (void)sa6_recoverscope(sin6); 2763 } else { 2764 /* clear embedded scope_id for user */ 2765 in6_clearscope(&sin6->sin6_addr); 2766 } 2767 } 2768 break; 2769 } 2770 #endif 2771 default: 2772 /* TSNH */ 2773 break; 2774 } 2775 spc->spc_state = state; 2776 spc->spc_error = error; 2777 spc->spc_assoc_id = sctp_get_associd(stcb); 2778 2779 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 2780 SCTP_BUF_NEXT(m_notify) = 
NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}


/*
 * Queue an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for a
 * chunk that was (sent != 0) or was not (sent == 0) put on the wire.
 * Steals chk->data into the notification mbuf chain.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * NOTE(review): if chk->send_size < sizeof(struct sctp_data_chunk)
	 * this under-reports the length; the header is only trimmed below
	 * when send_size >= the header size — confirm callers guarantee a
	 * full DATA chunk header here.
	 */
	length += chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}


/*
 * As sctp_notify_send_failed(), but for a stream-queue pending message
 * that never became a chunk. Steals sp->data into the notification.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	length += sp->length;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		ssfe->ssfe_type =
SCTP_SEND_FAILED_EVENT; 2950 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 2951 ssfe->ssfe_length = length; 2952 ssfe->ssfe_error = error; 2953 /* not exactly what the user sent in, but should be close :) */ 2954 bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info)); 2955 ssfe->ssfe_info.snd_sid = sp->stream; 2956 if (sp->some_taken) { 2957 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 2958 } else { 2959 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 2960 } 2961 ssfe->ssfe_info.snd_ppid = sp->ppid; 2962 ssfe->ssfe_info.snd_context = sp->context; 2963 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 2964 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 2965 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event); 2966 } else { 2967 ssf = mtod(m_notify, struct sctp_send_failed *); 2968 ssf->ssf_type = SCTP_SEND_FAILED; 2969 ssf->ssf_flags = SCTP_DATA_UNSENT; 2970 ssf->ssf_length = length; 2971 ssf->ssf_error = error; 2972 /* not exactly what the user sent in, but should be close :) */ 2973 bzero(&ssf->ssf_info, sizeof(ssf->ssf_info)); 2974 ssf->ssf_info.sinfo_stream = sp->stream; 2975 ssf->ssf_info.sinfo_ssn = 0; 2976 if (sp->some_taken) { 2977 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 2978 } else { 2979 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 2980 } 2981 ssf->ssf_info.sinfo_ppid = sp->ppid; 2982 ssf->ssf_info.sinfo_context = sp->context; 2983 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2984 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2985 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed); 2986 } 2987 SCTP_BUF_NEXT(m_notify) = sp->data; 2988 2989 /* Steal off the mbuf */ 2990 sp->data = NULL; 2991 /* 2992 * For this case, we check the actual socket buffer, since the assoc 2993 * is going away we don't want to overfill the socket buffer for a 2994 * non-reader 2995 */ 2996 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 2997 sctp_m_freem(m_notify); 2998 return; 2999 } 3000 /* append to socket */ 3001 
control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3002 0, 0, stcb->asoc.context, 0, 0, 0, 3003 m_notify); 3004 if (control == NULL) { 3005 /* no memory */ 3006 sctp_m_freem(m_notify); 3007 return; 3008 } 3009 control->spec_flags = M_NOTIFICATION; 3010 sctp_add_to_readq(stcb->sctp_ep, stcb, 3011 control, 3012 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3013 } 3014 3015 3016 3017 static void 3018 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3019 { 3020 struct mbuf *m_notify; 3021 struct sctp_adaptation_event *sai; 3022 struct sctp_queued_to_read *control; 3023 3024 if ((stcb == NULL) || 3025 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3026 /* event not enabled */ 3027 return; 3028 } 3029 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3030 if (m_notify == NULL) 3031 /* no space left */ 3032 return; 3033 SCTP_BUF_LEN(m_notify) = 0; 3034 sai = mtod(m_notify, struct sctp_adaptation_event *); 3035 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3036 sai->sai_flags = 0; 3037 sai->sai_length = sizeof(struct sctp_adaptation_event); 3038 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3039 sai->sai_assoc_id = sctp_get_associd(stcb); 3040 3041 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3042 SCTP_BUF_NEXT(m_notify) = NULL; 3043 3044 /* append to socket */ 3045 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3046 0, 0, stcb->asoc.context, 0, 0, 0, 3047 m_notify); 3048 if (control == NULL) { 3049 /* no memory */ 3050 sctp_m_freem(m_notify); 3051 return; 3052 } 3053 control->length = SCTP_BUF_LEN(m_notify); 3054 control->spec_flags = M_NOTIFICATION; 3055 /* not that we need this */ 3056 control->tail_mbuf = m_notify; 3057 sctp_add_to_readq(stcb->sctp_ep, stcb, 3058 control, 3059 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3060 } 3061 3062 /* This always must be called 
 * with the read-queue LOCKED in the INP */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* 'val' packs stream id (high 16 bits) and sequence (low 16 bits) */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	/* insert right after the message being partially delivered */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}

static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup!
*/ 3171 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3172 struct socket *so; 3173 3174 so = SCTP_INP_SO(stcb->sctp_ep); 3175 atomic_add_int(&stcb->asoc.refcnt, 1); 3176 SCTP_TCB_UNLOCK(stcb); 3177 SCTP_SOCKET_LOCK(so, 1); 3178 SCTP_TCB_LOCK(stcb); 3179 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3180 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3181 SCTP_SOCKET_UNLOCK(so, 1); 3182 return; 3183 } 3184 #endif 3185 socantsendmore(stcb->sctp_socket); 3186 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3187 SCTP_SOCKET_UNLOCK(so, 1); 3188 #endif 3189 } 3190 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3191 /* event not enabled */ 3192 return; 3193 } 3194 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA); 3195 if (m_notify == NULL) 3196 /* no space left */ 3197 return; 3198 sse = mtod(m_notify, struct sctp_shutdown_event *); 3199 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3200 sse->sse_flags = 0; 3201 sse->sse_length = sizeof(struct sctp_shutdown_event); 3202 sse->sse_assoc_id = sctp_get_associd(stcb); 3203 3204 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3205 SCTP_BUF_NEXT(m_notify) = NULL; 3206 3207 /* append to socket */ 3208 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3209 0, 0, stcb->asoc.context, 0, 0, 0, 3210 m_notify); 3211 if (control == NULL) { 3212 /* no memory */ 3213 sctp_m_freem(m_notify); 3214 return; 3215 } 3216 control->spec_flags = M_NOTIFICATION; 3217 control->length = SCTP_BUF_LEN(m_notify); 3218 /* not that we need this */ 3219 control->tail_mbuf = m_notify; 3220 sctp_add_to_readq(stcb->sctp_ep, stcb, 3221 control, 3222 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3223 } 3224 3225 static void 3226 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3227 int so_locked 3228 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3229 SCTP_UNUSED 3230 #endif 3231 ) 3232 { 
	struct mbuf *m_notify;
	struct sctp_sender_dry_event *event;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	/* Fill in the SCTP_SENDER_DRY_EVENT notification. */
	event = mtod(m_notify, struct sctp_sender_dry_event *);
	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
	event->sender_dry_flags = 0;
	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
	event->sender_dry_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}


/*
 * Queue an SCTP_STREAM_CHANGE_EVENT notification reporting the new
 * number of inbound/outbound streams after a stream add/reset.
 * flag carries the SCTP_STREAM_CHANGE_* result flags; when the peer
 * originated the request (asoc.peer_req_out set) and flag is non-zero,
 * the local user is not told.
 */
void
sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_change_event *stradd;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
		/* event not enabled */
		return;
	}
	if ((stcb->asoc.peer_req_out) && flag) {
		/* Peer made the request, don't tell the local user */
		stcb->asoc.peer_req_out = 0;
		return;
	}
	stcb->asoc.peer_req_out = 0;
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	len = sizeof(struct sctp_stream_change_event);
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	/* Fill in the SCTP_STREAM_CHANGE_EVENT notification. */
	stradd = mtod(m_notify, struct sctp_stream_change_event *);
	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
	stradd->strchange_flags = flag;
	stradd->strchange_length = len;
	stradd->strchange_assoc_id = sctp_get_associd(stcb);
	stradd->strchange_instrms = numberin;
	stradd->strchange_outstrms = numberout;
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}

/*
 * Queue an SCTP_ASSOC_RESET_EVENT notification after a TSN (association)
 * reset, reporting the new local and remote TSNs.  flag carries the
 * SCTP_ASSOC_CHANGE_* / reset result flags supplied by the caller.
 */
void
sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_assoc_reset_event *strasoc;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	len = sizeof(struct sctp_assoc_reset_event);
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	/* Fill in the SCTP_ASSOC_RESET_EVENT notification. */
	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
	strasoc->assocreset_flags = flag;
	strasoc->assocreset_length = len;
	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
	strasoc->assocreset_local_tsn = sending_tsn;
	strasoc->assocreset_remote_tsn = recv_tsn;
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}



/*
 * Queue an SCTP_STREAM_RESET_EVENT notification listing the affected
 * stream ids (list, number_entries entries, network byte order on input)
 * with the given SCTP_STREAM_RESET_* result flags.
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3410 if (m_notify == NULL) 3411 /* no space left */ 3412 return; 3413 SCTP_BUF_LEN(m_notify) = 0; 3414 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3415 if (len > M_TRAILINGSPACE(m_notify)) { 3416 /* never enough room */ 3417 sctp_m_freem(m_notify); 3418 return; 3419 } 3420 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3421 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3422 strreset->strreset_flags = flag; 3423 strreset->strreset_length = len; 3424 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3425 if (number_entries) { 3426 int i; 3427 3428 for (i = 0; i < number_entries; i++) { 3429 strreset->strreset_stream_list[i] = ntohs(list[i]); 3430 } 3431 } 3432 SCTP_BUF_LEN(m_notify) = len; 3433 SCTP_BUF_NEXT(m_notify) = NULL; 3434 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3435 /* no space */ 3436 sctp_m_freem(m_notify); 3437 return; 3438 } 3439 /* append to socket */ 3440 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3441 0, 0, stcb->asoc.context, 0, 0, 0, 3442 m_notify); 3443 if (control == NULL) { 3444 /* no memory */ 3445 sctp_m_freem(m_notify); 3446 return; 3447 } 3448 control->spec_flags = M_NOTIFICATION; 3449 control->length = SCTP_BUF_LEN(m_notify); 3450 /* not that we need this */ 3451 control->tail_mbuf = m_notify; 3452 sctp_add_to_readq(stcb->sctp_ep, stcb, 3453 control, 3454 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3455 } 3456 3457 3458 static void 3459 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3460 { 3461 struct mbuf *m_notify; 3462 struct sctp_remote_error *sre; 3463 struct sctp_queued_to_read *control; 3464 size_t notif_len, chunk_len; 3465 3466 if ((stcb == NULL) || 3467 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3468 return; 3469 } 3470 if (chunk != NULL) { 3471 chunk_len = 
htons(chunk->ch.chunk_length); 3472 } else { 3473 chunk_len = 0; 3474 } 3475 notif_len = sizeof(struct sctp_remote_error) + chunk_len; 3476 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3477 if (m_notify == NULL) { 3478 /* Retry with smaller value. */ 3479 notif_len = sizeof(struct sctp_remote_error); 3480 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3481 if (m_notify == NULL) { 3482 return; 3483 } 3484 } 3485 SCTP_BUF_NEXT(m_notify) = NULL; 3486 sre = mtod(m_notify, struct sctp_remote_error *); 3487 sre->sre_type = SCTP_REMOTE_ERROR; 3488 sre->sre_flags = 0; 3489 sre->sre_length = sizeof(struct sctp_remote_error); 3490 sre->sre_error = error; 3491 sre->sre_assoc_id = sctp_get_associd(stcb); 3492 if (notif_len > sizeof(struct sctp_remote_error)) { 3493 memcpy(sre->sre_data, chunk, chunk_len); 3494 sre->sre_length += chunk_len; 3495 } 3496 SCTP_BUF_LEN(m_notify) = sre->sre_length; 3497 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3498 0, 0, stcb->asoc.context, 0, 0, 0, 3499 m_notify); 3500 if (control != NULL) { 3501 control->length = SCTP_BUF_LEN(m_notify); 3502 /* not that we need this */ 3503 control->tail_mbuf = m_notify; 3504 control->spec_flags = M_NOTIFICATION; 3505 sctp_add_to_readq(stcb->sctp_ep, stcb, 3506 control, 3507 &stcb->sctp_socket->so_rcv, 1, 3508 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3509 } else { 3510 sctp_m_freem(m_notify); 3511 } 3512 } 3513 3514 3515 void 3516 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 3517 uint32_t error, void *data, int so_locked 3518 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3519 SCTP_UNUSED 3520 #endif 3521 ) 3522 { 3523 if ((stcb == NULL) || 3524 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3525 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3526 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3527 /* If the socket is gone we are out of here */ 3528 return; 3529 } 3530 if 
(stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 3531 return; 3532 } 3533 if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) || 3534 (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) { 3535 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 3536 (notification == SCTP_NOTIFY_INTERFACE_UP) || 3537 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 3538 /* Don't report these in front states */ 3539 return; 3540 } 3541 } 3542 switch (notification) { 3543 case SCTP_NOTIFY_ASSOC_UP: 3544 if (stcb->asoc.assoc_up_sent == 0) { 3545 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 3546 stcb->asoc.assoc_up_sent = 1; 3547 } 3548 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 3549 sctp_notify_adaptation_layer(stcb); 3550 } 3551 if (stcb->asoc.peer_supports_auth == 0) { 3552 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 3553 NULL, so_locked); 3554 } 3555 break; 3556 case SCTP_NOTIFY_ASSOC_DOWN: 3557 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 3558 break; 3559 case SCTP_NOTIFY_INTERFACE_DOWN: 3560 { 3561 struct sctp_nets *net; 3562 3563 net = (struct sctp_nets *)data; 3564 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 3565 (struct sockaddr *)&net->ro._l_addr, error); 3566 break; 3567 } 3568 case SCTP_NOTIFY_INTERFACE_UP: 3569 { 3570 struct sctp_nets *net; 3571 3572 net = (struct sctp_nets *)data; 3573 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 3574 (struct sockaddr *)&net->ro._l_addr, error); 3575 break; 3576 } 3577 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 3578 { 3579 struct sctp_nets *net; 3580 3581 net = (struct sctp_nets *)data; 3582 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 3583 (struct sockaddr *)&net->ro._l_addr, error); 3584 break; 3585 } 3586 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 3587 sctp_notify_send_failed2(stcb, error, 3588 (struct sctp_stream_queue_pending *)data, so_locked); 3589 break; 3590 case SCTP_NOTIFY_SENT_DG_FAIL: 3591 
		/* Datagram was sent at least once; 1 => "sent" flavor. */
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		/* Datagram never made it to the wire; 0 => "unsent". */
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* Locally generated abort; CANT_STR_ASSOC in front states. */
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		/* Same as above but the abort came from the peer (last arg 1). */
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		/* For stream-reset cases, 'error' carries the entry count. */
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* 'data' smuggles the key id in the pointer value. */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}

/*
 * Fail every chunk still queued outbound (sent queue, send queue and
 * per-stream queues), notifying the ULP for each with the given error.
 * holds_lock indicates the caller already holds the TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/*
			 * Keep the per-stream chunk accounting honest;
			 * under INVARIANTS an underflow is a panic, else
			 * it is silently clamped at zero.
			 */
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* The notify may have consumed chk->data; re-check. */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}

/*
 * Deliver the "association aborted" notifications: fail all outbound
 * data, then raise the appropriate REM/LOC aborted event.  from_peer
 * distinguishes peer-sent ABORTs from locally generated ones; abort is
 * the received ABORT chunk (may be NULL).
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	/* 1-to-1 style (or connected) sockets remember they were aborted. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 1, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}

/*
 * Abort an association in response to inbound packet (m/iphlen/src/dst/sh
 * describe it): notify the ULP (if a TCB exists), send an ABORT to the
 * peer, and free the TCB.  op_err is an optional error-cause mbuf chain
 * handed to sctp_send_abort().
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    use_mflowid, mflowid,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance (TCB lock dropped while taking the
		 * socket lock); refcnt bump keeps the TCB alive.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}

#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the in/out TSN history rings for an association.
 * Entire body is compiled out unless NOSIY_PRINTS is defined (sic --
 * the misspelling is the historical macro name).
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* When wrapped, the oldest entries run from tsn_in_at to the end. */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif

/*
 * Abort an existing association from the local side: notify the ULP,
 * send an ABORT chunk built from op_err to the peer, then free the TCB.
 * With a NULL stcb, only finishes tearing down an already-gone endpoint.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/* Take the socket lock (lock-order dance) unless caller holds it. */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}

/*
 * Handle an out-of-the-blue packet (no matching association): walk its
 * chunks and respond per RFC 4960, 8.4 -- SHUTDOWN ACK gets a
 * SHUTDOWN COMPLETE, several chunk types get silence, and everything
 * else may draw an ABORT subject to the net.inet.sctp.blackhole sysctl.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* Remembered for the blackhole decision below. */
			contains_init_chunk = 1;
			break;
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    use_mflowid, mflowid,
			    vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/* blackhole=1 suppresses the ABORT only for packets with an INIT. */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, NULL,
		    use_mflowid, mflowid,
		    vrf_id, port);
	}
}

/*
 * check the inbound datagram to make sure there is not an abort inside it,
 * if there is return 1, else return 0.
 */
int
sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
{
	struct sctp_chunkhdr *ch;
	struct sctp_init_chunk *init_chk, chunk_buf;
	int offset;
	unsigned int chk_length;

	offset = iphlen + sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
	    (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* packet is probably corrupt */
			break;
		}
		/* we seem to be ok, is it an abort? */
		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
			/* yep, tell them */
			return (1);
		}
		if (ch->chunk_type == SCTP_INITIATION) {
			/* need to update the Vtag */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
			if (init_chk != NULL) {
				*vtagfill = ntohl(init_chk->init.initiate_tag);
			}
		}
		/* Nope, move to the next chunk */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	return (0);
}

/*
 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
 * set (i.e.
 * it's 0) so, create this function to compare link local scopes
 */
#ifdef INET6
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	struct sockaddr_in6 a, b;

	/* save copies -- sa6_recoverscope() modifies its argument */
	a = *addr1;
	b = *addr2;

	if (a.sin6_scope_id == 0)
		if (sa6_recoverscope(&a)) {
			/* can't get scope, so can't match */
			return (0);
		}
	if (b.sin6_scope_id == 0)
		if (sa6_recoverscope(&b)) {
			/* can't get scope, so can't match */
			return (0);
		}
	if (a.sin6_scope_id != b.sin6_scope_id)
		return (0);

	return (1);
}

/*
 * returns a sockaddr_in6 with embedded scope recovered and removed
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				/* Work on the caller-provided copy. */
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}

#endif

/*
 * are the two addresses the same?
 * currently a "scopeless" check returns: 1
 * if same, 0 if not
 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{

	/* must be valid */
	if (sa1 == NULL || sa2 == NULL)
		return (0);

	/* must be the same family */
	if (sa1->sa_family != sa2->sa_family)
		return (0);

	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			/* IPv6 addresses */
			struct sockaddr_in6 *sin6_1, *sin6_2;

			sin6_1 = (struct sockaddr_in6 *)sa1;
			sin6_2 = (struct sockaddr_in6 *)sa2;
			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
			    sin6_2));
		}
#endif
#ifdef INET
	case AF_INET:
		{
			/* IPv4 addresses */
			struct sockaddr_in *sin_1, *sin_2;

			sin_1 = (struct sockaddr_in *)sa1;
			sin_2 = (struct sockaddr_in *)sa2;
			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
		}
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}

/*
 * Debug helper: print an IPv4/IPv6 sockaddr (address, port and, for v6,
 * scope id) via SCTP_PRINTF.
 */
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];

#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
			break;
		}
#endif
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;
			unsigned char *p;

			sin = (struct sockaddr_in *)sa;
			p = (unsigned char *)&sin->sin_addr;
			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
			break;
		}
#endif
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}

/*
 * Migrate (on peeloff/accept) all read-queue entries belonging to stcb
 * from old_inp's socket to new_inp's socket, keeping socket-buffer
 * accounting consistent on both sides.  waitflags is passed to sblock().
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* Release old socket's accounting for each mbuf. */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ?
NULL : stcb, SCTP_LOG_SBRESULT, 0); 4312 } 4313 m = SCTP_BUF_NEXT(m); 4314 } 4315 } 4316 } 4317 SCTP_INP_READ_UNLOCK(old_inp); 4318 /* Remove the sb-lock on the old socket */ 4319 4320 sbunlock(&old_so->so_rcv); 4321 /* Now we move them over to the new socket buffer */ 4322 SCTP_INP_READ_LOCK(new_inp); 4323 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4324 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4325 m = control->data; 4326 while (m) { 4327 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4328 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4329 } 4330 sctp_sballoc(stcb, &new_so->so_rcv, m); 4331 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4332 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4333 } 4334 m = SCTP_BUF_NEXT(m); 4335 } 4336 } 4337 SCTP_INP_READ_UNLOCK(new_inp); 4338 } 4339 4340 void 4341 sctp_add_to_readq(struct sctp_inpcb *inp, 4342 struct sctp_tcb *stcb, 4343 struct sctp_queued_to_read *control, 4344 struct sockbuf *sb, 4345 int end, 4346 int inp_read_lock_held, 4347 int so_locked 4348 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 4349 SCTP_UNUSED 4350 #endif 4351 ) 4352 { 4353 /* 4354 * Here we must place the control on the end of the socket read 4355 * queue AND increment sb_cc so that select will work properly on 4356 * read. 4357 */ 4358 struct mbuf *m, *prev = NULL; 4359 4360 if (inp == NULL) { 4361 /* Gak, TSNH!! 
*/ 4362 #ifdef INVARIANTS 4363 panic("Gak, inp NULL on add_to_readq"); 4364 #endif 4365 return; 4366 } 4367 if (inp_read_lock_held == 0) 4368 SCTP_INP_READ_LOCK(inp); 4369 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4370 sctp_free_remote_addr(control->whoFrom); 4371 if (control->data) { 4372 sctp_m_freem(control->data); 4373 control->data = NULL; 4374 } 4375 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control); 4376 if (inp_read_lock_held == 0) 4377 SCTP_INP_READ_UNLOCK(inp); 4378 return; 4379 } 4380 if (!(control->spec_flags & M_NOTIFICATION)) { 4381 atomic_add_int(&inp->total_recvs, 1); 4382 if (!control->do_not_ref_stcb) { 4383 atomic_add_int(&stcb->total_recvs, 1); 4384 } 4385 } 4386 m = control->data; 4387 control->held_length = 0; 4388 control->length = 0; 4389 while (m) { 4390 if (SCTP_BUF_LEN(m) == 0) { 4391 /* Skip mbufs with NO length */ 4392 if (prev == NULL) { 4393 /* First one */ 4394 control->data = sctp_m_free(m); 4395 m = control->data; 4396 } else { 4397 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4398 m = SCTP_BUF_NEXT(prev); 4399 } 4400 if (m == NULL) { 4401 control->tail_mbuf = prev; 4402 } 4403 continue; 4404 } 4405 prev = m; 4406 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4407 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4408 } 4409 sctp_sballoc(stcb, sb, m); 4410 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4411 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4412 } 4413 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4414 m = SCTP_BUF_NEXT(m); 4415 } 4416 if (prev != NULL) { 4417 control->tail_mbuf = prev; 4418 } else { 4419 /* Everything got collapsed out?? 
*/ 4420 sctp_free_remote_addr(control->whoFrom); 4421 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control); 4422 if (inp_read_lock_held == 0) 4423 SCTP_INP_READ_UNLOCK(inp); 4424 return; 4425 } 4426 if (end) { 4427 control->end_added = 1; 4428 } 4429 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4430 if (inp_read_lock_held == 0) 4431 SCTP_INP_READ_UNLOCK(inp); 4432 if (inp && inp->sctp_socket) { 4433 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) { 4434 SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket); 4435 } else { 4436 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4437 struct socket *so; 4438 4439 so = SCTP_INP_SO(inp); 4440 if (!so_locked) { 4441 if (stcb) { 4442 atomic_add_int(&stcb->asoc.refcnt, 1); 4443 SCTP_TCB_UNLOCK(stcb); 4444 } 4445 SCTP_SOCKET_LOCK(so, 1); 4446 if (stcb) { 4447 SCTP_TCB_LOCK(stcb); 4448 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4449 } 4450 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4451 SCTP_SOCKET_UNLOCK(so, 1); 4452 return; 4453 } 4454 } 4455 #endif 4456 sctp_sorwakeup(inp, inp->sctp_socket); 4457 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4458 if (!so_locked) { 4459 SCTP_SOCKET_UNLOCK(so, 1); 4460 } 4461 #endif 4462 } 4463 } 4464 } 4465 4466 4467 int 4468 sctp_append_to_readq(struct sctp_inpcb *inp, 4469 struct sctp_tcb *stcb, 4470 struct sctp_queued_to_read *control, 4471 struct mbuf *m, 4472 int end, 4473 int ctls_cumack, 4474 struct sockbuf *sb) 4475 { 4476 /* 4477 * A partial delivery API event is underway. OR we are appending on 4478 * the reassembly queue. 4479 * 4480 * If PDAPI this means we need to add m to the end of the data. 4481 * Increase the length in the control AND increment the sb_cc. 4482 * Otherwise sb is NULL and all we need to do is put it at the end 4483 * of the mbuf chain. 
4484 */ 4485 int len = 0; 4486 struct mbuf *mm, *tail = NULL, *prev = NULL; 4487 4488 if (inp) { 4489 SCTP_INP_READ_LOCK(inp); 4490 } 4491 if (control == NULL) { 4492 get_out: 4493 if (inp) { 4494 SCTP_INP_READ_UNLOCK(inp); 4495 } 4496 return (-1); 4497 } 4498 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) { 4499 SCTP_INP_READ_UNLOCK(inp); 4500 return (0); 4501 } 4502 if (control->end_added) { 4503 /* huh this one is complete? */ 4504 goto get_out; 4505 } 4506 mm = m; 4507 if (mm == NULL) { 4508 goto get_out; 4509 } 4510 while (mm) { 4511 if (SCTP_BUF_LEN(mm) == 0) { 4512 /* Skip mbufs with NO lenght */ 4513 if (prev == NULL) { 4514 /* First one */ 4515 m = sctp_m_free(mm); 4516 mm = m; 4517 } else { 4518 SCTP_BUF_NEXT(prev) = sctp_m_free(mm); 4519 mm = SCTP_BUF_NEXT(prev); 4520 } 4521 continue; 4522 } 4523 prev = mm; 4524 len += SCTP_BUF_LEN(mm); 4525 if (sb) { 4526 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4527 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm)); 4528 } 4529 sctp_sballoc(stcb, sb, mm); 4530 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4531 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4532 } 4533 } 4534 mm = SCTP_BUF_NEXT(mm); 4535 } 4536 if (prev) { 4537 tail = prev; 4538 } else { 4539 /* Really there should always be a prev */ 4540 if (m == NULL) { 4541 /* Huh nothing left? 
*/ 4542 #ifdef INVARIANTS 4543 panic("Nothing left to add?"); 4544 #else 4545 goto get_out; 4546 #endif 4547 } 4548 tail = m; 4549 } 4550 if (control->tail_mbuf) { 4551 /* append */ 4552 SCTP_BUF_NEXT(control->tail_mbuf) = m; 4553 control->tail_mbuf = tail; 4554 } else { 4555 /* nothing there */ 4556 #ifdef INVARIANTS 4557 if (control->data != NULL) { 4558 panic("This should NOT happen"); 4559 } 4560 #endif 4561 control->data = m; 4562 control->tail_mbuf = tail; 4563 } 4564 atomic_add_int(&control->length, len); 4565 if (end) { 4566 /* message is complete */ 4567 if (stcb && (control == stcb->asoc.control_pdapi)) { 4568 stcb->asoc.control_pdapi = NULL; 4569 } 4570 control->held_length = 0; 4571 control->end_added = 1; 4572 } 4573 if (stcb == NULL) { 4574 control->do_not_ref_stcb = 1; 4575 } 4576 /* 4577 * When we are appending in partial delivery, the cum-ack is used 4578 * for the actual pd-api highest tsn on this mbuf. The true cum-ack 4579 * is populated in the outbound sinfo structure from the true cumack 4580 * if the association exists... 
4581 */ 4582 control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack; 4583 if (inp) { 4584 SCTP_INP_READ_UNLOCK(inp); 4585 } 4586 if (inp && inp->sctp_socket) { 4587 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) { 4588 SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket); 4589 } else { 4590 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4591 struct socket *so; 4592 4593 so = SCTP_INP_SO(inp); 4594 if (stcb) { 4595 atomic_add_int(&stcb->asoc.refcnt, 1); 4596 SCTP_TCB_UNLOCK(stcb); 4597 } 4598 SCTP_SOCKET_LOCK(so, 1); 4599 if (stcb) { 4600 SCTP_TCB_LOCK(stcb); 4601 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4602 } 4603 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4604 SCTP_SOCKET_UNLOCK(so, 1); 4605 return (0); 4606 } 4607 #endif 4608 sctp_sorwakeup(inp, inp->sctp_socket); 4609 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4610 SCTP_SOCKET_UNLOCK(so, 1); 4611 #endif 4612 } 4613 } 4614 return (0); 4615 } 4616 4617 4618 4619 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4620 *************ALTERNATE ROUTING CODE 4621 */ 4622 4623 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4624 *************ALTERNATE ROUTING CODE 4625 */ 4626 4627 struct mbuf * 4628 sctp_generate_invmanparam(int err) 4629 { 4630 /* Return a MBUF with a invalid mandatory parameter */ 4631 struct mbuf *m; 4632 4633 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA); 4634 if (m) { 4635 struct sctp_paramhdr *ph; 4636 4637 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr); 4638 ph = mtod(m, struct sctp_paramhdr *); 4639 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 4640 ph->param_type = htons(err); 4641 } 4642 return (m); 4643 } 4644 4645 #ifdef SCTP_MBCNT_LOGGING 4646 void 4647 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4648 struct sctp_tmit_chunk *tp1, int chk_cnt) 4649 { 4650 if (tp1->data == NULL) { 4651 return; 4652 } 4653 asoc->chunks_on_out_queue -= chk_cnt; 4654 if 
(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 4655 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4656 asoc->total_output_queue_size, 4657 tp1->book_size, 4658 0, 4659 tp1->mbcnt); 4660 } 4661 if (asoc->total_output_queue_size >= tp1->book_size) { 4662 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4663 } else { 4664 asoc->total_output_queue_size = 0; 4665 } 4666 4667 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4668 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4669 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 4670 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 4671 } else { 4672 stcb->sctp_socket->so_snd.sb_cc = 0; 4673 4674 } 4675 } 4676 } 4677 4678 #endif 4679 4680 int 4681 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 4682 uint8_t sent, int so_locked 4683 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 4684 SCTP_UNUSED 4685 #endif 4686 ) 4687 { 4688 struct sctp_stream_out *strq; 4689 struct sctp_tmit_chunk *chk = NULL, *tp2; 4690 struct sctp_stream_queue_pending *sp; 4691 uint16_t stream = 0, seq = 0; 4692 uint8_t foundeom = 0; 4693 int ret_sz = 0; 4694 int notdone; 4695 int do_wakeup_routine = 0; 4696 4697 stream = tp1->rec.data.stream_number; 4698 seq = tp1->rec.data.stream_seq; 4699 do { 4700 ret_sz += tp1->book_size; 4701 if (tp1->data != NULL) { 4702 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4703 sctp_flight_size_decrease(tp1); 4704 sctp_total_flight_decrease(stcb, tp1); 4705 } 4706 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 4707 stcb->asoc.peers_rwnd += tp1->send_size; 4708 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 4709 if (sent) { 4710 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 4711 } else { 4712 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 4713 } 4714 if (tp1->data) { 4715 sctp_m_freem(tp1->data); 4716 tp1->data = NULL; 4717 } 4718 
do_wakeup_routine = 1; 4719 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4720 stcb->asoc.sent_queue_cnt_removeable--; 4721 } 4722 } 4723 tp1->sent = SCTP_FORWARD_TSN_SKIP; 4724 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 4725 SCTP_DATA_NOT_FRAG) { 4726 /* not frag'ed we ae done */ 4727 notdone = 0; 4728 foundeom = 1; 4729 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 4730 /* end of frag, we are done */ 4731 notdone = 0; 4732 foundeom = 1; 4733 } else { 4734 /* 4735 * Its a begin or middle piece, we must mark all of 4736 * it 4737 */ 4738 notdone = 1; 4739 tp1 = TAILQ_NEXT(tp1, sctp_next); 4740 } 4741 } while (tp1 && notdone); 4742 if (foundeom == 0) { 4743 /* 4744 * The multi-part message was scattered across the send and 4745 * sent queue. 4746 */ 4747 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 4748 if ((tp1->rec.data.stream_number != stream) || 4749 (tp1->rec.data.stream_seq != seq)) { 4750 break; 4751 } 4752 /* 4753 * save to chk in case we have some on stream out 4754 * queue. If so and we have an un-transmitted one we 4755 * don't have to fudge the TSN. 4756 */ 4757 chk = tp1; 4758 ret_sz += tp1->book_size; 4759 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 4760 if (sent) { 4761 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 4762 } else { 4763 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 4764 } 4765 if (tp1->data) { 4766 sctp_m_freem(tp1->data); 4767 tp1->data = NULL; 4768 } 4769 /* No flight involved here book the size to 0 */ 4770 tp1->book_size = 0; 4771 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 4772 foundeom = 1; 4773 } 4774 do_wakeup_routine = 1; 4775 tp1->sent = SCTP_FORWARD_TSN_SKIP; 4776 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 4777 /* 4778 * on to the sent queue so we can wait for it to be 4779 * passed by. 
4780 */ 4781 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 4782 sctp_next); 4783 stcb->asoc.send_queue_cnt--; 4784 stcb->asoc.sent_queue_cnt++; 4785 } 4786 } 4787 if (foundeom == 0) { 4788 /* 4789 * Still no eom found. That means there is stuff left on the 4790 * stream out queue.. yuck. 4791 */ 4792 SCTP_TCB_SEND_LOCK(stcb); 4793 strq = &stcb->asoc.strmout[stream]; 4794 sp = TAILQ_FIRST(&strq->outqueue); 4795 if (sp != NULL) { 4796 sp->discard_rest = 1; 4797 /* 4798 * We may need to put a chunk on the queue that 4799 * holds the TSN that would have been sent with the 4800 * LAST bit. 4801 */ 4802 if (chk == NULL) { 4803 /* Yep, we have to */ 4804 sctp_alloc_a_chunk(stcb, chk); 4805 if (chk == NULL) { 4806 /* 4807 * we are hosed. All we can do is 4808 * nothing.. which will cause an 4809 * abort if the peer is paying 4810 * attention. 4811 */ 4812 goto oh_well; 4813 } 4814 memset(chk, 0, sizeof(*chk)); 4815 chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG; 4816 chk->sent = SCTP_FORWARD_TSN_SKIP; 4817 chk->asoc = &stcb->asoc; 4818 chk->rec.data.stream_seq = strq->next_sequence_send; 4819 chk->rec.data.stream_number = sp->stream; 4820 chk->rec.data.payloadtype = sp->ppid; 4821 chk->rec.data.context = sp->context; 4822 chk->flags = sp->act_flags; 4823 if (sp->net) 4824 chk->whoTo = sp->net; 4825 else 4826 chk->whoTo = stcb->asoc.primary_destination; 4827 atomic_add_int(&chk->whoTo->ref_count, 1); 4828 chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 4829 stcb->asoc.pr_sctp_cnt++; 4830 chk->pr_sctp_on = 1; 4831 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 4832 stcb->asoc.sent_queue_cnt++; 4833 stcb->asoc.pr_sctp_cnt++; 4834 } else { 4835 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 4836 } 4837 strq->next_sequence_send++; 4838 oh_well: 4839 if (sp->data) { 4840 /* 4841 * Pull any data to free up the SB and allow 4842 * sender to "add more" while we will throw 4843 * away :-) 4844 */ 4845 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 
4846 ret_sz += sp->length; 4847 do_wakeup_routine = 1; 4848 sp->some_taken = 1; 4849 sctp_m_freem(sp->data); 4850 sp->data = NULL; 4851 sp->tail_mbuf = NULL; 4852 sp->length = 0; 4853 } 4854 } 4855 SCTP_TCB_SEND_UNLOCK(stcb); 4856 } 4857 if (do_wakeup_routine) { 4858 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4859 struct socket *so; 4860 4861 so = SCTP_INP_SO(stcb->sctp_ep); 4862 if (!so_locked) { 4863 atomic_add_int(&stcb->asoc.refcnt, 1); 4864 SCTP_TCB_UNLOCK(stcb); 4865 SCTP_SOCKET_LOCK(so, 1); 4866 SCTP_TCB_LOCK(stcb); 4867 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4868 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4869 /* assoc was freed while we were unlocked */ 4870 SCTP_SOCKET_UNLOCK(so, 1); 4871 return (ret_sz); 4872 } 4873 } 4874 #endif 4875 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 4876 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4877 if (!so_locked) { 4878 SCTP_SOCKET_UNLOCK(so, 1); 4879 } 4880 #endif 4881 } 4882 return (ret_sz); 4883 } 4884 4885 /* 4886 * checks to see if the given address, sa, is one that is currently known by 4887 * the kernel note: can't distinguish the same address on multiple interfaces 4888 * and doesn't handle multiple addresses with different zone/scope id's note: 4889 * ifa_ifwithaddr() compares the entire sockaddr struct 4890 */ 4891 struct sctp_ifa * 4892 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 4893 int holds_lock) 4894 { 4895 struct sctp_laddr *laddr; 4896 4897 if (holds_lock == 0) { 4898 SCTP_INP_RLOCK(inp); 4899 } 4900 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4901 if (laddr->ifa == NULL) 4902 continue; 4903 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 4904 continue; 4905 #ifdef INET 4906 if (addr->sa_family == AF_INET) { 4907 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 4908 laddr->ifa->address.sin.sin_addr.s_addr) { 4909 /* found him. 
*/ 4910 if (holds_lock == 0) { 4911 SCTP_INP_RUNLOCK(inp); 4912 } 4913 return (laddr->ifa); 4914 break; 4915 } 4916 } 4917 #endif 4918 #ifdef INET6 4919 if (addr->sa_family == AF_INET6) { 4920 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 4921 &laddr->ifa->address.sin6)) { 4922 /* found him. */ 4923 if (holds_lock == 0) { 4924 SCTP_INP_RUNLOCK(inp); 4925 } 4926 return (laddr->ifa); 4927 break; 4928 } 4929 } 4930 #endif 4931 } 4932 if (holds_lock == 0) { 4933 SCTP_INP_RUNLOCK(inp); 4934 } 4935 return (NULL); 4936 } 4937 4938 uint32_t 4939 sctp_get_ifa_hash_val(struct sockaddr *addr) 4940 { 4941 switch (addr->sa_family) { 4942 #ifdef INET 4943 case AF_INET: 4944 { 4945 struct sockaddr_in *sin; 4946 4947 sin = (struct sockaddr_in *)addr; 4948 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 4949 } 4950 #endif 4951 #ifdef INET6 4952 case AF_INET6: 4953 { 4954 struct sockaddr_in6 *sin6; 4955 uint32_t hash_of_addr; 4956 4957 sin6 = (struct sockaddr_in6 *)addr; 4958 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 4959 sin6->sin6_addr.s6_addr32[1] + 4960 sin6->sin6_addr.s6_addr32[2] + 4961 sin6->sin6_addr.s6_addr32[3]); 4962 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 4963 return (hash_of_addr); 4964 } 4965 #endif 4966 default: 4967 break; 4968 } 4969 return (0); 4970 } 4971 4972 struct sctp_ifa * 4973 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 4974 { 4975 struct sctp_ifa *sctp_ifap; 4976 struct sctp_vrf *vrf; 4977 struct sctp_ifalist *hash_head; 4978 uint32_t hash_of_addr; 4979 4980 if (holds_lock == 0) 4981 SCTP_IPI_ADDR_RLOCK(); 4982 4983 vrf = sctp_find_vrf(vrf_id); 4984 if (vrf == NULL) { 4985 stage_right: 4986 if (holds_lock == 0) 4987 SCTP_IPI_ADDR_RUNLOCK(); 4988 return (NULL); 4989 } 4990 hash_of_addr = sctp_get_ifa_hash_val(addr); 4991 4992 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 4993 if (hash_head == NULL) { 4994 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 
4995 hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark, 4996 (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark)); 4997 sctp_print_address(addr); 4998 SCTP_PRINTF("No such bucket for address\n"); 4999 if (holds_lock == 0) 5000 SCTP_IPI_ADDR_RUNLOCK(); 5001 5002 return (NULL); 5003 } 5004 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5005 if (sctp_ifap == NULL) { 5006 #ifdef INVARIANTS 5007 panic("Huh LIST_FOREACH corrupt"); 5008 goto stage_right; 5009 #else 5010 SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n"); 5011 goto stage_right; 5012 #endif 5013 } 5014 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5015 continue; 5016 #ifdef INET 5017 if (addr->sa_family == AF_INET) { 5018 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5019 sctp_ifap->address.sin.sin_addr.s_addr) { 5020 /* found him. */ 5021 if (holds_lock == 0) 5022 SCTP_IPI_ADDR_RUNLOCK(); 5023 return (sctp_ifap); 5024 break; 5025 } 5026 } 5027 #endif 5028 #ifdef INET6 5029 if (addr->sa_family == AF_INET6) { 5030 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5031 &sctp_ifap->address.sin6)) { 5032 /* found him. */ 5033 if (holds_lock == 0) 5034 SCTP_IPI_ADDR_RUNLOCK(); 5035 return (sctp_ifap); 5036 break; 5037 } 5038 } 5039 #endif 5040 } 5041 if (holds_lock == 0) 5042 SCTP_IPI_ADDR_RUNLOCK(); 5043 return (NULL); 5044 } 5045 5046 static void 5047 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock, 5048 uint32_t rwnd_req) 5049 { 5050 /* User pulled some data, do we need a rwnd update? 
*/ 5051 int r_unlocked = 0; 5052 uint32_t dif, rwnd; 5053 struct socket *so = NULL; 5054 5055 if (stcb == NULL) 5056 return; 5057 5058 atomic_add_int(&stcb->asoc.refcnt, 1); 5059 5060 if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | 5061 SCTP_STATE_SHUTDOWN_RECEIVED | 5062 SCTP_STATE_SHUTDOWN_ACK_SENT)) { 5063 /* Pre-check If we are freeing no update */ 5064 goto no_lock; 5065 } 5066 SCTP_INP_INCR_REF(stcb->sctp_ep); 5067 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5068 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5069 goto out; 5070 } 5071 so = stcb->sctp_socket; 5072 if (so == NULL) { 5073 goto out; 5074 } 5075 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5076 /* Have you have freed enough to look */ 5077 *freed_so_far = 0; 5078 /* Yep, its worth a look and the lock overhead */ 5079 5080 /* Figure out what the rwnd would be */ 5081 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5082 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5083 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5084 } else { 5085 dif = 0; 5086 } 5087 if (dif >= rwnd_req) { 5088 if (hold_rlock) { 5089 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5090 r_unlocked = 1; 5091 } 5092 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5093 /* 5094 * One last check before we allow the guy possibly 5095 * to get in. There is a race, where the guy has not 5096 * reached the gate. 
In that case 5097 */ 5098 goto out; 5099 } 5100 SCTP_TCB_LOCK(stcb); 5101 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5102 /* No reports here */ 5103 SCTP_TCB_UNLOCK(stcb); 5104 goto out; 5105 } 5106 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5107 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5108 5109 sctp_chunk_output(stcb->sctp_ep, stcb, 5110 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5111 /* make sure no timer is running */ 5112 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5113 SCTP_TCB_UNLOCK(stcb); 5114 } else { 5115 /* Update how much we have pending */ 5116 stcb->freed_by_sorcv_sincelast = dif; 5117 } 5118 out: 5119 if (so && r_unlocked && hold_rlock) { 5120 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5121 } 5122 SCTP_INP_DECR_REF(stcb->sctp_ep); 5123 no_lock: 5124 atomic_add_int(&stcb->asoc.refcnt, -1); 5125 return; 5126 } 5127 5128 int 5129 sctp_sorecvmsg(struct socket *so, 5130 struct uio *uio, 5131 struct mbuf **mp, 5132 struct sockaddr *from, 5133 int fromlen, 5134 int *msg_flags, 5135 struct sctp_sndrcvinfo *sinfo, 5136 int filling_sinfo) 5137 { 5138 /* 5139 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5140 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5141 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
5142 * On the way out we may send out any combination of: 5143 * MSG_NOTIFICATION MSG_EOR 5144 * 5145 */ 5146 struct sctp_inpcb *inp = NULL; 5147 int my_len = 0; 5148 int cp_len = 0, error = 0; 5149 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5150 struct mbuf *m = NULL; 5151 struct sctp_tcb *stcb = NULL; 5152 int wakeup_read_socket = 0; 5153 int freecnt_applied = 0; 5154 int out_flags = 0, in_flags = 0; 5155 int block_allowed = 1; 5156 uint32_t freed_so_far = 0; 5157 uint32_t copied_so_far = 0; 5158 int in_eeor_mode = 0; 5159 int no_rcv_needed = 0; 5160 uint32_t rwnd_req = 0; 5161 int hold_sblock = 0; 5162 int hold_rlock = 0; 5163 int slen = 0; 5164 uint32_t held_length = 0; 5165 int sockbuf_lock = 0; 5166 5167 if (uio == NULL) { 5168 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5169 return (EINVAL); 5170 } 5171 if (msg_flags) { 5172 in_flags = *msg_flags; 5173 if (in_flags & MSG_PEEK) 5174 SCTP_STAT_INCR(sctps_read_peeks); 5175 } else { 5176 in_flags = 0; 5177 } 5178 slen = uio->uio_resid; 5179 5180 /* Pull in and set up our int flags */ 5181 if (in_flags & MSG_OOB) { 5182 /* Out of band's NOT supported */ 5183 return (EOPNOTSUPP); 5184 } 5185 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5186 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5187 return (EINVAL); 5188 } 5189 if ((in_flags & (MSG_DONTWAIT 5190 | MSG_NBIO 5191 )) || 5192 SCTP_SO_IS_NBIO(so)) { 5193 block_allowed = 0; 5194 } 5195 /* setup the endpoint */ 5196 inp = (struct sctp_inpcb *)so->so_pcb; 5197 if (inp == NULL) { 5198 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5199 return (EFAULT); 5200 } 5201 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5202 /* Must be at least a MTU's worth */ 5203 if (rwnd_req < SCTP_MIN_RWND) 5204 rwnd_req = SCTP_MIN_RWND; 5205 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5206 if (SCTP_BASE_SYSCTL(sctp_logging_level) & 
SCTP_RECV_RWND_LOGGING_ENABLE) { 5207 sctp_misc_ints(SCTP_SORECV_ENTER, 5208 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid); 5209 } 5210 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5211 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5212 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid); 5213 } 5214 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0)); 5215 if (error) { 5216 goto release_unlocked; 5217 } 5218 sockbuf_lock = 1; 5219 restart: 5220 5221 5222 restart_nosblocks: 5223 if (hold_sblock == 0) { 5224 SOCKBUF_LOCK(&so->so_rcv); 5225 hold_sblock = 1; 5226 } 5227 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5228 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5229 goto out; 5230 } 5231 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5232 if (so->so_error) { 5233 error = so->so_error; 5234 if ((in_flags & MSG_PEEK) == 0) 5235 so->so_error = 0; 5236 goto out; 5237 } else { 5238 if (so->so_rcv.sb_cc == 0) { 5239 /* indicate EOF */ 5240 error = 0; 5241 goto out; 5242 } 5243 } 5244 } 5245 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) { 5246 /* we need to wait for data */ 5247 if ((so->so_rcv.sb_cc == 0) && 5248 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5249 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5250 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5251 /* 5252 * For active open side clear flags for 5253 * re-use passive open is blocked by 5254 * connect. 
5255 */ 5256 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5257 /* 5258 * You were aborted, passive side 5259 * always hits here 5260 */ 5261 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5262 error = ECONNRESET; 5263 } 5264 so->so_state &= ~(SS_ISCONNECTING | 5265 SS_ISDISCONNECTING | 5266 SS_ISCONFIRMING | 5267 SS_ISCONNECTED); 5268 if (error == 0) { 5269 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5270 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5271 error = ENOTCONN; 5272 } 5273 } 5274 goto out; 5275 } 5276 } 5277 error = sbwait(&so->so_rcv); 5278 if (error) { 5279 goto out; 5280 } 5281 held_length = 0; 5282 goto restart_nosblocks; 5283 } else if (so->so_rcv.sb_cc == 0) { 5284 if (so->so_error) { 5285 error = so->so_error; 5286 if ((in_flags & MSG_PEEK) == 0) 5287 so->so_error = 0; 5288 } else { 5289 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5290 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 5291 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5292 /* 5293 * For active open side clear flags 5294 * for re-use passive open is 5295 * blocked by connect. 
5296 */ 5297 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5298 /* 5299 * You were aborted, passive 5300 * side always hits here 5301 */ 5302 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5303 error = ECONNRESET; 5304 } 5305 so->so_state &= ~(SS_ISCONNECTING | 5306 SS_ISDISCONNECTING | 5307 SS_ISCONFIRMING | 5308 SS_ISCONNECTED); 5309 if (error == 0) { 5310 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5311 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5312 error = ENOTCONN; 5313 } 5314 } 5315 goto out; 5316 } 5317 } 5318 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5319 error = EWOULDBLOCK; 5320 } 5321 goto out; 5322 } 5323 if (hold_sblock == 1) { 5324 SOCKBUF_UNLOCK(&so->so_rcv); 5325 hold_sblock = 0; 5326 } 5327 /* we possibly have data we can read */ 5328 /* sa_ignore FREED_MEMORY */ 5329 control = TAILQ_FIRST(&inp->read_queue); 5330 if (control == NULL) { 5331 /* 5332 * This could be happening since the appender did the 5333 * increment but as not yet did the tailq insert onto the 5334 * read_queue 5335 */ 5336 if (hold_rlock == 0) { 5337 SCTP_INP_READ_LOCK(inp); 5338 } 5339 control = TAILQ_FIRST(&inp->read_queue); 5340 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5341 #ifdef INVARIANTS 5342 panic("Huh, its non zero and nothing on control?"); 5343 #endif 5344 so->so_rcv.sb_cc = 0; 5345 } 5346 SCTP_INP_READ_UNLOCK(inp); 5347 hold_rlock = 0; 5348 goto restart; 5349 } 5350 if ((control->length == 0) && 5351 (control->do_not_ref_stcb)) { 5352 /* 5353 * Clean up code for freeing assoc that left behind a 5354 * pdapi.. maybe a peer in EEOR that just closed after 5355 * sending and never indicated a EOR. 5356 */ 5357 if (hold_rlock == 0) { 5358 hold_rlock = 1; 5359 SCTP_INP_READ_LOCK(inp); 5360 } 5361 control->held_length = 0; 5362 if (control->data) { 5363 /* Hmm there is data here .. 
fix */ 5364 struct mbuf *m_tmp; 5365 int cnt = 0; 5366 5367 m_tmp = control->data; 5368 while (m_tmp) { 5369 cnt += SCTP_BUF_LEN(m_tmp); 5370 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5371 control->tail_mbuf = m_tmp; 5372 control->end_added = 1; 5373 } 5374 m_tmp = SCTP_BUF_NEXT(m_tmp); 5375 } 5376 control->length = cnt; 5377 } else { 5378 /* remove it */ 5379 TAILQ_REMOVE(&inp->read_queue, control, next); 5380 /* Add back any hiddend data */ 5381 sctp_free_remote_addr(control->whoFrom); 5382 sctp_free_a_readq(stcb, control); 5383 } 5384 if (hold_rlock) { 5385 hold_rlock = 0; 5386 SCTP_INP_READ_UNLOCK(inp); 5387 } 5388 goto restart; 5389 } 5390 if ((control->length == 0) && 5391 (control->end_added == 1)) { 5392 /* 5393 * Do we also need to check for (control->pdapi_aborted == 5394 * 1)? 5395 */ 5396 if (hold_rlock == 0) { 5397 hold_rlock = 1; 5398 SCTP_INP_READ_LOCK(inp); 5399 } 5400 TAILQ_REMOVE(&inp->read_queue, control, next); 5401 if (control->data) { 5402 #ifdef INVARIANTS 5403 panic("control->data not null but control->length == 0"); 5404 #else 5405 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5406 sctp_m_freem(control->data); 5407 control->data = NULL; 5408 #endif 5409 } 5410 if (control->aux_data) { 5411 sctp_m_free(control->aux_data); 5412 control->aux_data = NULL; 5413 } 5414 sctp_free_remote_addr(control->whoFrom); 5415 sctp_free_a_readq(stcb, control); 5416 if (hold_rlock) { 5417 hold_rlock = 0; 5418 SCTP_INP_READ_UNLOCK(inp); 5419 } 5420 goto restart; 5421 } 5422 if (control->length == 0) { 5423 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5424 (filling_sinfo)) { 5425 /* find a more suitable one then this */ 5426 ctl = TAILQ_NEXT(control, next); 5427 while (ctl) { 5428 if ((ctl->stcb != control->stcb) && (ctl->length) && 5429 (ctl->some_taken || 5430 (ctl->spec_flags & M_NOTIFICATION) || 5431 ((ctl->do_not_ref_stcb == 0) && 5432 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5433 ) { 5434 /*- 5435 * If we have a different TCB next, and there is data 5436 * present. If we have already taken some (pdapi), OR we can 5437 * ref the tcb and no delivery as started on this stream, we 5438 * take it. Note we allow a notification on a different 5439 * assoc to be delivered.. 5440 */ 5441 control = ctl; 5442 goto found_one; 5443 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5444 (ctl->length) && 5445 ((ctl->some_taken) || 5446 ((ctl->do_not_ref_stcb == 0) && 5447 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5448 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5449 /*- 5450 * If we have the same tcb, and there is data present, and we 5451 * have the strm interleave feature present. Then if we have 5452 * taken some (pdapi) or we can refer to tht tcb AND we have 5453 * not started a delivery for this stream, we can take it. 5454 * Note we do NOT allow a notificaiton on the same assoc to 5455 * be delivered. 
5456 */ 5457 control = ctl; 5458 goto found_one; 5459 } 5460 ctl = TAILQ_NEXT(ctl, next); 5461 } 5462 } 5463 /* 5464 * if we reach here, not suitable replacement is available 5465 * <or> fragment interleave is NOT on. So stuff the sb_cc 5466 * into the our held count, and its time to sleep again. 5467 */ 5468 held_length = so->so_rcv.sb_cc; 5469 control->held_length = so->so_rcv.sb_cc; 5470 goto restart; 5471 } 5472 /* Clear the held length since there is something to read */ 5473 control->held_length = 0; 5474 if (hold_rlock) { 5475 SCTP_INP_READ_UNLOCK(inp); 5476 hold_rlock = 0; 5477 } 5478 found_one: 5479 /* 5480 * If we reach here, control has a some data for us to read off. 5481 * Note that stcb COULD be NULL. 5482 */ 5483 control->some_taken++; 5484 if (hold_sblock) { 5485 SOCKBUF_UNLOCK(&so->so_rcv); 5486 hold_sblock = 0; 5487 } 5488 stcb = control->stcb; 5489 if (stcb) { 5490 if ((control->do_not_ref_stcb == 0) && 5491 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5492 if (freecnt_applied == 0) 5493 stcb = NULL; 5494 } else if (control->do_not_ref_stcb == 0) { 5495 /* you can't free it on me please */ 5496 /* 5497 * The lock on the socket buffer protects us so the 5498 * free code will stop. But since we used the 5499 * socketbuf lock and the sender uses the tcb_lock 5500 * to increment, we need to use the atomic add to 5501 * the refcnt 5502 */ 5503 if (freecnt_applied) { 5504 #ifdef INVARIANTS 5505 panic("refcnt already incremented"); 5506 #else 5507 SCTP_PRINTF("refcnt already incremented?\n"); 5508 #endif 5509 } else { 5510 atomic_add_int(&stcb->asoc.refcnt, 1); 5511 freecnt_applied = 1; 5512 } 5513 /* 5514 * Setup to remember how much we have not yet told 5515 * the peer our rwnd has opened up. Note we grab the 5516 * value from the tcb from last time. Note too that 5517 * sack sending clears this when a sack is sent, 5518 * which is fine. 
Once we hit the rwnd_req, we then 5519 * will go to the sctp_user_rcvd() that will not 5520 * lock until it KNOWs it MUST send a WUP-SACK. 5521 */ 5522 freed_so_far = stcb->freed_by_sorcv_sincelast; 5523 stcb->freed_by_sorcv_sincelast = 0; 5524 } 5525 } 5526 if (stcb && 5527 ((control->spec_flags & M_NOTIFICATION) == 0) && 5528 control->do_not_ref_stcb == 0) { 5529 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5530 } 5531 /* First lets get off the sinfo and sockaddr info */ 5532 if ((sinfo) && filling_sinfo) { 5533 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo)); 5534 nxt = TAILQ_NEXT(control, next); 5535 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5536 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5537 struct sctp_extrcvinfo *s_extra; 5538 5539 s_extra = (struct sctp_extrcvinfo *)sinfo; 5540 if ((nxt) && 5541 (nxt->length)) { 5542 s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5543 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5544 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5545 } 5546 if (nxt->spec_flags & M_NOTIFICATION) { 5547 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5548 } 5549 s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id; 5550 s_extra->sreinfo_next_length = nxt->length; 5551 s_extra->sreinfo_next_ppid = nxt->sinfo_ppid; 5552 s_extra->sreinfo_next_stream = nxt->sinfo_stream; 5553 if (nxt->tail_mbuf != NULL) { 5554 if (nxt->end_added) { 5555 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5556 } 5557 } 5558 } else { 5559 /* 5560 * we explicitly 0 this, since the memcpy 5561 * got some other things beyond the older 5562 * sinfo_ that is on the control's structure 5563 * :-D 5564 */ 5565 nxt = NULL; 5566 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG; 5567 s_extra->sreinfo_next_aid = 0; 5568 s_extra->sreinfo_next_length = 0; 5569 s_extra->sreinfo_next_ppid = 0; 5570 s_extra->sreinfo_next_stream = 0; 5571 } 5572 } 5573 /* 5574 * update off the real current 
cum-ack, if we have an stcb. 5575 */ 5576 if ((control->do_not_ref_stcb == 0) && stcb) 5577 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5578 /* 5579 * mask off the high bits, we keep the actual chunk bits in 5580 * there. 5581 */ 5582 sinfo->sinfo_flags &= 0x00ff; 5583 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5584 sinfo->sinfo_flags |= SCTP_UNORDERED; 5585 } 5586 } 5587 #ifdef SCTP_ASOCLOG_OF_TSNS 5588 { 5589 int index, newindex; 5590 struct sctp_pcbtsn_rlog *entry; 5591 5592 do { 5593 index = inp->readlog_index; 5594 newindex = index + 1; 5595 if (newindex >= SCTP_READ_LOG_SIZE) { 5596 newindex = 0; 5597 } 5598 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5599 entry = &inp->readlog[index]; 5600 entry->vtag = control->sinfo_assoc_id; 5601 entry->strm = control->sinfo_stream; 5602 entry->seq = control->sinfo_ssn; 5603 entry->sz = control->length; 5604 entry->flgs = control->sinfo_flags; 5605 } 5606 #endif 5607 if (fromlen && from) { 5608 cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len); 5609 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5610 #ifdef INET6 5611 case AF_INET6: 5612 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from; 5613 break; 5614 #endif 5615 #ifdef INET 5616 case AF_INET: 5617 ((struct sockaddr_in *)from)->sin_port = control->port_from; 5618 break; 5619 #endif 5620 default: 5621 break; 5622 } 5623 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 5624 5625 #if defined(INET) && defined(INET6) 5626 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) && 5627 (from->sa_family == AF_INET) && 5628 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) { 5629 struct sockaddr_in *sin; 5630 struct sockaddr_in6 sin6; 5631 5632 sin = (struct sockaddr_in *)from; 5633 bzero(&sin6, sizeof(sin6)); 5634 sin6.sin6_family = AF_INET6; 5635 sin6.sin6_len = sizeof(struct sockaddr_in6); 5636 sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); 5637 bcopy(&sin->sin_addr, 5638 
&sin6.sin6_addr.s6_addr32[3], 5639 sizeof(sin6.sin6_addr.s6_addr32[3])); 5640 sin6.sin6_port = sin->sin_port; 5641 memcpy(from, &sin6, sizeof(struct sockaddr_in6)); 5642 } 5643 #endif 5644 #ifdef INET6 5645 { 5646 struct sockaddr_in6 lsa6, *from6; 5647 5648 from6 = (struct sockaddr_in6 *)from; 5649 sctp_recover_scope_mac(from6, (&lsa6)); 5650 } 5651 #endif 5652 } 5653 /* now copy out what data we can */ 5654 if (mp == NULL) { 5655 /* copy out each mbuf in the chain up to length */ 5656 get_more_data: 5657 m = control->data; 5658 while (m) { 5659 /* Move out all we can */ 5660 cp_len = (int)uio->uio_resid; 5661 my_len = (int)SCTP_BUF_LEN(m); 5662 if (cp_len > my_len) { 5663 /* not enough in this buf */ 5664 cp_len = my_len; 5665 } 5666 if (hold_rlock) { 5667 SCTP_INP_READ_UNLOCK(inp); 5668 hold_rlock = 0; 5669 } 5670 if (cp_len > 0) 5671 error = uiomove(mtod(m, char *), cp_len, uio); 5672 /* re-read */ 5673 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5674 goto release; 5675 } 5676 if ((control->do_not_ref_stcb == 0) && stcb && 5677 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5678 no_rcv_needed = 1; 5679 } 5680 if (error) { 5681 /* error we are out of here */ 5682 goto release; 5683 } 5684 if ((SCTP_BUF_NEXT(m) == NULL) && 5685 (cp_len >= SCTP_BUF_LEN(m)) && 5686 ((control->end_added == 0) || 5687 (control->end_added && 5688 (TAILQ_NEXT(control, next) == NULL))) 5689 ) { 5690 SCTP_INP_READ_LOCK(inp); 5691 hold_rlock = 1; 5692 } 5693 if (cp_len == SCTP_BUF_LEN(m)) { 5694 if ((SCTP_BUF_NEXT(m) == NULL) && 5695 (control->end_added)) { 5696 out_flags |= MSG_EOR; 5697 if ((control->do_not_ref_stcb == 0) && 5698 (control->stcb != NULL) && 5699 ((control->spec_flags & M_NOTIFICATION) == 0)) 5700 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5701 } 5702 if (control->spec_flags & M_NOTIFICATION) { 5703 out_flags |= MSG_NOTIFICATION; 5704 } 5705 /* we ate up the mbuf */ 5706 if (in_flags & MSG_PEEK) { 5707 /* just looking */ 5708 m = 
SCTP_BUF_NEXT(m); 5709 copied_so_far += cp_len; 5710 } else { 5711 /* dispose of the mbuf */ 5712 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5713 sctp_sblog(&so->so_rcv, 5714 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5715 } 5716 sctp_sbfree(control, stcb, &so->so_rcv, m); 5717 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5718 sctp_sblog(&so->so_rcv, 5719 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5720 } 5721 copied_so_far += cp_len; 5722 freed_so_far += cp_len; 5723 freed_so_far += MSIZE; 5724 atomic_subtract_int(&control->length, cp_len); 5725 control->data = sctp_m_free(m); 5726 m = control->data; 5727 /* 5728 * been through it all, must hold sb 5729 * lock ok to null tail 5730 */ 5731 if (control->data == NULL) { 5732 #ifdef INVARIANTS 5733 if ((control->end_added == 0) || 5734 (TAILQ_NEXT(control, next) == NULL)) { 5735 /* 5736 * If the end is not 5737 * added, OR the 5738 * next is NOT null 5739 * we MUST have the 5740 * lock. 5741 */ 5742 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 5743 panic("Hmm we don't own the lock?"); 5744 } 5745 } 5746 #endif 5747 control->tail_mbuf = NULL; 5748 #ifdef INVARIANTS 5749 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 5750 panic("end_added, nothing left and no MSG_EOR"); 5751 } 5752 #endif 5753 } 5754 } 5755 } else { 5756 /* Do we need to trim the mbuf? */ 5757 if (control->spec_flags & M_NOTIFICATION) { 5758 out_flags |= MSG_NOTIFICATION; 5759 } 5760 if ((in_flags & MSG_PEEK) == 0) { 5761 SCTP_BUF_RESV_UF(m, cp_len); 5762 SCTP_BUF_LEN(m) -= cp_len; 5763 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5764 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5765 } 5766 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5767 if ((control->do_not_ref_stcb == 0) && 5768 stcb) { 5769 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5770 } 5771 copied_so_far += cp_len; 5772 freed_so_far += cp_len; 5773 freed_so_far += MSIZE; 5774 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5775 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5776 SCTP_LOG_SBRESULT, 0); 5777 } 5778 atomic_subtract_int(&control->length, cp_len); 5779 } else { 5780 copied_so_far += cp_len; 5781 } 5782 } 5783 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 5784 break; 5785 } 5786 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5787 (control->do_not_ref_stcb == 0) && 5788 (freed_so_far >= rwnd_req)) { 5789 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5790 } 5791 } /* end while(m) */ 5792 /* 5793 * At this point we have looked at it all and we either have 5794 * a MSG_EOR/or read all the user wants... <OR> 5795 * control->length == 0. 5796 */ 5797 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 5798 /* we are done with this control */ 5799 if (control->length == 0) { 5800 if (control->data) { 5801 #ifdef INVARIANTS 5802 panic("control->data not null at read eor?"); 5803 #else 5804 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n"); 5805 sctp_m_freem(control->data); 5806 control->data = NULL; 5807 #endif 5808 } 5809 done_with_control: 5810 if (TAILQ_NEXT(control, next) == NULL) { 5811 /* 5812 * If we don't have a next we need a 5813 * lock, if there is a next 5814 * interrupt is filling ahead of us 5815 * and we don't need a lock to 5816 * remove this guy (which is the 5817 * head of the queue). 
5818 */ 5819 if (hold_rlock == 0) { 5820 SCTP_INP_READ_LOCK(inp); 5821 hold_rlock = 1; 5822 } 5823 } 5824 TAILQ_REMOVE(&inp->read_queue, control, next); 5825 /* Add back any hiddend data */ 5826 if (control->held_length) { 5827 held_length = 0; 5828 control->held_length = 0; 5829 wakeup_read_socket = 1; 5830 } 5831 if (control->aux_data) { 5832 sctp_m_free(control->aux_data); 5833 control->aux_data = NULL; 5834 } 5835 no_rcv_needed = control->do_not_ref_stcb; 5836 sctp_free_remote_addr(control->whoFrom); 5837 control->data = NULL; 5838 sctp_free_a_readq(stcb, control); 5839 control = NULL; 5840 if ((freed_so_far >= rwnd_req) && 5841 (no_rcv_needed == 0)) 5842 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5843 5844 } else { 5845 /* 5846 * The user did not read all of this 5847 * message, turn off the returned MSG_EOR 5848 * since we are leaving more behind on the 5849 * control to read. 5850 */ 5851 #ifdef INVARIANTS 5852 if (control->end_added && 5853 (control->data == NULL) && 5854 (control->tail_mbuf == NULL)) { 5855 panic("Gak, control->length is corrupt?"); 5856 } 5857 #endif 5858 no_rcv_needed = control->do_not_ref_stcb; 5859 out_flags &= ~MSG_EOR; 5860 } 5861 } 5862 if (out_flags & MSG_EOR) { 5863 goto release; 5864 } 5865 if ((uio->uio_resid == 0) || 5866 ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1))) 5867 ) { 5868 goto release; 5869 } 5870 /* 5871 * If I hit here the receiver wants more and this message is 5872 * NOT done (pd-api). So two questions. Can we block? if not 5873 * we are done. Did the user NOT set MSG_WAITALL? 5874 */ 5875 if (block_allowed == 0) { 5876 goto release; 5877 } 5878 /* 5879 * We need to wait for more data a few things: - We don't 5880 * sbunlock() so we don't get someone else reading. - We 5881 * must be sure to account for the case where what is added 5882 * is NOT to our control when we wakeup. 
5883 */ 5884 5885 /* 5886 * Do we need to tell the transport a rwnd update might be 5887 * needed before we go to sleep? 5888 */ 5889 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5890 ((freed_so_far >= rwnd_req) && 5891 (control->do_not_ref_stcb == 0) && 5892 (no_rcv_needed == 0))) { 5893 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5894 } 5895 wait_some_more: 5896 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5897 goto release; 5898 } 5899 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5900 goto release; 5901 5902 if (hold_rlock == 1) { 5903 SCTP_INP_READ_UNLOCK(inp); 5904 hold_rlock = 0; 5905 } 5906 if (hold_sblock == 0) { 5907 SOCKBUF_LOCK(&so->so_rcv); 5908 hold_sblock = 1; 5909 } 5910 if ((copied_so_far) && (control->length == 0) && 5911 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 5912 goto release; 5913 } 5914 if (so->so_rcv.sb_cc <= control->held_length) { 5915 error = sbwait(&so->so_rcv); 5916 if (error) { 5917 goto release; 5918 } 5919 control->held_length = 0; 5920 } 5921 if (hold_sblock) { 5922 SOCKBUF_UNLOCK(&so->so_rcv); 5923 hold_sblock = 0; 5924 } 5925 if (control->length == 0) { 5926 /* still nothing here */ 5927 if (control->end_added == 1) { 5928 /* he aborted, or is done i.e.did a shutdown */ 5929 out_flags |= MSG_EOR; 5930 if (control->pdapi_aborted) { 5931 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5932 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5933 5934 out_flags |= MSG_TRUNC; 5935 } else { 5936 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5937 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5938 } 5939 goto done_with_control; 5940 } 5941 if (so->so_rcv.sb_cc > held_length) { 5942 control->held_length = so->so_rcv.sb_cc; 5943 held_length = 0; 5944 } 5945 goto wait_some_more; 5946 } else if (control->data == NULL) { 5947 /* 5948 * we must re-sync since data is probably being 
5949 * added 5950 */ 5951 SCTP_INP_READ_LOCK(inp); 5952 if ((control->length > 0) && (control->data == NULL)) { 5953 /* 5954 * big trouble.. we have the lock and its 5955 * corrupt? 5956 */ 5957 #ifdef INVARIANTS 5958 panic("Impossible data==NULL length !=0"); 5959 #endif 5960 out_flags |= MSG_EOR; 5961 out_flags |= MSG_TRUNC; 5962 control->length = 0; 5963 SCTP_INP_READ_UNLOCK(inp); 5964 goto done_with_control; 5965 } 5966 SCTP_INP_READ_UNLOCK(inp); 5967 /* We will fall around to get more data */ 5968 } 5969 goto get_more_data; 5970 } else { 5971 /*- 5972 * Give caller back the mbuf chain, 5973 * store in uio_resid the length 5974 */ 5975 wakeup_read_socket = 0; 5976 if ((control->end_added == 0) || 5977 (TAILQ_NEXT(control, next) == NULL)) { 5978 /* Need to get rlock */ 5979 if (hold_rlock == 0) { 5980 SCTP_INP_READ_LOCK(inp); 5981 hold_rlock = 1; 5982 } 5983 } 5984 if (control->end_added) { 5985 out_flags |= MSG_EOR; 5986 if ((control->do_not_ref_stcb == 0) && 5987 (control->stcb != NULL) && 5988 ((control->spec_flags & M_NOTIFICATION) == 0)) 5989 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5990 } 5991 if (control->spec_flags & M_NOTIFICATION) { 5992 out_flags |= MSG_NOTIFICATION; 5993 } 5994 uio->uio_resid = control->length; 5995 *mp = control->data; 5996 m = control->data; 5997 while (m) { 5998 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5999 sctp_sblog(&so->so_rcv, 6000 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6001 } 6002 sctp_sbfree(control, stcb, &so->so_rcv, m); 6003 freed_so_far += SCTP_BUF_LEN(m); 6004 freed_so_far += MSIZE; 6005 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6006 sctp_sblog(&so->so_rcv, 6007 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 6008 } 6009 m = SCTP_BUF_NEXT(m); 6010 } 6011 control->data = control->tail_mbuf = NULL; 6012 control->length = 0; 6013 if (out_flags & MSG_EOR) { 6014 /* Done with this control */ 6015 goto done_with_control; 6016 } 6017 } 6018 release: 6019 if (hold_rlock == 1) { 6020 SCTP_INP_READ_UNLOCK(inp); 6021 hold_rlock = 0; 6022 } 6023 if (hold_sblock == 1) { 6024 SOCKBUF_UNLOCK(&so->so_rcv); 6025 hold_sblock = 0; 6026 } 6027 sbunlock(&so->so_rcv); 6028 sockbuf_lock = 0; 6029 6030 release_unlocked: 6031 if (hold_sblock) { 6032 SOCKBUF_UNLOCK(&so->so_rcv); 6033 hold_sblock = 0; 6034 } 6035 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6036 if ((freed_so_far >= rwnd_req) && 6037 (control && (control->do_not_ref_stcb == 0)) && 6038 (no_rcv_needed == 0)) 6039 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6040 } 6041 out: 6042 if (msg_flags) { 6043 *msg_flags = out_flags; 6044 } 6045 if (((out_flags & MSG_EOR) == 0) && 6046 ((in_flags & MSG_PEEK) == 0) && 6047 (sinfo) && 6048 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6049 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6050 struct sctp_extrcvinfo *s_extra; 6051 6052 s_extra = (struct sctp_extrcvinfo *)sinfo; 6053 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG; 6054 } 6055 if (hold_rlock == 1) { 6056 SCTP_INP_READ_UNLOCK(inp); 6057 } 6058 if (hold_sblock) { 6059 SOCKBUF_UNLOCK(&so->so_rcv); 6060 } 6061 if (sockbuf_lock) { 6062 sbunlock(&so->so_rcv); 6063 } 6064 if (freecnt_applied) { 6065 /* 6066 * The lock on the socket buffer protects us so the free 6067 * code will stop. But since we used the socketbuf lock and 6068 * the sender uses the tcb_lock to increment, we need to use 6069 * the atomic add to the refcnt. 
	 */
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("stcb for refcnt has gone NULL?");
		goto stage_left;
#else
		goto stage_left;
#endif
	}
	/* Drop the ref taken earlier under freecnt_applied. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	/* Save the value back for next time */
	stcb->freed_by_sorcv_sincelast = freed_so_far;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
stage_left:
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}


#ifdef SCTP_MBUF_LOGGING
/*
 * Free a single mbuf, logging the free of mbufs with external storage
 * when SCTP mbuf logging is enabled.  Returns the result of m_free()
 * (presumably the next mbuf in the chain, per mbuf(9) -- confirm).
 */
struct mbuf *
sctp_m_free(struct mbuf *m)
{
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		if (SCTP_BUF_IS_EXTENDED(m)) {
			sctp_log_mb(m, SCTP_MBUF_IFREE);
		}
	}
	return (m_free(m));
}

/* Free an entire mbuf chain through sctp_m_free() so each free is logged. */
void
sctp_m_freem(struct mbuf *mb)
{
	while (mb != NULL)
		mb = sctp_m_free(mb);
}

#endif

/*
 * Queue a peer-set-primary request for the given local address on the
 * address workqueue; the iterator handles it asynchronously.  Returns 0
 * on success, EADDRNOTAVAIL if the address is not a local ifa, or ENOMEM
 * if the workqueue entry cannot be allocated.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* The workqueue entry holds its own reference on the ifa. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}


/*
 * SCTP's protocol receive entry point.  Wraps sctp_sorecvmsg(): the peer
 * address is copied back through *psa, ancillary SCTP receive info through
 * *controlp (only when the relevant RECV* socket features are on), and
 * receive flags through *flagsp.
 */
int
sctp_soreceive(struct socket *so,
    struct sockaddr **psa,
    struct uio *uio,
    struct mbuf **mp0,
    struct mbuf **controlp,
    int *flagsp)
{
	int error, fromlen;
	uint8_t sockbuf[256];	/* scratch space for the peer address */
	struct sockaddr *from;
	struct sctp_extrcvinfo sinfo;
	int filling_sinfo = 1;
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pickup the assoc we are reading from */
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
	    (controlp == NULL)) {
		/* user does not want the sndrcv ctl */
		filling_sinfo = 0;
	}
	if (psa) {
		from = (struct sockaddr *)sockbuf;
		fromlen = sizeof(sockbuf);
		from->sa_len = 0;
	} else {
		from = NULL;
		fromlen = 0;
	}

	error = sctp_sorecvmsg(so, uio, mp0, from,
fromlen, flagsp, 6213 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6214 if ((controlp) && (filling_sinfo)) { 6215 /* copy back the sinfo in a CMSG format */ 6216 if (filling_sinfo) 6217 *controlp = sctp_build_ctl_nchunk(inp, 6218 (struct sctp_sndrcvinfo *)&sinfo); 6219 else 6220 *controlp = NULL; 6221 } 6222 if (psa) { 6223 /* copy back the address info */ 6224 if (from && from->sa_len) { 6225 *psa = sodupsockaddr(from, M_NOWAIT); 6226 } else { 6227 *psa = NULL; 6228 } 6229 } 6230 return (error); 6231 } 6232 6233 6234 6235 6236 6237 int 6238 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6239 int totaddr, int *error) 6240 { 6241 int added = 0; 6242 int i; 6243 struct sctp_inpcb *inp; 6244 struct sockaddr *sa; 6245 size_t incr = 0; 6246 6247 #ifdef INET 6248 struct sockaddr_in *sin; 6249 6250 #endif 6251 #ifdef INET6 6252 struct sockaddr_in6 *sin6; 6253 6254 #endif 6255 6256 sa = addr; 6257 inp = stcb->sctp_ep; 6258 *error = 0; 6259 for (i = 0; i < totaddr; i++) { 6260 switch (sa->sa_family) { 6261 #ifdef INET 6262 case AF_INET: 6263 incr = sizeof(struct sockaddr_in); 6264 sin = (struct sockaddr_in *)sa; 6265 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6266 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6267 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6268 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6269 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7); 6270 *error = EINVAL; 6271 goto out_now; 6272 } 6273 if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 6274 /* assoc gone no un-lock */ 6275 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6276 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7); 6277 *error = ENOBUFS; 6278 goto out_now; 6279 } 6280 added++; 6281 break; 6282 #endif 6283 #ifdef INET6 6284 case AF_INET6: 6285 incr = sizeof(struct sockaddr_in6); 6286 sin6 = (struct 
sockaddr_in6 *)sa; 6287 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 6288 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 6289 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6290 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8); 6291 *error = EINVAL; 6292 goto out_now; 6293 } 6294 if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 6295 /* assoc gone no un-lock */ 6296 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6297 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8); 6298 *error = ENOBUFS; 6299 goto out_now; 6300 } 6301 added++; 6302 break; 6303 #endif 6304 default: 6305 break; 6306 } 6307 sa = (struct sockaddr *)((caddr_t)sa + incr); 6308 } 6309 out_now: 6310 return (added); 6311 } 6312 6313 struct sctp_tcb * 6314 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, 6315 int *totaddr, int *num_v4, int *num_v6, int *error, 6316 int limit, int *bad_addr) 6317 { 6318 struct sockaddr *sa; 6319 struct sctp_tcb *stcb = NULL; 6320 size_t incr, at, i; 6321 6322 at = incr = 0; 6323 sa = addr; 6324 6325 *error = *num_v6 = *num_v4 = 0; 6326 /* account and validate addresses */ 6327 for (i = 0; i < (size_t)*totaddr; i++) { 6328 switch (sa->sa_family) { 6329 #ifdef INET 6330 case AF_INET: 6331 (*num_v4) += 1; 6332 incr = sizeof(struct sockaddr_in); 6333 if (sa->sa_len != incr) { 6334 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6335 *error = EINVAL; 6336 *bad_addr = 1; 6337 return (NULL); 6338 } 6339 break; 6340 #endif 6341 #ifdef INET6 6342 case AF_INET6: 6343 { 6344 struct sockaddr_in6 *sin6; 6345 6346 sin6 = (struct sockaddr_in6 *)sa; 6347 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6348 /* Must be non-mapped for connectx */ 6349 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6350 *error = EINVAL; 6351 *bad_addr = 1; 6352 return (NULL); 6353 } 6354 (*num_v6) += 
1;
				incr = sizeof(struct sockaddr_in6);
				if (sa->sa_len != incr) {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				break;
			}
#endif
		default:
			*totaddr = i;
			/* we are done */
			break;
		}
		if (i == (size_t)*totaddr) {
			break;
		}
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* Already have or am bringing up an association */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		/*
		 * NOTE(review): 'at' is never advanced in this loop, so this
		 * bound effectively compares one entry's size against
		 * 'limit'.  It looks like it was meant to accumulate bytes
		 * consumed so far -- confirm against upstream before relying
		 * on it.
		 */
		if ((at + incr) > (size_t)limit) {
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}

/*
 * sctp_bindx(ADD) for one address.
 * assumes all arguments are valid/checked by caller.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;	/* holds a v4-mapped address converted to v4 */

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		/*
		 * NOTE(review): addr_touse may still be a sockaddr_in6 here;
		 * this cast presumably relies on sin_port and sin6_port
		 * sharing the same offset -- confirm.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}

/*
 * sctp_bindx(DELETE) for one address.
 * assumes all arguments are valid/checked by caller.
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;	/* holds a v4-mapped address converted to v4 */

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}

/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
(IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 6659 continue; 6660 } 6661 /* count this one */ 6662 count++; 6663 } else { 6664 continue; 6665 } 6666 break; 6667 #endif 6668 #ifdef INET6 6669 case AF_INET6: 6670 if (ipv6_addr_legal) { 6671 struct sockaddr_in6 *sin6; 6672 6673 sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa; 6674 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 6675 continue; 6676 } 6677 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 6678 if (local_scope == 0) 6679 continue; 6680 if (sin6->sin6_scope_id == 0) { 6681 if (sa6_recoverscope(sin6) != 0) 6682 /* 6683 * 6684 * bad 6685 * 6686 * li 6687 * nk 6688 * 6689 * loc 6690 * al 6691 * 6692 * add 6693 * re 6694 * ss 6695 * */ 6696 continue; 6697 } 6698 } 6699 if ((site_scope == 0) && 6700 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 6701 continue; 6702 } 6703 /* count this one */ 6704 count++; 6705 } 6706 break; 6707 #endif 6708 default: 6709 /* TSNH */ 6710 break; 6711 } 6712 } 6713 } 6714 } else { 6715 /* 6716 * subset bound case 6717 */ 6718 struct sctp_laddr *laddr; 6719 6720 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 6721 sctp_nxt_addr) { 6722 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 6723 continue; 6724 } 6725 /* count this one */ 6726 count++; 6727 } 6728 } 6729 SCTP_IPI_ADDR_RUNLOCK(); 6730 return (count); 6731 } 6732 6733 #if defined(SCTP_LOCAL_TRACE_BUF) 6734 6735 void 6736 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 6737 { 6738 uint32_t saveindex, newindex; 6739 6740 do { 6741 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 6742 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 6743 newindex = 1; 6744 } else { 6745 newindex = saveindex + 1; 6746 } 6747 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 6748 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 6749 saveindex = 0; 6750 } 6751 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}

#endif
/*
 * Receive hook for UDP-encapsulated SCTP packets (see sctp_over_udp_start()).
 * Strips the UDP header from the mbuf chain and re-injects the packet into
 * the normal SCTP input path, carrying the UDP source port along.
 * Consumes 'm' on every path (either handed to sctp*_input_with_port() or
 * freed here).
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
{
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port (kept in network byte order). */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed (m_pullup freed sp for us) */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain: append sp after the IP header. */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the IP total length by the stripped UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		/* Same adjustment for the v6 payload length. */
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}

/*
 * Tear down the kernel UDP tunneling sockets (v4 and/or v6) if open.
 */
void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}

/*
 * Create kernel UDP sockets bound to the configured tunneling port and
 * register sctp_recv_udp_tunneled_packet() as their input hook.
 * Returns 0 on success or an errno; any partial setup is undone via
 * sctp_over_udp_stop() before returning an error.
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;

#ifdef INET
	struct sockaddr_in sin;

#endif
#ifdef INET6
	struct sockaddr_in6 sin6;

#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	/*
	 * NOTE(review): ntohs() here only matters as a zero test
	 * (ntohs(port) == 0 iff port == 0); port itself is treated as
	 * host order and converted with htons() at bind time below.
	 */
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}