/*-
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <netinet/sctp_pcb.h> 38 #include <netinet/sctputil.h> 39 #include <netinet/sctp_var.h> 40 #include <netinet/sctp_sysctl.h> 41 #ifdef INET6 42 #include <netinet6/sctp6_var.h> 43 #endif 44 #include <netinet/sctp_header.h> 45 #include <netinet/sctp_output.h> 46 #include <netinet/sctp_uio.h> 47 #include <netinet/sctp_timer.h> 48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */ 49 #include <netinet/sctp_auth.h> 50 #include <netinet/sctp_asconf.h> 51 #include <netinet/sctp_bsd_addr.h> 52 #include <netinet/udp.h> 53 #include <netinet/udp_var.h> 54 #include <sys/proc.h> 55 56 57 #ifndef KTR_SCTP 58 #define KTR_SCTP KTR_SUBSYS 59 #endif 60 61 extern struct sctp_cc_functions sctp_cc_functions[]; 62 extern struct sctp_ss_functions sctp_ss_functions[]; 63 64 void 65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr) 66 { 67 struct sctp_cwnd_log sctp_clog; 68 69 sctp_clog.x.sb.stcb = stcb; 70 sctp_clog.x.sb.so_sbcc = sb->sb_cc; 71 if (stcb) 72 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc; 73 else 74 sctp_clog.x.sb.stcb_sbcc = 0; 75 sctp_clog.x.sb.incr = incr; 76 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 77 SCTP_LOG_EVENT_SB, 78 from, 79 sctp_clog.x.misc.log1, 80 sctp_clog.x.misc.log2, 81 sctp_clog.x.misc.log3, 82 sctp_clog.x.misc.log4); 83 } 84 85 void 86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc) 87 { 88 struct sctp_cwnd_log sctp_clog; 89 90 sctp_clog.x.close.inp = (void *)inp; 91 sctp_clog.x.close.sctp_flags = inp->sctp_flags; 92 if (stcb) { 93 sctp_clog.x.close.stcb = (void *)stcb; 94 sctp_clog.x.close.state = (uint16_t) stcb->asoc.state; 95 } else { 96 sctp_clog.x.close.stcb = 0; 97 sctp_clog.x.close.state = 0; 98 } 99 sctp_clog.x.close.loc = loc; 100 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 101 SCTP_LOG_EVENT_CLOSE, 102 0, 103 sctp_clog.x.misc.log1, 104 sctp_clog.x.misc.log2, 105 
sctp_clog.x.misc.log3, 106 sctp_clog.x.misc.log4); 107 } 108 109 void 110 rto_logging(struct sctp_nets *net, int from) 111 { 112 struct sctp_cwnd_log sctp_clog; 113 114 memset(&sctp_clog, 0, sizeof(sctp_clog)); 115 sctp_clog.x.rto.net = (void *)net; 116 sctp_clog.x.rto.rtt = net->rtt / 1000; 117 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 118 SCTP_LOG_EVENT_RTT, 119 from, 120 sctp_clog.x.misc.log1, 121 sctp_clog.x.misc.log2, 122 sctp_clog.x.misc.log3, 123 sctp_clog.x.misc.log4); 124 } 125 126 void 127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from) 128 { 129 struct sctp_cwnd_log sctp_clog; 130 131 sctp_clog.x.strlog.stcb = stcb; 132 sctp_clog.x.strlog.n_tsn = tsn; 133 sctp_clog.x.strlog.n_sseq = sseq; 134 sctp_clog.x.strlog.e_tsn = 0; 135 sctp_clog.x.strlog.e_sseq = 0; 136 sctp_clog.x.strlog.strm = stream; 137 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 138 SCTP_LOG_EVENT_STRM, 139 from, 140 sctp_clog.x.misc.log1, 141 sctp_clog.x.misc.log2, 142 sctp_clog.x.misc.log3, 143 sctp_clog.x.misc.log4); 144 } 145 146 void 147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action) 148 { 149 struct sctp_cwnd_log sctp_clog; 150 151 sctp_clog.x.nagle.stcb = (void *)stcb; 152 sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight; 153 sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size; 154 sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue; 155 sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count; 156 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 157 SCTP_LOG_EVENT_NAGLE, 158 action, 159 sctp_clog.x.misc.log1, 160 sctp_clog.x.misc.log2, 161 sctp_clog.x.misc.log3, 162 sctp_clog.x.misc.log4); 163 } 164 165 void 166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from) 167 { 168 struct sctp_cwnd_log sctp_clog; 169 170 sctp_clog.x.sack.cumack = cumack; 171 sctp_clog.x.sack.oldcumack = old_cumack; 172 sctp_clog.x.sack.tsn = 
tsn; 173 sctp_clog.x.sack.numGaps = gaps; 174 sctp_clog.x.sack.numDups = dups; 175 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 176 SCTP_LOG_EVENT_SACK, 177 from, 178 sctp_clog.x.misc.log1, 179 sctp_clog.x.misc.log2, 180 sctp_clog.x.misc.log3, 181 sctp_clog.x.misc.log4); 182 } 183 184 void 185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from) 186 { 187 struct sctp_cwnd_log sctp_clog; 188 189 memset(&sctp_clog, 0, sizeof(sctp_clog)); 190 sctp_clog.x.map.base = map; 191 sctp_clog.x.map.cum = cum; 192 sctp_clog.x.map.high = high; 193 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 194 SCTP_LOG_EVENT_MAP, 195 from, 196 sctp_clog.x.misc.log1, 197 sctp_clog.x.misc.log2, 198 sctp_clog.x.misc.log3, 199 sctp_clog.x.misc.log4); 200 } 201 202 void 203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from) 204 { 205 struct sctp_cwnd_log sctp_clog; 206 207 memset(&sctp_clog, 0, sizeof(sctp_clog)); 208 sctp_clog.x.fr.largest_tsn = biggest_tsn; 209 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn; 210 sctp_clog.x.fr.tsn = tsn; 211 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 212 SCTP_LOG_EVENT_FR, 213 from, 214 sctp_clog.x.misc.log1, 215 sctp_clog.x.misc.log2, 216 sctp_clog.x.misc.log3, 217 sctp_clog.x.misc.log4); 218 } 219 220 void 221 sctp_log_mb(struct mbuf *m, int from) 222 { 223 struct sctp_cwnd_log sctp_clog; 224 225 sctp_clog.x.mb.mp = m; 226 sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m)); 227 sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m)); 228 sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0); 229 if (SCTP_BUF_IS_EXTENDED(m)) { 230 sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m); 231 sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m)); 232 } else { 233 sctp_clog.x.mb.ext = 0; 234 sctp_clog.x.mb.refcnt = 0; 235 } 236 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 237 SCTP_LOG_EVENT_MBUF, 238 from, 239 sctp_clog.x.misc.log1, 240 sctp_clog.x.misc.log2, 241 sctp_clog.x.misc.log3, 242 sctp_clog.x.misc.log4); 243 } 
244 245 void 246 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from) 247 { 248 struct sctp_cwnd_log sctp_clog; 249 250 if (control == NULL) { 251 SCTP_PRINTF("Gak log of NULL?\n"); 252 return; 253 } 254 sctp_clog.x.strlog.stcb = control->stcb; 255 sctp_clog.x.strlog.n_tsn = control->sinfo_tsn; 256 sctp_clog.x.strlog.n_sseq = control->sinfo_ssn; 257 sctp_clog.x.strlog.strm = control->sinfo_stream; 258 if (poschk != NULL) { 259 sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn; 260 sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn; 261 } else { 262 sctp_clog.x.strlog.e_tsn = 0; 263 sctp_clog.x.strlog.e_sseq = 0; 264 } 265 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 266 SCTP_LOG_EVENT_STRM, 267 from, 268 sctp_clog.x.misc.log1, 269 sctp_clog.x.misc.log2, 270 sctp_clog.x.misc.log3, 271 sctp_clog.x.misc.log4); 272 } 273 274 void 275 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from) 276 { 277 struct sctp_cwnd_log sctp_clog; 278 279 sctp_clog.x.cwnd.net = net; 280 if (stcb->asoc.send_queue_cnt > 255) 281 sctp_clog.x.cwnd.cnt_in_send = 255; 282 else 283 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 284 if (stcb->asoc.stream_queue_cnt > 255) 285 sctp_clog.x.cwnd.cnt_in_str = 255; 286 else 287 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 288 289 if (net) { 290 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd; 291 sctp_clog.x.cwnd.inflight = net->flight_size; 292 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack; 293 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack; 294 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack; 295 } 296 if (SCTP_CWNDLOG_PRESEND == from) { 297 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd; 298 } 299 sctp_clog.x.cwnd.cwnd_augment = augment; 300 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 301 SCTP_LOG_EVENT_CWND, 302 from, 303 sctp_clog.x.misc.log1, 304 sctp_clog.x.misc.log2, 305 sctp_clog.x.misc.log3, 306 
sctp_clog.x.misc.log4); 307 } 308 309 void 310 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from) 311 { 312 struct sctp_cwnd_log sctp_clog; 313 314 memset(&sctp_clog, 0, sizeof(sctp_clog)); 315 if (inp) { 316 sctp_clog.x.lock.sock = (void *)inp->sctp_socket; 317 318 } else { 319 sctp_clog.x.lock.sock = (void *)NULL; 320 } 321 sctp_clog.x.lock.inp = (void *)inp; 322 if (stcb) { 323 sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx); 324 } else { 325 sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN; 326 } 327 if (inp) { 328 sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx); 329 sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx); 330 } else { 331 sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN; 332 sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN; 333 } 334 sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx)); 335 if (inp && (inp->sctp_socket)) { 336 sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 337 sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 338 sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx)); 339 } else { 340 sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN; 341 sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN; 342 sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN; 343 } 344 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 345 SCTP_LOG_LOCK_EVENT, 346 from, 347 sctp_clog.x.misc.log1, 348 sctp_clog.x.misc.log2, 349 sctp_clog.x.misc.log3, 350 sctp_clog.x.misc.log4); 351 } 352 353 void 354 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from) 355 { 356 struct sctp_cwnd_log sctp_clog; 357 358 memset(&sctp_clog, 0, sizeof(sctp_clog)); 359 sctp_clog.x.cwnd.net = net; 360 sctp_clog.x.cwnd.cwnd_new_value = error; 361 sctp_clog.x.cwnd.inflight = net->flight_size; 362 sctp_clog.x.cwnd.cwnd_augment = burst; 363 if (stcb->asoc.send_queue_cnt > 255) 364 
sctp_clog.x.cwnd.cnt_in_send = 255; 365 else 366 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 367 if (stcb->asoc.stream_queue_cnt > 255) 368 sctp_clog.x.cwnd.cnt_in_str = 255; 369 else 370 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 371 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 372 SCTP_LOG_EVENT_MAXBURST, 373 from, 374 sctp_clog.x.misc.log1, 375 sctp_clog.x.misc.log2, 376 sctp_clog.x.misc.log3, 377 sctp_clog.x.misc.log4); 378 } 379 380 void 381 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead) 382 { 383 struct sctp_cwnd_log sctp_clog; 384 385 sctp_clog.x.rwnd.rwnd = peers_rwnd; 386 sctp_clog.x.rwnd.send_size = snd_size; 387 sctp_clog.x.rwnd.overhead = overhead; 388 sctp_clog.x.rwnd.new_rwnd = 0; 389 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 390 SCTP_LOG_EVENT_RWND, 391 from, 392 sctp_clog.x.misc.log1, 393 sctp_clog.x.misc.log2, 394 sctp_clog.x.misc.log3, 395 sctp_clog.x.misc.log4); 396 } 397 398 void 399 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval) 400 { 401 struct sctp_cwnd_log sctp_clog; 402 403 sctp_clog.x.rwnd.rwnd = peers_rwnd; 404 sctp_clog.x.rwnd.send_size = flight_size; 405 sctp_clog.x.rwnd.overhead = overhead; 406 sctp_clog.x.rwnd.new_rwnd = a_rwndval; 407 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 408 SCTP_LOG_EVENT_RWND, 409 from, 410 sctp_clog.x.misc.log1, 411 sctp_clog.x.misc.log2, 412 sctp_clog.x.misc.log3, 413 sctp_clog.x.misc.log4); 414 } 415 416 void 417 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt) 418 { 419 struct sctp_cwnd_log sctp_clog; 420 421 sctp_clog.x.mbcnt.total_queue_size = total_oq; 422 sctp_clog.x.mbcnt.size_change = book; 423 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q; 424 sctp_clog.x.mbcnt.mbcnt_change = mbcnt; 425 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 426 SCTP_LOG_EVENT_MBCNT, 427 from, 428 
sctp_clog.x.misc.log1, 429 sctp_clog.x.misc.log2, 430 sctp_clog.x.misc.log3, 431 sctp_clog.x.misc.log4); 432 } 433 434 void 435 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d) 436 { 437 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 438 SCTP_LOG_MISC_EVENT, 439 from, 440 a, b, c, d); 441 } 442 443 void 444 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from) 445 { 446 struct sctp_cwnd_log sctp_clog; 447 448 sctp_clog.x.wake.stcb = (void *)stcb; 449 sctp_clog.x.wake.wake_cnt = wake_cnt; 450 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count; 451 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt; 452 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt; 453 454 if (stcb->asoc.stream_queue_cnt < 0xff) 455 sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt; 456 else 457 sctp_clog.x.wake.stream_qcnt = 0xff; 458 459 if (stcb->asoc.chunks_on_out_queue < 0xff) 460 sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue; 461 else 462 sctp_clog.x.wake.chunks_on_oque = 0xff; 463 464 sctp_clog.x.wake.sctpflags = 0; 465 /* set in the defered mode stuff */ 466 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) 467 sctp_clog.x.wake.sctpflags |= 1; 468 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) 469 sctp_clog.x.wake.sctpflags |= 2; 470 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) 471 sctp_clog.x.wake.sctpflags |= 4; 472 /* what about the sb */ 473 if (stcb->sctp_socket) { 474 struct socket *so = stcb->sctp_socket; 475 476 sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff)); 477 } else { 478 sctp_clog.x.wake.sbflags = 0xff; 479 } 480 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 481 SCTP_LOG_EVENT_WAKE, 482 from, 483 sctp_clog.x.misc.log1, 484 sctp_clog.x.misc.log2, 485 sctp_clog.x.misc.log3, 486 sctp_clog.x.misc.log4); 487 } 488 489 void 490 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen) 491 { 492 struct 
sctp_cwnd_log sctp_clog; 493 494 sctp_clog.x.blk.onsb = asoc->total_output_queue_size; 495 sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt); 496 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd; 497 sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt; 498 sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue; 499 sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024); 500 sctp_clog.x.blk.sndlen = sendlen; 501 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 502 SCTP_LOG_EVENT_BLOCK, 503 from, 504 sctp_clog.x.misc.log1, 505 sctp_clog.x.misc.log2, 506 sctp_clog.x.misc.log3, 507 sctp_clog.x.misc.log4); 508 } 509 510 int 511 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED) 512 { 513 /* May need to fix this if ktrdump does not work */ 514 return (0); 515 } 516 517 #ifdef SCTP_AUDITING_ENABLED 518 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2]; 519 static int sctp_audit_indx = 0; 520 521 static 522 void 523 sctp_print_audit_report(void) 524 { 525 int i; 526 int cnt; 527 528 cnt = 0; 529 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) { 530 if ((sctp_audit_data[i][0] == 0xe0) && 531 (sctp_audit_data[i][1] == 0x01)) { 532 cnt = 0; 533 SCTP_PRINTF("\n"); 534 } else if (sctp_audit_data[i][0] == 0xf0) { 535 cnt = 0; 536 SCTP_PRINTF("\n"); 537 } else if ((sctp_audit_data[i][0] == 0xc0) && 538 (sctp_audit_data[i][1] == 0x01)) { 539 SCTP_PRINTF("\n"); 540 cnt = 0; 541 } 542 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 543 (uint32_t) sctp_audit_data[i][1]); 544 cnt++; 545 if ((cnt % 14) == 0) 546 SCTP_PRINTF("\n"); 547 } 548 for (i = 0; i < sctp_audit_indx; i++) { 549 if ((sctp_audit_data[i][0] == 0xe0) && 550 (sctp_audit_data[i][1] == 0x01)) { 551 cnt = 0; 552 SCTP_PRINTF("\n"); 553 } else if (sctp_audit_data[i][0] == 0xf0) { 554 cnt = 0; 555 SCTP_PRINTF("\n"); 556 } else if ((sctp_audit_data[i][0] == 0xc0) && 557 (sctp_audit_data[i][1] == 0x01)) { 558 
SCTP_PRINTF("\n"); 559 cnt = 0; 560 } 561 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 562 (uint32_t) sctp_audit_data[i][1]); 563 cnt++; 564 if ((cnt % 14) == 0) 565 SCTP_PRINTF("\n"); 566 } 567 SCTP_PRINTF("\n"); 568 } 569 570 void 571 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 572 struct sctp_nets *net) 573 { 574 int resend_cnt, tot_out, rep, tot_book_cnt; 575 struct sctp_nets *lnet; 576 struct sctp_tmit_chunk *chk; 577 578 sctp_audit_data[sctp_audit_indx][0] = 0xAA; 579 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from; 580 sctp_audit_indx++; 581 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 582 sctp_audit_indx = 0; 583 } 584 if (inp == NULL) { 585 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 586 sctp_audit_data[sctp_audit_indx][1] = 0x01; 587 sctp_audit_indx++; 588 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 589 sctp_audit_indx = 0; 590 } 591 return; 592 } 593 if (stcb == NULL) { 594 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 595 sctp_audit_data[sctp_audit_indx][1] = 0x02; 596 sctp_audit_indx++; 597 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 598 sctp_audit_indx = 0; 599 } 600 return; 601 } 602 sctp_audit_data[sctp_audit_indx][0] = 0xA1; 603 sctp_audit_data[sctp_audit_indx][1] = 604 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 605 sctp_audit_indx++; 606 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 607 sctp_audit_indx = 0; 608 } 609 rep = 0; 610 tot_book_cnt = 0; 611 resend_cnt = tot_out = 0; 612 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 613 if (chk->sent == SCTP_DATAGRAM_RESEND) { 614 resend_cnt++; 615 } else if (chk->sent < SCTP_DATAGRAM_RESEND) { 616 tot_out += chk->book_size; 617 tot_book_cnt++; 618 } 619 } 620 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) { 621 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 622 sctp_audit_data[sctp_audit_indx][1] = 0xA1; 623 sctp_audit_indx++; 624 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 625 sctp_audit_indx = 0; 626 } 627 SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n", 
628 resend_cnt, stcb->asoc.sent_queue_retran_cnt); 629 rep = 1; 630 stcb->asoc.sent_queue_retran_cnt = resend_cnt; 631 sctp_audit_data[sctp_audit_indx][0] = 0xA2; 632 sctp_audit_data[sctp_audit_indx][1] = 633 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 634 sctp_audit_indx++; 635 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 636 sctp_audit_indx = 0; 637 } 638 } 639 if (tot_out != stcb->asoc.total_flight) { 640 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 641 sctp_audit_data[sctp_audit_indx][1] = 0xA2; 642 sctp_audit_indx++; 643 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 644 sctp_audit_indx = 0; 645 } 646 rep = 1; 647 SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out, 648 (int)stcb->asoc.total_flight); 649 stcb->asoc.total_flight = tot_out; 650 } 651 if (tot_book_cnt != stcb->asoc.total_flight_count) { 652 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 653 sctp_audit_data[sctp_audit_indx][1] = 0xA5; 654 sctp_audit_indx++; 655 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 656 sctp_audit_indx = 0; 657 } 658 rep = 1; 659 SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt); 660 661 stcb->asoc.total_flight_count = tot_book_cnt; 662 } 663 tot_out = 0; 664 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 665 tot_out += lnet->flight_size; 666 } 667 if (tot_out != stcb->asoc.total_flight) { 668 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 669 sctp_audit_data[sctp_audit_indx][1] = 0xA3; 670 sctp_audit_indx++; 671 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 672 sctp_audit_indx = 0; 673 } 674 rep = 1; 675 SCTP_PRINTF("real flight:%d net total was %d\n", 676 stcb->asoc.total_flight, tot_out); 677 /* now corrective action */ 678 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 679 680 tot_out = 0; 681 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 682 if ((chk->whoTo == lnet) && 683 (chk->sent < SCTP_DATAGRAM_RESEND)) { 684 tot_out += chk->book_size; 685 } 686 } 687 if (lnet->flight_size != tot_out) { 688 SCTP_PRINTF("net:%p flight was %d corrected to %d\n", 689 (void *)lnet, 
lnet->flight_size, 690 tot_out); 691 lnet->flight_size = tot_out; 692 } 693 } 694 } 695 if (rep) { 696 sctp_print_audit_report(); 697 } 698 } 699 700 void 701 sctp_audit_log(uint8_t ev, uint8_t fd) 702 { 703 704 sctp_audit_data[sctp_audit_indx][0] = ev; 705 sctp_audit_data[sctp_audit_indx][1] = fd; 706 sctp_audit_indx++; 707 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 708 sctp_audit_indx = 0; 709 } 710 } 711 712 #endif 713 714 /* 715 * sctp_stop_timers_for_shutdown() should be called 716 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT 717 * state to make sure that all timers are stopped. 718 */ 719 void 720 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb) 721 { 722 struct sctp_association *asoc; 723 struct sctp_nets *net; 724 725 asoc = &stcb->asoc; 726 727 (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer); 728 (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); 729 (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer); 730 (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer); 731 (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer); 732 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 733 (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer); 734 (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer); 735 } 736 } 737 738 /* 739 * a list of sizes based on typical mtu's, used only if next hop size not 740 * returned. 741 */ 742 static uint32_t sctp_mtu_sizes[] = { 743 68, 744 296, 745 508, 746 512, 747 544, 748 576, 749 1006, 750 1492, 751 1500, 752 1536, 753 2002, 754 2048, 755 4352, 756 4464, 757 8166, 758 17914, 759 32000, 760 65535 761 }; 762 763 /* 764 * Return the largest MTU smaller than val. If there is no 765 * entry, just return val. 
766 */ 767 uint32_t 768 sctp_get_prev_mtu(uint32_t val) 769 { 770 uint32_t i; 771 772 if (val <= sctp_mtu_sizes[0]) { 773 return (val); 774 } 775 for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) { 776 if (val <= sctp_mtu_sizes[i]) { 777 break; 778 } 779 } 780 return (sctp_mtu_sizes[i - 1]); 781 } 782 783 /* 784 * Return the smallest MTU larger than val. If there is no 785 * entry, just return val. 786 */ 787 uint32_t 788 sctp_get_next_mtu(uint32_t val) 789 { 790 /* select another MTU that is just bigger than this one */ 791 uint32_t i; 792 793 for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) { 794 if (val < sctp_mtu_sizes[i]) { 795 return (sctp_mtu_sizes[i]); 796 } 797 } 798 return (val); 799 } 800 801 void 802 sctp_fill_random_store(struct sctp_pcb *m) 803 { 804 /* 805 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and 806 * our counter. The result becomes our good random numbers and we 807 * then setup to give these out. Note that we do no locking to 808 * protect this. This is ok, since if competing folks call this we 809 * will get more gobbled gook in the random store which is what we 810 * want. 
There is a danger that two guys will use the same random 811 * numbers, but thats ok too since that is random as well :-> 812 */ 813 m->store_at = 0; 814 (void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers, 815 sizeof(m->random_numbers), (uint8_t *) & m->random_counter, 816 sizeof(m->random_counter), (uint8_t *) m->random_store); 817 m->random_counter++; 818 } 819 820 uint32_t 821 sctp_select_initial_TSN(struct sctp_pcb *inp) 822 { 823 /* 824 * A true implementation should use random selection process to get 825 * the initial stream sequence number, using RFC1750 as a good 826 * guideline 827 */ 828 uint32_t x, *xp; 829 uint8_t *p; 830 int store_at, new_store; 831 832 if (inp->initial_sequence_debug != 0) { 833 uint32_t ret; 834 835 ret = inp->initial_sequence_debug; 836 inp->initial_sequence_debug++; 837 return (ret); 838 } 839 retry: 840 store_at = inp->store_at; 841 new_store = store_at + sizeof(uint32_t); 842 if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) { 843 new_store = 0; 844 } 845 if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) { 846 goto retry; 847 } 848 if (new_store == 0) { 849 /* Refill the random store */ 850 sctp_fill_random_store(inp); 851 } 852 p = &inp->random_store[store_at]; 853 xp = (uint32_t *) p; 854 x = *xp; 855 return (x); 856 } 857 858 uint32_t 859 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check) 860 { 861 uint32_t x; 862 struct timeval now; 863 864 if (check) { 865 (void)SCTP_GETTIME_TIMEVAL(&now); 866 } 867 for (;;) { 868 x = sctp_select_initial_TSN(&inp->sctp_ep); 869 if (x == 0) { 870 /* we never use 0 */ 871 continue; 872 } 873 if (!check || sctp_is_vtag_good(x, lport, rport, &now)) { 874 break; 875 } 876 } 877 return (x); 878 } 879 880 int 881 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb, 882 uint32_t override_tag, uint32_t vrf_id) 883 { 884 struct sctp_association *asoc; 885 886 /* 887 * Anything set to zero is taken care of by the allocation routine's 888 * bzero 889 
*/ 890 891 /* 892 * Up front select what scoping to apply on addresses I tell my peer 893 * Not sure what to do with these right now, we will need to come up 894 * with a way to set them. We may need to pass them through from the 895 * caller in the sctp_aloc_assoc() function. 896 */ 897 int i; 898 899 asoc = &stcb->asoc; 900 /* init all variables to a known value. */ 901 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE); 902 asoc->max_burst = m->sctp_ep.max_burst; 903 asoc->fr_max_burst = m->sctp_ep.fr_max_burst; 904 asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 905 asoc->cookie_life = m->sctp_ep.def_cookie_life; 906 asoc->sctp_cmt_on_off = m->sctp_cmt_on_off; 907 asoc->ecn_allowed = m->sctp_ecn_enable; 908 asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off); 909 asoc->sctp_cmt_pf = (uint8_t) 0; 910 asoc->sctp_frag_point = m->sctp_frag_point; 911 asoc->sctp_features = m->sctp_features; 912 asoc->default_dscp = m->sctp_ep.default_dscp; 913 #ifdef INET6 914 if (m->sctp_ep.default_flowlabel) { 915 asoc->default_flowlabel = m->sctp_ep.default_flowlabel; 916 } else { 917 if (m->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) { 918 asoc->default_flowlabel = sctp_select_initial_TSN(&m->sctp_ep); 919 asoc->default_flowlabel &= 0x000fffff; 920 asoc->default_flowlabel |= 0x80000000; 921 } else { 922 asoc->default_flowlabel = 0; 923 } 924 } 925 #endif 926 asoc->sb_send_resv = 0; 927 if (override_tag) { 928 asoc->my_vtag = override_tag; 929 } else { 930 asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1); 931 } 932 /* Get the nonce tags */ 933 asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 934 asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 935 asoc->vrf_id = vrf_id; 936 937 #ifdef SCTP_ASOCLOG_OF_TSNS 938 asoc->tsn_in_at = 0; 939 asoc->tsn_out_at = 0; 940 asoc->tsn_in_wrapped = 0; 941 asoc->tsn_out_wrapped = 
0; 942 asoc->cumack_log_at = 0; 943 asoc->cumack_log_atsnt = 0; 944 #endif 945 #ifdef SCTP_FS_SPEC_LOG 946 asoc->fs_index = 0; 947 #endif 948 asoc->refcnt = 0; 949 asoc->assoc_up_sent = 0; 950 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq = 951 sctp_select_initial_TSN(&m->sctp_ep); 952 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1; 953 /* we are optimisitic here */ 954 asoc->peer_supports_pktdrop = 1; 955 asoc->peer_supports_nat = 0; 956 asoc->sent_queue_retran_cnt = 0; 957 958 /* for CMT */ 959 asoc->last_net_cmt_send_started = NULL; 960 961 /* This will need to be adjusted */ 962 asoc->last_acked_seq = asoc->init_seq_number - 1; 963 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 964 asoc->asconf_seq_in = asoc->last_acked_seq; 965 966 /* here we are different, we hold the next one we expect */ 967 asoc->str_reset_seq_in = asoc->last_acked_seq + 1; 968 969 asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max; 970 asoc->initial_rto = m->sctp_ep.initial_rto; 971 972 asoc->max_init_times = m->sctp_ep.max_init_times; 973 asoc->max_send_times = m->sctp_ep.max_send_times; 974 asoc->def_net_failure = m->sctp_ep.def_net_failure; 975 asoc->def_net_pf_threshold = m->sctp_ep.def_net_pf_threshold; 976 asoc->free_chunk_cnt = 0; 977 978 asoc->iam_blocking = 0; 979 asoc->context = m->sctp_context; 980 asoc->local_strreset_support = m->local_strreset_support; 981 asoc->def_send = m->def_send; 982 asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 983 asoc->sack_freq = m->sctp_ep.sctp_sack_freq; 984 asoc->pr_sctp_cnt = 0; 985 asoc->total_output_queue_size = 0; 986 987 if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 988 struct in6pcb *inp6; 989 990 /* Its a V6 socket */ 991 inp6 = (struct in6pcb *)m; 992 asoc->ipv6_addr_legal = 1; 993 /* Now look at the binding flag to see if V4 will be legal */ 994 if (SCTP_IPV6_V6ONLY(inp6) == 0) { 995 asoc->ipv4_addr_legal = 1; 996 } else { 997 /* V4 
addresses are NOT legal on the association */ 998 asoc->ipv4_addr_legal = 0; 999 } 1000 } else { 1001 /* Its a V4 socket, no - V6 */ 1002 asoc->ipv4_addr_legal = 1; 1003 asoc->ipv6_addr_legal = 0; 1004 } 1005 1006 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND); 1007 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket); 1008 1009 asoc->smallest_mtu = m->sctp_frag_point; 1010 asoc->minrto = m->sctp_ep.sctp_minrto; 1011 asoc->maxrto = m->sctp_ep.sctp_maxrto; 1012 1013 asoc->locked_on_sending = NULL; 1014 asoc->stream_locked_on = 0; 1015 asoc->ecn_echo_cnt_onq = 0; 1016 asoc->stream_locked = 0; 1017 1018 asoc->send_sack = 1; 1019 1020 LIST_INIT(&asoc->sctp_restricted_addrs); 1021 1022 TAILQ_INIT(&asoc->nets); 1023 TAILQ_INIT(&asoc->pending_reply_queue); 1024 TAILQ_INIT(&asoc->asconf_ack_sent); 1025 /* Setup to fill the hb random cache at first HB */ 1026 asoc->hb_random_idx = 4; 1027 1028 asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time; 1029 1030 stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module; 1031 stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module]; 1032 1033 stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module; 1034 stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module]; 1035 1036 /* 1037 * Now the stream parameters, here we allocate space for all streams 1038 * that we request by default. 
1039 */ 1040 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams = 1041 m->sctp_ep.pre_open_stream_count; 1042 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *, 1043 asoc->streamoutcnt * sizeof(struct sctp_stream_out), 1044 SCTP_M_STRMO); 1045 if (asoc->strmout == NULL) { 1046 /* big trouble no memory */ 1047 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1048 return (ENOMEM); 1049 } 1050 for (i = 0; i < asoc->streamoutcnt; i++) { 1051 /* 1052 * inbound side must be set to 0xffff, also NOTE when we get 1053 * the INIT-ACK back (for INIT sender) we MUST reduce the 1054 * count (streamoutcnt) but first check if we sent to any of 1055 * the upper streams that were dropped (if some were). Those 1056 * that were dropped must be notified to the upper layer as 1057 * failed to send. 1058 */ 1059 asoc->strmout[i].next_sequence_send = 0x0; 1060 TAILQ_INIT(&asoc->strmout[i].outqueue); 1061 asoc->strmout[i].chunks_on_queues = 0; 1062 asoc->strmout[i].stream_no = i; 1063 asoc->strmout[i].last_msg_incomplete = 0; 1064 asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL); 1065 } 1066 asoc->ss_functions.sctp_ss_init(stcb, asoc, 0); 1067 1068 /* Now the mapping array */ 1069 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY; 1070 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size, 1071 SCTP_M_MAP); 1072 if (asoc->mapping_array == NULL) { 1073 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1074 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1075 return (ENOMEM); 1076 } 1077 memset(asoc->mapping_array, 0, asoc->mapping_array_size); 1078 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size, 1079 SCTP_M_MAP); 1080 if (asoc->nr_mapping_array == NULL) { 1081 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1082 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1083 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1084 return (ENOMEM); 1085 } 1086 memset(asoc->nr_mapping_array, 0, 
asoc->mapping_array_size); 1087 1088 /* Now the init of the other outqueues */ 1089 TAILQ_INIT(&asoc->free_chunks); 1090 TAILQ_INIT(&asoc->control_send_queue); 1091 TAILQ_INIT(&asoc->asconf_send_queue); 1092 TAILQ_INIT(&asoc->send_queue); 1093 TAILQ_INIT(&asoc->sent_queue); 1094 TAILQ_INIT(&asoc->reasmqueue); 1095 TAILQ_INIT(&asoc->resetHead); 1096 asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome; 1097 TAILQ_INIT(&asoc->asconf_queue); 1098 /* authentication fields */ 1099 asoc->authinfo.random = NULL; 1100 asoc->authinfo.active_keyid = 0; 1101 asoc->authinfo.assoc_key = NULL; 1102 asoc->authinfo.assoc_keyid = 0; 1103 asoc->authinfo.recv_key = NULL; 1104 asoc->authinfo.recv_keyid = 0; 1105 LIST_INIT(&asoc->shared_keys); 1106 asoc->marked_retrans = 0; 1107 asoc->port = m->sctp_ep.port; 1108 asoc->timoinit = 0; 1109 asoc->timodata = 0; 1110 asoc->timosack = 0; 1111 asoc->timoshutdown = 0; 1112 asoc->timoheartbeat = 0; 1113 asoc->timocookie = 0; 1114 asoc->timoshutdownack = 0; 1115 (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time); 1116 asoc->discontinuity_time = asoc->start_time; 1117 /* 1118 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and 1119 * freed later when the association is freed. 
 */
	return (0);
}

/*
 * Debug helper: dump both TSN mapping arrays (renegable and non-renegable)
 * for an association, 16 bytes per output row, preceded by the base TSN,
 * cumulative ack and highest-TSN summary line.  Trailing all-zero bytes are
 * suppressed (the scan stops at index 1, so byte 0 is always printed even
 * when zero).
 */
void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	/* find last non-zero byte of the renegable map */
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	/* same scan for the non-renegable map */
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

/*
 * Grow both TSN mapping arrays so that at least 'needed' additional bits fit.
 * Both arrays are always kept the same size (mapping_array_size).  Returns 0
 * on success, -1 if either allocation fails (in which case the association
 * state is left untouched).
 */
int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	/* 'needed' is in bits; round up to bytes and add slack */
	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	/* copy the old contents, then swap the pointers in */
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}


/*
 * Run one iterator request to completion: walk every endpoint (or the single
 * endpoint for SCTP_ITERATOR_DO_SINGLE_INP) matching the requested pcb
 * flags/features, and within each endpoint every association matching
 * asoc_state, invoking the caller-supplied callbacks.  Frees 'it' when done.
 *
 * Locking: entered without locks; takes INP_INFO read lock and the global
 * ITERATOR lock, plus per-inp read locks and per-stcb locks as it walks.
 * Every SCTP_ITERATOR_MAX_AT_ONCE associations it drops and reacquires the
 * global locks to let other threads in, holding the current inp/stcb alive
 * via refcounts across the gap; the exact lock/refcount ordering below is
 * load-bearing — do not reorder.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* drop the reference taken when the iterator was queued */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* first endpoint is already read-locked above */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/*
			 * While the locks were dropped another thread may
			 * have asked us to stop (flags set in sctp_it_ctl).
			 */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

/*
 * Drain the queued iterator work list, running each request via
 * sctp_iterator_work() (which frees it).  The WQ lock is dropped around each
 * request and retaken afterwards, so new requests may be queued while one is
 * running; the pre-saved 'nit' keeps the walk valid across the gap.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}


/*
 * Service the address work queue built up by rtsock address-change events:
 * move every queued sctp_laddr onto a freshly allocated asconf iterator and
 * kick off an iterator over all bound-all endpoints to process them.  On
 * allocation failure the ADDR_WQ timer is simply restarted so we retry
 * later.
 */
static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	SCTP_WQ_ADDR_LOCK();
	/* transfer the whole pending list onto our private iterator list */
	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}
	SCTP_WQ_ADDR_UNLOCK();

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL,	/* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
	}
}

/*
 * Central callout handler for every SCTP timer type.  't' is the
 * struct sctp_timer armed by sctp_timer_start(); its ep/tcb/net pointers
 * identify the objects the timer belongs to (any may be NULL depending on
 * type).
 *
 * The long prologue validates the timer (self pointer, type, still
 * active/not rescheduled) and pins inp (ref) and stcb (refcnt, then TCB
 * lock) before dispatching; the matching releases happen at the
 * get_out/out_decr/out_no_decr labels.  Handlers that destroy the stcb or
 * inp (ASOCKILL, INPKILL, SHUTDOWNGUARD abort, or any "its gone" path) jump
 * past the normal unlock to avoid touching freed objects.  'type' is saved
 * early because several paths may free the object 'tmr' lives in.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/* save the type now; the objects tmr lives in may be freed below */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			/* socket is gone and this type has no business left */
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/* refcnt held us over the lock acquisition; drop it now */
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timeout */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			/* re-arm and push any HB chunk out */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* rotate the endpoint's cookie secret keys */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* shutdown took too long; abort the association */
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}

/*
 * Arm the timer of the given type.  Which of inp/stcb/net must be non-NULL
 * depends on t_type (each case below checks its own requirements and bails
 * silently otherwise).  If the underlying callout is already pending the
 * existing deadline is kept — callers cannot use this to re-arm.  Most
 * retransmission-style timers use the destination's smoothed RTO, falling
 * back to the association's initial_rto while RTO is still 0.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t to_ticks;
	struct sctp_timer *tmr;

	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * usually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint32_t jitter;

			if ((net->dest_state & SCTP_ADDR_NOHB) &&
			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				/* HBs disabled and address is confirmed */
				return;
			}
			if (net->RTO == 0) {
				to_ticks = stcb->asoc.initial_rto;
			} else {
				to_ticks = net->RTO;
			}
			/* jitter the RTO by up to +/- 50% */
			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
			jitter = rndval % to_ticks;
			if (jitter >= (to_ticks >> 1)) {
				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
			} else {
				to_ticks = to_ticks - jitter;
			}
			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    !(net->dest_state & SCTP_ADDR_PF)) {
				to_ticks += net->heart_beat_delay;
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &net->hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was complete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here, usually about 60
		 * minutes.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		/* NOTE: shares the strreset_timer slot with STRRESET */
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU, usually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (inp == NULL)) {
			return;
		}
		if (net == NULL) {
			return;
		}
		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		/* net must be NULL for this type */
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		return;
		/* NOTE(review): unreachable break after return */
		break;
	}
	/*
	 * NOTE(review): to_ticks is unsigned, so "<= 0" only catches 0;
	 * behavior is unchanged either way.
	 */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running.
if it is
	 * we leave the current one up unchanged
	 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		/* Track how many retransmission (SEND) timers are running. */
		stcb->asoc.num_send_timers_up++;
	}
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}

/*
 * Stop the timer of type t_type.  Which timer object is used depends on
 * t_type and hangs off inp, stcb or net; if a required argument is NULL the
 * call is a no-op.  'from' is recorded in the timer for debugging (who
 * requested the stop).
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer lives outside an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer (shares the strreset timer slot).
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped. So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Keep the running-SEND-timer count consistent. */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}

/*
 * Return the total number of data bytes in the mbuf chain 'm'.
 */
uint32_t
sctp_calculate_len(struct mbuf *m)
{
	uint32_t tlen = 0;
	struct mbuf *at;

	at = m;
	while (at) {
		tlen += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}
	return (tlen);
}

void
sctp_mtu_size_reset(struct sctp_inpcb *inp,
    struct sctp_association *asoc, uint32_t mtu)
{
	/*
	 * Reset the P-MTU size on this association, this involves changing
	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
	 * allow the DF flag to be cleared.
	 */
	struct sctp_tmit_chunk *chk;
	unsigned int eff_mtu, ovh;

	asoc->smallest_mtu = mtu;
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ovh = SCTP_MIN_OVERHEAD;
	} else {
		ovh = SCTP_MIN_V4_OVERHEAD;
	}
	eff_mtu = mtu - ovh;
	/* Chunks that no longer fit must be allowed to be fragmented. */
	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
		if (chk->send_size > eff_mtu) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->send_size > eff_mtu) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
}


/*
 * given an association and starting time of the current RTT period return
 * RTO in number of msecs net should point to the current network
 */

uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 (alignment-unsafe callers pass a copy flag) */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	        (uint64_t) now.tv_usec;

	/* compute rtt in ms */
	rtt = net->rtt / 1000;
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement: seed srtt/rttvar per RFC 4960 C2 */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}

/*
 * return a pointer to a contiguous piece of data from the given mbuf chain
 * starting at 'off' for 'len' bytes. If the desired piece spans more than
 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
 * is >= 'len'. returns NULL if there isn't 'len' bytes in the chain.
 */
caddr_t
sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
{
	uint32_t count;
	uint8_t *ptr;

	ptr = in_ptr;
	if ((off < 0) || (len <= 0))
		return (NULL);

	/* find the desired start location */
	while ((m != NULL) && (off > 0)) {
		if (off < SCTP_BUF_LEN(m))
			break;
		off -= SCTP_BUF_LEN(m);
		m = SCTP_BUF_NEXT(m);
	}
	if (m == NULL)
		return (NULL);

	/* is the current mbuf large enough (eg. contiguous)? */
	if ((SCTP_BUF_LEN(m) - off) >= len) {
		return (mtod(m, caddr_t)+off);
	} else {
		/* else, it spans more than one mbuf, so save a temp copy... */
		while ((m != NULL) && (len > 0)) {
			count = min(SCTP_BUF_LEN(m) - off, len);
			bcopy(mtod(m, caddr_t)+off, ptr, count);
			len -= count;
			ptr += count;
			off = 0;
			m = SCTP_BUF_NEXT(m);
		}
		if ((m == NULL) && (len > 0))
			return (NULL);
		else
			return ((caddr_t)in_ptr);
	}
}



/*
 * Typed wrapper around sctp_m_getptr() for pulling an SCTP parameter header.
 */
struct sctp_paramhdr *
sctp_get_next_param(struct mbuf *m,
    int offset,
    struct sctp_paramhdr *pull,
    int pull_limit)
{
	/* This just provides a typed signature to Peter's Pull routine */
	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
	    (uint8_t *) pull));
}


int
sctp_add_pad_tombuf(struct mbuf *m, int padlen)
{
	/*
	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
	 * padlen is > 3 this routine will fail.
	 */
	uint8_t *dp;
	int i;

	if (padlen > 3) {
		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
		return (ENOBUFS);
	}
	if (padlen <= M_TRAILINGSPACE(m)) {
		/*
		 * The easy way. We hope the majority of the time we hit
		 * here :)
		 */
		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
		SCTP_BUF_LEN(m) += padlen;
	} else {
		/* Hard way we must grow the mbuf */
		struct mbuf *tmp;

		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
		if (tmp == NULL) {
			/* Out of space GAK! we are in big trouble. */
			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
			return (ENOBUFS);
		}
		/* setup and insert in middle */
		SCTP_BUF_LEN(tmp) = padlen;
		SCTP_BUF_NEXT(tmp) = NULL;
		SCTP_BUF_NEXT(m) = tmp;
		dp = mtod(tmp, uint8_t *);
	}
	/* zero out the pad */
	for (i = 0; i < padlen; i++) {
		*dp = 0;
		dp++;
	}
	return (0);
}

/*
 * Pad the last mbuf of chain 'm' (or 'last_mbuf', if the caller already
 * knows it) with 'padval' zero bytes.  Returns 0 or an errno.
 */
int
sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
{
	/* find the last mbuf in chain and pad it */
	struct mbuf *m_at;

	if (last_mbuf) {
		return (sctp_add_pad_tombuf(last_mbuf, padval));
	} else {
		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
			if (SCTP_BUF_NEXT(m_at) == NULL) {
				return (sctp_add_pad_tombuf(m_at, padval));
			}
		}
	}
	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
	return (EFAULT);
}

/*
 * Queue an SCTP_ASSOC_CHANGE notification on the socket (if the user
 * enabled the event) and, for 1-to-1 style sockets, set so_error and wake
 * any sleepers when the association is lost or could not be started.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	size_t notif_len, abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = sizeof(struct
sctp_assoc_change); 2611 if (abort != NULL) { 2612 abort_len = htons(abort->ch.chunk_length); 2613 } else { 2614 abort_len = 0; 2615 } 2616 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 2617 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 2618 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 2619 notif_len += abort_len; 2620 } 2621 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 2622 if (m_notify == NULL) { 2623 /* Retry with smaller value. */ 2624 notif_len = sizeof(struct sctp_assoc_change); 2625 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 2626 if (m_notify == NULL) { 2627 goto set_error; 2628 } 2629 } 2630 SCTP_BUF_NEXT(m_notify) = NULL; 2631 sac = mtod(m_notify, struct sctp_assoc_change *); 2632 sac->sac_type = SCTP_ASSOC_CHANGE; 2633 sac->sac_flags = 0; 2634 sac->sac_length = sizeof(struct sctp_assoc_change); 2635 sac->sac_state = state; 2636 sac->sac_error = error; 2637 /* XXX verify these stream counts */ 2638 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 2639 sac->sac_inbound_streams = stcb->asoc.streamincnt; 2640 sac->sac_assoc_id = sctp_get_associd(stcb); 2641 if (notif_len > sizeof(struct sctp_assoc_change)) { 2642 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 2643 i = 0; 2644 if (stcb->asoc.peer_supports_prsctp) { 2645 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 2646 } 2647 if (stcb->asoc.peer_supports_auth) { 2648 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 2649 } 2650 if (stcb->asoc.peer_supports_asconf) { 2651 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 2652 } 2653 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 2654 if (stcb->asoc.peer_supports_strreset) { 2655 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 2656 } 2657 sac->sac_length += i; 2658 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 2659 memcpy(sac->sac_info, abort, abort_len); 2660 sac->sac_length += abort_len; 2661 } 2662 } 2663 SCTP_BUF_LEN(m_notify) = 
sac->sac_length; 2664 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2665 0, 0, stcb->asoc.context, 0, 0, 0, 2666 m_notify); 2667 if (control != NULL) { 2668 control->length = SCTP_BUF_LEN(m_notify); 2669 /* not that we need this */ 2670 control->tail_mbuf = m_notify; 2671 control->spec_flags = M_NOTIFICATION; 2672 sctp_add_to_readq(stcb->sctp_ep, stcb, 2673 control, 2674 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 2675 so_locked); 2676 } else { 2677 sctp_m_freem(m_notify); 2678 } 2679 } 2680 /* 2681 * For 1-to-1 style sockets, we send up and error when an ABORT 2682 * comes in. 2683 */ 2684 set_error: 2685 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2686 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2687 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 2688 if (from_peer) { 2689 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) { 2690 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 2691 stcb->sctp_socket->so_error = ECONNREFUSED; 2692 } else { 2693 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 2694 stcb->sctp_socket->so_error = ECONNRESET; 2695 } 2696 } else { 2697 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 2698 stcb->sctp_socket->so_error = ECONNABORTED; 2699 } 2700 } 2701 /* Wake ANY sleepers */ 2702 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2703 so = SCTP_INP_SO(stcb->sctp_ep); 2704 if (!so_locked) { 2705 atomic_add_int(&stcb->asoc.refcnt, 1); 2706 SCTP_TCB_UNLOCK(stcb); 2707 SCTP_SOCKET_LOCK(so, 1); 2708 SCTP_TCB_LOCK(stcb); 2709 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2710 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 2711 SCTP_SOCKET_UNLOCK(so, 1); 2712 return; 2713 } 2714 } 2715 #endif 2716 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2717 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2718 ((state == SCTP_COMM_LOST) || (state == 
SCTP_CANT_STR_ASSOC))) { 2719 socantrcvmore(stcb->sctp_socket); 2720 } 2721 sorwakeup(stcb->sctp_socket); 2722 sowwakeup(stcb->sctp_socket); 2723 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2724 if (!so_locked) { 2725 SCTP_SOCKET_UNLOCK(so, 1); 2726 } 2727 #endif 2728 } 2729 2730 static void 2731 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 2732 struct sockaddr *sa, uint32_t error) 2733 { 2734 struct mbuf *m_notify; 2735 struct sctp_paddr_change *spc; 2736 struct sctp_queued_to_read *control; 2737 2738 if ((stcb == NULL) || 2739 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 2740 /* event not enabled */ 2741 return; 2742 } 2743 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA); 2744 if (m_notify == NULL) 2745 return; 2746 SCTP_BUF_LEN(m_notify) = 0; 2747 spc = mtod(m_notify, struct sctp_paddr_change *); 2748 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 2749 spc->spc_flags = 0; 2750 spc->spc_length = sizeof(struct sctp_paddr_change); 2751 switch (sa->sa_family) { 2752 #ifdef INET 2753 case AF_INET: 2754 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 2755 break; 2756 #endif 2757 #ifdef INET6 2758 case AF_INET6: 2759 { 2760 struct sockaddr_in6 *sin6; 2761 2762 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 2763 2764 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 2765 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 2766 if (sin6->sin6_scope_id == 0) { 2767 /* recover scope_id for user */ 2768 (void)sa6_recoverscope(sin6); 2769 } else { 2770 /* clear embedded scope_id for user */ 2771 in6_clearscope(&sin6->sin6_addr); 2772 } 2773 } 2774 break; 2775 } 2776 #endif 2777 default: 2778 /* TSNH */ 2779 break; 2780 } 2781 spc->spc_state = state; 2782 spc->spc_error = error; 2783 spc->spc_assoc_id = sctp_get_associd(stcb); 2784 2785 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 2786 SCTP_BUF_NEXT(m_notify) = NULL; 2787 2788 /* append to socket 
	 */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}


/*
 * Queue an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for
 * chunk 'chk' ('sent' non-zero if it was transmitted) with cause 'error'.
 * Steals chk->data onto the notification mbuf chain.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* The new-style event takes precedence if both are enabled. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* Reported length covers the notification plus the user payload. */
	length += chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}


/*
 * Like sctp_notify_send_failed(), but for a stream-queue-pending entry
 * 'sp' that never became a chunk (always reported as unsent).  Steals
 * sp->data onto the notification mbuf chain.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* The new-style event takes precedence if both are enabled. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	length += sp->length;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb,
stcb->asoc.primary_destination, 3008 0, 0, stcb->asoc.context, 0, 0, 0, 3009 m_notify); 3010 if (control == NULL) { 3011 /* no memory */ 3012 sctp_m_freem(m_notify); 3013 return; 3014 } 3015 control->spec_flags = M_NOTIFICATION; 3016 sctp_add_to_readq(stcb->sctp_ep, stcb, 3017 control, 3018 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3019 } 3020 3021 3022 3023 static void 3024 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3025 { 3026 struct mbuf *m_notify; 3027 struct sctp_adaptation_event *sai; 3028 struct sctp_queued_to_read *control; 3029 3030 if ((stcb == NULL) || 3031 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3032 /* event not enabled */ 3033 return; 3034 } 3035 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA); 3036 if (m_notify == NULL) 3037 /* no space left */ 3038 return; 3039 SCTP_BUF_LEN(m_notify) = 0; 3040 sai = mtod(m_notify, struct sctp_adaptation_event *); 3041 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3042 sai->sai_flags = 0; 3043 sai->sai_length = sizeof(struct sctp_adaptation_event); 3044 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3045 sai->sai_assoc_id = sctp_get_associd(stcb); 3046 3047 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3048 SCTP_BUF_NEXT(m_notify) = NULL; 3049 3050 /* append to socket */ 3051 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3052 0, 0, stcb->asoc.context, 0, 0, 0, 3053 m_notify); 3054 if (control == NULL) { 3055 /* no memory */ 3056 sctp_m_freem(m_notify); 3057 return; 3058 } 3059 control->length = SCTP_BUF_LEN(m_notify); 3060 control->spec_flags = M_NOTIFICATION; 3061 /* not that we need this */ 3062 control->tail_mbuf = m_notify; 3063 sctp_add_to_readq(stcb->sctp_ep, stcb, 3064 control, 3065 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3066 } 3067 3068 /* This always must be called with the read-queue LOCKED in the INP */ 
3069 static void 3070 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3071 uint32_t val, int so_locked 3072 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3073 SCTP_UNUSED 3074 #endif 3075 ) 3076 { 3077 struct mbuf *m_notify; 3078 struct sctp_pdapi_event *pdapi; 3079 struct sctp_queued_to_read *control; 3080 struct sockbuf *sb; 3081 3082 if ((stcb == NULL) || 3083 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3084 /* event not enabled */ 3085 return; 3086 } 3087 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3088 return; 3089 } 3090 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA); 3091 if (m_notify == NULL) 3092 /* no space left */ 3093 return; 3094 SCTP_BUF_LEN(m_notify) = 0; 3095 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3096 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3097 pdapi->pdapi_flags = 0; 3098 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3099 pdapi->pdapi_indication = error; 3100 pdapi->pdapi_stream = (val >> 16); 3101 pdapi->pdapi_seq = (val & 0x0000ffff); 3102 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3103 3104 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3105 SCTP_BUF_NEXT(m_notify) = NULL; 3106 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3107 0, 0, stcb->asoc.context, 0, 0, 0, 3108 m_notify); 3109 if (control == NULL) { 3110 /* no memory */ 3111 sctp_m_freem(m_notify); 3112 return; 3113 } 3114 control->spec_flags = M_NOTIFICATION; 3115 control->length = SCTP_BUF_LEN(m_notify); 3116 /* not that we need this */ 3117 control->tail_mbuf = m_notify; 3118 control->held_length = 0; 3119 control->length = 0; 3120 sb = &stcb->sctp_socket->so_rcv; 3121 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3122 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	/* Insert right after the entry being partially delivered, if any. */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Hold a ref across the unlock/relock dance. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}

/*
 * Queue an SCTP_SHUTDOWN_EVENT notification (if enabled).  For 1-to-1
 * style sockets also mark the socket as unable to send further data.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}

/*
 * Queue an SCTP_SENDER_DRY_EVENT notification (if enabled): all queued
 * user data has been transmitted and acknowledged.
 */
static void
sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
3239 struct mbuf *m_notify; 3240 struct sctp_sender_dry_event *event; 3241 struct sctp_queued_to_read *control; 3242 3243 if ((stcb == NULL) || 3244 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) { 3245 /* event not enabled */ 3246 return; 3247 } 3248 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA); 3249 if (m_notify == NULL) { 3250 /* no space left */ 3251 return; 3252 } 3253 SCTP_BUF_LEN(m_notify) = 0; 3254 event = mtod(m_notify, struct sctp_sender_dry_event *); 3255 event->sender_dry_type = SCTP_SENDER_DRY_EVENT; 3256 event->sender_dry_flags = 0; 3257 event->sender_dry_length = sizeof(struct sctp_sender_dry_event); 3258 event->sender_dry_assoc_id = sctp_get_associd(stcb); 3259 3260 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event); 3261 SCTP_BUF_NEXT(m_notify) = NULL; 3262 3263 /* append to socket */ 3264 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3265 0, 0, stcb->asoc.context, 0, 0, 0, 3266 m_notify); 3267 if (control == NULL) { 3268 /* no memory */ 3269 sctp_m_freem(m_notify); 3270 return; 3271 } 3272 control->length = SCTP_BUF_LEN(m_notify); 3273 control->spec_flags = M_NOTIFICATION; 3274 /* not that we need this */ 3275 control->tail_mbuf = m_notify; 3276 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 3277 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3278 } 3279 3280 3281 void 3282 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) 3283 { 3284 struct mbuf *m_notify; 3285 struct sctp_queued_to_read *control; 3286 struct sctp_stream_change_event *stradd; 3287 int len; 3288 3289 if ((stcb == NULL) || 3290 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) { 3291 /* event not enabled */ 3292 return; 3293 } 3294 if ((stcb->asoc.peer_req_out) && flag) { 3295 /* Peer made the request, don't tell the local user */ 3296 stcb->asoc.peer_req_out = 0; 3297 
return; 3298 } 3299 stcb->asoc.peer_req_out = 0; 3300 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3301 if (m_notify == NULL) 3302 /* no space left */ 3303 return; 3304 SCTP_BUF_LEN(m_notify) = 0; 3305 len = sizeof(struct sctp_stream_change_event); 3306 if (len > M_TRAILINGSPACE(m_notify)) { 3307 /* never enough room */ 3308 sctp_m_freem(m_notify); 3309 return; 3310 } 3311 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3312 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3313 stradd->strchange_flags = flag; 3314 stradd->strchange_length = len; 3315 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3316 stradd->strchange_instrms = numberin; 3317 stradd->strchange_outstrms = numberout; 3318 SCTP_BUF_LEN(m_notify) = len; 3319 SCTP_BUF_NEXT(m_notify) = NULL; 3320 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3321 /* no space */ 3322 sctp_m_freem(m_notify); 3323 return; 3324 } 3325 /* append to socket */ 3326 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3327 0, 0, stcb->asoc.context, 0, 0, 0, 3328 m_notify); 3329 if (control == NULL) { 3330 /* no memory */ 3331 sctp_m_freem(m_notify); 3332 return; 3333 } 3334 control->spec_flags = M_NOTIFICATION; 3335 control->length = SCTP_BUF_LEN(m_notify); 3336 /* not that we need this */ 3337 control->tail_mbuf = m_notify; 3338 sctp_add_to_readq(stcb->sctp_ep, stcb, 3339 control, 3340 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3341 } 3342 3343 void 3344 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3345 { 3346 struct mbuf *m_notify; 3347 struct sctp_queued_to_read *control; 3348 struct sctp_assoc_reset_event *strasoc; 3349 int len; 3350 3351 if ((stcb == NULL) || 3352 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3353 /* event not enabled */ 3354 return; 3355 } 3356 m_notify = 
sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3357 if (m_notify == NULL) 3358 /* no space left */ 3359 return; 3360 SCTP_BUF_LEN(m_notify) = 0; 3361 len = sizeof(struct sctp_assoc_reset_event); 3362 if (len > M_TRAILINGSPACE(m_notify)) { 3363 /* never enough room */ 3364 sctp_m_freem(m_notify); 3365 return; 3366 } 3367 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *); 3368 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT; 3369 strasoc->assocreset_flags = flag; 3370 strasoc->assocreset_length = len; 3371 strasoc->assocreset_assoc_id = sctp_get_associd(stcb); 3372 strasoc->assocreset_local_tsn = sending_tsn; 3373 strasoc->assocreset_remote_tsn = recv_tsn; 3374 SCTP_BUF_LEN(m_notify) = len; 3375 SCTP_BUF_NEXT(m_notify) = NULL; 3376 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3377 /* no space */ 3378 sctp_m_freem(m_notify); 3379 return; 3380 } 3381 /* append to socket */ 3382 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3383 0, 0, stcb->asoc.context, 0, 0, 0, 3384 m_notify); 3385 if (control == NULL) { 3386 /* no memory */ 3387 sctp_m_freem(m_notify); 3388 return; 3389 } 3390 control->spec_flags = M_NOTIFICATION; 3391 control->length = SCTP_BUF_LEN(m_notify); 3392 /* not that we need this */ 3393 control->tail_mbuf = m_notify; 3394 sctp_add_to_readq(stcb->sctp_ep, stcb, 3395 control, 3396 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3397 } 3398 3399 3400 3401 static void 3402 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3403 int number_entries, uint16_t * list, int flag) 3404 { 3405 struct mbuf *m_notify; 3406 struct sctp_queued_to_read *control; 3407 struct sctp_stream_reset_event *strreset; 3408 int len; 3409 3410 if ((stcb == NULL) || 3411 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3412 /* event not enabled */ 3413 return; 3414 } 3415 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 
3416 if (m_notify == NULL) 3417 /* no space left */ 3418 return; 3419 SCTP_BUF_LEN(m_notify) = 0; 3420 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3421 if (len > M_TRAILINGSPACE(m_notify)) { 3422 /* never enough room */ 3423 sctp_m_freem(m_notify); 3424 return; 3425 } 3426 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3427 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3428 strreset->strreset_flags = flag; 3429 strreset->strreset_length = len; 3430 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3431 if (number_entries) { 3432 int i; 3433 3434 for (i = 0; i < number_entries; i++) { 3435 strreset->strreset_stream_list[i] = ntohs(list[i]); 3436 } 3437 } 3438 SCTP_BUF_LEN(m_notify) = len; 3439 SCTP_BUF_NEXT(m_notify) = NULL; 3440 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3441 /* no space */ 3442 sctp_m_freem(m_notify); 3443 return; 3444 } 3445 /* append to socket */ 3446 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3447 0, 0, stcb->asoc.context, 0, 0, 0, 3448 m_notify); 3449 if (control == NULL) { 3450 /* no memory */ 3451 sctp_m_freem(m_notify); 3452 return; 3453 } 3454 control->spec_flags = M_NOTIFICATION; 3455 control->length = SCTP_BUF_LEN(m_notify); 3456 /* not that we need this */ 3457 control->tail_mbuf = m_notify; 3458 sctp_add_to_readq(stcb->sctp_ep, stcb, 3459 control, 3460 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3461 } 3462 3463 3464 static void 3465 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) 3466 { 3467 struct mbuf *m_notify; 3468 struct sctp_remote_error *sre; 3469 struct sctp_queued_to_read *control; 3470 size_t notif_len, chunk_len; 3471 3472 if ((stcb == NULL) || 3473 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3474 return; 3475 } 3476 if (chunk != NULL) { 3477 chunk_len = 
htons(chunk->ch.chunk_length); 3478 } else { 3479 chunk_len = 0; 3480 } 3481 notif_len = sizeof(struct sctp_remote_error) + chunk_len; 3482 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3483 if (m_notify == NULL) { 3484 /* Retry with smaller value. */ 3485 notif_len = sizeof(struct sctp_remote_error); 3486 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3487 if (m_notify == NULL) { 3488 return; 3489 } 3490 } 3491 SCTP_BUF_NEXT(m_notify) = NULL; 3492 sre = mtod(m_notify, struct sctp_remote_error *); 3493 sre->sre_type = SCTP_REMOTE_ERROR; 3494 sre->sre_flags = 0; 3495 sre->sre_length = sizeof(struct sctp_remote_error); 3496 sre->sre_error = error; 3497 sre->sre_assoc_id = sctp_get_associd(stcb); 3498 if (notif_len > sizeof(struct sctp_remote_error)) { 3499 memcpy(sre->sre_data, chunk, chunk_len); 3500 sre->sre_length += chunk_len; 3501 } 3502 SCTP_BUF_LEN(m_notify) = sre->sre_length; 3503 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3504 0, 0, stcb->asoc.context, 0, 0, 0, 3505 m_notify); 3506 if (control != NULL) { 3507 control->length = SCTP_BUF_LEN(m_notify); 3508 /* not that we need this */ 3509 control->tail_mbuf = m_notify; 3510 control->spec_flags = M_NOTIFICATION; 3511 sctp_add_to_readq(stcb->sctp_ep, stcb, 3512 control, 3513 &stcb->sctp_socket->so_rcv, 1, 3514 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3515 } else { 3516 sctp_m_freem(m_notify); 3517 } 3518 } 3519 3520 3521 void 3522 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 3523 uint32_t error, void *data, int so_locked 3524 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3525 SCTP_UNUSED 3526 #endif 3527 ) 3528 { 3529 if ((stcb == NULL) || 3530 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3531 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3532 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3533 /* If the socket is gone we are out of here */ 3534 return; 3535 } 3536 if 
(stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 3537 return; 3538 } 3539 if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) || 3540 (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) { 3541 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 3542 (notification == SCTP_NOTIFY_INTERFACE_UP) || 3543 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 3544 /* Don't report these in front states */ 3545 return; 3546 } 3547 } 3548 switch (notification) { 3549 case SCTP_NOTIFY_ASSOC_UP: 3550 if (stcb->asoc.assoc_up_sent == 0) { 3551 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 3552 stcb->asoc.assoc_up_sent = 1; 3553 } 3554 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 3555 sctp_notify_adaptation_layer(stcb); 3556 } 3557 if (stcb->asoc.peer_supports_auth == 0) { 3558 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 3559 NULL, so_locked); 3560 } 3561 break; 3562 case SCTP_NOTIFY_ASSOC_DOWN: 3563 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 3564 break; 3565 case SCTP_NOTIFY_INTERFACE_DOWN: 3566 { 3567 struct sctp_nets *net; 3568 3569 net = (struct sctp_nets *)data; 3570 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 3571 (struct sockaddr *)&net->ro._l_addr, error); 3572 break; 3573 } 3574 case SCTP_NOTIFY_INTERFACE_UP: 3575 { 3576 struct sctp_nets *net; 3577 3578 net = (struct sctp_nets *)data; 3579 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 3580 (struct sockaddr *)&net->ro._l_addr, error); 3581 break; 3582 } 3583 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 3584 { 3585 struct sctp_nets *net; 3586 3587 net = (struct sctp_nets *)data; 3588 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 3589 (struct sockaddr *)&net->ro._l_addr, error); 3590 break; 3591 } 3592 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 3593 sctp_notify_send_failed2(stcb, error, 3594 (struct sctp_stream_queue_pending *)data, so_locked); 3595 break; 3596 case SCTP_NOTIFY_SENT_DG_FAIL: 3597 
sctp_notify_send_failed(stcb, 1, error, 3598 (struct sctp_tmit_chunk *)data, so_locked); 3599 break; 3600 case SCTP_NOTIFY_UNSENT_DG_FAIL: 3601 sctp_notify_send_failed(stcb, 0, error, 3602 (struct sctp_tmit_chunk *)data, so_locked); 3603 break; 3604 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 3605 { 3606 uint32_t val; 3607 3608 val = *((uint32_t *) data); 3609 3610 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 3611 break; 3612 } 3613 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 3614 if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) || 3615 ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) { 3616 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked); 3617 } else { 3618 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked); 3619 } 3620 break; 3621 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 3622 if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) || 3623 ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) { 3624 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked); 3625 } else { 3626 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked); 3627 } 3628 break; 3629 case SCTP_NOTIFY_ASSOC_RESTART: 3630 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked); 3631 if (stcb->asoc.peer_supports_auth == 0) { 3632 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 3633 NULL, so_locked); 3634 } 3635 break; 3636 case SCTP_NOTIFY_STR_RESET_SEND: 3637 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN); 3638 break; 3639 case SCTP_NOTIFY_STR_RESET_RECV: 3640 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING); 3641 break; 3642 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 3643 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), 3644 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 3645 
break; 3646 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 3647 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), 3648 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 3649 break; 3650 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 3651 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), 3652 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 3653 break; 3654 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 3655 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), 3656 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 3657 break; 3658 case SCTP_NOTIFY_ASCONF_ADD_IP: 3659 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 3660 error); 3661 break; 3662 case SCTP_NOTIFY_ASCONF_DELETE_IP: 3663 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 3664 error); 3665 break; 3666 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 3667 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 3668 error); 3669 break; 3670 case SCTP_NOTIFY_PEER_SHUTDOWN: 3671 sctp_notify_shutdown_event(stcb); 3672 break; 3673 case SCTP_NOTIFY_AUTH_NEW_KEY: 3674 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 3675 (uint16_t) (uintptr_t) data, 3676 so_locked); 3677 break; 3678 case SCTP_NOTIFY_AUTH_FREE_KEY: 3679 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 3680 (uint16_t) (uintptr_t) data, 3681 so_locked); 3682 break; 3683 case SCTP_NOTIFY_NO_PEER_AUTH: 3684 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 3685 (uint16_t) (uintptr_t) data, 3686 so_locked); 3687 break; 3688 case SCTP_NOTIFY_SENDER_DRY: 3689 sctp_notify_sender_dry_event(stcb, so_locked); 3690 break; 3691 case SCTP_NOTIFY_REMOTE_ERROR: 3692 sctp_notify_remote_error(stcb, error, data); 3693 break; 3694 default: 3695 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 3696 __FUNCTION__, notification, notification); 3697 break; 3698 } /* end switch */ 3699 } 3700 3701 void 3702 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, 
int so_locked 3703 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3704 SCTP_UNUSED 3705 #endif 3706 ) 3707 { 3708 struct sctp_association *asoc; 3709 struct sctp_stream_out *outs; 3710 struct sctp_tmit_chunk *chk, *nchk; 3711 struct sctp_stream_queue_pending *sp, *nsp; 3712 int i; 3713 3714 if (stcb == NULL) { 3715 return; 3716 } 3717 asoc = &stcb->asoc; 3718 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 3719 /* already being freed */ 3720 return; 3721 } 3722 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3723 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3724 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 3725 return; 3726 } 3727 /* now through all the gunk freeing chunks */ 3728 if (holds_lock == 0) { 3729 SCTP_TCB_SEND_LOCK(stcb); 3730 } 3731 /* sent queue SHOULD be empty */ 3732 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 3733 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 3734 asoc->sent_queue_cnt--; 3735 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 3736 if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) { 3737 asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--; 3738 #ifdef INVARIANTS 3739 } else { 3740 panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number); 3741 #endif 3742 } 3743 } 3744 if (chk->data != NULL) { 3745 sctp_free_bufspace(stcb, asoc, chk, 1); 3746 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 3747 error, chk, so_locked); 3748 if (chk->data) { 3749 sctp_m_freem(chk->data); 3750 chk->data = NULL; 3751 } 3752 } 3753 sctp_free_a_chunk(stcb, chk, so_locked); 3754 /* sa_ignore FREED_MEMORY */ 3755 } 3756 /* pending send queue SHOULD be empty */ 3757 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 3758 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 3759 asoc->send_queue_cnt--; 3760 if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) { 3761 asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--; 3762 
#ifdef INVARIANTS 3763 } else { 3764 panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number); 3765 #endif 3766 } 3767 if (chk->data != NULL) { 3768 sctp_free_bufspace(stcb, asoc, chk, 1); 3769 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 3770 error, chk, so_locked); 3771 if (chk->data) { 3772 sctp_m_freem(chk->data); 3773 chk->data = NULL; 3774 } 3775 } 3776 sctp_free_a_chunk(stcb, chk, so_locked); 3777 /* sa_ignore FREED_MEMORY */ 3778 } 3779 for (i = 0; i < asoc->streamoutcnt; i++) { 3780 /* For each stream */ 3781 outs = &asoc->strmout[i]; 3782 /* clean up any sends there */ 3783 asoc->locked_on_sending = NULL; 3784 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 3785 asoc->stream_queue_cnt--; 3786 TAILQ_REMOVE(&outs->outqueue, sp, next); 3787 sctp_free_spbufspace(stcb, asoc, sp); 3788 if (sp->data) { 3789 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 3790 error, (void *)sp, so_locked); 3791 if (sp->data) { 3792 sctp_m_freem(sp->data); 3793 sp->data = NULL; 3794 sp->tail_mbuf = NULL; 3795 sp->length = 0; 3796 } 3797 } 3798 if (sp->net) { 3799 sctp_free_remote_addr(sp->net); 3800 sp->net = NULL; 3801 } 3802 /* Free the chunk */ 3803 sctp_free_a_strmoq(stcb, sp, so_locked); 3804 /* sa_ignore FREED_MEMORY */ 3805 } 3806 } 3807 3808 if (holds_lock == 0) { 3809 SCTP_TCB_SEND_UNLOCK(stcb); 3810 } 3811 } 3812 3813 void 3814 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error, 3815 struct sctp_abort_chunk *abort, int so_locked 3816 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3817 SCTP_UNUSED 3818 #endif 3819 ) 3820 { 3821 if (stcb == NULL) { 3822 return; 3823 } 3824 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 3825 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 3826 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 3827 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 3828 } 3829 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3830 
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3831 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3832 return; 3833 } 3834 /* Tell them we lost the asoc */ 3835 sctp_report_all_outbound(stcb, error, 1, so_locked); 3836 if (from_peer) { 3837 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 3838 } else { 3839 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 3840 } 3841 } 3842 3843 void 3844 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3845 struct mbuf *m, int iphlen, 3846 struct sockaddr *src, struct sockaddr *dst, 3847 struct sctphdr *sh, struct mbuf *op_err, 3848 uint8_t use_mflowid, uint32_t mflowid, 3849 uint32_t vrf_id, uint16_t port) 3850 { 3851 uint32_t vtag; 3852 3853 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3854 struct socket *so; 3855 3856 #endif 3857 3858 vtag = 0; 3859 if (stcb != NULL) { 3860 /* We have a TCB to abort, send notification too */ 3861 vtag = stcb->asoc.peer_vtag; 3862 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 3863 /* get the assoc vrf id and table id */ 3864 vrf_id = stcb->asoc.vrf_id; 3865 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED; 3866 } 3867 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 3868 use_mflowid, mflowid, 3869 vrf_id, port); 3870 if (stcb != NULL) { 3871 /* Ok, now lets free it */ 3872 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3873 so = SCTP_INP_SO(inp); 3874 atomic_add_int(&stcb->asoc.refcnt, 1); 3875 SCTP_TCB_UNLOCK(stcb); 3876 SCTP_SOCKET_LOCK(so, 1); 3877 SCTP_TCB_LOCK(stcb); 3878 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3879 #endif 3880 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 3881 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 3882 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 3883 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 3884 } 3885 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 3886 #if 
defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3887 SCTP_SOCKET_UNLOCK(so, 1); 3888 #endif 3889 } 3890 } 3891 3892 #ifdef SCTP_ASOCLOG_OF_TSNS 3893 void 3894 sctp_print_out_track_log(struct sctp_tcb *stcb) 3895 { 3896 #ifdef NOSIY_PRINTS 3897 int i; 3898 3899 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 3900 SCTP_PRINTF("IN bound TSN log-aaa\n"); 3901 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 3902 SCTP_PRINTF("None rcvd\n"); 3903 goto none_in; 3904 } 3905 if (stcb->asoc.tsn_in_wrapped) { 3906 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 3907 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 3908 stcb->asoc.in_tsnlog[i].tsn, 3909 stcb->asoc.in_tsnlog[i].strm, 3910 stcb->asoc.in_tsnlog[i].seq, 3911 stcb->asoc.in_tsnlog[i].flgs, 3912 stcb->asoc.in_tsnlog[i].sz); 3913 } 3914 } 3915 if (stcb->asoc.tsn_in_at) { 3916 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 3917 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 3918 stcb->asoc.in_tsnlog[i].tsn, 3919 stcb->asoc.in_tsnlog[i].strm, 3920 stcb->asoc.in_tsnlog[i].seq, 3921 stcb->asoc.in_tsnlog[i].flgs, 3922 stcb->asoc.in_tsnlog[i].sz); 3923 } 3924 } 3925 none_in: 3926 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 3927 if ((stcb->asoc.tsn_out_at == 0) && 3928 (stcb->asoc.tsn_out_wrapped == 0)) { 3929 SCTP_PRINTF("None sent\n"); 3930 } 3931 if (stcb->asoc.tsn_out_wrapped) { 3932 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 3933 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 3934 stcb->asoc.out_tsnlog[i].tsn, 3935 stcb->asoc.out_tsnlog[i].strm, 3936 stcb->asoc.out_tsnlog[i].seq, 3937 stcb->asoc.out_tsnlog[i].flgs, 3938 stcb->asoc.out_tsnlog[i].sz); 3939 } 3940 } 3941 if (stcb->asoc.tsn_out_at) { 3942 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 3943 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 3944 stcb->asoc.out_tsnlog[i].tsn, 3945 stcb->asoc.out_tsnlog[i].strm, 3946 stcb->asoc.out_tsnlog[i].seq, 3947 
stcb->asoc.out_tsnlog[i].flgs, 3948 stcb->asoc.out_tsnlog[i].sz); 3949 } 3950 } 3951 #endif 3952 } 3953 3954 #endif 3955 3956 void 3957 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3958 struct mbuf *op_err, 3959 int so_locked 3960 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3961 SCTP_UNUSED 3962 #endif 3963 ) 3964 { 3965 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3966 struct socket *so; 3967 3968 #endif 3969 3970 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3971 so = SCTP_INP_SO(inp); 3972 #endif 3973 if (stcb == NULL) { 3974 /* Got to have a TCB */ 3975 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3976 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3977 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 3978 SCTP_CALLED_DIRECTLY_NOCMPSET); 3979 } 3980 } 3981 return; 3982 } else { 3983 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED; 3984 } 3985 /* notify the ulp */ 3986 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 3987 sctp_abort_notification(stcb, 0, 0, NULL, so_locked); 3988 } 3989 /* notify the peer */ 3990 sctp_send_abort_tcb(stcb, op_err, so_locked); 3991 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 3992 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 3993 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 3994 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 3995 } 3996 /* now free the asoc */ 3997 #ifdef SCTP_ASOCLOG_OF_TSNS 3998 sctp_print_out_track_log(stcb); 3999 #endif 4000 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4001 if (!so_locked) { 4002 atomic_add_int(&stcb->asoc.refcnt, 1); 4003 SCTP_TCB_UNLOCK(stcb); 4004 SCTP_SOCKET_LOCK(so, 1); 4005 SCTP_TCB_LOCK(stcb); 4006 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4007 } 4008 #endif 4009 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4010 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4011 if (!so_locked) { 4012 SCTP_SOCKET_UNLOCK(so, 1); 4013 } 4014 #endif 
4015 } 4016 4017 void 4018 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4019 struct sockaddr *src, struct sockaddr *dst, 4020 struct sctphdr *sh, struct sctp_inpcb *inp, 4021 uint8_t use_mflowid, uint32_t mflowid, 4022 uint32_t vrf_id, uint16_t port) 4023 { 4024 struct sctp_chunkhdr *ch, chunk_buf; 4025 unsigned int chk_length; 4026 int contains_init_chunk; 4027 4028 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4029 /* Generate a TO address for future reference */ 4030 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4031 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 4032 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4033 SCTP_CALLED_DIRECTLY_NOCMPSET); 4034 } 4035 } 4036 contains_init_chunk = 0; 4037 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4038 sizeof(*ch), (uint8_t *) & chunk_buf); 4039 while (ch != NULL) { 4040 chk_length = ntohs(ch->chunk_length); 4041 if (chk_length < sizeof(*ch)) { 4042 /* break to abort land */ 4043 break; 4044 } 4045 switch (ch->chunk_type) { 4046 case SCTP_INIT: 4047 contains_init_chunk = 1; 4048 break; 4049 case SCTP_COOKIE_ECHO: 4050 /* We hit here only if the assoc is being freed */ 4051 return; 4052 case SCTP_PACKET_DROPPED: 4053 /* we don't respond to pkt-dropped */ 4054 return; 4055 case SCTP_ABORT_ASSOCIATION: 4056 /* we don't respond with an ABORT to an ABORT */ 4057 return; 4058 case SCTP_SHUTDOWN_COMPLETE: 4059 /* 4060 * we ignore it since we are not waiting for it and 4061 * peer is gone 4062 */ 4063 return; 4064 case SCTP_SHUTDOWN_ACK: 4065 sctp_send_shutdown_complete2(src, dst, sh, 4066 use_mflowid, mflowid, 4067 vrf_id, port); 4068 return; 4069 default: 4070 break; 4071 } 4072 offset += SCTP_SIZE32(chk_length); 4073 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4074 sizeof(*ch), (uint8_t *) & chunk_buf); 4075 } 4076 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4077 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4078 (contains_init_chunk == 0))) { 4079 sctp_send_abort(m, 
iphlen, src, dst, sh, 0, NULL, 4080 use_mflowid, mflowid, 4081 vrf_id, port); 4082 } 4083 } 4084 4085 /* 4086 * check the inbound datagram to make sure there is not an abort inside it, 4087 * if there is return 1, else return 0. 4088 */ 4089 int 4090 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill) 4091 { 4092 struct sctp_chunkhdr *ch; 4093 struct sctp_init_chunk *init_chk, chunk_buf; 4094 int offset; 4095 unsigned int chk_length; 4096 4097 offset = iphlen + sizeof(struct sctphdr); 4098 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4099 (uint8_t *) & chunk_buf); 4100 while (ch != NULL) { 4101 chk_length = ntohs(ch->chunk_length); 4102 if (chk_length < sizeof(*ch)) { 4103 /* packet is probably corrupt */ 4104 break; 4105 } 4106 /* we seem to be ok, is it an abort? */ 4107 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4108 /* yep, tell them */ 4109 return (1); 4110 } 4111 if (ch->chunk_type == SCTP_INITIATION) { 4112 /* need to update the Vtag */ 4113 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4114 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf); 4115 if (init_chk != NULL) { 4116 *vtagfill = ntohl(init_chk->init.initiate_tag); 4117 } 4118 } 4119 /* Nope, move to the next chunk */ 4120 offset += SCTP_SIZE32(chk_length); 4121 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4122 sizeof(*ch), (uint8_t *) & chunk_buf); 4123 } 4124 return (0); 4125 } 4126 4127 /* 4128 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4129 * set (i.e. 
it's 0) so, create this function to compare link local scopes 4130 */ 4131 #ifdef INET6 4132 uint32_t 4133 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4134 { 4135 struct sockaddr_in6 a, b; 4136 4137 /* save copies */ 4138 a = *addr1; 4139 b = *addr2; 4140 4141 if (a.sin6_scope_id == 0) 4142 if (sa6_recoverscope(&a)) { 4143 /* can't get scope, so can't match */ 4144 return (0); 4145 } 4146 if (b.sin6_scope_id == 0) 4147 if (sa6_recoverscope(&b)) { 4148 /* can't get scope, so can't match */ 4149 return (0); 4150 } 4151 if (a.sin6_scope_id != b.sin6_scope_id) 4152 return (0); 4153 4154 return (1); 4155 } 4156 4157 /* 4158 * returns a sockaddr_in6 with embedded scope recovered and removed 4159 */ 4160 struct sockaddr_in6 * 4161 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4162 { 4163 /* check and strip embedded scope junk */ 4164 if (addr->sin6_family == AF_INET6) { 4165 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4166 if (addr->sin6_scope_id == 0) { 4167 *store = *addr; 4168 if (!sa6_recoverscope(store)) { 4169 /* use the recovered scope */ 4170 addr = store; 4171 } 4172 } else { 4173 /* else, return the original "to" addr */ 4174 in6_clearscope(&addr->sin6_addr); 4175 } 4176 } 4177 } 4178 return (addr); 4179 } 4180 4181 #endif 4182 4183 /* 4184 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4185 * if same, 0 if not 4186 */ 4187 int 4188 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4189 { 4190 4191 /* must be valid */ 4192 if (sa1 == NULL || sa2 == NULL) 4193 return (0); 4194 4195 /* must be the same family */ 4196 if (sa1->sa_family != sa2->sa_family) 4197 return (0); 4198 4199 switch (sa1->sa_family) { 4200 #ifdef INET6 4201 case AF_INET6: 4202 { 4203 /* IPv6 addresses */ 4204 struct sockaddr_in6 *sin6_1, *sin6_2; 4205 4206 sin6_1 = (struct sockaddr_in6 *)sa1; 4207 sin6_2 = (struct sockaddr_in6 *)sa2; 4208 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4209 sin6_2)); 4210 } 4211 #endif 4212 #ifdef INET 4213 case AF_INET: 4214 { 4215 /* IPv4 addresses */ 4216 struct sockaddr_in *sin_1, *sin_2; 4217 4218 sin_1 = (struct sockaddr_in *)sa1; 4219 sin_2 = (struct sockaddr_in *)sa2; 4220 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4221 } 4222 #endif 4223 default: 4224 /* we don't do these... */ 4225 return (0); 4226 } 4227 } 4228 4229 void 4230 sctp_print_address(struct sockaddr *sa) 4231 { 4232 #ifdef INET6 4233 char ip6buf[INET6_ADDRSTRLEN]; 4234 4235 #endif 4236 4237 switch (sa->sa_family) { 4238 #ifdef INET6 4239 case AF_INET6: 4240 { 4241 struct sockaddr_in6 *sin6; 4242 4243 sin6 = (struct sockaddr_in6 *)sa; 4244 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4245 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4246 ntohs(sin6->sin6_port), 4247 sin6->sin6_scope_id); 4248 break; 4249 } 4250 #endif 4251 #ifdef INET 4252 case AF_INET: 4253 { 4254 struct sockaddr_in *sin; 4255 unsigned char *p; 4256 4257 sin = (struct sockaddr_in *)sa; 4258 p = (unsigned char *)&sin->sin_addr; 4259 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4260 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4261 break; 4262 } 4263 #endif 4264 default: 4265 SCTP_PRINTF("?\n"); 4266 break; 4267 } 4268 } 4269 4270 void 4271 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4272 struct sctp_inpcb *new_inp, 4273 struct sctp_tcb 
*stcb, 4274 int waitflags) 4275 { 4276 /* 4277 * go through our old INP and pull off any control structures that 4278 * belong to stcb and move then to the new inp. 4279 */ 4280 struct socket *old_so, *new_so; 4281 struct sctp_queued_to_read *control, *nctl; 4282 struct sctp_readhead tmp_queue; 4283 struct mbuf *m; 4284 int error = 0; 4285 4286 old_so = old_inp->sctp_socket; 4287 new_so = new_inp->sctp_socket; 4288 TAILQ_INIT(&tmp_queue); 4289 error = sblock(&old_so->so_rcv, waitflags); 4290 if (error) { 4291 /* 4292 * Gak, can't get sblock, we have a problem. data will be 4293 * left stranded.. and we don't dare look at it since the 4294 * other thread may be reading something. Oh well, its a 4295 * screwed up app that does a peeloff OR a accept while 4296 * reading from the main socket... actually its only the 4297 * peeloff() case, since I think read will fail on a 4298 * listening socket.. 4299 */ 4300 return; 4301 } 4302 /* lock the socket buffers */ 4303 SCTP_INP_READ_LOCK(old_inp); 4304 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) { 4305 /* Pull off all for out target stcb */ 4306 if (control->stcb == stcb) { 4307 /* remove it we want it */ 4308 TAILQ_REMOVE(&old_inp->read_queue, control, next); 4309 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 4310 m = control->data; 4311 while (m) { 4312 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4313 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4314 } 4315 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 4316 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4317 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 4318 } 4319 m = SCTP_BUF_NEXT(m); 4320 } 4321 } 4322 } 4323 SCTP_INP_READ_UNLOCK(old_inp); 4324 /* Remove the sb-lock on the old socket */ 4325 4326 sbunlock(&old_so->so_rcv); 4327 /* Now we move them over to the new socket buffer */ 4328 SCTP_INP_READ_LOCK(new_inp); 4329 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) { 4330 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 4331 m = control->data; 4332 while (m) { 4333 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4334 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4335 } 4336 sctp_sballoc(stcb, &new_so->so_rcv, m); 4337 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4338 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4339 } 4340 m = SCTP_BUF_NEXT(m); 4341 } 4342 } 4343 SCTP_INP_READ_UNLOCK(new_inp); 4344 } 4345 4346 void 4347 sctp_add_to_readq(struct sctp_inpcb *inp, 4348 struct sctp_tcb *stcb, 4349 struct sctp_queued_to_read *control, 4350 struct sockbuf *sb, 4351 int end, 4352 int inp_read_lock_held, 4353 int so_locked 4354 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 4355 SCTP_UNUSED 4356 #endif 4357 ) 4358 { 4359 /* 4360 * Here we must place the control on the end of the socket read 4361 * queue AND increment sb_cc so that select will work properly on 4362 * read. 4363 */ 4364 struct mbuf *m, *prev = NULL; 4365 4366 if (inp == NULL) { 4367 /* Gak, TSNH!! 
*/ 4368 #ifdef INVARIANTS 4369 panic("Gak, inp NULL on add_to_readq"); 4370 #endif 4371 return; 4372 } 4373 if (inp_read_lock_held == 0) 4374 SCTP_INP_READ_LOCK(inp); 4375 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 4376 sctp_free_remote_addr(control->whoFrom); 4377 if (control->data) { 4378 sctp_m_freem(control->data); 4379 control->data = NULL; 4380 } 4381 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control); 4382 if (inp_read_lock_held == 0) 4383 SCTP_INP_READ_UNLOCK(inp); 4384 return; 4385 } 4386 if (!(control->spec_flags & M_NOTIFICATION)) { 4387 atomic_add_int(&inp->total_recvs, 1); 4388 if (!control->do_not_ref_stcb) { 4389 atomic_add_int(&stcb->total_recvs, 1); 4390 } 4391 } 4392 m = control->data; 4393 control->held_length = 0; 4394 control->length = 0; 4395 while (m) { 4396 if (SCTP_BUF_LEN(m) == 0) { 4397 /* Skip mbufs with NO length */ 4398 if (prev == NULL) { 4399 /* First one */ 4400 control->data = sctp_m_free(m); 4401 m = control->data; 4402 } else { 4403 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 4404 m = SCTP_BUF_NEXT(prev); 4405 } 4406 if (m == NULL) { 4407 control->tail_mbuf = prev; 4408 } 4409 continue; 4410 } 4411 prev = m; 4412 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4413 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 4414 } 4415 sctp_sballoc(stcb, sb, m); 4416 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4417 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4418 } 4419 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 4420 m = SCTP_BUF_NEXT(m); 4421 } 4422 if (prev != NULL) { 4423 control->tail_mbuf = prev; 4424 } else { 4425 /* Everything got collapsed out?? 
*/ 4426 sctp_free_remote_addr(control->whoFrom); 4427 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control); 4428 if (inp_read_lock_held == 0) 4429 SCTP_INP_READ_UNLOCK(inp); 4430 return; 4431 } 4432 if (end) { 4433 control->end_added = 1; 4434 } 4435 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 4436 if (inp_read_lock_held == 0) 4437 SCTP_INP_READ_UNLOCK(inp); 4438 if (inp && inp->sctp_socket) { 4439 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) { 4440 SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket); 4441 } else { 4442 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4443 struct socket *so; 4444 4445 so = SCTP_INP_SO(inp); 4446 if (!so_locked) { 4447 if (stcb) { 4448 atomic_add_int(&stcb->asoc.refcnt, 1); 4449 SCTP_TCB_UNLOCK(stcb); 4450 } 4451 SCTP_SOCKET_LOCK(so, 1); 4452 if (stcb) { 4453 SCTP_TCB_LOCK(stcb); 4454 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4455 } 4456 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4457 SCTP_SOCKET_UNLOCK(so, 1); 4458 return; 4459 } 4460 } 4461 #endif 4462 sctp_sorwakeup(inp, inp->sctp_socket); 4463 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4464 if (!so_locked) { 4465 SCTP_SOCKET_UNLOCK(so, 1); 4466 } 4467 #endif 4468 } 4469 } 4470 } 4471 4472 4473 int 4474 sctp_append_to_readq(struct sctp_inpcb *inp, 4475 struct sctp_tcb *stcb, 4476 struct sctp_queued_to_read *control, 4477 struct mbuf *m, 4478 int end, 4479 int ctls_cumack, 4480 struct sockbuf *sb) 4481 { 4482 /* 4483 * A partial delivery API event is underway. OR we are appending on 4484 * the reassembly queue. 4485 * 4486 * If PDAPI this means we need to add m to the end of the data. 4487 * Increase the length in the control AND increment the sb_cc. 4488 * Otherwise sb is NULL and all we need to do is put it at the end 4489 * of the mbuf chain. 
4490 */ 4491 int len = 0; 4492 struct mbuf *mm, *tail = NULL, *prev = NULL; 4493 4494 if (inp) { 4495 SCTP_INP_READ_LOCK(inp); 4496 } 4497 if (control == NULL) { 4498 get_out: 4499 if (inp) { 4500 SCTP_INP_READ_UNLOCK(inp); 4501 } 4502 return (-1); 4503 } 4504 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) { 4505 SCTP_INP_READ_UNLOCK(inp); 4506 return (0); 4507 } 4508 if (control->end_added) { 4509 /* huh this one is complete? */ 4510 goto get_out; 4511 } 4512 mm = m; 4513 if (mm == NULL) { 4514 goto get_out; 4515 } 4516 while (mm) { 4517 if (SCTP_BUF_LEN(mm) == 0) { 4518 /* Skip mbufs with NO lenght */ 4519 if (prev == NULL) { 4520 /* First one */ 4521 m = sctp_m_free(mm); 4522 mm = m; 4523 } else { 4524 SCTP_BUF_NEXT(prev) = sctp_m_free(mm); 4525 mm = SCTP_BUF_NEXT(prev); 4526 } 4527 continue; 4528 } 4529 prev = mm; 4530 len += SCTP_BUF_LEN(mm); 4531 if (sb) { 4532 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4533 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm)); 4534 } 4535 sctp_sballoc(stcb, sb, mm); 4536 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 4537 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4538 } 4539 } 4540 mm = SCTP_BUF_NEXT(mm); 4541 } 4542 if (prev) { 4543 tail = prev; 4544 } else { 4545 /* Really there should always be a prev */ 4546 if (m == NULL) { 4547 /* Huh nothing left? 
*/ 4548 #ifdef INVARIANTS 4549 panic("Nothing left to add?"); 4550 #else 4551 goto get_out; 4552 #endif 4553 } 4554 tail = m; 4555 } 4556 if (control->tail_mbuf) { 4557 /* append */ 4558 SCTP_BUF_NEXT(control->tail_mbuf) = m; 4559 control->tail_mbuf = tail; 4560 } else { 4561 /* nothing there */ 4562 #ifdef INVARIANTS 4563 if (control->data != NULL) { 4564 panic("This should NOT happen"); 4565 } 4566 #endif 4567 control->data = m; 4568 control->tail_mbuf = tail; 4569 } 4570 atomic_add_int(&control->length, len); 4571 if (end) { 4572 /* message is complete */ 4573 if (stcb && (control == stcb->asoc.control_pdapi)) { 4574 stcb->asoc.control_pdapi = NULL; 4575 } 4576 control->held_length = 0; 4577 control->end_added = 1; 4578 } 4579 if (stcb == NULL) { 4580 control->do_not_ref_stcb = 1; 4581 } 4582 /* 4583 * When we are appending in partial delivery, the cum-ack is used 4584 * for the actual pd-api highest tsn on this mbuf. The true cum-ack 4585 * is populated in the outbound sinfo structure from the true cumack 4586 * if the association exists... 
4587 */ 4588 control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack; 4589 if (inp) { 4590 SCTP_INP_READ_UNLOCK(inp); 4591 } 4592 if (inp && inp->sctp_socket) { 4593 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) { 4594 SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket); 4595 } else { 4596 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4597 struct socket *so; 4598 4599 so = SCTP_INP_SO(inp); 4600 if (stcb) { 4601 atomic_add_int(&stcb->asoc.refcnt, 1); 4602 SCTP_TCB_UNLOCK(stcb); 4603 } 4604 SCTP_SOCKET_LOCK(so, 1); 4605 if (stcb) { 4606 SCTP_TCB_LOCK(stcb); 4607 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4608 } 4609 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4610 SCTP_SOCKET_UNLOCK(so, 1); 4611 return (0); 4612 } 4613 #endif 4614 sctp_sorwakeup(inp, inp->sctp_socket); 4615 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4616 SCTP_SOCKET_UNLOCK(so, 1); 4617 #endif 4618 } 4619 } 4620 return (0); 4621 } 4622 4623 4624 4625 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4626 *************ALTERNATE ROUTING CODE 4627 */ 4628 4629 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4630 *************ALTERNATE ROUTING CODE 4631 */ 4632 4633 struct mbuf * 4634 sctp_generate_invmanparam(int err) 4635 { 4636 /* Return a MBUF with a invalid mandatory parameter */ 4637 struct mbuf *m; 4638 4639 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA); 4640 if (m) { 4641 struct sctp_paramhdr *ph; 4642 4643 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr); 4644 ph = mtod(m, struct sctp_paramhdr *); 4645 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 4646 ph->param_type = htons(err); 4647 } 4648 return (m); 4649 } 4650 4651 #ifdef SCTP_MBCNT_LOGGING 4652 void 4653 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4654 struct sctp_tmit_chunk *tp1, int chk_cnt) 4655 { 4656 if (tp1->data == NULL) { 4657 return; 4658 } 4659 asoc->chunks_on_out_queue -= chk_cnt; 4660 if 
(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) { 4661 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4662 asoc->total_output_queue_size, 4663 tp1->book_size, 4664 0, 4665 tp1->mbcnt); 4666 } 4667 if (asoc->total_output_queue_size >= tp1->book_size) { 4668 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4669 } else { 4670 asoc->total_output_queue_size = 0; 4671 } 4672 4673 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4674 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4675 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 4676 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 4677 } else { 4678 stcb->sctp_socket->so_snd.sb_cc = 0; 4679 4680 } 4681 } 4682 } 4683 4684 #endif 4685 4686 int 4687 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 4688 uint8_t sent, int so_locked 4689 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 4690 SCTP_UNUSED 4691 #endif 4692 ) 4693 { 4694 struct sctp_stream_out *strq; 4695 struct sctp_tmit_chunk *chk = NULL, *tp2; 4696 struct sctp_stream_queue_pending *sp; 4697 uint16_t stream = 0, seq = 0; 4698 uint8_t foundeom = 0; 4699 int ret_sz = 0; 4700 int notdone; 4701 int do_wakeup_routine = 0; 4702 4703 stream = tp1->rec.data.stream_number; 4704 seq = tp1->rec.data.stream_seq; 4705 do { 4706 ret_sz += tp1->book_size; 4707 if (tp1->data != NULL) { 4708 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4709 sctp_flight_size_decrease(tp1); 4710 sctp_total_flight_decrease(stcb, tp1); 4711 } 4712 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 4713 stcb->asoc.peers_rwnd += tp1->send_size; 4714 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 4715 if (sent) { 4716 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 4717 } else { 4718 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 4719 } 4720 if (tp1->data) { 4721 sctp_m_freem(tp1->data); 4722 tp1->data = NULL; 4723 } 4724 
do_wakeup_routine = 1; 4725 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4726 stcb->asoc.sent_queue_cnt_removeable--; 4727 } 4728 } 4729 tp1->sent = SCTP_FORWARD_TSN_SKIP; 4730 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 4731 SCTP_DATA_NOT_FRAG) { 4732 /* not frag'ed we ae done */ 4733 notdone = 0; 4734 foundeom = 1; 4735 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 4736 /* end of frag, we are done */ 4737 notdone = 0; 4738 foundeom = 1; 4739 } else { 4740 /* 4741 * Its a begin or middle piece, we must mark all of 4742 * it 4743 */ 4744 notdone = 1; 4745 tp1 = TAILQ_NEXT(tp1, sctp_next); 4746 } 4747 } while (tp1 && notdone); 4748 if (foundeom == 0) { 4749 /* 4750 * The multi-part message was scattered across the send and 4751 * sent queue. 4752 */ 4753 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 4754 if ((tp1->rec.data.stream_number != stream) || 4755 (tp1->rec.data.stream_seq != seq)) { 4756 break; 4757 } 4758 /* 4759 * save to chk in case we have some on stream out 4760 * queue. If so and we have an un-transmitted one we 4761 * don't have to fudge the TSN. 4762 */ 4763 chk = tp1; 4764 ret_sz += tp1->book_size; 4765 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 4766 if (sent) { 4767 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 4768 } else { 4769 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 4770 } 4771 if (tp1->data) { 4772 sctp_m_freem(tp1->data); 4773 tp1->data = NULL; 4774 } 4775 /* No flight involved here book the size to 0 */ 4776 tp1->book_size = 0; 4777 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 4778 foundeom = 1; 4779 } 4780 do_wakeup_routine = 1; 4781 tp1->sent = SCTP_FORWARD_TSN_SKIP; 4782 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 4783 /* 4784 * on to the sent queue so we can wait for it to be 4785 * passed by. 
4786 */ 4787 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 4788 sctp_next); 4789 stcb->asoc.send_queue_cnt--; 4790 stcb->asoc.sent_queue_cnt++; 4791 } 4792 } 4793 if (foundeom == 0) { 4794 /* 4795 * Still no eom found. That means there is stuff left on the 4796 * stream out queue.. yuck. 4797 */ 4798 SCTP_TCB_SEND_LOCK(stcb); 4799 strq = &stcb->asoc.strmout[stream]; 4800 sp = TAILQ_FIRST(&strq->outqueue); 4801 if (sp != NULL) { 4802 sp->discard_rest = 1; 4803 /* 4804 * We may need to put a chunk on the queue that 4805 * holds the TSN that would have been sent with the 4806 * LAST bit. 4807 */ 4808 if (chk == NULL) { 4809 /* Yep, we have to */ 4810 sctp_alloc_a_chunk(stcb, chk); 4811 if (chk == NULL) { 4812 /* 4813 * we are hosed. All we can do is 4814 * nothing.. which will cause an 4815 * abort if the peer is paying 4816 * attention. 4817 */ 4818 goto oh_well; 4819 } 4820 memset(chk, 0, sizeof(*chk)); 4821 chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG; 4822 chk->sent = SCTP_FORWARD_TSN_SKIP; 4823 chk->asoc = &stcb->asoc; 4824 chk->rec.data.stream_seq = strq->next_sequence_send; 4825 chk->rec.data.stream_number = sp->stream; 4826 chk->rec.data.payloadtype = sp->ppid; 4827 chk->rec.data.context = sp->context; 4828 chk->flags = sp->act_flags; 4829 if (sp->net) 4830 chk->whoTo = sp->net; 4831 else 4832 chk->whoTo = stcb->asoc.primary_destination; 4833 atomic_add_int(&chk->whoTo->ref_count, 1); 4834 chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 4835 stcb->asoc.pr_sctp_cnt++; 4836 chk->pr_sctp_on = 1; 4837 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 4838 stcb->asoc.sent_queue_cnt++; 4839 stcb->asoc.pr_sctp_cnt++; 4840 } else { 4841 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 4842 } 4843 strq->next_sequence_send++; 4844 oh_well: 4845 if (sp->data) { 4846 /* 4847 * Pull any data to free up the SB and allow 4848 * sender to "add more" while we will throw 4849 * away :-) 4850 */ 4851 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 
4852 ret_sz += sp->length; 4853 do_wakeup_routine = 1; 4854 sp->some_taken = 1; 4855 sctp_m_freem(sp->data); 4856 sp->data = NULL; 4857 sp->tail_mbuf = NULL; 4858 sp->length = 0; 4859 } 4860 } 4861 SCTP_TCB_SEND_UNLOCK(stcb); 4862 } 4863 if (do_wakeup_routine) { 4864 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4865 struct socket *so; 4866 4867 so = SCTP_INP_SO(stcb->sctp_ep); 4868 if (!so_locked) { 4869 atomic_add_int(&stcb->asoc.refcnt, 1); 4870 SCTP_TCB_UNLOCK(stcb); 4871 SCTP_SOCKET_LOCK(so, 1); 4872 SCTP_TCB_LOCK(stcb); 4873 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4874 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4875 /* assoc was freed while we were unlocked */ 4876 SCTP_SOCKET_UNLOCK(so, 1); 4877 return (ret_sz); 4878 } 4879 } 4880 #endif 4881 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 4882 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4883 if (!so_locked) { 4884 SCTP_SOCKET_UNLOCK(so, 1); 4885 } 4886 #endif 4887 } 4888 return (ret_sz); 4889 } 4890 4891 /* 4892 * checks to see if the given address, sa, is one that is currently known by 4893 * the kernel note: can't distinguish the same address on multiple interfaces 4894 * and doesn't handle multiple addresses with different zone/scope id's note: 4895 * ifa_ifwithaddr() compares the entire sockaddr struct 4896 */ 4897 struct sctp_ifa * 4898 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 4899 int holds_lock) 4900 { 4901 struct sctp_laddr *laddr; 4902 4903 if (holds_lock == 0) { 4904 SCTP_INP_RLOCK(inp); 4905 } 4906 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4907 if (laddr->ifa == NULL) 4908 continue; 4909 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 4910 continue; 4911 #ifdef INET 4912 if (addr->sa_family == AF_INET) { 4913 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 4914 laddr->ifa->address.sin.sin_addr.s_addr) { 4915 /* found him. 
*/ 4916 if (holds_lock == 0) { 4917 SCTP_INP_RUNLOCK(inp); 4918 } 4919 return (laddr->ifa); 4920 break; 4921 } 4922 } 4923 #endif 4924 #ifdef INET6 4925 if (addr->sa_family == AF_INET6) { 4926 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 4927 &laddr->ifa->address.sin6)) { 4928 /* found him. */ 4929 if (holds_lock == 0) { 4930 SCTP_INP_RUNLOCK(inp); 4931 } 4932 return (laddr->ifa); 4933 break; 4934 } 4935 } 4936 #endif 4937 } 4938 if (holds_lock == 0) { 4939 SCTP_INP_RUNLOCK(inp); 4940 } 4941 return (NULL); 4942 } 4943 4944 uint32_t 4945 sctp_get_ifa_hash_val(struct sockaddr *addr) 4946 { 4947 switch (addr->sa_family) { 4948 #ifdef INET 4949 case AF_INET: 4950 { 4951 struct sockaddr_in *sin; 4952 4953 sin = (struct sockaddr_in *)addr; 4954 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 4955 } 4956 #endif 4957 #ifdef INET6 4958 case INET6: 4959 { 4960 struct sockaddr_in6 *sin6; 4961 uint32_t hash_of_addr; 4962 4963 sin6 = (struct sockaddr_in6 *)addr; 4964 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 4965 sin6->sin6_addr.s6_addr32[1] + 4966 sin6->sin6_addr.s6_addr32[2] + 4967 sin6->sin6_addr.s6_addr32[3]); 4968 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 4969 return (hash_of_addr); 4970 } 4971 #endif 4972 default: 4973 break; 4974 } 4975 return (0); 4976 } 4977 4978 struct sctp_ifa * 4979 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 4980 { 4981 struct sctp_ifa *sctp_ifap; 4982 struct sctp_vrf *vrf; 4983 struct sctp_ifalist *hash_head; 4984 uint32_t hash_of_addr; 4985 4986 if (holds_lock == 0) 4987 SCTP_IPI_ADDR_RLOCK(); 4988 4989 vrf = sctp_find_vrf(vrf_id); 4990 if (vrf == NULL) { 4991 stage_right: 4992 if (holds_lock == 0) 4993 SCTP_IPI_ADDR_RUNLOCK(); 4994 return (NULL); 4995 } 4996 hash_of_addr = sctp_get_ifa_hash_val(addr); 4997 4998 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 4999 if (hash_head == NULL) { 5000 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 5001 
hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark, 5002 (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark)); 5003 sctp_print_address(addr); 5004 SCTP_PRINTF("No such bucket for address\n"); 5005 if (holds_lock == 0) 5006 SCTP_IPI_ADDR_RUNLOCK(); 5007 5008 return (NULL); 5009 } 5010 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 5011 if (sctp_ifap == NULL) { 5012 #ifdef INVARIANTS 5013 panic("Huh LIST_FOREACH corrupt"); 5014 goto stage_right; 5015 #else 5016 SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n"); 5017 goto stage_right; 5018 #endif 5019 } 5020 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 5021 continue; 5022 #ifdef INET 5023 if (addr->sa_family == AF_INET) { 5024 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 5025 sctp_ifap->address.sin.sin_addr.s_addr) { 5026 /* found him. */ 5027 if (holds_lock == 0) 5028 SCTP_IPI_ADDR_RUNLOCK(); 5029 return (sctp_ifap); 5030 break; 5031 } 5032 } 5033 #endif 5034 #ifdef INET6 5035 if (addr->sa_family == AF_INET6) { 5036 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 5037 &sctp_ifap->address.sin6)) { 5038 /* found him. */ 5039 if (holds_lock == 0) 5040 SCTP_IPI_ADDR_RUNLOCK(); 5041 return (sctp_ifap); 5042 break; 5043 } 5044 } 5045 #endif 5046 } 5047 if (holds_lock == 0) 5048 SCTP_IPI_ADDR_RUNLOCK(); 5049 return (NULL); 5050 } 5051 5052 static void 5053 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock, 5054 uint32_t rwnd_req) 5055 { 5056 /* User pulled some data, do we need a rwnd update? 
*/ 5057 int r_unlocked = 0; 5058 uint32_t dif, rwnd; 5059 struct socket *so = NULL; 5060 5061 if (stcb == NULL) 5062 return; 5063 5064 atomic_add_int(&stcb->asoc.refcnt, 1); 5065 5066 if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | 5067 SCTP_STATE_SHUTDOWN_RECEIVED | 5068 SCTP_STATE_SHUTDOWN_ACK_SENT)) { 5069 /* Pre-check If we are freeing no update */ 5070 goto no_lock; 5071 } 5072 SCTP_INP_INCR_REF(stcb->sctp_ep); 5073 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5074 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5075 goto out; 5076 } 5077 so = stcb->sctp_socket; 5078 if (so == NULL) { 5079 goto out; 5080 } 5081 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 5082 /* Have you have freed enough to look */ 5083 *freed_so_far = 0; 5084 /* Yep, its worth a look and the lock overhead */ 5085 5086 /* Figure out what the rwnd would be */ 5087 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 5088 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 5089 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 5090 } else { 5091 dif = 0; 5092 } 5093 if (dif >= rwnd_req) { 5094 if (hold_rlock) { 5095 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5096 r_unlocked = 1; 5097 } 5098 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5099 /* 5100 * One last check before we allow the guy possibly 5101 * to get in. There is a race, where the guy has not 5102 * reached the gate. 
In that case 5103 */ 5104 goto out; 5105 } 5106 SCTP_TCB_LOCK(stcb); 5107 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5108 /* No reports here */ 5109 SCTP_TCB_UNLOCK(stcb); 5110 goto out; 5111 } 5112 SCTP_STAT_INCR(sctps_wu_sacks_sent); 5113 sctp_send_sack(stcb, SCTP_SO_LOCKED); 5114 5115 sctp_chunk_output(stcb->sctp_ep, stcb, 5116 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); 5117 /* make sure no timer is running */ 5118 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 5119 SCTP_TCB_UNLOCK(stcb); 5120 } else { 5121 /* Update how much we have pending */ 5122 stcb->freed_by_sorcv_sincelast = dif; 5123 } 5124 out: 5125 if (so && r_unlocked && hold_rlock) { 5126 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5127 } 5128 SCTP_INP_DECR_REF(stcb->sctp_ep); 5129 no_lock: 5130 atomic_add_int(&stcb->asoc.refcnt, -1); 5131 return; 5132 } 5133 5134 int 5135 sctp_sorecvmsg(struct socket *so, 5136 struct uio *uio, 5137 struct mbuf **mp, 5138 struct sockaddr *from, 5139 int fromlen, 5140 int *msg_flags, 5141 struct sctp_sndrcvinfo *sinfo, 5142 int filling_sinfo) 5143 { 5144 /* 5145 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 5146 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 5147 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
5148 * On the way out we may send out any combination of: 5149 * MSG_NOTIFICATION MSG_EOR 5150 * 5151 */ 5152 struct sctp_inpcb *inp = NULL; 5153 int my_len = 0; 5154 int cp_len = 0, error = 0; 5155 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5156 struct mbuf *m = NULL; 5157 struct sctp_tcb *stcb = NULL; 5158 int wakeup_read_socket = 0; 5159 int freecnt_applied = 0; 5160 int out_flags = 0, in_flags = 0; 5161 int block_allowed = 1; 5162 uint32_t freed_so_far = 0; 5163 uint32_t copied_so_far = 0; 5164 int in_eeor_mode = 0; 5165 int no_rcv_needed = 0; 5166 uint32_t rwnd_req = 0; 5167 int hold_sblock = 0; 5168 int hold_rlock = 0; 5169 int slen = 0; 5170 uint32_t held_length = 0; 5171 int sockbuf_lock = 0; 5172 5173 if (uio == NULL) { 5174 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5175 return (EINVAL); 5176 } 5177 if (msg_flags) { 5178 in_flags = *msg_flags; 5179 if (in_flags & MSG_PEEK) 5180 SCTP_STAT_INCR(sctps_read_peeks); 5181 } else { 5182 in_flags = 0; 5183 } 5184 slen = uio->uio_resid; 5185 5186 /* Pull in and set up our int flags */ 5187 if (in_flags & MSG_OOB) { 5188 /* Out of band's NOT supported */ 5189 return (EOPNOTSUPP); 5190 } 5191 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5192 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5193 return (EINVAL); 5194 } 5195 if ((in_flags & (MSG_DONTWAIT 5196 | MSG_NBIO 5197 )) || 5198 SCTP_SO_IS_NBIO(so)) { 5199 block_allowed = 0; 5200 } 5201 /* setup the endpoint */ 5202 inp = (struct sctp_inpcb *)so->so_pcb; 5203 if (inp == NULL) { 5204 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5205 return (EFAULT); 5206 } 5207 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5208 /* Must be at least a MTU's worth */ 5209 if (rwnd_req < SCTP_MIN_RWND) 5210 rwnd_req = SCTP_MIN_RWND; 5211 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5212 if (SCTP_BASE_SYSCTL(sctp_logging_level) & 
SCTP_RECV_RWND_LOGGING_ENABLE) { 5213 sctp_misc_ints(SCTP_SORECV_ENTER, 5214 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid); 5215 } 5216 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5217 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5218 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid); 5219 } 5220 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0)); 5221 if (error) { 5222 goto release_unlocked; 5223 } 5224 sockbuf_lock = 1; 5225 restart: 5226 5227 5228 restart_nosblocks: 5229 if (hold_sblock == 0) { 5230 SOCKBUF_LOCK(&so->so_rcv); 5231 hold_sblock = 1; 5232 } 5233 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5234 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5235 goto out; 5236 } 5237 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5238 if (so->so_error) { 5239 error = so->so_error; 5240 if ((in_flags & MSG_PEEK) == 0) 5241 so->so_error = 0; 5242 goto out; 5243 } else { 5244 if (so->so_rcv.sb_cc == 0) { 5245 /* indicate EOF */ 5246 error = 0; 5247 goto out; 5248 } 5249 } 5250 } 5251 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) { 5252 /* we need to wait for data */ 5253 if ((so->so_rcv.sb_cc == 0) && 5254 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5255 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5256 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5257 /* 5258 * For active open side clear flags for 5259 * re-use passive open is blocked by 5260 * connect. 
5261 */ 5262 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5263 /* 5264 * You were aborted, passive side 5265 * always hits here 5266 */ 5267 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5268 error = ECONNRESET; 5269 } 5270 so->so_state &= ~(SS_ISCONNECTING | 5271 SS_ISDISCONNECTING | 5272 SS_ISCONFIRMING | 5273 SS_ISCONNECTED); 5274 if (error == 0) { 5275 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5276 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5277 error = ENOTCONN; 5278 } 5279 } 5280 goto out; 5281 } 5282 } 5283 error = sbwait(&so->so_rcv); 5284 if (error) { 5285 goto out; 5286 } 5287 held_length = 0; 5288 goto restart_nosblocks; 5289 } else if (so->so_rcv.sb_cc == 0) { 5290 if (so->so_error) { 5291 error = so->so_error; 5292 if ((in_flags & MSG_PEEK) == 0) 5293 so->so_error = 0; 5294 } else { 5295 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5296 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 5297 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5298 /* 5299 * For active open side clear flags 5300 * for re-use passive open is 5301 * blocked by connect. 
5302 */ 5303 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5304 /* 5305 * You were aborted, passive 5306 * side always hits here 5307 */ 5308 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5309 error = ECONNRESET; 5310 } 5311 so->so_state &= ~(SS_ISCONNECTING | 5312 SS_ISDISCONNECTING | 5313 SS_ISCONFIRMING | 5314 SS_ISCONNECTED); 5315 if (error == 0) { 5316 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5317 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5318 error = ENOTCONN; 5319 } 5320 } 5321 goto out; 5322 } 5323 } 5324 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5325 error = EWOULDBLOCK; 5326 } 5327 goto out; 5328 } 5329 if (hold_sblock == 1) { 5330 SOCKBUF_UNLOCK(&so->so_rcv); 5331 hold_sblock = 0; 5332 } 5333 /* we possibly have data we can read */ 5334 /* sa_ignore FREED_MEMORY */ 5335 control = TAILQ_FIRST(&inp->read_queue); 5336 if (control == NULL) { 5337 /* 5338 * This could be happening since the appender did the 5339 * increment but as not yet did the tailq insert onto the 5340 * read_queue 5341 */ 5342 if (hold_rlock == 0) { 5343 SCTP_INP_READ_LOCK(inp); 5344 } 5345 control = TAILQ_FIRST(&inp->read_queue); 5346 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5347 #ifdef INVARIANTS 5348 panic("Huh, its non zero and nothing on control?"); 5349 #endif 5350 so->so_rcv.sb_cc = 0; 5351 } 5352 SCTP_INP_READ_UNLOCK(inp); 5353 hold_rlock = 0; 5354 goto restart; 5355 } 5356 if ((control->length == 0) && 5357 (control->do_not_ref_stcb)) { 5358 /* 5359 * Clean up code for freeing assoc that left behind a 5360 * pdapi.. maybe a peer in EEOR that just closed after 5361 * sending and never indicated a EOR. 5362 */ 5363 if (hold_rlock == 0) { 5364 hold_rlock = 1; 5365 SCTP_INP_READ_LOCK(inp); 5366 } 5367 control->held_length = 0; 5368 if (control->data) { 5369 /* Hmm there is data here .. 
fix */ 5370 struct mbuf *m_tmp; 5371 int cnt = 0; 5372 5373 m_tmp = control->data; 5374 while (m_tmp) { 5375 cnt += SCTP_BUF_LEN(m_tmp); 5376 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5377 control->tail_mbuf = m_tmp; 5378 control->end_added = 1; 5379 } 5380 m_tmp = SCTP_BUF_NEXT(m_tmp); 5381 } 5382 control->length = cnt; 5383 } else { 5384 /* remove it */ 5385 TAILQ_REMOVE(&inp->read_queue, control, next); 5386 /* Add back any hiddend data */ 5387 sctp_free_remote_addr(control->whoFrom); 5388 sctp_free_a_readq(stcb, control); 5389 } 5390 if (hold_rlock) { 5391 hold_rlock = 0; 5392 SCTP_INP_READ_UNLOCK(inp); 5393 } 5394 goto restart; 5395 } 5396 if ((control->length == 0) && 5397 (control->end_added == 1)) { 5398 /* 5399 * Do we also need to check for (control->pdapi_aborted == 5400 * 1)? 5401 */ 5402 if (hold_rlock == 0) { 5403 hold_rlock = 1; 5404 SCTP_INP_READ_LOCK(inp); 5405 } 5406 TAILQ_REMOVE(&inp->read_queue, control, next); 5407 if (control->data) { 5408 #ifdef INVARIANTS 5409 panic("control->data not null but control->length == 0"); 5410 #else 5411 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5412 sctp_m_freem(control->data); 5413 control->data = NULL; 5414 #endif 5415 } 5416 if (control->aux_data) { 5417 sctp_m_free(control->aux_data); 5418 control->aux_data = NULL; 5419 } 5420 sctp_free_remote_addr(control->whoFrom); 5421 sctp_free_a_readq(stcb, control); 5422 if (hold_rlock) { 5423 hold_rlock = 0; 5424 SCTP_INP_READ_UNLOCK(inp); 5425 } 5426 goto restart; 5427 } 5428 if (control->length == 0) { 5429 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5430 (filling_sinfo)) { 5431 /* find a more suitable one then this */ 5432 ctl = TAILQ_NEXT(control, next); 5433 while (ctl) { 5434 if ((ctl->stcb != control->stcb) && (ctl->length) && 5435 (ctl->some_taken || 5436 (ctl->spec_flags & M_NOTIFICATION) || 5437 ((ctl->do_not_ref_stcb == 0) && 5438 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5439 ) { 5440 /*- 5441 * If we have a different TCB next, and there is data 5442 * present. If we have already taken some (pdapi), OR we can 5443 * ref the tcb and no delivery as started on this stream, we 5444 * take it. Note we allow a notification on a different 5445 * assoc to be delivered.. 5446 */ 5447 control = ctl; 5448 goto found_one; 5449 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5450 (ctl->length) && 5451 ((ctl->some_taken) || 5452 ((ctl->do_not_ref_stcb == 0) && 5453 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5454 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5455 /*- 5456 * If we have the same tcb, and there is data present, and we 5457 * have the strm interleave feature present. Then if we have 5458 * taken some (pdapi) or we can refer to tht tcb AND we have 5459 * not started a delivery for this stream, we can take it. 5460 * Note we do NOT allow a notificaiton on the same assoc to 5461 * be delivered. 
5462 */ 5463 control = ctl; 5464 goto found_one; 5465 } 5466 ctl = TAILQ_NEXT(ctl, next); 5467 } 5468 } 5469 /* 5470 * if we reach here, not suitable replacement is available 5471 * <or> fragment interleave is NOT on. So stuff the sb_cc 5472 * into the our held count, and its time to sleep again. 5473 */ 5474 held_length = so->so_rcv.sb_cc; 5475 control->held_length = so->so_rcv.sb_cc; 5476 goto restart; 5477 } 5478 /* Clear the held length since there is something to read */ 5479 control->held_length = 0; 5480 if (hold_rlock) { 5481 SCTP_INP_READ_UNLOCK(inp); 5482 hold_rlock = 0; 5483 } 5484 found_one: 5485 /* 5486 * If we reach here, control has a some data for us to read off. 5487 * Note that stcb COULD be NULL. 5488 */ 5489 control->some_taken++; 5490 if (hold_sblock) { 5491 SOCKBUF_UNLOCK(&so->so_rcv); 5492 hold_sblock = 0; 5493 } 5494 stcb = control->stcb; 5495 if (stcb) { 5496 if ((control->do_not_ref_stcb == 0) && 5497 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5498 if (freecnt_applied == 0) 5499 stcb = NULL; 5500 } else if (control->do_not_ref_stcb == 0) { 5501 /* you can't free it on me please */ 5502 /* 5503 * The lock on the socket buffer protects us so the 5504 * free code will stop. But since we used the 5505 * socketbuf lock and the sender uses the tcb_lock 5506 * to increment, we need to use the atomic add to 5507 * the refcnt 5508 */ 5509 if (freecnt_applied) { 5510 #ifdef INVARIANTS 5511 panic("refcnt already incremented"); 5512 #else 5513 SCTP_PRINTF("refcnt already incremented?\n"); 5514 #endif 5515 } else { 5516 atomic_add_int(&stcb->asoc.refcnt, 1); 5517 freecnt_applied = 1; 5518 } 5519 /* 5520 * Setup to remember how much we have not yet told 5521 * the peer our rwnd has opened up. Note we grab the 5522 * value from the tcb from last time. Note too that 5523 * sack sending clears this when a sack is sent, 5524 * which is fine. 
Once we hit the rwnd_req, we then 5525 * will go to the sctp_user_rcvd() that will not 5526 * lock until it KNOWs it MUST send a WUP-SACK. 5527 */ 5528 freed_so_far = stcb->freed_by_sorcv_sincelast; 5529 stcb->freed_by_sorcv_sincelast = 0; 5530 } 5531 } 5532 if (stcb && 5533 ((control->spec_flags & M_NOTIFICATION) == 0) && 5534 control->do_not_ref_stcb == 0) { 5535 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5536 } 5537 /* First lets get off the sinfo and sockaddr info */ 5538 if ((sinfo) && filling_sinfo) { 5539 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo)); 5540 nxt = TAILQ_NEXT(control, next); 5541 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5542 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5543 struct sctp_extrcvinfo *s_extra; 5544 5545 s_extra = (struct sctp_extrcvinfo *)sinfo; 5546 if ((nxt) && 5547 (nxt->length)) { 5548 s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5549 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5550 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5551 } 5552 if (nxt->spec_flags & M_NOTIFICATION) { 5553 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5554 } 5555 s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id; 5556 s_extra->sreinfo_next_length = nxt->length; 5557 s_extra->sreinfo_next_ppid = nxt->sinfo_ppid; 5558 s_extra->sreinfo_next_stream = nxt->sinfo_stream; 5559 if (nxt->tail_mbuf != NULL) { 5560 if (nxt->end_added) { 5561 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5562 } 5563 } 5564 } else { 5565 /* 5566 * we explicitly 0 this, since the memcpy 5567 * got some other things beyond the older 5568 * sinfo_ that is on the control's structure 5569 * :-D 5570 */ 5571 nxt = NULL; 5572 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG; 5573 s_extra->sreinfo_next_aid = 0; 5574 s_extra->sreinfo_next_length = 0; 5575 s_extra->sreinfo_next_ppid = 0; 5576 s_extra->sreinfo_next_stream = 0; 5577 } 5578 } 5579 /* 5580 * update off the real current 
cum-ack, if we have an stcb. 5581 */ 5582 if ((control->do_not_ref_stcb == 0) && stcb) 5583 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5584 /* 5585 * mask off the high bits, we keep the actual chunk bits in 5586 * there. 5587 */ 5588 sinfo->sinfo_flags &= 0x00ff; 5589 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5590 sinfo->sinfo_flags |= SCTP_UNORDERED; 5591 } 5592 } 5593 #ifdef SCTP_ASOCLOG_OF_TSNS 5594 { 5595 int index, newindex; 5596 struct sctp_pcbtsn_rlog *entry; 5597 5598 do { 5599 index = inp->readlog_index; 5600 newindex = index + 1; 5601 if (newindex >= SCTP_READ_LOG_SIZE) { 5602 newindex = 0; 5603 } 5604 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5605 entry = &inp->readlog[index]; 5606 entry->vtag = control->sinfo_assoc_id; 5607 entry->strm = control->sinfo_stream; 5608 entry->seq = control->sinfo_ssn; 5609 entry->sz = control->length; 5610 entry->flgs = control->sinfo_flags; 5611 } 5612 #endif 5613 if (fromlen && from) { 5614 cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len); 5615 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5616 #ifdef INET6 5617 case AF_INET6: 5618 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from; 5619 break; 5620 #endif 5621 #ifdef INET 5622 case AF_INET: 5623 ((struct sockaddr_in *)from)->sin_port = control->port_from; 5624 break; 5625 #endif 5626 default: 5627 break; 5628 } 5629 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 5630 5631 #if defined(INET) && defined(INET6) 5632 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) && 5633 (from->sa_family == AF_INET) && 5634 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) { 5635 struct sockaddr_in *sin; 5636 struct sockaddr_in6 sin6; 5637 5638 sin = (struct sockaddr_in *)from; 5639 bzero(&sin6, sizeof(sin6)); 5640 sin6.sin6_family = AF_INET6; 5641 sin6.sin6_len = sizeof(struct sockaddr_in6); 5642 sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); 5643 bcopy(&sin->sin_addr, 5644 
&sin6.sin6_addr.s6_addr32[3], 5645 sizeof(sin6.sin6_addr.s6_addr32[3])); 5646 sin6.sin6_port = sin->sin_port; 5647 memcpy(from, &sin6, sizeof(struct sockaddr_in6)); 5648 } 5649 #endif 5650 #ifdef INET6 5651 { 5652 struct sockaddr_in6 lsa6, *from6; 5653 5654 from6 = (struct sockaddr_in6 *)from; 5655 sctp_recover_scope_mac(from6, (&lsa6)); 5656 } 5657 #endif 5658 } 5659 /* now copy out what data we can */ 5660 if (mp == NULL) { 5661 /* copy out each mbuf in the chain up to length */ 5662 get_more_data: 5663 m = control->data; 5664 while (m) { 5665 /* Move out all we can */ 5666 cp_len = (int)uio->uio_resid; 5667 my_len = (int)SCTP_BUF_LEN(m); 5668 if (cp_len > my_len) { 5669 /* not enough in this buf */ 5670 cp_len = my_len; 5671 } 5672 if (hold_rlock) { 5673 SCTP_INP_READ_UNLOCK(inp); 5674 hold_rlock = 0; 5675 } 5676 if (cp_len > 0) 5677 error = uiomove(mtod(m, char *), cp_len, uio); 5678 /* re-read */ 5679 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5680 goto release; 5681 } 5682 if ((control->do_not_ref_stcb == 0) && stcb && 5683 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5684 no_rcv_needed = 1; 5685 } 5686 if (error) { 5687 /* error we are out of here */ 5688 goto release; 5689 } 5690 if ((SCTP_BUF_NEXT(m) == NULL) && 5691 (cp_len >= SCTP_BUF_LEN(m)) && 5692 ((control->end_added == 0) || 5693 (control->end_added && 5694 (TAILQ_NEXT(control, next) == NULL))) 5695 ) { 5696 SCTP_INP_READ_LOCK(inp); 5697 hold_rlock = 1; 5698 } 5699 if (cp_len == SCTP_BUF_LEN(m)) { 5700 if ((SCTP_BUF_NEXT(m) == NULL) && 5701 (control->end_added)) { 5702 out_flags |= MSG_EOR; 5703 if ((control->do_not_ref_stcb == 0) && 5704 (control->stcb != NULL) && 5705 ((control->spec_flags & M_NOTIFICATION) == 0)) 5706 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5707 } 5708 if (control->spec_flags & M_NOTIFICATION) { 5709 out_flags |= MSG_NOTIFICATION; 5710 } 5711 /* we ate up the mbuf */ 5712 if (in_flags & MSG_PEEK) { 5713 /* just looking */ 5714 m = 
SCTP_BUF_NEXT(m); 5715 copied_so_far += cp_len; 5716 } else { 5717 /* dispose of the mbuf */ 5718 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5719 sctp_sblog(&so->so_rcv, 5720 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5721 } 5722 sctp_sbfree(control, stcb, &so->so_rcv, m); 5723 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5724 sctp_sblog(&so->so_rcv, 5725 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5726 } 5727 copied_so_far += cp_len; 5728 freed_so_far += cp_len; 5729 freed_so_far += MSIZE; 5730 atomic_subtract_int(&control->length, cp_len); 5731 control->data = sctp_m_free(m); 5732 m = control->data; 5733 /* 5734 * been through it all, must hold sb 5735 * lock ok to null tail 5736 */ 5737 if (control->data == NULL) { 5738 #ifdef INVARIANTS 5739 if ((control->end_added == 0) || 5740 (TAILQ_NEXT(control, next) == NULL)) { 5741 /* 5742 * If the end is not 5743 * added, OR the 5744 * next is NOT null 5745 * we MUST have the 5746 * lock. 5747 */ 5748 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 5749 panic("Hmm we don't own the lock?"); 5750 } 5751 } 5752 #endif 5753 control->tail_mbuf = NULL; 5754 #ifdef INVARIANTS 5755 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 5756 panic("end_added, nothing left and no MSG_EOR"); 5757 } 5758 #endif 5759 } 5760 } 5761 } else { 5762 /* Do we need to trim the mbuf? */ 5763 if (control->spec_flags & M_NOTIFICATION) { 5764 out_flags |= MSG_NOTIFICATION; 5765 } 5766 if ((in_flags & MSG_PEEK) == 0) { 5767 SCTP_BUF_RESV_UF(m, cp_len); 5768 SCTP_BUF_LEN(m) -= cp_len; 5769 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5770 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5771 } 5772 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5773 if ((control->do_not_ref_stcb == 0) && 5774 stcb) { 5775 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5776 } 5777 copied_so_far += cp_len; 5778 freed_so_far += cp_len; 5779 freed_so_far += MSIZE; 5780 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5781 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5782 SCTP_LOG_SBRESULT, 0); 5783 } 5784 atomic_subtract_int(&control->length, cp_len); 5785 } else { 5786 copied_so_far += cp_len; 5787 } 5788 } 5789 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 5790 break; 5791 } 5792 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5793 (control->do_not_ref_stcb == 0) && 5794 (freed_so_far >= rwnd_req)) { 5795 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5796 } 5797 } /* end while(m) */ 5798 /* 5799 * At this point we have looked at it all and we either have 5800 * a MSG_EOR/or read all the user wants... <OR> 5801 * control->length == 0. 5802 */ 5803 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 5804 /* we are done with this control */ 5805 if (control->length == 0) { 5806 if (control->data) { 5807 #ifdef INVARIANTS 5808 panic("control->data not null at read eor?"); 5809 #else 5810 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n"); 5811 sctp_m_freem(control->data); 5812 control->data = NULL; 5813 #endif 5814 } 5815 done_with_control: 5816 if (TAILQ_NEXT(control, next) == NULL) { 5817 /* 5818 * If we don't have a next we need a 5819 * lock, if there is a next 5820 * interrupt is filling ahead of us 5821 * and we don't need a lock to 5822 * remove this guy (which is the 5823 * head of the queue). 
5824 */ 5825 if (hold_rlock == 0) { 5826 SCTP_INP_READ_LOCK(inp); 5827 hold_rlock = 1; 5828 } 5829 } 5830 TAILQ_REMOVE(&inp->read_queue, control, next); 5831 /* Add back any hiddend data */ 5832 if (control->held_length) { 5833 held_length = 0; 5834 control->held_length = 0; 5835 wakeup_read_socket = 1; 5836 } 5837 if (control->aux_data) { 5838 sctp_m_free(control->aux_data); 5839 control->aux_data = NULL; 5840 } 5841 no_rcv_needed = control->do_not_ref_stcb; 5842 sctp_free_remote_addr(control->whoFrom); 5843 control->data = NULL; 5844 sctp_free_a_readq(stcb, control); 5845 control = NULL; 5846 if ((freed_so_far >= rwnd_req) && 5847 (no_rcv_needed == 0)) 5848 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5849 5850 } else { 5851 /* 5852 * The user did not read all of this 5853 * message, turn off the returned MSG_EOR 5854 * since we are leaving more behind on the 5855 * control to read. 5856 */ 5857 #ifdef INVARIANTS 5858 if (control->end_added && 5859 (control->data == NULL) && 5860 (control->tail_mbuf == NULL)) { 5861 panic("Gak, control->length is corrupt?"); 5862 } 5863 #endif 5864 no_rcv_needed = control->do_not_ref_stcb; 5865 out_flags &= ~MSG_EOR; 5866 } 5867 } 5868 if (out_flags & MSG_EOR) { 5869 goto release; 5870 } 5871 if ((uio->uio_resid == 0) || 5872 ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1))) 5873 ) { 5874 goto release; 5875 } 5876 /* 5877 * If I hit here the receiver wants more and this message is 5878 * NOT done (pd-api). So two questions. Can we block? if not 5879 * we are done. Did the user NOT set MSG_WAITALL? 5880 */ 5881 if (block_allowed == 0) { 5882 goto release; 5883 } 5884 /* 5885 * We need to wait for more data a few things: - We don't 5886 * sbunlock() so we don't get someone else reading. - We 5887 * must be sure to account for the case where what is added 5888 * is NOT to our control when we wakeup. 
5889 */ 5890 5891 /* 5892 * Do we need to tell the transport a rwnd update might be 5893 * needed before we go to sleep? 5894 */ 5895 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5896 ((freed_so_far >= rwnd_req) && 5897 (control->do_not_ref_stcb == 0) && 5898 (no_rcv_needed == 0))) { 5899 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5900 } 5901 wait_some_more: 5902 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5903 goto release; 5904 } 5905 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5906 goto release; 5907 5908 if (hold_rlock == 1) { 5909 SCTP_INP_READ_UNLOCK(inp); 5910 hold_rlock = 0; 5911 } 5912 if (hold_sblock == 0) { 5913 SOCKBUF_LOCK(&so->so_rcv); 5914 hold_sblock = 1; 5915 } 5916 if ((copied_so_far) && (control->length == 0) && 5917 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 5918 goto release; 5919 } 5920 if (so->so_rcv.sb_cc <= control->held_length) { 5921 error = sbwait(&so->so_rcv); 5922 if (error) { 5923 goto release; 5924 } 5925 control->held_length = 0; 5926 } 5927 if (hold_sblock) { 5928 SOCKBUF_UNLOCK(&so->so_rcv); 5929 hold_sblock = 0; 5930 } 5931 if (control->length == 0) { 5932 /* still nothing here */ 5933 if (control->end_added == 1) { 5934 /* he aborted, or is done i.e.did a shutdown */ 5935 out_flags |= MSG_EOR; 5936 if (control->pdapi_aborted) { 5937 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5938 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5939 5940 out_flags |= MSG_TRUNC; 5941 } else { 5942 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5943 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5944 } 5945 goto done_with_control; 5946 } 5947 if (so->so_rcv.sb_cc > held_length) { 5948 control->held_length = so->so_rcv.sb_cc; 5949 held_length = 0; 5950 } 5951 goto wait_some_more; 5952 } else if (control->data == NULL) { 5953 /* 5954 * we must re-sync since data is probably being 
5955 * added 5956 */ 5957 SCTP_INP_READ_LOCK(inp); 5958 if ((control->length > 0) && (control->data == NULL)) { 5959 /* 5960 * big trouble.. we have the lock and its 5961 * corrupt? 5962 */ 5963 #ifdef INVARIANTS 5964 panic("Impossible data==NULL length !=0"); 5965 #endif 5966 out_flags |= MSG_EOR; 5967 out_flags |= MSG_TRUNC; 5968 control->length = 0; 5969 SCTP_INP_READ_UNLOCK(inp); 5970 goto done_with_control; 5971 } 5972 SCTP_INP_READ_UNLOCK(inp); 5973 /* We will fall around to get more data */ 5974 } 5975 goto get_more_data; 5976 } else { 5977 /*- 5978 * Give caller back the mbuf chain, 5979 * store in uio_resid the length 5980 */ 5981 wakeup_read_socket = 0; 5982 if ((control->end_added == 0) || 5983 (TAILQ_NEXT(control, next) == NULL)) { 5984 /* Need to get rlock */ 5985 if (hold_rlock == 0) { 5986 SCTP_INP_READ_LOCK(inp); 5987 hold_rlock = 1; 5988 } 5989 } 5990 if (control->end_added) { 5991 out_flags |= MSG_EOR; 5992 if ((control->do_not_ref_stcb == 0) && 5993 (control->stcb != NULL) && 5994 ((control->spec_flags & M_NOTIFICATION) == 0)) 5995 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5996 } 5997 if (control->spec_flags & M_NOTIFICATION) { 5998 out_flags |= MSG_NOTIFICATION; 5999 } 6000 uio->uio_resid = control->length; 6001 *mp = control->data; 6002 m = control->data; 6003 while (m) { 6004 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6005 sctp_sblog(&so->so_rcv, 6006 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6007 } 6008 sctp_sbfree(control, stcb, &so->so_rcv, m); 6009 freed_so_far += SCTP_BUF_LEN(m); 6010 freed_so_far += MSIZE; 6011 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6012 sctp_sblog(&so->so_rcv, 6013 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 6014 } 6015 m = SCTP_BUF_NEXT(m); 6016 } 6017 control->data = control->tail_mbuf = NULL; 6018 control->length = 0; 6019 if (out_flags & MSG_EOR) { 6020 /* Done with this control */ 6021 goto done_with_control; 6022 } 6023 } 6024 release: 6025 if (hold_rlock == 1) { 6026 SCTP_INP_READ_UNLOCK(inp); 6027 hold_rlock = 0; 6028 } 6029 if (hold_sblock == 1) { 6030 SOCKBUF_UNLOCK(&so->so_rcv); 6031 hold_sblock = 0; 6032 } 6033 sbunlock(&so->so_rcv); 6034 sockbuf_lock = 0; 6035 6036 release_unlocked: 6037 if (hold_sblock) { 6038 SOCKBUF_UNLOCK(&so->so_rcv); 6039 hold_sblock = 0; 6040 } 6041 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6042 if ((freed_so_far >= rwnd_req) && 6043 (control && (control->do_not_ref_stcb == 0)) && 6044 (no_rcv_needed == 0)) 6045 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6046 } 6047 out: 6048 if (msg_flags) { 6049 *msg_flags = out_flags; 6050 } 6051 if (((out_flags & MSG_EOR) == 0) && 6052 ((in_flags & MSG_PEEK) == 0) && 6053 (sinfo) && 6054 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6055 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6056 struct sctp_extrcvinfo *s_extra; 6057 6058 s_extra = (struct sctp_extrcvinfo *)sinfo; 6059 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG; 6060 } 6061 if (hold_rlock == 1) { 6062 SCTP_INP_READ_UNLOCK(inp); 6063 } 6064 if (hold_sblock) { 6065 SOCKBUF_UNLOCK(&so->so_rcv); 6066 } 6067 if (sockbuf_lock) { 6068 sbunlock(&so->so_rcv); 6069 } 6070 if (freecnt_applied) { 6071 /* 6072 * The lock on the socket buffer protects us so the free 6073 * code will stop. But since we used the socketbuf lock and 6074 * the sender uses the tcb_lock to increment, we need to use 6075 * the atomic add to the refcnt. 
		 */
		/* freecnt_applied implies we hold a ref on a non-NULL stcb */
		if (stcb == NULL) {
#ifdef INVARIANTS
			panic("stcb for refcnt has gone NULL?");
			goto stage_left;
#else
			goto stage_left;
#endif
		}
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
stage_left:
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}


#ifdef SCTP_MBUF_LOGGING
/*
 * Free a single mbuf, logging the free when SCTP mbuf logging is
 * enabled and the mbuf is extended (cluster-backed).  Returns the
 * result of m_free(), i.e. the next mbuf in the chain.
 */
struct mbuf *
sctp_m_free(struct mbuf *m)
{
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		if (SCTP_BUF_IS_EXTENDED(m)) {
			sctp_log_mb(m, SCTP_MBUF_IFREE);
		}
	}
	return (m_free(m));
}

/* Free an entire mbuf chain through the logging wrapper above. */
void
sctp_m_freem(struct mbuf *mb)
{
	while (mb != NULL)
		mb = sctp_m_free(mb);
}

#endif

/*
 * Given a local address, for all associations that hold the address,
 * request a peer-set-primary by queueing a work item for the address
 * iterator.  Returns 0 on success, EADDRNOTAVAIL if the address is
 * not a known local address, or ENOMEM if no work-queue entry can be
 * allocated.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* hold a reference on the ifa for the work item */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}


/*
 * Protocol-specific soreceive() entry point for SCTP.  Translates the
 * generic socket-receive arguments into a sctp_sorecvmsg() call and,
 * on return, packages the received sndrcv info as a control message
 * (when the application asked for it) and duplicates the peer address
 * for the caller.  The peer address is staged in a 256-byte stack
 * buffer before being duplicated with sodupsockaddr().
 */
int
sctp_soreceive(struct socket *so,
    struct sockaddr **psa,
    struct uio *uio,
    struct mbuf **mp0,
    struct mbuf **controlp,
    int *flagsp)
{
	int error, fromlen;
	uint8_t sockbuf[256];
	struct sockaddr *from;
	struct sctp_extrcvinfo sinfo;
	int filling_sinfo = 1;
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pickup the assoc we are reading from */
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
	    (controlp == NULL)) {
		/* user does not want the sndrcv ctl */
		filling_sinfo = 0;
	}
	if (psa) {
		from = (struct sockaddr *)sockbuf;
		fromlen = sizeof(sockbuf);
		from->sa_len = 0;
	} else {
		from = NULL;
		fromlen = 0;
	}

	error = sctp_sorecvmsg(so, uio, mp0, from,
fromlen, flagsp, 6219 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6220 if ((controlp) && (filling_sinfo)) { 6221 /* copy back the sinfo in a CMSG format */ 6222 if (filling_sinfo) 6223 *controlp = sctp_build_ctl_nchunk(inp, 6224 (struct sctp_sndrcvinfo *)&sinfo); 6225 else 6226 *controlp = NULL; 6227 } 6228 if (psa) { 6229 /* copy back the address info */ 6230 if (from && from->sa_len) { 6231 *psa = sodupsockaddr(from, M_NOWAIT); 6232 } else { 6233 *psa = NULL; 6234 } 6235 } 6236 return (error); 6237 } 6238 6239 6240 6241 6242 6243 int 6244 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6245 int totaddr, int *error) 6246 { 6247 int added = 0; 6248 int i; 6249 struct sctp_inpcb *inp; 6250 struct sockaddr *sa; 6251 size_t incr = 0; 6252 6253 #ifdef INET 6254 struct sockaddr_in *sin; 6255 6256 #endif 6257 #ifdef INET6 6258 struct sockaddr_in6 *sin6; 6259 6260 #endif 6261 6262 sa = addr; 6263 inp = stcb->sctp_ep; 6264 *error = 0; 6265 for (i = 0; i < totaddr; i++) { 6266 switch (sa->sa_family) { 6267 #ifdef INET 6268 case AF_INET: 6269 incr = sizeof(struct sockaddr_in); 6270 sin = (struct sockaddr_in *)sa; 6271 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6272 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6273 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6274 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6275 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7); 6276 *error = EINVAL; 6277 goto out_now; 6278 } 6279 if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 6280 /* assoc gone no un-lock */ 6281 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6282 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7); 6283 *error = ENOBUFS; 6284 goto out_now; 6285 } 6286 added++; 6287 break; 6288 #endif 6289 #ifdef INET6 6290 case AF_INET6: 6291 incr = sizeof(struct sockaddr_in6); 6292 sin6 = (struct 
			    sockaddr_in6 *)sa;
			/* reject unspecified and multicast v6 addresses */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}

/*
 * Walk the packed address array, counting v4/v6 entries and validating
 * each sa_len.  If any address already maps to an existing association,
 * return that TCB (the inp reference taken before the lookup is kept in
 * that case); otherwise return NULL.  On a malformed address, *error
 * and *bad_addr are set and NULL is returned.  An unknown family or the
 * 'limit' byte bound truncates *totaddr to the number of entries seen.
 */
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    int *totaddr, int *num_v4, int *num_v6, int *error,
    int limit, int *bad_addr)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	size_t incr, at, i;

	at = incr = 0;
	sa = addr;

	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < (size_t)*totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			(*num_v4) += 1;
			incr = sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			break;
#endif
#ifdef INET6
		case AF_INET6:
			{
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)sa;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					/* Must be non-mapped for connectx */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				(*num_v6) +=
				    1;
				incr = sizeof(struct sockaddr_in6);
				if (sa->sa_len != incr) {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				break;
			}
#endif
		default:
			*totaddr = i;
			/* we are done */
			break;
		}
		if (i == (size_t)*totaddr) {
			break;
		}
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* Already have or am bring up an association */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		/*
		 * NOTE(review): 'at' is initialized to 0 and never
		 * advanced, so this bound check only compares a single
		 * address size against 'limit' -- confirm whether
		 * 'at += incr' was intended here.
		 */
		if ((at + incr) > (size_t)limit) {
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}

/*
 * sctp_bindx(ADD) for one address.
 * assumes all arguments are valid/checked by caller.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	/* storage for a v4-mapped address converted to plain v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already!
	 */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the mapped address to plain v4 and use that */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* endpoint not yet bound: this becomes the primary bind */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		/*
		 * NOTE(review): addr_touse may be a sockaddr_in6 here;
		 * this cast relies on sin_port and sin6_port sharing the
		 * same offset -- confirm.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}

/*
 * sctp_bindx(DELETE) for one address.
 * assumes all arguments are valid/checked by caller.
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	/* storage for a v4-mapped address converted to plain v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already!
*/ 6541 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6542 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6543 *error = EINVAL; 6544 return; 6545 } 6546 addr_touse = sa; 6547 #ifdef INET6 6548 if (sa->sa_family == AF_INET6) { 6549 struct sockaddr_in6 *sin6; 6550 6551 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6552 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6553 *error = EINVAL; 6554 return; 6555 } 6556 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6557 /* can only bind v6 on PF_INET6 sockets */ 6558 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6559 *error = EINVAL; 6560 return; 6561 } 6562 sin6 = (struct sockaddr_in6 *)addr_touse; 6563 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6564 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6565 SCTP_IPV6_V6ONLY(inp)) { 6566 /* can't bind mapped-v4 on PF_INET sockets */ 6567 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6568 *error = EINVAL; 6569 return; 6570 } 6571 in6_sin6_2_sin(&sin, sin6); 6572 addr_touse = (struct sockaddr *)&sin; 6573 } 6574 } 6575 #endif 6576 #ifdef INET 6577 if (sa->sa_family == AF_INET) { 6578 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6579 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6580 *error = EINVAL; 6581 return; 6582 } 6583 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6584 SCTP_IPV6_V6ONLY(inp)) { 6585 /* can't bind v4 on PF_INET sockets */ 6586 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6587 *error = EINVAL; 6588 return; 6589 } 6590 } 6591 #endif 6592 /* 6593 * No lock required mgmt_ep_sa does its own locking. If the FIX: 6594 * below is ever changed we may need to lock before calling 6595 * association level binding. 
6596 */ 6597 if (assoc_id == 0) { 6598 /* delete the address */ 6599 *error = sctp_addr_mgmt_ep_sa(inp, addr_touse, 6600 SCTP_DEL_IP_ADDRESS, 6601 vrf_id, NULL); 6602 } else { 6603 /* 6604 * FIX: decide whether we allow assoc based bindx 6605 */ 6606 } 6607 } 6608 6609 /* 6610 * returns the valid local address count for an assoc, taking into account 6611 * all scoping rules 6612 */ 6613 int 6614 sctp_local_addr_count(struct sctp_tcb *stcb) 6615 { 6616 int loopback_scope, ipv4_local_scope, local_scope, site_scope; 6617 int ipv4_addr_legal, ipv6_addr_legal; 6618 struct sctp_vrf *vrf; 6619 struct sctp_ifn *sctp_ifn; 6620 struct sctp_ifa *sctp_ifa; 6621 int count = 0; 6622 6623 /* Turn on all the appropriate scopes */ 6624 loopback_scope = stcb->asoc.loopback_scope; 6625 ipv4_local_scope = stcb->asoc.ipv4_local_scope; 6626 local_scope = stcb->asoc.local_scope; 6627 site_scope = stcb->asoc.site_scope; 6628 ipv4_addr_legal = ipv6_addr_legal = 0; 6629 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 6630 ipv6_addr_legal = 1; 6631 if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) { 6632 ipv4_addr_legal = 1; 6633 } 6634 } else { 6635 ipv4_addr_legal = 1; 6636 } 6637 6638 SCTP_IPI_ADDR_RLOCK(); 6639 vrf = sctp_find_vrf(stcb->asoc.vrf_id); 6640 if (vrf == NULL) { 6641 /* no vrf, no addresses */ 6642 SCTP_IPI_ADDR_RUNLOCK(); 6643 return (0); 6644 } 6645 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6646 /* 6647 * bound all case: go through all ifns on the vrf 6648 */ 6649 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 6650 if ((loopback_scope == 0) && 6651 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 6652 continue; 6653 } 6654 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 6655 if (sctp_is_addr_restricted(stcb, sctp_ifa)) 6656 continue; 6657 switch (sctp_ifa->address.sa.sa_family) { 6658 #ifdef INET 6659 case AF_INET: 6660 if (ipv4_addr_legal) { 6661 struct sockaddr_in *sin; 6662 6663 sin = (struct sockaddr_in *)&sctp_ifa->address.sa; 6664 if 
(sin->sin_addr.s_addr == 0) { 6665 /* 6666 * skip unspecified 6667 * addrs 6668 */ 6669 continue; 6670 } 6671 if ((ipv4_local_scope == 0) && 6672 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 6673 continue; 6674 } 6675 /* count this one */ 6676 count++; 6677 } else { 6678 continue; 6679 } 6680 break; 6681 #endif 6682 #ifdef INET6 6683 case AF_INET6: 6684 if (ipv6_addr_legal) { 6685 struct sockaddr_in6 *sin6; 6686 6687 sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa; 6688 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 6689 continue; 6690 } 6691 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 6692 if (local_scope == 0) 6693 continue; 6694 if (sin6->sin6_scope_id == 0) { 6695 if (sa6_recoverscope(sin6) != 0) 6696 /* 6697 * 6698 * bad 6699 * 6700 * li 6701 * nk 6702 * 6703 * loc 6704 * al 6705 * 6706 * add 6707 * re 6708 * ss 6709 * */ 6710 continue; 6711 } 6712 } 6713 if ((site_scope == 0) && 6714 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 6715 continue; 6716 } 6717 /* count this one */ 6718 count++; 6719 } 6720 break; 6721 #endif 6722 default: 6723 /* TSNH */ 6724 break; 6725 } 6726 } 6727 } 6728 } else { 6729 /* 6730 * subset bound case 6731 */ 6732 struct sctp_laddr *laddr; 6733 6734 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, 6735 sctp_nxt_addr) { 6736 if (sctp_is_addr_restricted(stcb, laddr->ifa)) { 6737 continue; 6738 } 6739 /* count this one */ 6740 count++; 6741 } 6742 } 6743 SCTP_IPI_ADDR_RUNLOCK(); 6744 return (count); 6745 } 6746 6747 #if defined(SCTP_LOCAL_TRACE_BUF) 6748 6749 void 6750 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f) 6751 { 6752 uint32_t saveindex, newindex; 6753 6754 do { 6755 saveindex = SCTP_BASE_SYSCTL(sctp_log).index; 6756 if (saveindex >= SCTP_MAX_LOGGING_SIZE) { 6757 newindex = 1; 6758 } else { 6759 newindex = saveindex + 1; 6760 } 6761 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0); 6762 if 
(saveindex >= SCTP_MAX_LOGGING_SIZE) { 6763 saveindex = 0; 6764 } 6765 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT; 6766 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys; 6767 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a; 6768 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b; 6769 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c; 6770 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d; 6771 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e; 6772 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f; 6773 } 6774 6775 #endif 6776 static void 6777 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored) 6778 { 6779 struct ip *iph; 6780 6781 #ifdef INET6 6782 struct ip6_hdr *ip6; 6783 6784 #endif 6785 struct mbuf *sp, *last; 6786 struct udphdr *uhdr; 6787 uint16_t port; 6788 6789 if ((m->m_flags & M_PKTHDR) == 0) { 6790 /* Can't handle one that is not a pkt hdr */ 6791 goto out; 6792 } 6793 /* Pull the src port */ 6794 iph = mtod(m, struct ip *); 6795 uhdr = (struct udphdr *)((caddr_t)iph + off); 6796 port = uhdr->uh_sport; 6797 /* 6798 * Split out the mbuf chain. Leave the IP header in m, place the 6799 * rest in the sp. 
6800 */ 6801 sp = m_split(m, off, M_NOWAIT); 6802 if (sp == NULL) { 6803 /* Gak, drop packet, we can't do a split */ 6804 goto out; 6805 } 6806 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) { 6807 /* Gak, packet can't have an SCTP header in it - too small */ 6808 m_freem(sp); 6809 goto out; 6810 } 6811 /* Now pull up the UDP header and SCTP header together */ 6812 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr)); 6813 if (sp == NULL) { 6814 /* Gak pullup failed */ 6815 goto out; 6816 } 6817 /* Trim out the UDP header */ 6818 m_adj(sp, sizeof(struct udphdr)); 6819 6820 /* Now reconstruct the mbuf chain */ 6821 for (last = m; last->m_next; last = last->m_next); 6822 last->m_next = sp; 6823 m->m_pkthdr.len += sp->m_pkthdr.len; 6824 iph = mtod(m, struct ip *); 6825 switch (iph->ip_v) { 6826 #ifdef INET 6827 case IPVERSION: 6828 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); 6829 sctp_input_with_port(m, off, port); 6830 break; 6831 #endif 6832 #ifdef INET6 6833 case IPV6_VERSION >> 4: 6834 ip6 = mtod(m, struct ip6_hdr *); 6835 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr)); 6836 sctp6_input_with_port(&m, &off, port); 6837 break; 6838 #endif 6839 default: 6840 goto out; 6841 break; 6842 } 6843 return; 6844 out: 6845 m_freem(m); 6846 } 6847 6848 void 6849 sctp_over_udp_stop(void) 6850 { 6851 /* 6852 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 6853 * for writting! 
6854 */ 6855 #ifdef INET 6856 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 6857 soclose(SCTP_BASE_INFO(udp4_tun_socket)); 6858 SCTP_BASE_INFO(udp4_tun_socket) = NULL; 6859 } 6860 #endif 6861 #ifdef INET6 6862 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 6863 soclose(SCTP_BASE_INFO(udp6_tun_socket)); 6864 SCTP_BASE_INFO(udp6_tun_socket) = NULL; 6865 } 6866 #endif 6867 } 6868 6869 int 6870 sctp_over_udp_start(void) 6871 { 6872 uint16_t port; 6873 int ret; 6874 6875 #ifdef INET 6876 struct sockaddr_in sin; 6877 6878 #endif 6879 #ifdef INET6 6880 struct sockaddr_in6 sin6; 6881 6882 #endif 6883 /* 6884 * This function assumes sysctl caller holds sctp_sysctl_info_lock() 6885 * for writting! 6886 */ 6887 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); 6888 if (ntohs(port) == 0) { 6889 /* Must have a port set */ 6890 return (EINVAL); 6891 } 6892 #ifdef INET 6893 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) { 6894 /* Already running -- must stop first */ 6895 return (EALREADY); 6896 } 6897 #endif 6898 #ifdef INET6 6899 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) { 6900 /* Already running -- must stop first */ 6901 return (EALREADY); 6902 } 6903 #endif 6904 #ifdef INET 6905 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket), 6906 SOCK_DGRAM, IPPROTO_UDP, 6907 curthread->td_ucred, curthread))) { 6908 sctp_over_udp_stop(); 6909 return (ret); 6910 } 6911 /* Call the special UDP hook. */ 6912 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), 6913 sctp_recv_udp_tunneled_packet))) { 6914 sctp_over_udp_stop(); 6915 return (ret); 6916 } 6917 /* Ok, we have a socket, bind it to the port. 
*/ 6918 memset(&sin, 0, sizeof(struct sockaddr_in)); 6919 sin.sin_len = sizeof(struct sockaddr_in); 6920 sin.sin_family = AF_INET; 6921 sin.sin_port = htons(port); 6922 if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket), 6923 (struct sockaddr *)&sin, curthread))) { 6924 sctp_over_udp_stop(); 6925 return (ret); 6926 } 6927 #endif 6928 #ifdef INET6 6929 if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket), 6930 SOCK_DGRAM, IPPROTO_UDP, 6931 curthread->td_ucred, curthread))) { 6932 sctp_over_udp_stop(); 6933 return (ret); 6934 } 6935 /* Call the special UDP hook. */ 6936 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket), 6937 sctp_recv_udp_tunneled_packet))) { 6938 sctp_over_udp_stop(); 6939 return (ret); 6940 } 6941 /* Ok, we have a socket, bind it to the port. */ 6942 memset(&sin6, 0, sizeof(struct sockaddr_in6)); 6943 sin6.sin6_len = sizeof(struct sockaddr_in6); 6944 sin6.sin6_family = AF_INET6; 6945 sin6.sin6_port = htons(port); 6946 if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket), 6947 (struct sockaddr *)&sin6, curthread))) { 6948 sctp_over_udp_stop(); 6949 return (ret); 6950 } 6951 #endif 6952 return (0); 6953 } 6954