/*-
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <netinet/sctp_pcb.h> 38 #include <netinet/sctputil.h> 39 #include <netinet/sctp_var.h> 40 #include <netinet/sctp_sysctl.h> 41 #ifdef INET6 42 #endif 43 #include <netinet/sctp_header.h> 44 #include <netinet/sctp_output.h> 45 #include <netinet/sctp_uio.h> 46 #include <netinet/sctp_timer.h> 47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */ 48 #include <netinet/sctp_auth.h> 49 #include <netinet/sctp_asconf.h> 50 #include <netinet/sctp_bsd_addr.h> 51 52 53 #ifndef KTR_SCTP 54 #define KTR_SCTP KTR_SUBSYS 55 #endif 56 57 extern struct sctp_cc_functions sctp_cc_functions[]; 58 extern struct sctp_ss_functions sctp_ss_functions[]; 59 60 void 61 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr) 62 { 63 struct sctp_cwnd_log sctp_clog; 64 65 sctp_clog.x.sb.stcb = stcb; 66 sctp_clog.x.sb.so_sbcc = sb->sb_cc; 67 if (stcb) 68 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc; 69 else 70 sctp_clog.x.sb.stcb_sbcc = 0; 71 sctp_clog.x.sb.incr = incr; 72 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 73 SCTP_LOG_EVENT_SB, 74 from, 75 sctp_clog.x.misc.log1, 76 sctp_clog.x.misc.log2, 77 sctp_clog.x.misc.log3, 78 sctp_clog.x.misc.log4); 79 } 80 81 void 82 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc) 83 { 84 struct sctp_cwnd_log sctp_clog; 85 86 sctp_clog.x.close.inp = (void *)inp; 87 sctp_clog.x.close.sctp_flags = inp->sctp_flags; 88 if (stcb) { 89 sctp_clog.x.close.stcb = (void *)stcb; 90 sctp_clog.x.close.state = (uint16_t) stcb->asoc.state; 91 } else { 92 sctp_clog.x.close.stcb = 0; 93 sctp_clog.x.close.state = 0; 94 } 95 sctp_clog.x.close.loc = loc; 96 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 97 SCTP_LOG_EVENT_CLOSE, 98 0, 99 sctp_clog.x.misc.log1, 100 sctp_clog.x.misc.log2, 101 sctp_clog.x.misc.log3, 102 sctp_clog.x.misc.log4); 103 } 104 105 void 106 rto_logging(struct sctp_nets *net, int from) 107 { 108 
struct sctp_cwnd_log sctp_clog; 109 110 memset(&sctp_clog, 0, sizeof(sctp_clog)); 111 sctp_clog.x.rto.net = (void *)net; 112 sctp_clog.x.rto.rtt = net->rtt / 1000; 113 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 114 SCTP_LOG_EVENT_RTT, 115 from, 116 sctp_clog.x.misc.log1, 117 sctp_clog.x.misc.log2, 118 sctp_clog.x.misc.log3, 119 sctp_clog.x.misc.log4); 120 } 121 122 void 123 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from) 124 { 125 struct sctp_cwnd_log sctp_clog; 126 127 sctp_clog.x.strlog.stcb = stcb; 128 sctp_clog.x.strlog.n_tsn = tsn; 129 sctp_clog.x.strlog.n_sseq = sseq; 130 sctp_clog.x.strlog.e_tsn = 0; 131 sctp_clog.x.strlog.e_sseq = 0; 132 sctp_clog.x.strlog.strm = stream; 133 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 134 SCTP_LOG_EVENT_STRM, 135 from, 136 sctp_clog.x.misc.log1, 137 sctp_clog.x.misc.log2, 138 sctp_clog.x.misc.log3, 139 sctp_clog.x.misc.log4); 140 } 141 142 void 143 sctp_log_nagle_event(struct sctp_tcb *stcb, int action) 144 { 145 struct sctp_cwnd_log sctp_clog; 146 147 sctp_clog.x.nagle.stcb = (void *)stcb; 148 sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight; 149 sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size; 150 sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue; 151 sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count; 152 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 153 SCTP_LOG_EVENT_NAGLE, 154 action, 155 sctp_clog.x.misc.log1, 156 sctp_clog.x.misc.log2, 157 sctp_clog.x.misc.log3, 158 sctp_clog.x.misc.log4); 159 } 160 161 void 162 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from) 163 { 164 struct sctp_cwnd_log sctp_clog; 165 166 sctp_clog.x.sack.cumack = cumack; 167 sctp_clog.x.sack.oldcumack = old_cumack; 168 sctp_clog.x.sack.tsn = tsn; 169 sctp_clog.x.sack.numGaps = gaps; 170 sctp_clog.x.sack.numDups = dups; 171 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 
172 SCTP_LOG_EVENT_SACK, 173 from, 174 sctp_clog.x.misc.log1, 175 sctp_clog.x.misc.log2, 176 sctp_clog.x.misc.log3, 177 sctp_clog.x.misc.log4); 178 } 179 180 void 181 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from) 182 { 183 struct sctp_cwnd_log sctp_clog; 184 185 memset(&sctp_clog, 0, sizeof(sctp_clog)); 186 sctp_clog.x.map.base = map; 187 sctp_clog.x.map.cum = cum; 188 sctp_clog.x.map.high = high; 189 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 190 SCTP_LOG_EVENT_MAP, 191 from, 192 sctp_clog.x.misc.log1, 193 sctp_clog.x.misc.log2, 194 sctp_clog.x.misc.log3, 195 sctp_clog.x.misc.log4); 196 } 197 198 void 199 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from) 200 { 201 struct sctp_cwnd_log sctp_clog; 202 203 memset(&sctp_clog, 0, sizeof(sctp_clog)); 204 sctp_clog.x.fr.largest_tsn = biggest_tsn; 205 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn; 206 sctp_clog.x.fr.tsn = tsn; 207 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 208 SCTP_LOG_EVENT_FR, 209 from, 210 sctp_clog.x.misc.log1, 211 sctp_clog.x.misc.log2, 212 sctp_clog.x.misc.log3, 213 sctp_clog.x.misc.log4); 214 } 215 216 void 217 sctp_log_mb(struct mbuf *m, int from) 218 { 219 struct sctp_cwnd_log sctp_clog; 220 221 sctp_clog.x.mb.mp = m; 222 sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m)); 223 sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m)); 224 sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0); 225 if (SCTP_BUF_IS_EXTENDED(m)) { 226 sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m); 227 sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m)); 228 } else { 229 sctp_clog.x.mb.ext = 0; 230 sctp_clog.x.mb.refcnt = 0; 231 } 232 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 233 SCTP_LOG_EVENT_MBUF, 234 from, 235 sctp_clog.x.misc.log1, 236 sctp_clog.x.misc.log2, 237 sctp_clog.x.misc.log3, 238 sctp_clog.x.misc.log4); 239 } 240 241 void 242 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from) 243 { 244 
struct sctp_cwnd_log sctp_clog; 245 246 if (control == NULL) { 247 SCTP_PRINTF("Gak log of NULL?\n"); 248 return; 249 } 250 sctp_clog.x.strlog.stcb = control->stcb; 251 sctp_clog.x.strlog.n_tsn = control->sinfo_tsn; 252 sctp_clog.x.strlog.n_sseq = control->sinfo_ssn; 253 sctp_clog.x.strlog.strm = control->sinfo_stream; 254 if (poschk != NULL) { 255 sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn; 256 sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn; 257 } else { 258 sctp_clog.x.strlog.e_tsn = 0; 259 sctp_clog.x.strlog.e_sseq = 0; 260 } 261 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 262 SCTP_LOG_EVENT_STRM, 263 from, 264 sctp_clog.x.misc.log1, 265 sctp_clog.x.misc.log2, 266 sctp_clog.x.misc.log3, 267 sctp_clog.x.misc.log4); 268 } 269 270 void 271 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from) 272 { 273 struct sctp_cwnd_log sctp_clog; 274 275 sctp_clog.x.cwnd.net = net; 276 if (stcb->asoc.send_queue_cnt > 255) 277 sctp_clog.x.cwnd.cnt_in_send = 255; 278 else 279 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 280 if (stcb->asoc.stream_queue_cnt > 255) 281 sctp_clog.x.cwnd.cnt_in_str = 255; 282 else 283 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 284 285 if (net) { 286 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd; 287 sctp_clog.x.cwnd.inflight = net->flight_size; 288 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack; 289 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack; 290 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack; 291 } 292 if (SCTP_CWNDLOG_PRESEND == from) { 293 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd; 294 } 295 sctp_clog.x.cwnd.cwnd_augment = augment; 296 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 297 SCTP_LOG_EVENT_CWND, 298 from, 299 sctp_clog.x.misc.log1, 300 sctp_clog.x.misc.log2, 301 sctp_clog.x.misc.log3, 302 sctp_clog.x.misc.log4); 303 } 304 305 void 306 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from) 307 { 
308 struct sctp_cwnd_log sctp_clog; 309 310 memset(&sctp_clog, 0, sizeof(sctp_clog)); 311 if (inp) { 312 sctp_clog.x.lock.sock = (void *)inp->sctp_socket; 313 314 } else { 315 sctp_clog.x.lock.sock = (void *)NULL; 316 } 317 sctp_clog.x.lock.inp = (void *)inp; 318 if (stcb) { 319 sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx); 320 } else { 321 sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN; 322 } 323 if (inp) { 324 sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx); 325 sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx); 326 } else { 327 sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN; 328 sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN; 329 } 330 sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx)); 331 if (inp && (inp->sctp_socket)) { 332 sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 333 sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 334 sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx)); 335 } else { 336 sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN; 337 sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN; 338 sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN; 339 } 340 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 341 SCTP_LOG_LOCK_EVENT, 342 from, 343 sctp_clog.x.misc.log1, 344 sctp_clog.x.misc.log2, 345 sctp_clog.x.misc.log3, 346 sctp_clog.x.misc.log4); 347 } 348 349 void 350 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from) 351 { 352 struct sctp_cwnd_log sctp_clog; 353 354 memset(&sctp_clog, 0, sizeof(sctp_clog)); 355 sctp_clog.x.cwnd.net = net; 356 sctp_clog.x.cwnd.cwnd_new_value = error; 357 sctp_clog.x.cwnd.inflight = net->flight_size; 358 sctp_clog.x.cwnd.cwnd_augment = burst; 359 if (stcb->asoc.send_queue_cnt > 255) 360 sctp_clog.x.cwnd.cnt_in_send = 255; 361 else 362 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 363 if (stcb->asoc.stream_queue_cnt > 255) 364 
sctp_clog.x.cwnd.cnt_in_str = 255; 365 else 366 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 367 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 368 SCTP_LOG_EVENT_MAXBURST, 369 from, 370 sctp_clog.x.misc.log1, 371 sctp_clog.x.misc.log2, 372 sctp_clog.x.misc.log3, 373 sctp_clog.x.misc.log4); 374 } 375 376 void 377 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead) 378 { 379 struct sctp_cwnd_log sctp_clog; 380 381 sctp_clog.x.rwnd.rwnd = peers_rwnd; 382 sctp_clog.x.rwnd.send_size = snd_size; 383 sctp_clog.x.rwnd.overhead = overhead; 384 sctp_clog.x.rwnd.new_rwnd = 0; 385 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 386 SCTP_LOG_EVENT_RWND, 387 from, 388 sctp_clog.x.misc.log1, 389 sctp_clog.x.misc.log2, 390 sctp_clog.x.misc.log3, 391 sctp_clog.x.misc.log4); 392 } 393 394 void 395 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval) 396 { 397 struct sctp_cwnd_log sctp_clog; 398 399 sctp_clog.x.rwnd.rwnd = peers_rwnd; 400 sctp_clog.x.rwnd.send_size = flight_size; 401 sctp_clog.x.rwnd.overhead = overhead; 402 sctp_clog.x.rwnd.new_rwnd = a_rwndval; 403 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 404 SCTP_LOG_EVENT_RWND, 405 from, 406 sctp_clog.x.misc.log1, 407 sctp_clog.x.misc.log2, 408 sctp_clog.x.misc.log3, 409 sctp_clog.x.misc.log4); 410 } 411 412 void 413 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt) 414 { 415 struct sctp_cwnd_log sctp_clog; 416 417 sctp_clog.x.mbcnt.total_queue_size = total_oq; 418 sctp_clog.x.mbcnt.size_change = book; 419 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q; 420 sctp_clog.x.mbcnt.mbcnt_change = mbcnt; 421 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 422 SCTP_LOG_EVENT_MBCNT, 423 from, 424 sctp_clog.x.misc.log1, 425 sctp_clog.x.misc.log2, 426 sctp_clog.x.misc.log3, 427 sctp_clog.x.misc.log4); 428 } 429 430 void 431 sctp_misc_ints(uint8_t from, uint32_t a, 
uint32_t b, uint32_t c, uint32_t d) 432 { 433 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 434 SCTP_LOG_MISC_EVENT, 435 from, 436 a, b, c, d); 437 } 438 439 void 440 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from) 441 { 442 struct sctp_cwnd_log sctp_clog; 443 444 sctp_clog.x.wake.stcb = (void *)stcb; 445 sctp_clog.x.wake.wake_cnt = wake_cnt; 446 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count; 447 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt; 448 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt; 449 450 if (stcb->asoc.stream_queue_cnt < 0xff) 451 sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt; 452 else 453 sctp_clog.x.wake.stream_qcnt = 0xff; 454 455 if (stcb->asoc.chunks_on_out_queue < 0xff) 456 sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue; 457 else 458 sctp_clog.x.wake.chunks_on_oque = 0xff; 459 460 sctp_clog.x.wake.sctpflags = 0; 461 /* set in the defered mode stuff */ 462 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) 463 sctp_clog.x.wake.sctpflags |= 1; 464 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) 465 sctp_clog.x.wake.sctpflags |= 2; 466 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) 467 sctp_clog.x.wake.sctpflags |= 4; 468 /* what about the sb */ 469 if (stcb->sctp_socket) { 470 struct socket *so = stcb->sctp_socket; 471 472 sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff)); 473 } else { 474 sctp_clog.x.wake.sbflags = 0xff; 475 } 476 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 477 SCTP_LOG_EVENT_WAKE, 478 from, 479 sctp_clog.x.misc.log1, 480 sctp_clog.x.misc.log2, 481 sctp_clog.x.misc.log3, 482 sctp_clog.x.misc.log4); 483 } 484 485 void 486 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen) 487 { 488 struct sctp_cwnd_log sctp_clog; 489 490 sctp_clog.x.blk.onsb = asoc->total_output_queue_size; 491 sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt); 
492 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd; 493 sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt; 494 sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue; 495 sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024); 496 sctp_clog.x.blk.sndlen = sendlen; 497 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x", 498 SCTP_LOG_EVENT_BLOCK, 499 from, 500 sctp_clog.x.misc.log1, 501 sctp_clog.x.misc.log2, 502 sctp_clog.x.misc.log3, 503 sctp_clog.x.misc.log4); 504 } 505 506 int 507 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED) 508 { 509 /* May need to fix this if ktrdump does not work */ 510 return (0); 511 } 512 513 #ifdef SCTP_AUDITING_ENABLED 514 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2]; 515 static int sctp_audit_indx = 0; 516 517 static 518 void 519 sctp_print_audit_report(void) 520 { 521 int i; 522 int cnt; 523 524 cnt = 0; 525 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) { 526 if ((sctp_audit_data[i][0] == 0xe0) && 527 (sctp_audit_data[i][1] == 0x01)) { 528 cnt = 0; 529 SCTP_PRINTF("\n"); 530 } else if (sctp_audit_data[i][0] == 0xf0) { 531 cnt = 0; 532 SCTP_PRINTF("\n"); 533 } else if ((sctp_audit_data[i][0] == 0xc0) && 534 (sctp_audit_data[i][1] == 0x01)) { 535 SCTP_PRINTF("\n"); 536 cnt = 0; 537 } 538 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 539 (uint32_t) sctp_audit_data[i][1]); 540 cnt++; 541 if ((cnt % 14) == 0) 542 SCTP_PRINTF("\n"); 543 } 544 for (i = 0; i < sctp_audit_indx; i++) { 545 if ((sctp_audit_data[i][0] == 0xe0) && 546 (sctp_audit_data[i][1] == 0x01)) { 547 cnt = 0; 548 SCTP_PRINTF("\n"); 549 } else if (sctp_audit_data[i][0] == 0xf0) { 550 cnt = 0; 551 SCTP_PRINTF("\n"); 552 } else if ((sctp_audit_data[i][0] == 0xc0) && 553 (sctp_audit_data[i][1] == 0x01)) { 554 SCTP_PRINTF("\n"); 555 cnt = 0; 556 } 557 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 558 (uint32_t) sctp_audit_data[i][1]); 559 cnt++; 560 if ((cnt % 14) == 0) 561 
SCTP_PRINTF("\n"); 562 } 563 SCTP_PRINTF("\n"); 564 } 565 566 void 567 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 568 struct sctp_nets *net) 569 { 570 int resend_cnt, tot_out, rep, tot_book_cnt; 571 struct sctp_nets *lnet; 572 struct sctp_tmit_chunk *chk; 573 574 sctp_audit_data[sctp_audit_indx][0] = 0xAA; 575 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from; 576 sctp_audit_indx++; 577 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 578 sctp_audit_indx = 0; 579 } 580 if (inp == NULL) { 581 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 582 sctp_audit_data[sctp_audit_indx][1] = 0x01; 583 sctp_audit_indx++; 584 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 585 sctp_audit_indx = 0; 586 } 587 return; 588 } 589 if (stcb == NULL) { 590 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 591 sctp_audit_data[sctp_audit_indx][1] = 0x02; 592 sctp_audit_indx++; 593 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 594 sctp_audit_indx = 0; 595 } 596 return; 597 } 598 sctp_audit_data[sctp_audit_indx][0] = 0xA1; 599 sctp_audit_data[sctp_audit_indx][1] = 600 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 601 sctp_audit_indx++; 602 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 603 sctp_audit_indx = 0; 604 } 605 rep = 0; 606 tot_book_cnt = 0; 607 resend_cnt = tot_out = 0; 608 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 609 if (chk->sent == SCTP_DATAGRAM_RESEND) { 610 resend_cnt++; 611 } else if (chk->sent < SCTP_DATAGRAM_RESEND) { 612 tot_out += chk->book_size; 613 tot_book_cnt++; 614 } 615 } 616 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) { 617 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 618 sctp_audit_data[sctp_audit_indx][1] = 0xA1; 619 sctp_audit_indx++; 620 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 621 sctp_audit_indx = 0; 622 } 623 SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n", 624 resend_cnt, stcb->asoc.sent_queue_retran_cnt); 625 rep = 1; 626 stcb->asoc.sent_queue_retran_cnt = resend_cnt; 627 sctp_audit_data[sctp_audit_indx][0] = 0xA2; 628 
sctp_audit_data[sctp_audit_indx][1] = 629 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 630 sctp_audit_indx++; 631 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 632 sctp_audit_indx = 0; 633 } 634 } 635 if (tot_out != stcb->asoc.total_flight) { 636 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 637 sctp_audit_data[sctp_audit_indx][1] = 0xA2; 638 sctp_audit_indx++; 639 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 640 sctp_audit_indx = 0; 641 } 642 rep = 1; 643 SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out, 644 (int)stcb->asoc.total_flight); 645 stcb->asoc.total_flight = tot_out; 646 } 647 if (tot_book_cnt != stcb->asoc.total_flight_count) { 648 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 649 sctp_audit_data[sctp_audit_indx][1] = 0xA5; 650 sctp_audit_indx++; 651 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 652 sctp_audit_indx = 0; 653 } 654 rep = 1; 655 SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt); 656 657 stcb->asoc.total_flight_count = tot_book_cnt; 658 } 659 tot_out = 0; 660 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 661 tot_out += lnet->flight_size; 662 } 663 if (tot_out != stcb->asoc.total_flight) { 664 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 665 sctp_audit_data[sctp_audit_indx][1] = 0xA3; 666 sctp_audit_indx++; 667 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 668 sctp_audit_indx = 0; 669 } 670 rep = 1; 671 SCTP_PRINTF("real flight:%d net total was %d\n", 672 stcb->asoc.total_flight, tot_out); 673 /* now corrective action */ 674 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 675 676 tot_out = 0; 677 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 678 if ((chk->whoTo == lnet) && 679 (chk->sent < SCTP_DATAGRAM_RESEND)) { 680 tot_out += chk->book_size; 681 } 682 } 683 if (lnet->flight_size != tot_out) { 684 SCTP_PRINTF("net:%p flight was %d corrected to %d\n", 685 (void *)lnet, lnet->flight_size, 686 tot_out); 687 lnet->flight_size = tot_out; 688 } 689 } 690 } 691 if (rep) { 692 sctp_print_audit_report(); 693 } 694 } 695 696 void 697 
sctp_audit_log(uint8_t ev, uint8_t fd) 698 { 699 700 sctp_audit_data[sctp_audit_indx][0] = ev; 701 sctp_audit_data[sctp_audit_indx][1] = fd; 702 sctp_audit_indx++; 703 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 704 sctp_audit_indx = 0; 705 } 706 } 707 708 #endif 709 710 /* 711 * sctp_stop_timers_for_shutdown() should be called 712 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT 713 * state to make sure that all timers are stopped. 714 */ 715 void 716 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb) 717 { 718 struct sctp_association *asoc; 719 struct sctp_nets *net; 720 721 asoc = &stcb->asoc; 722 723 (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer); 724 (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); 725 (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer); 726 (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer); 727 (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer); 728 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 729 (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer); 730 (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer); 731 } 732 } 733 734 /* 735 * a list of sizes based on typical mtu's, used only if next hop size not 736 * returned. 737 */ 738 static uint32_t sctp_mtu_sizes[] = { 739 68, 740 296, 741 508, 742 512, 743 544, 744 576, 745 1006, 746 1492, 747 1500, 748 1536, 749 2002, 750 2048, 751 4352, 752 4464, 753 8166, 754 17914, 755 32000, 756 65535 757 }; 758 759 /* 760 * Return the largest MTU smaller than val. If there is no 761 * entry, just return val. 762 */ 763 uint32_t 764 sctp_get_prev_mtu(uint32_t val) 765 { 766 uint32_t i; 767 768 if (val <= sctp_mtu_sizes[0]) { 769 return (val); 770 } 771 for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) { 772 if (val <= sctp_mtu_sizes[i]) { 773 break; 774 } 775 } 776 return (sctp_mtu_sizes[i - 1]); 777 } 778 779 /* 780 * Return the smallest MTU larger than val. If there is no 781 * entry, just return val. 
782 */ 783 uint32_t 784 sctp_get_next_mtu(uint32_t val) 785 { 786 /* select another MTU that is just bigger than this one */ 787 uint32_t i; 788 789 for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) { 790 if (val < sctp_mtu_sizes[i]) { 791 return (sctp_mtu_sizes[i]); 792 } 793 } 794 return (val); 795 } 796 797 void 798 sctp_fill_random_store(struct sctp_pcb *m) 799 { 800 /* 801 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and 802 * our counter. The result becomes our good random numbers and we 803 * then setup to give these out. Note that we do no locking to 804 * protect this. This is ok, since if competing folks call this we 805 * will get more gobbled gook in the random store which is what we 806 * want. There is a danger that two guys will use the same random 807 * numbers, but thats ok too since that is random as well :-> 808 */ 809 m->store_at = 0; 810 (void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers, 811 sizeof(m->random_numbers), (uint8_t *) & m->random_counter, 812 sizeof(m->random_counter), (uint8_t *) m->random_store); 813 m->random_counter++; 814 } 815 816 uint32_t 817 sctp_select_initial_TSN(struct sctp_pcb *inp) 818 { 819 /* 820 * A true implementation should use random selection process to get 821 * the initial stream sequence number, using RFC1750 as a good 822 * guideline 823 */ 824 uint32_t x, *xp; 825 uint8_t *p; 826 int store_at, new_store; 827 828 if (inp->initial_sequence_debug != 0) { 829 uint32_t ret; 830 831 ret = inp->initial_sequence_debug; 832 inp->initial_sequence_debug++; 833 return (ret); 834 } 835 retry: 836 store_at = inp->store_at; 837 new_store = store_at + sizeof(uint32_t); 838 if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) { 839 new_store = 0; 840 } 841 if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) { 842 goto retry; 843 } 844 if (new_store == 0) { 845 /* Refill the random store */ 846 sctp_fill_random_store(inp); 847 } 848 p = &inp->random_store[store_at]; 849 xp = (uint32_t 
*) p; 850 x = *xp; 851 return (x); 852 } 853 854 uint32_t 855 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check) 856 { 857 uint32_t x; 858 struct timeval now; 859 860 if (check) { 861 (void)SCTP_GETTIME_TIMEVAL(&now); 862 } 863 for (;;) { 864 x = sctp_select_initial_TSN(&inp->sctp_ep); 865 if (x == 0) { 866 /* we never use 0 */ 867 continue; 868 } 869 if (!check || sctp_is_vtag_good(x, lport, rport, &now)) { 870 break; 871 } 872 } 873 return (x); 874 } 875 876 int 877 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb, 878 uint32_t override_tag, uint32_t vrf_id) 879 { 880 struct sctp_association *asoc; 881 882 /* 883 * Anything set to zero is taken care of by the allocation routine's 884 * bzero 885 */ 886 887 /* 888 * Up front select what scoping to apply on addresses I tell my peer 889 * Not sure what to do with these right now, we will need to come up 890 * with a way to set them. We may need to pass them through from the 891 * caller in the sctp_aloc_assoc() function. 892 */ 893 int i; 894 895 asoc = &stcb->asoc; 896 /* init all variables to a known value. 
*/ 897 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE); 898 asoc->max_burst = m->sctp_ep.max_burst; 899 asoc->fr_max_burst = m->sctp_ep.fr_max_burst; 900 asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 901 asoc->cookie_life = m->sctp_ep.def_cookie_life; 902 asoc->sctp_cmt_on_off = m->sctp_cmt_on_off; 903 asoc->ecn_allowed = m->sctp_ecn_enable; 904 asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off); 905 asoc->sctp_cmt_pf = (uint8_t) 0; 906 asoc->sctp_frag_point = m->sctp_frag_point; 907 asoc->sctp_features = m->sctp_features; 908 asoc->default_dscp = m->sctp_ep.default_dscp; 909 #ifdef INET6 910 if (m->sctp_ep.default_flowlabel) { 911 asoc->default_flowlabel = m->sctp_ep.default_flowlabel; 912 } else { 913 if (m->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) { 914 asoc->default_flowlabel = sctp_select_initial_TSN(&m->sctp_ep); 915 asoc->default_flowlabel &= 0x000fffff; 916 asoc->default_flowlabel |= 0x80000000; 917 } else { 918 asoc->default_flowlabel = 0; 919 } 920 } 921 #endif 922 asoc->sb_send_resv = 0; 923 if (override_tag) { 924 asoc->my_vtag = override_tag; 925 } else { 926 asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1); 927 } 928 /* Get the nonce tags */ 929 asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 930 asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0); 931 asoc->vrf_id = vrf_id; 932 933 #ifdef SCTP_ASOCLOG_OF_TSNS 934 asoc->tsn_in_at = 0; 935 asoc->tsn_out_at = 0; 936 asoc->tsn_in_wrapped = 0; 937 asoc->tsn_out_wrapped = 0; 938 asoc->cumack_log_at = 0; 939 asoc->cumack_log_atsnt = 0; 940 #endif 941 #ifdef SCTP_FS_SPEC_LOG 942 asoc->fs_index = 0; 943 #endif 944 asoc->refcnt = 0; 945 asoc->assoc_up_sent = 0; 946 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq = 947 sctp_select_initial_TSN(&m->sctp_ep); 948 asoc->asconf_seq_out_acked = 
asoc->asconf_seq_out - 1; 949 /* we are optimisitic here */ 950 asoc->peer_supports_pktdrop = 1; 951 asoc->peer_supports_nat = 0; 952 asoc->sent_queue_retran_cnt = 0; 953 954 /* for CMT */ 955 asoc->last_net_cmt_send_started = NULL; 956 957 /* This will need to be adjusted */ 958 asoc->last_acked_seq = asoc->init_seq_number - 1; 959 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 960 asoc->asconf_seq_in = asoc->last_acked_seq; 961 962 /* here we are different, we hold the next one we expect */ 963 asoc->str_reset_seq_in = asoc->last_acked_seq + 1; 964 965 asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max; 966 asoc->initial_rto = m->sctp_ep.initial_rto; 967 968 asoc->max_init_times = m->sctp_ep.max_init_times; 969 asoc->max_send_times = m->sctp_ep.max_send_times; 970 asoc->def_net_failure = m->sctp_ep.def_net_failure; 971 asoc->def_net_pf_threshold = m->sctp_ep.def_net_pf_threshold; 972 asoc->free_chunk_cnt = 0; 973 974 asoc->iam_blocking = 0; 975 asoc->context = m->sctp_context; 976 asoc->local_strreset_support = m->local_strreset_support; 977 asoc->def_send = m->def_send; 978 asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 979 asoc->sack_freq = m->sctp_ep.sctp_sack_freq; 980 asoc->pr_sctp_cnt = 0; 981 asoc->total_output_queue_size = 0; 982 983 if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 984 struct in6pcb *inp6; 985 986 /* Its a V6 socket */ 987 inp6 = (struct in6pcb *)m; 988 asoc->ipv6_addr_legal = 1; 989 /* Now look at the binding flag to see if V4 will be legal */ 990 if (SCTP_IPV6_V6ONLY(inp6) == 0) { 991 asoc->ipv4_addr_legal = 1; 992 } else { 993 /* V4 addresses are NOT legal on the association */ 994 asoc->ipv4_addr_legal = 0; 995 } 996 } else { 997 /* Its a V4 socket, no - V6 */ 998 asoc->ipv4_addr_legal = 1; 999 asoc->ipv6_addr_legal = 0; 1000 } 1001 1002 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND); 1003 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket); 1004 1005 
asoc->smallest_mtu = m->sctp_frag_point; 1006 asoc->minrto = m->sctp_ep.sctp_minrto; 1007 asoc->maxrto = m->sctp_ep.sctp_maxrto; 1008 1009 asoc->locked_on_sending = NULL; 1010 asoc->stream_locked_on = 0; 1011 asoc->ecn_echo_cnt_onq = 0; 1012 asoc->stream_locked = 0; 1013 1014 asoc->send_sack = 1; 1015 1016 LIST_INIT(&asoc->sctp_restricted_addrs); 1017 1018 TAILQ_INIT(&asoc->nets); 1019 TAILQ_INIT(&asoc->pending_reply_queue); 1020 TAILQ_INIT(&asoc->asconf_ack_sent); 1021 /* Setup to fill the hb random cache at first HB */ 1022 asoc->hb_random_idx = 4; 1023 1024 asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time; 1025 1026 stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module; 1027 stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module]; 1028 1029 stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module; 1030 stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module]; 1031 1032 /* 1033 * Now the stream parameters, here we allocate space for all streams 1034 * that we request by default. 1035 */ 1036 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams = 1037 m->sctp_ep.pre_open_stream_count; 1038 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *, 1039 asoc->streamoutcnt * sizeof(struct sctp_stream_out), 1040 SCTP_M_STRMO); 1041 if (asoc->strmout == NULL) { 1042 /* big trouble no memory */ 1043 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1044 return (ENOMEM); 1045 } 1046 for (i = 0; i < asoc->streamoutcnt; i++) { 1047 /* 1048 * inbound side must be set to 0xffff, also NOTE when we get 1049 * the INIT-ACK back (for INIT sender) we MUST reduce the 1050 * count (streamoutcnt) but first check if we sent to any of 1051 * the upper streams that were dropped (if some were). Those 1052 * that were dropped must be notified to the upper layer as 1053 * failed to send. 
1054 */ 1055 asoc->strmout[i].next_sequence_send = 0x0; 1056 TAILQ_INIT(&asoc->strmout[i].outqueue); 1057 asoc->strmout[i].stream_no = i; 1058 asoc->strmout[i].last_msg_incomplete = 0; 1059 asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL); 1060 } 1061 asoc->ss_functions.sctp_ss_init(stcb, asoc, 0); 1062 1063 /* Now the mapping array */ 1064 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY; 1065 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size, 1066 SCTP_M_MAP); 1067 if (asoc->mapping_array == NULL) { 1068 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1069 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1070 return (ENOMEM); 1071 } 1072 memset(asoc->mapping_array, 0, asoc->mapping_array_size); 1073 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size, 1074 SCTP_M_MAP); 1075 if (asoc->nr_mapping_array == NULL) { 1076 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1077 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1078 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 1079 return (ENOMEM); 1080 } 1081 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size); 1082 1083 /* Now the init of the other outqueues */ 1084 TAILQ_INIT(&asoc->free_chunks); 1085 TAILQ_INIT(&asoc->control_send_queue); 1086 TAILQ_INIT(&asoc->asconf_send_queue); 1087 TAILQ_INIT(&asoc->send_queue); 1088 TAILQ_INIT(&asoc->sent_queue); 1089 TAILQ_INIT(&asoc->reasmqueue); 1090 TAILQ_INIT(&asoc->resetHead); 1091 asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome; 1092 TAILQ_INIT(&asoc->asconf_queue); 1093 /* authentication fields */ 1094 asoc->authinfo.random = NULL; 1095 asoc->authinfo.active_keyid = 0; 1096 asoc->authinfo.assoc_key = NULL; 1097 asoc->authinfo.assoc_keyid = 0; 1098 asoc->authinfo.recv_key = NULL; 1099 asoc->authinfo.recv_keyid = 0; 1100 LIST_INIT(&asoc->shared_keys); 1101 asoc->marked_retrans = 0; 1102 asoc->port = m->sctp_ep.port; 1103 asoc->timoinit = 0; 1104 asoc->timodata = 0; 1105 
asoc->timosack = 0; 1106 asoc->timoshutdown = 0; 1107 asoc->timoheartbeat = 0; 1108 asoc->timocookie = 0; 1109 asoc->timoshutdownack = 0; 1110 (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time); 1111 asoc->discontinuity_time = asoc->start_time; 1112 /* 1113 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and 1114 * freed later when the association is freed. 1115 */ 1116 return (0); 1117 } 1118 1119 void 1120 sctp_print_mapping_array(struct sctp_association *asoc) 1121 { 1122 unsigned int i, limit; 1123 1124 SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n", 1125 asoc->mapping_array_size, 1126 asoc->mapping_array_base_tsn, 1127 asoc->cumulative_tsn, 1128 asoc->highest_tsn_inside_map, 1129 asoc->highest_tsn_inside_nr_map); 1130 for (limit = asoc->mapping_array_size; limit > 1; limit--) { 1131 if (asoc->mapping_array[limit - 1] != 0) { 1132 break; 1133 } 1134 } 1135 SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit); 1136 for (i = 0; i < limit; i++) { 1137 SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n'); 1138 } 1139 if (limit % 16) 1140 SCTP_PRINTF("\n"); 1141 for (limit = asoc->mapping_array_size; limit > 1; limit--) { 1142 if (asoc->nr_mapping_array[limit - 1]) { 1143 break; 1144 } 1145 } 1146 SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit); 1147 for (i = 0; i < limit; i++) { 1148 SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? 
' ' : '\n'); 1149 } 1150 if (limit % 16) 1151 SCTP_PRINTF("\n"); 1152 } 1153 1154 int 1155 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed) 1156 { 1157 /* mapping array needs to grow */ 1158 uint8_t *new_array1, *new_array2; 1159 uint32_t new_size; 1160 1161 new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR); 1162 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP); 1163 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP); 1164 if ((new_array1 == NULL) || (new_array2 == NULL)) { 1165 /* can't get more, forget it */ 1166 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size); 1167 if (new_array1) { 1168 SCTP_FREE(new_array1, SCTP_M_MAP); 1169 } 1170 if (new_array2) { 1171 SCTP_FREE(new_array2, SCTP_M_MAP); 1172 } 1173 return (-1); 1174 } 1175 memset(new_array1, 0, new_size); 1176 memset(new_array2, 0, new_size); 1177 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size); 1178 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size); 1179 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1180 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP); 1181 asoc->mapping_array = new_array1; 1182 asoc->nr_mapping_array = new_array2; 1183 asoc->mapping_array_size = new_size; 1184 return (0); 1185 } 1186 1187 1188 static void 1189 sctp_iterator_work(struct sctp_iterator *it) 1190 { 1191 int iteration_count = 0; 1192 int inp_skip = 0; 1193 int first_in = 1; 1194 struct sctp_inpcb *tinp; 1195 1196 SCTP_INP_INFO_RLOCK(); 1197 SCTP_ITERATOR_LOCK(); 1198 if (it->inp) { 1199 SCTP_INP_RLOCK(it->inp); 1200 SCTP_INP_DECR_REF(it->inp); 1201 } 1202 if (it->inp == NULL) { 1203 /* iterator is complete */ 1204 done_with_iterator: 1205 SCTP_ITERATOR_UNLOCK(); 1206 SCTP_INP_INFO_RUNLOCK(); 1207 if (it->function_atend != NULL) { 1208 (*it->function_atend) (it->pointer, it->val); 1209 } 1210 SCTP_FREE(it, SCTP_M_ITER); 1211 return; 1212 } 1213 select_a_new_ep: 1214 if (first_in) { 1215 first_in = 
0; 1216 } else { 1217 SCTP_INP_RLOCK(it->inp); 1218 } 1219 while (((it->pcb_flags) && 1220 ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) || 1221 ((it->pcb_features) && 1222 ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) { 1223 /* endpoint flags or features don't match, so keep looking */ 1224 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1225 SCTP_INP_RUNLOCK(it->inp); 1226 goto done_with_iterator; 1227 } 1228 tinp = it->inp; 1229 it->inp = LIST_NEXT(it->inp, sctp_list); 1230 SCTP_INP_RUNLOCK(tinp); 1231 if (it->inp == NULL) { 1232 goto done_with_iterator; 1233 } 1234 SCTP_INP_RLOCK(it->inp); 1235 } 1236 /* now go through each assoc which is in the desired state */ 1237 if (it->done_current_ep == 0) { 1238 if (it->function_inp != NULL) 1239 inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val); 1240 it->done_current_ep = 1; 1241 } 1242 if (it->stcb == NULL) { 1243 /* run the per instance function */ 1244 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list); 1245 } 1246 if ((inp_skip) || it->stcb == NULL) { 1247 if (it->function_inp_end != NULL) { 1248 inp_skip = (*it->function_inp_end) (it->inp, 1249 it->pointer, 1250 it->val); 1251 } 1252 SCTP_INP_RUNLOCK(it->inp); 1253 goto no_stcb; 1254 } 1255 while (it->stcb) { 1256 SCTP_TCB_LOCK(it->stcb); 1257 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) { 1258 /* not in the right state... 
keep looking */ 1259 SCTP_TCB_UNLOCK(it->stcb); 1260 goto next_assoc; 1261 } 1262 /* see if we have limited out the iterator loop */ 1263 iteration_count++; 1264 if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) { 1265 /* Pause to let others grab the lock */ 1266 atomic_add_int(&it->stcb->asoc.refcnt, 1); 1267 SCTP_TCB_UNLOCK(it->stcb); 1268 SCTP_INP_INCR_REF(it->inp); 1269 SCTP_INP_RUNLOCK(it->inp); 1270 SCTP_ITERATOR_UNLOCK(); 1271 SCTP_INP_INFO_RUNLOCK(); 1272 SCTP_INP_INFO_RLOCK(); 1273 SCTP_ITERATOR_LOCK(); 1274 if (sctp_it_ctl.iterator_flags) { 1275 /* We won't be staying here */ 1276 SCTP_INP_DECR_REF(it->inp); 1277 atomic_add_int(&it->stcb->asoc.refcnt, -1); 1278 if (sctp_it_ctl.iterator_flags & 1279 SCTP_ITERATOR_STOP_CUR_IT) { 1280 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT; 1281 goto done_with_iterator; 1282 } 1283 if (sctp_it_ctl.iterator_flags & 1284 SCTP_ITERATOR_STOP_CUR_INP) { 1285 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP; 1286 goto no_stcb; 1287 } 1288 /* If we reach here huh? 
*/ 1289 SCTP_PRINTF("Unknown it ctl flag %x\n", 1290 sctp_it_ctl.iterator_flags); 1291 sctp_it_ctl.iterator_flags = 0; 1292 } 1293 SCTP_INP_RLOCK(it->inp); 1294 SCTP_INP_DECR_REF(it->inp); 1295 SCTP_TCB_LOCK(it->stcb); 1296 atomic_add_int(&it->stcb->asoc.refcnt, -1); 1297 iteration_count = 0; 1298 } 1299 /* run function on this one */ 1300 (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val); 1301 1302 /* 1303 * we lie here, it really needs to have its own type but 1304 * first I must verify that this won't effect things :-0 1305 */ 1306 if (it->no_chunk_output == 0) 1307 sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1308 1309 SCTP_TCB_UNLOCK(it->stcb); 1310 next_assoc: 1311 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist); 1312 if (it->stcb == NULL) { 1313 /* Run last function */ 1314 if (it->function_inp_end != NULL) { 1315 inp_skip = (*it->function_inp_end) (it->inp, 1316 it->pointer, 1317 it->val); 1318 } 1319 } 1320 } 1321 SCTP_INP_RUNLOCK(it->inp); 1322 no_stcb: 1323 /* done with all assocs on this endpoint, move on to next endpoint */ 1324 it->done_current_ep = 0; 1325 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1326 it->inp = NULL; 1327 } else { 1328 it->inp = LIST_NEXT(it->inp, sctp_list); 1329 } 1330 if (it->inp == NULL) { 1331 goto done_with_iterator; 1332 } 1333 goto select_a_new_ep; 1334 } 1335 1336 void 1337 sctp_iterator_worker(void) 1338 { 1339 struct sctp_iterator *it, *nit; 1340 1341 /* This function is called with the WQ lock in place */ 1342 1343 sctp_it_ctl.iterator_running = 1; 1344 TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) { 1345 sctp_it_ctl.cur_it = it; 1346 /* now lets work on this one */ 1347 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr); 1348 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1349 CURVNET_SET(it->vn); 1350 sctp_iterator_work(it); 1351 sctp_it_ctl.cur_it = NULL; 1352 CURVNET_RESTORE(); 1353 SCTP_IPI_ITERATOR_WQ_LOCK(); 1354 /* sa_ignore FREED_MEMORY */ 
1355 } 1356 sctp_it_ctl.iterator_running = 0; 1357 return; 1358 } 1359 1360 1361 static void 1362 sctp_handle_addr_wq(void) 1363 { 1364 /* deal with the ADDR wq from the rtsock calls */ 1365 struct sctp_laddr *wi, *nwi; 1366 struct sctp_asconf_iterator *asc; 1367 1368 SCTP_MALLOC(asc, struct sctp_asconf_iterator *, 1369 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT); 1370 if (asc == NULL) { 1371 /* Try later, no memory */ 1372 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 1373 (struct sctp_inpcb *)NULL, 1374 (struct sctp_tcb *)NULL, 1375 (struct sctp_nets *)NULL); 1376 return; 1377 } 1378 LIST_INIT(&asc->list_of_work); 1379 asc->cnt = 0; 1380 1381 SCTP_WQ_ADDR_LOCK(); 1382 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) { 1383 LIST_REMOVE(wi, sctp_nxt_addr); 1384 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr); 1385 asc->cnt++; 1386 } 1387 SCTP_WQ_ADDR_UNLOCK(); 1388 1389 if (asc->cnt == 0) { 1390 SCTP_FREE(asc, SCTP_M_ASC_IT); 1391 } else { 1392 (void)sctp_initiate_iterator(sctp_asconf_iterator_ep, 1393 sctp_asconf_iterator_stcb, 1394 NULL, /* No ep end for boundall */ 1395 SCTP_PCB_FLAGS_BOUNDALL, 1396 SCTP_PCB_ANY_FEATURES, 1397 SCTP_ASOC_ANY_STATE, 1398 (void *)asc, 0, 1399 sctp_asconf_iterator_end, NULL, 0); 1400 } 1401 } 1402 1403 void 1404 sctp_timeout_handler(void *t) 1405 { 1406 struct sctp_inpcb *inp; 1407 struct sctp_tcb *stcb; 1408 struct sctp_nets *net; 1409 struct sctp_timer *tmr; 1410 1411 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1412 struct socket *so; 1413 1414 #endif 1415 int did_output, type; 1416 1417 tmr = (struct sctp_timer *)t; 1418 inp = (struct sctp_inpcb *)tmr->ep; 1419 stcb = (struct sctp_tcb *)tmr->tcb; 1420 net = (struct sctp_nets *)tmr->net; 1421 CURVNET_SET((struct vnet *)tmr->vnet); 1422 did_output = 1; 1423 1424 #ifdef SCTP_AUDITING_ENABLED 1425 sctp_audit_log(0xF0, (uint8_t) tmr->type); 1426 sctp_auditing(3, inp, stcb, net); 1427 #endif 1428 1429 /* sanity checks... 
*/ 1430 if (tmr->self != (void *)tmr) { 1431 /* 1432 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n", 1433 * (void *)tmr); 1434 */ 1435 CURVNET_RESTORE(); 1436 return; 1437 } 1438 tmr->stopped_from = 0xa001; 1439 if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) { 1440 /* 1441 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n", 1442 * tmr->type); 1443 */ 1444 CURVNET_RESTORE(); 1445 return; 1446 } 1447 tmr->stopped_from = 0xa002; 1448 if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) { 1449 CURVNET_RESTORE(); 1450 return; 1451 } 1452 /* if this is an iterator timeout, get the struct and clear inp */ 1453 tmr->stopped_from = 0xa003; 1454 type = tmr->type; 1455 if (inp) { 1456 SCTP_INP_INCR_REF(inp); 1457 if ((inp->sctp_socket == NULL) && 1458 ((tmr->type != SCTP_TIMER_TYPE_INPKILL) && 1459 (tmr->type != SCTP_TIMER_TYPE_INIT) && 1460 (tmr->type != SCTP_TIMER_TYPE_SEND) && 1461 (tmr->type != SCTP_TIMER_TYPE_RECV) && 1462 (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) && 1463 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) && 1464 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) && 1465 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) && 1466 (tmr->type != SCTP_TIMER_TYPE_ASOCKILL)) 1467 ) { 1468 SCTP_INP_DECR_REF(inp); 1469 CURVNET_RESTORE(); 1470 return; 1471 } 1472 } 1473 tmr->stopped_from = 0xa004; 1474 if (stcb) { 1475 atomic_add_int(&stcb->asoc.refcnt, 1); 1476 if (stcb->asoc.state == 0) { 1477 atomic_add_int(&stcb->asoc.refcnt, -1); 1478 if (inp) { 1479 SCTP_INP_DECR_REF(inp); 1480 } 1481 CURVNET_RESTORE(); 1482 return; 1483 } 1484 } 1485 tmr->stopped_from = 0xa005; 1486 SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type); 1487 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1488 if (inp) { 1489 SCTP_INP_DECR_REF(inp); 1490 } 1491 if (stcb) { 1492 atomic_add_int(&stcb->asoc.refcnt, -1); 1493 } 1494 CURVNET_RESTORE(); 1495 return; 1496 } 1497 tmr->stopped_from = 0xa006; 1498 1499 if (stcb) { 1500 SCTP_TCB_LOCK(stcb); 1501 atomic_add_int(&stcb->asoc.refcnt, 
-1); 1502 if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) && 1503 ((stcb->asoc.state == 0) || 1504 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) { 1505 SCTP_TCB_UNLOCK(stcb); 1506 if (inp) { 1507 SCTP_INP_DECR_REF(inp); 1508 } 1509 CURVNET_RESTORE(); 1510 return; 1511 } 1512 } 1513 /* record in stopped what t-o occured */ 1514 tmr->stopped_from = tmr->type; 1515 1516 /* mark as being serviced now */ 1517 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 1518 /* 1519 * Callout has been rescheduled. 1520 */ 1521 goto get_out; 1522 } 1523 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1524 /* 1525 * Not active, so no action. 1526 */ 1527 goto get_out; 1528 } 1529 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer); 1530 1531 /* call the handler for the appropriate timer type */ 1532 switch (tmr->type) { 1533 case SCTP_TIMER_TYPE_ZERO_COPY: 1534 if (inp == NULL) { 1535 break; 1536 } 1537 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) { 1538 SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket); 1539 } 1540 break; 1541 case SCTP_TIMER_TYPE_ZCOPY_SENDQ: 1542 if (inp == NULL) { 1543 break; 1544 } 1545 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) { 1546 SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket); 1547 } 1548 break; 1549 case SCTP_TIMER_TYPE_ADDR_WQ: 1550 sctp_handle_addr_wq(); 1551 break; 1552 case SCTP_TIMER_TYPE_SEND: 1553 if ((stcb == NULL) || (inp == NULL)) { 1554 break; 1555 } 1556 SCTP_STAT_INCR(sctps_timodata); 1557 stcb->asoc.timodata++; 1558 stcb->asoc.num_send_timers_up--; 1559 if (stcb->asoc.num_send_timers_up < 0) { 1560 stcb->asoc.num_send_timers_up = 0; 1561 } 1562 SCTP_TCB_LOCK_ASSERT(stcb); 1563 if (sctp_t3rxt_timer(inp, stcb, net)) { 1564 /* no need to unlock on tcb its gone */ 1565 1566 goto out_decr; 1567 } 1568 SCTP_TCB_LOCK_ASSERT(stcb); 1569 #ifdef SCTP_AUDITING_ENABLED 1570 sctp_auditing(4, inp, stcb, net); 1571 #endif 1572 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1573 if ((stcb->asoc.num_send_timers_up == 0) && 1574 
(stcb->asoc.sent_queue_cnt > 0)) { 1575 struct sctp_tmit_chunk *chk; 1576 1577 /* 1578 * safeguard. If there on some on the sent queue 1579 * somewhere but no timers running something is 1580 * wrong... so we start a timer on the first chunk 1581 * on the send queue on whatever net it is sent to. 1582 */ 1583 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 1584 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, 1585 chk->whoTo); 1586 } 1587 break; 1588 case SCTP_TIMER_TYPE_INIT: 1589 if ((stcb == NULL) || (inp == NULL)) { 1590 break; 1591 } 1592 SCTP_STAT_INCR(sctps_timoinit); 1593 stcb->asoc.timoinit++; 1594 if (sctp_t1init_timer(inp, stcb, net)) { 1595 /* no need to unlock on tcb its gone */ 1596 goto out_decr; 1597 } 1598 /* We do output but not here */ 1599 did_output = 0; 1600 break; 1601 case SCTP_TIMER_TYPE_RECV: 1602 if ((stcb == NULL) || (inp == NULL)) { 1603 break; 1604 } 1605 SCTP_STAT_INCR(sctps_timosack); 1606 stcb->asoc.timosack++; 1607 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1608 #ifdef SCTP_AUDITING_ENABLED 1609 sctp_auditing(4, inp, stcb, net); 1610 #endif 1611 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); 1612 break; 1613 case SCTP_TIMER_TYPE_SHUTDOWN: 1614 if ((stcb == NULL) || (inp == NULL)) { 1615 break; 1616 } 1617 if (sctp_shutdown_timer(inp, stcb, net)) { 1618 /* no need to unlock on tcb its gone */ 1619 goto out_decr; 1620 } 1621 SCTP_STAT_INCR(sctps_timoshutdown); 1622 stcb->asoc.timoshutdown++; 1623 #ifdef SCTP_AUDITING_ENABLED 1624 sctp_auditing(4, inp, stcb, net); 1625 #endif 1626 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); 1627 break; 1628 case SCTP_TIMER_TYPE_HEARTBEAT: 1629 if ((stcb == NULL) || (inp == NULL) || (net == NULL)) { 1630 break; 1631 } 1632 SCTP_STAT_INCR(sctps_timoheartbeat); 1633 stcb->asoc.timoheartbeat++; 1634 if (sctp_heartbeat_timer(inp, stcb, net)) { 1635 /* no need to unlock on tcb its gone */ 1636 goto out_decr; 1637 } 1638 #ifdef SCTP_AUDITING_ENABLED 
1639 sctp_auditing(4, inp, stcb, net); 1640 #endif 1641 if (!(net->dest_state & SCTP_ADDR_NOHB)) { 1642 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); 1643 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); 1644 } 1645 break; 1646 case SCTP_TIMER_TYPE_COOKIE: 1647 if ((stcb == NULL) || (inp == NULL)) { 1648 break; 1649 } 1650 if (sctp_cookie_timer(inp, stcb, net)) { 1651 /* no need to unlock on tcb its gone */ 1652 goto out_decr; 1653 } 1654 SCTP_STAT_INCR(sctps_timocookie); 1655 stcb->asoc.timocookie++; 1656 #ifdef SCTP_AUDITING_ENABLED 1657 sctp_auditing(4, inp, stcb, net); 1658 #endif 1659 /* 1660 * We consider T3 and Cookie timer pretty much the same with 1661 * respect to where from in chunk_output. 1662 */ 1663 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1664 break; 1665 case SCTP_TIMER_TYPE_NEWCOOKIE: 1666 { 1667 struct timeval tv; 1668 int i, secret; 1669 1670 if (inp == NULL) { 1671 break; 1672 } 1673 SCTP_STAT_INCR(sctps_timosecret); 1674 (void)SCTP_GETTIME_TIMEVAL(&tv); 1675 SCTP_INP_WLOCK(inp); 1676 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1677 inp->sctp_ep.last_secret_number = 1678 inp->sctp_ep.current_secret_number; 1679 inp->sctp_ep.current_secret_number++; 1680 if (inp->sctp_ep.current_secret_number >= 1681 SCTP_HOW_MANY_SECRETS) { 1682 inp->sctp_ep.current_secret_number = 0; 1683 } 1684 secret = (int)inp->sctp_ep.current_secret_number; 1685 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1686 inp->sctp_ep.secret_key[secret][i] = 1687 sctp_select_initial_TSN(&inp->sctp_ep); 1688 } 1689 SCTP_INP_WUNLOCK(inp); 1690 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net); 1691 } 1692 did_output = 0; 1693 break; 1694 case SCTP_TIMER_TYPE_PATHMTURAISE: 1695 if ((stcb == NULL) || (inp == NULL)) { 1696 break; 1697 } 1698 SCTP_STAT_INCR(sctps_timopathmtu); 1699 sctp_pathmtu_timer(inp, stcb, net); 1700 did_output = 0; 1701 break; 1702 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1703 if ((stcb 
== NULL) || (inp == NULL)) { 1704 break; 1705 } 1706 if (sctp_shutdownack_timer(inp, stcb, net)) { 1707 /* no need to unlock on tcb its gone */ 1708 goto out_decr; 1709 } 1710 SCTP_STAT_INCR(sctps_timoshutdownack); 1711 stcb->asoc.timoshutdownack++; 1712 #ifdef SCTP_AUDITING_ENABLED 1713 sctp_auditing(4, inp, stcb, net); 1714 #endif 1715 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); 1716 break; 1717 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1718 if ((stcb == NULL) || (inp == NULL)) { 1719 break; 1720 } 1721 SCTP_STAT_INCR(sctps_timoshutdownguard); 1722 sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED); 1723 /* no need to unlock on tcb its gone */ 1724 goto out_decr; 1725 1726 case SCTP_TIMER_TYPE_STRRESET: 1727 if ((stcb == NULL) || (inp == NULL)) { 1728 break; 1729 } 1730 if (sctp_strreset_timer(inp, stcb, net)) { 1731 /* no need to unlock on tcb its gone */ 1732 goto out_decr; 1733 } 1734 SCTP_STAT_INCR(sctps_timostrmrst); 1735 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); 1736 break; 1737 case SCTP_TIMER_TYPE_ASCONF: 1738 if ((stcb == NULL) || (inp == NULL)) { 1739 break; 1740 } 1741 if (sctp_asconf_timer(inp, stcb, net)) { 1742 /* no need to unlock on tcb its gone */ 1743 goto out_decr; 1744 } 1745 SCTP_STAT_INCR(sctps_timoasconf); 1746 #ifdef SCTP_AUDITING_ENABLED 1747 sctp_auditing(4, inp, stcb, net); 1748 #endif 1749 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); 1750 break; 1751 case SCTP_TIMER_TYPE_PRIM_DELETED: 1752 if ((stcb == NULL) || (inp == NULL)) { 1753 break; 1754 } 1755 sctp_delete_prim_timer(inp, stcb, net); 1756 SCTP_STAT_INCR(sctps_timodelprim); 1757 break; 1758 1759 case SCTP_TIMER_TYPE_AUTOCLOSE: 1760 if ((stcb == NULL) || (inp == NULL)) { 1761 break; 1762 } 1763 SCTP_STAT_INCR(sctps_timoautoclose); 1764 sctp_autoclose_timer(inp, stcb, net); 1765 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 1766 
did_output = 0; 1767 break; 1768 case SCTP_TIMER_TYPE_ASOCKILL: 1769 if ((stcb == NULL) || (inp == NULL)) { 1770 break; 1771 } 1772 SCTP_STAT_INCR(sctps_timoassockill); 1773 /* Can we free it yet? */ 1774 SCTP_INP_DECR_REF(inp); 1775 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 1776 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1777 so = SCTP_INP_SO(inp); 1778 atomic_add_int(&stcb->asoc.refcnt, 1); 1779 SCTP_TCB_UNLOCK(stcb); 1780 SCTP_SOCKET_LOCK(so, 1); 1781 SCTP_TCB_LOCK(stcb); 1782 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1783 #endif 1784 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 1785 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1786 SCTP_SOCKET_UNLOCK(so, 1); 1787 #endif 1788 /* 1789 * free asoc, always unlocks (or destroy's) so prevent 1790 * duplicate unlock or unlock of a free mtx :-0 1791 */ 1792 stcb = NULL; 1793 goto out_no_decr; 1794 case SCTP_TIMER_TYPE_INPKILL: 1795 SCTP_STAT_INCR(sctps_timoinpkill); 1796 if (inp == NULL) { 1797 break; 1798 } 1799 /* 1800 * special case, take away our increment since WE are the 1801 * killer 1802 */ 1803 SCTP_INP_DECR_REF(inp); 1804 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 1805 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 1806 SCTP_CALLED_FROM_INPKILL_TIMER); 1807 inp = NULL; 1808 goto out_no_decr; 1809 default: 1810 SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n", 1811 tmr->type); 1812 break; 1813 } 1814 #ifdef SCTP_AUDITING_ENABLED 1815 sctp_audit_log(0xF1, (uint8_t) tmr->type); 1816 if (inp) 1817 sctp_auditing(5, inp, stcb, net); 1818 #endif 1819 if ((did_output) && stcb) { 1820 /* 1821 * Now we need to clean up the control chunk chain if an 1822 * ECNE is on it. It must be marked as UNSENT again so next 1823 * call will continue to send it until such time that we get 1824 * a CWR, to remove it. 
It is, however, less likely that we 1825 * will find a ecn echo on the chain though. 1826 */ 1827 sctp_fix_ecn_echo(&stcb->asoc); 1828 } 1829 get_out: 1830 if (stcb) { 1831 SCTP_TCB_UNLOCK(stcb); 1832 } 1833 out_decr: 1834 if (inp) { 1835 SCTP_INP_DECR_REF(inp); 1836 } 1837 out_no_decr: 1838 SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n", 1839 type); 1840 CURVNET_RESTORE(); 1841 } 1842 1843 void 1844 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1845 struct sctp_nets *net) 1846 { 1847 uint32_t to_ticks; 1848 struct sctp_timer *tmr; 1849 1850 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) 1851 return; 1852 1853 tmr = NULL; 1854 if (stcb) { 1855 SCTP_TCB_LOCK_ASSERT(stcb); 1856 } 1857 switch (t_type) { 1858 case SCTP_TIMER_TYPE_ZERO_COPY: 1859 tmr = &inp->sctp_ep.zero_copy_timer; 1860 to_ticks = SCTP_ZERO_COPY_TICK_DELAY; 1861 break; 1862 case SCTP_TIMER_TYPE_ZCOPY_SENDQ: 1863 tmr = &inp->sctp_ep.zero_copy_sendq_timer; 1864 to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY; 1865 break; 1866 case SCTP_TIMER_TYPE_ADDR_WQ: 1867 /* Only 1 tick away :-) */ 1868 tmr = &SCTP_BASE_INFO(addr_wq_timer); 1869 to_ticks = SCTP_ADDRESS_TICK_DELAY; 1870 break; 1871 case SCTP_TIMER_TYPE_SEND: 1872 /* Here we use the RTO timer */ 1873 { 1874 int rto_val; 1875 1876 if ((stcb == NULL) || (net == NULL)) { 1877 return; 1878 } 1879 tmr = &net->rxt_timer; 1880 if (net->RTO == 0) { 1881 rto_val = stcb->asoc.initial_rto; 1882 } else { 1883 rto_val = net->RTO; 1884 } 1885 to_ticks = MSEC_TO_TICKS(rto_val); 1886 } 1887 break; 1888 case SCTP_TIMER_TYPE_INIT: 1889 /* 1890 * Here we use the INIT timer default usually about 1 1891 * minute. 
1892 */ 1893 if ((stcb == NULL) || (net == NULL)) { 1894 return; 1895 } 1896 tmr = &net->rxt_timer; 1897 if (net->RTO == 0) { 1898 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1899 } else { 1900 to_ticks = MSEC_TO_TICKS(net->RTO); 1901 } 1902 break; 1903 case SCTP_TIMER_TYPE_RECV: 1904 /* 1905 * Here we use the Delayed-Ack timer value from the inp 1906 * ususually about 200ms. 1907 */ 1908 if (stcb == NULL) { 1909 return; 1910 } 1911 tmr = &stcb->asoc.dack_timer; 1912 to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack); 1913 break; 1914 case SCTP_TIMER_TYPE_SHUTDOWN: 1915 /* Here we use the RTO of the destination. */ 1916 if ((stcb == NULL) || (net == NULL)) { 1917 return; 1918 } 1919 if (net->RTO == 0) { 1920 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1921 } else { 1922 to_ticks = MSEC_TO_TICKS(net->RTO); 1923 } 1924 tmr = &net->rxt_timer; 1925 break; 1926 case SCTP_TIMER_TYPE_HEARTBEAT: 1927 /* 1928 * the net is used here so that we can add in the RTO. Even 1929 * though we use a different timer. We also add the HB timer 1930 * PLUS a random jitter. 1931 */ 1932 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { 1933 return; 1934 } else { 1935 uint32_t rndval; 1936 uint32_t jitter; 1937 1938 if ((net->dest_state & SCTP_ADDR_NOHB) && 1939 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 1940 return; 1941 } 1942 if (net->RTO == 0) { 1943 to_ticks = stcb->asoc.initial_rto; 1944 } else { 1945 to_ticks = net->RTO; 1946 } 1947 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 1948 jitter = rndval % to_ticks; 1949 if (jitter >= (to_ticks >> 1)) { 1950 to_ticks = to_ticks + (jitter - (to_ticks >> 1)); 1951 } else { 1952 to_ticks = to_ticks - jitter; 1953 } 1954 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && 1955 !(net->dest_state & SCTP_ADDR_PF)) { 1956 to_ticks += net->heart_beat_delay; 1957 } 1958 /* 1959 * Now we must convert the to_ticks that are now in 1960 * ms to ticks. 
1961 */ 1962 to_ticks = MSEC_TO_TICKS(to_ticks); 1963 tmr = &net->hb_timer; 1964 } 1965 break; 1966 case SCTP_TIMER_TYPE_COOKIE: 1967 /* 1968 * Here we can use the RTO timer from the network since one 1969 * RTT was compelete. If a retran happened then we will be 1970 * using the RTO initial value. 1971 */ 1972 if ((stcb == NULL) || (net == NULL)) { 1973 return; 1974 } 1975 if (net->RTO == 0) { 1976 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1977 } else { 1978 to_ticks = MSEC_TO_TICKS(net->RTO); 1979 } 1980 tmr = &net->rxt_timer; 1981 break; 1982 case SCTP_TIMER_TYPE_NEWCOOKIE: 1983 /* 1984 * nothing needed but the endpoint here ususually about 60 1985 * minutes. 1986 */ 1987 if (inp == NULL) { 1988 return; 1989 } 1990 tmr = &inp->sctp_ep.signature_change; 1991 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 1992 break; 1993 case SCTP_TIMER_TYPE_ASOCKILL: 1994 if (stcb == NULL) { 1995 return; 1996 } 1997 tmr = &stcb->asoc.strreset_timer; 1998 to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT); 1999 break; 2000 case SCTP_TIMER_TYPE_INPKILL: 2001 /* 2002 * The inp is setup to die. We re-use the signature_chage 2003 * timer since that has stopped and we are in the GONE 2004 * state. 2005 */ 2006 if (inp == NULL) { 2007 return; 2008 } 2009 tmr = &inp->sctp_ep.signature_change; 2010 to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT); 2011 break; 2012 case SCTP_TIMER_TYPE_PATHMTURAISE: 2013 /* 2014 * Here we use the value found in the EP for PMTU ususually 2015 * about 10 minutes. 
2016 */ 2017 if ((stcb == NULL) || (inp == NULL)) { 2018 return; 2019 } 2020 if (net == NULL) { 2021 return; 2022 } 2023 if (net->dest_state & SCTP_ADDR_NO_PMTUD) { 2024 return; 2025 } 2026 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 2027 tmr = &net->pmtu_timer; 2028 break; 2029 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2030 /* Here we use the RTO of the destination */ 2031 if ((stcb == NULL) || (net == NULL)) { 2032 return; 2033 } 2034 if (net->RTO == 0) { 2035 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2036 } else { 2037 to_ticks = MSEC_TO_TICKS(net->RTO); 2038 } 2039 tmr = &net->rxt_timer; 2040 break; 2041 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2042 /* 2043 * Here we use the endpoints shutdown guard timer usually 2044 * about 3 minutes. 2045 */ 2046 if ((inp == NULL) || (stcb == NULL)) { 2047 return; 2048 } 2049 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 2050 tmr = &stcb->asoc.shut_guard_timer; 2051 break; 2052 case SCTP_TIMER_TYPE_STRRESET: 2053 /* 2054 * Here the timer comes from the stcb but its value is from 2055 * the net's RTO. 2056 */ 2057 if ((stcb == NULL) || (net == NULL)) { 2058 return; 2059 } 2060 if (net->RTO == 0) { 2061 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2062 } else { 2063 to_ticks = MSEC_TO_TICKS(net->RTO); 2064 } 2065 tmr = &stcb->asoc.strreset_timer; 2066 break; 2067 case SCTP_TIMER_TYPE_ASCONF: 2068 /* 2069 * Here the timer comes from the stcb but its value is from 2070 * the net's RTO. 
2071 */ 2072 if ((stcb == NULL) || (net == NULL)) { 2073 return; 2074 } 2075 if (net->RTO == 0) { 2076 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2077 } else { 2078 to_ticks = MSEC_TO_TICKS(net->RTO); 2079 } 2080 tmr = &stcb->asoc.asconf_timer; 2081 break; 2082 case SCTP_TIMER_TYPE_PRIM_DELETED: 2083 if ((stcb == NULL) || (net != NULL)) { 2084 return; 2085 } 2086 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2087 tmr = &stcb->asoc.delete_prim_timer; 2088 break; 2089 case SCTP_TIMER_TYPE_AUTOCLOSE: 2090 if (stcb == NULL) { 2091 return; 2092 } 2093 if (stcb->asoc.sctp_autoclose_ticks == 0) { 2094 /* 2095 * Really an error since stcb is NOT set to 2096 * autoclose 2097 */ 2098 return; 2099 } 2100 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2101 tmr = &stcb->asoc.autoclose_timer; 2102 break; 2103 default: 2104 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n", 2105 __FUNCTION__, t_type); 2106 return; 2107 break; 2108 } 2109 if ((to_ticks <= 0) || (tmr == NULL)) { 2110 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n", 2111 __FUNCTION__, t_type, to_ticks, (void *)tmr); 2112 return; 2113 } 2114 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2115 /* 2116 * we do NOT allow you to have it already running. 
if it is 2117 * we leave the current one up unchanged 2118 */ 2119 return; 2120 } 2121 /* At this point we can proceed */ 2122 if (t_type == SCTP_TIMER_TYPE_SEND) { 2123 stcb->asoc.num_send_timers_up++; 2124 } 2125 tmr->stopped_from = 0; 2126 tmr->type = t_type; 2127 tmr->ep = (void *)inp; 2128 tmr->tcb = (void *)stcb; 2129 tmr->net = (void *)net; 2130 tmr->self = (void *)tmr; 2131 tmr->vnet = (void *)curvnet; 2132 tmr->ticks = sctp_get_tick_count(); 2133 (void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr); 2134 return; 2135 } 2136 2137 void 2138 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2139 struct sctp_nets *net, uint32_t from) 2140 { 2141 struct sctp_timer *tmr; 2142 2143 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && 2144 (inp == NULL)) 2145 return; 2146 2147 tmr = NULL; 2148 if (stcb) { 2149 SCTP_TCB_LOCK_ASSERT(stcb); 2150 } 2151 switch (t_type) { 2152 case SCTP_TIMER_TYPE_ZERO_COPY: 2153 tmr = &inp->sctp_ep.zero_copy_timer; 2154 break; 2155 case SCTP_TIMER_TYPE_ZCOPY_SENDQ: 2156 tmr = &inp->sctp_ep.zero_copy_sendq_timer; 2157 break; 2158 case SCTP_TIMER_TYPE_ADDR_WQ: 2159 tmr = &SCTP_BASE_INFO(addr_wq_timer); 2160 break; 2161 case SCTP_TIMER_TYPE_SEND: 2162 if ((stcb == NULL) || (net == NULL)) { 2163 return; 2164 } 2165 tmr = &net->rxt_timer; 2166 break; 2167 case SCTP_TIMER_TYPE_INIT: 2168 if ((stcb == NULL) || (net == NULL)) { 2169 return; 2170 } 2171 tmr = &net->rxt_timer; 2172 break; 2173 case SCTP_TIMER_TYPE_RECV: 2174 if (stcb == NULL) { 2175 return; 2176 } 2177 tmr = &stcb->asoc.dack_timer; 2178 break; 2179 case SCTP_TIMER_TYPE_SHUTDOWN: 2180 if ((stcb == NULL) || (net == NULL)) { 2181 return; 2182 } 2183 tmr = &net->rxt_timer; 2184 break; 2185 case SCTP_TIMER_TYPE_HEARTBEAT: 2186 if ((stcb == NULL) || (net == NULL)) { 2187 return; 2188 } 2189 tmr = &net->hb_timer; 2190 break; 2191 case SCTP_TIMER_TYPE_COOKIE: 2192 if ((stcb == NULL) || (net == NULL)) { 2193 return; 2194 } 2195 tmr = 
&net->rxt_timer; 2196 break; 2197 case SCTP_TIMER_TYPE_NEWCOOKIE: 2198 /* nothing needed but the endpoint here */ 2199 tmr = &inp->sctp_ep.signature_change; 2200 /* 2201 * We re-use the newcookie timer for the INP kill timer. We 2202 * must assure that we do not kill it by accident. 2203 */ 2204 break; 2205 case SCTP_TIMER_TYPE_ASOCKILL: 2206 /* 2207 * Stop the asoc kill timer. 2208 */ 2209 if (stcb == NULL) { 2210 return; 2211 } 2212 tmr = &stcb->asoc.strreset_timer; 2213 break; 2214 2215 case SCTP_TIMER_TYPE_INPKILL: 2216 /* 2217 * The inp is setup to die. We re-use the signature_chage 2218 * timer since that has stopped and we are in the GONE 2219 * state. 2220 */ 2221 tmr = &inp->sctp_ep.signature_change; 2222 break; 2223 case SCTP_TIMER_TYPE_PATHMTURAISE: 2224 if ((stcb == NULL) || (net == NULL)) { 2225 return; 2226 } 2227 tmr = &net->pmtu_timer; 2228 break; 2229 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2230 if ((stcb == NULL) || (net == NULL)) { 2231 return; 2232 } 2233 tmr = &net->rxt_timer; 2234 break; 2235 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2236 if (stcb == NULL) { 2237 return; 2238 } 2239 tmr = &stcb->asoc.shut_guard_timer; 2240 break; 2241 case SCTP_TIMER_TYPE_STRRESET: 2242 if (stcb == NULL) { 2243 return; 2244 } 2245 tmr = &stcb->asoc.strreset_timer; 2246 break; 2247 case SCTP_TIMER_TYPE_ASCONF: 2248 if (stcb == NULL) { 2249 return; 2250 } 2251 tmr = &stcb->asoc.asconf_timer; 2252 break; 2253 case SCTP_TIMER_TYPE_PRIM_DELETED: 2254 if (stcb == NULL) { 2255 return; 2256 } 2257 tmr = &stcb->asoc.delete_prim_timer; 2258 break; 2259 case SCTP_TIMER_TYPE_AUTOCLOSE: 2260 if (stcb == NULL) { 2261 return; 2262 } 2263 tmr = &stcb->asoc.autoclose_timer; 2264 break; 2265 default: 2266 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n", 2267 __FUNCTION__, t_type); 2268 break; 2269 } 2270 if (tmr == NULL) { 2271 return; 2272 } 2273 if ((tmr->type != t_type) && tmr->type) { 2274 /* 2275 * Ok we have a timer that is under joint use. 
Cookie timer 2276 * per chance with the SEND timer. We therefore are NOT 2277 * running the timer that the caller wants stopped. So just 2278 * return. 2279 */ 2280 return; 2281 } 2282 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2283 stcb->asoc.num_send_timers_up--; 2284 if (stcb->asoc.num_send_timers_up < 0) { 2285 stcb->asoc.num_send_timers_up = 0; 2286 } 2287 } 2288 tmr->self = NULL; 2289 tmr->stopped_from = from; 2290 (void)SCTP_OS_TIMER_STOP(&tmr->timer); 2291 return; 2292 } 2293 2294 uint32_t 2295 sctp_calculate_len(struct mbuf *m) 2296 { 2297 uint32_t tlen = 0; 2298 struct mbuf *at; 2299 2300 at = m; 2301 while (at) { 2302 tlen += SCTP_BUF_LEN(at); 2303 at = SCTP_BUF_NEXT(at); 2304 } 2305 return (tlen); 2306 } 2307 2308 void 2309 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2310 struct sctp_association *asoc, uint32_t mtu) 2311 { 2312 /* 2313 * Reset the P-MTU size on this association, this involves changing 2314 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2315 * allow the DF flag to be cleared. 
2316 */ 2317 struct sctp_tmit_chunk *chk; 2318 unsigned int eff_mtu, ovh; 2319 2320 asoc->smallest_mtu = mtu; 2321 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2322 ovh = SCTP_MIN_OVERHEAD; 2323 } else { 2324 ovh = SCTP_MIN_V4_OVERHEAD; 2325 } 2326 eff_mtu = mtu - ovh; 2327 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2328 if (chk->send_size > eff_mtu) { 2329 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2330 } 2331 } 2332 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2333 if (chk->send_size > eff_mtu) { 2334 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2335 } 2336 } 2337 } 2338 2339 2340 /* 2341 * given an association and starting time of the current RTT period return 2342 * RTO in number of msecs net should point to the current network 2343 */ 2344 2345 uint32_t 2346 sctp_calculate_rto(struct sctp_tcb *stcb, 2347 struct sctp_association *asoc, 2348 struct sctp_nets *net, 2349 struct timeval *told, 2350 int safe, int rtt_from_sack) 2351 { 2352 /*- 2353 * given an association and the starting time of the current RTT 2354 * period (in value1/value2) return RTO in number of msecs. 2355 */ 2356 int32_t rtt; /* RTT in ms */ 2357 uint32_t new_rto; 2358 int first_measure = 0; 2359 struct timeval now, then, *old; 2360 2361 /* Copy it out for sparc64 */ 2362 if (safe == sctp_align_unsafe_makecopy) { 2363 old = &then; 2364 memcpy(&then, told, sizeof(struct timeval)); 2365 } else if (safe == sctp_align_safe_nocopy) { 2366 old = told; 2367 } else { 2368 /* error */ 2369 SCTP_PRINTF("Huh, bad rto calc call\n"); 2370 return (0); 2371 } 2372 /************************/ 2373 /* 1. 
calculate new RTT */ 2374 /************************/ 2375 /* get the current time */ 2376 if (stcb->asoc.use_precise_time) { 2377 (void)SCTP_GETPTIME_TIMEVAL(&now); 2378 } else { 2379 (void)SCTP_GETTIME_TIMEVAL(&now); 2380 } 2381 timevalsub(&now, old); 2382 /* store the current RTT in us */ 2383 net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec + 2384 (uint64_t) now.tv_usec; 2385 2386 /* computer rtt in ms */ 2387 rtt = net->rtt / 1000; 2388 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) { 2389 /* 2390 * Tell the CC module that a new update has just occurred 2391 * from a sack 2392 */ 2393 (*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now); 2394 } 2395 /* 2396 * Do we need to determine the lan? We do this only on sacks i.e. 2397 * RTT being determined from data not non-data (HB/INIT->INITACK). 2398 */ 2399 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) && 2400 (net->lan_type == SCTP_LAN_UNKNOWN)) { 2401 if (net->rtt > SCTP_LOCAL_LAN_RTT) { 2402 net->lan_type = SCTP_LAN_INTERNET; 2403 } else { 2404 net->lan_type = SCTP_LAN_LOCAL; 2405 } 2406 } 2407 /***************************/ 2408 /* 2. update RTTVAR & SRTT */ 2409 /***************************/ 2410 /*- 2411 * Compute the scaled average lastsa and the 2412 * scaled variance lastsv as described in van Jacobson 2413 * Paper "Congestion Avoidance and Control", Annex A. 
2414 * 2415 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt 2416 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar 2417 */ 2418 if (net->RTO_measured) { 2419 rtt -= (net->lastsa >> SCTP_RTT_SHIFT); 2420 net->lastsa += rtt; 2421 if (rtt < 0) { 2422 rtt = -rtt; 2423 } 2424 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); 2425 net->lastsv += rtt; 2426 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2427 rto_logging(net, SCTP_LOG_RTTVAR); 2428 } 2429 } else { 2430 /* First RTO measurment */ 2431 net->RTO_measured = 1; 2432 first_measure = 1; 2433 net->lastsa = rtt << SCTP_RTT_SHIFT; 2434 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT; 2435 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) { 2436 rto_logging(net, SCTP_LOG_INITIAL_RTT); 2437 } 2438 } 2439 if (net->lastsv == 0) { 2440 net->lastsv = SCTP_CLOCK_GRANULARITY; 2441 } 2442 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 2443 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 2444 (stcb->asoc.sat_network_lockout == 0)) { 2445 stcb->asoc.sat_network = 1; 2446 } else if ((!first_measure) && stcb->asoc.sat_network) { 2447 stcb->asoc.sat_network = 0; 2448 stcb->asoc.sat_network_lockout = 1; 2449 } 2450 /* bound it, per C6/C7 in Section 5.3.1 */ 2451 if (new_rto < stcb->asoc.minrto) { 2452 new_rto = stcb->asoc.minrto; 2453 } 2454 if (new_rto > stcb->asoc.maxrto) { 2455 new_rto = stcb->asoc.maxrto; 2456 } 2457 /* we are now returning the RTO */ 2458 return (new_rto); 2459 } 2460 2461 /* 2462 * return a pointer to a contiguous piece of data from the given mbuf chain 2463 * starting at 'off' for 'len' bytes. If the desired piece spans more than 2464 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 2465 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 
2466 */ 2467 caddr_t 2468 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr) 2469 { 2470 uint32_t count; 2471 uint8_t *ptr; 2472 2473 ptr = in_ptr; 2474 if ((off < 0) || (len <= 0)) 2475 return (NULL); 2476 2477 /* find the desired start location */ 2478 while ((m != NULL) && (off > 0)) { 2479 if (off < SCTP_BUF_LEN(m)) 2480 break; 2481 off -= SCTP_BUF_LEN(m); 2482 m = SCTP_BUF_NEXT(m); 2483 } 2484 if (m == NULL) 2485 return (NULL); 2486 2487 /* is the current mbuf large enough (eg. contiguous)? */ 2488 if ((SCTP_BUF_LEN(m) - off) >= len) { 2489 return (mtod(m, caddr_t)+off); 2490 } else { 2491 /* else, it spans more than one mbuf, so save a temp copy... */ 2492 while ((m != NULL) && (len > 0)) { 2493 count = min(SCTP_BUF_LEN(m) - off, len); 2494 bcopy(mtod(m, caddr_t)+off, ptr, count); 2495 len -= count; 2496 ptr += count; 2497 off = 0; 2498 m = SCTP_BUF_NEXT(m); 2499 } 2500 if ((m == NULL) && (len > 0)) 2501 return (NULL); 2502 else 2503 return ((caddr_t)in_ptr); 2504 } 2505 } 2506 2507 2508 2509 struct sctp_paramhdr * 2510 sctp_get_next_param(struct mbuf *m, 2511 int offset, 2512 struct sctp_paramhdr *pull, 2513 int pull_limit) 2514 { 2515 /* This just provides a typed signature to Peter's Pull routine */ 2516 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 2517 (uint8_t *) pull)); 2518 } 2519 2520 2521 int 2522 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 2523 { 2524 /* 2525 * add padlen bytes of 0 filled padding to the end of the mbuf. If 2526 * padlen is > 3 this routine will fail. 2527 */ 2528 uint8_t *dp; 2529 int i; 2530 2531 if (padlen > 3) { 2532 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 2533 return (ENOBUFS); 2534 } 2535 if (padlen <= M_TRAILINGSPACE(m)) { 2536 /* 2537 * The easy way. 
We hope the majority of the time we hit 2538 * here :) 2539 */ 2540 dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 2541 SCTP_BUF_LEN(m) += padlen; 2542 } else { 2543 /* Hard way we must grow the mbuf */ 2544 struct mbuf *tmp; 2545 2546 tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA); 2547 if (tmp == NULL) { 2548 /* Out of space GAK! we are in big trouble. */ 2549 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 2550 return (ENOBUFS); 2551 } 2552 /* setup and insert in middle */ 2553 SCTP_BUF_LEN(tmp) = padlen; 2554 SCTP_BUF_NEXT(tmp) = NULL; 2555 SCTP_BUF_NEXT(m) = tmp; 2556 dp = mtod(tmp, uint8_t *); 2557 } 2558 /* zero out the pad */ 2559 for (i = 0; i < padlen; i++) { 2560 *dp = 0; 2561 dp++; 2562 } 2563 return (0); 2564 } 2565 2566 int 2567 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 2568 { 2569 /* find the last mbuf in chain and pad it */ 2570 struct mbuf *m_at; 2571 2572 if (last_mbuf) { 2573 return (sctp_add_pad_tombuf(last_mbuf, padval)); 2574 } else { 2575 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 2576 if (SCTP_BUF_NEXT(m_at) == NULL) { 2577 return (sctp_add_pad_tombuf(m_at, padval)); 2578 } 2579 } 2580 } 2581 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 2582 return (EFAULT); 2583 } 2584 2585 static void 2586 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 2587 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked 2588 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 2589 SCTP_UNUSED 2590 #endif 2591 ) 2592 { 2593 struct mbuf *m_notify; 2594 struct sctp_assoc_change *sac; 2595 struct sctp_queued_to_read *control; 2596 size_t notif_len, abort_len; 2597 unsigned int i; 2598 2599 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2600 struct socket *so; 2601 2602 #endif 2603 2604 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 2605 notif_len = sizeof(struct 
sctp_assoc_change); 2606 if (abort != NULL) { 2607 abort_len = htons(abort->ch.chunk_length); 2608 } else { 2609 abort_len = 0; 2610 } 2611 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 2612 notif_len += SCTP_ASSOC_SUPPORTS_MAX; 2613 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 2614 notif_len += abort_len; 2615 } 2616 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA); 2617 if (m_notify == NULL) { 2618 /* Retry with smaller value. */ 2619 notif_len = sizeof(struct sctp_assoc_change); 2620 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA); 2621 if (m_notify == NULL) { 2622 goto set_error; 2623 } 2624 } 2625 SCTP_BUF_NEXT(m_notify) = NULL; 2626 sac = mtod(m_notify, struct sctp_assoc_change *); 2627 sac->sac_type = SCTP_ASSOC_CHANGE; 2628 sac->sac_flags = 0; 2629 sac->sac_length = sizeof(struct sctp_assoc_change); 2630 sac->sac_state = state; 2631 sac->sac_error = error; 2632 /* XXX verify these stream counts */ 2633 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 2634 sac->sac_inbound_streams = stcb->asoc.streamincnt; 2635 sac->sac_assoc_id = sctp_get_associd(stcb); 2636 if (notif_len > sizeof(struct sctp_assoc_change)) { 2637 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { 2638 i = 0; 2639 if (stcb->asoc.peer_supports_prsctp) { 2640 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR; 2641 } 2642 if (stcb->asoc.peer_supports_auth) { 2643 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH; 2644 } 2645 if (stcb->asoc.peer_supports_asconf) { 2646 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF; 2647 } 2648 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF; 2649 if (stcb->asoc.peer_supports_strreset) { 2650 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG; 2651 } 2652 sac->sac_length += i; 2653 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) { 2654 memcpy(sac->sac_info, abort, abort_len); 2655 sac->sac_length += abort_len; 2656 } 2657 } 2658 
SCTP_BUF_LEN(m_notify) = sac->sac_length; 2659 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2660 0, 0, stcb->asoc.context, 0, 0, 0, 2661 m_notify); 2662 if (control != NULL) { 2663 control->length = SCTP_BUF_LEN(m_notify); 2664 /* not that we need this */ 2665 control->tail_mbuf = m_notify; 2666 control->spec_flags = M_NOTIFICATION; 2667 sctp_add_to_readq(stcb->sctp_ep, stcb, 2668 control, 2669 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, 2670 so_locked); 2671 } else { 2672 sctp_m_freem(m_notify); 2673 } 2674 } 2675 /* 2676 * For 1-to-1 style sockets, we send up and error when an ABORT 2677 * comes in. 2678 */ 2679 set_error: 2680 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2681 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2682 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 2683 if (from_peer) { 2684 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) { 2685 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED); 2686 stcb->sctp_socket->so_error = ECONNREFUSED; 2687 } else { 2688 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 2689 stcb->sctp_socket->so_error = ECONNRESET; 2690 } 2691 } else { 2692 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED); 2693 stcb->sctp_socket->so_error = ECONNABORTED; 2694 } 2695 } 2696 /* Wake ANY sleepers */ 2697 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2698 so = SCTP_INP_SO(stcb->sctp_ep); 2699 if (!so_locked) { 2700 atomic_add_int(&stcb->asoc.refcnt, 1); 2701 SCTP_TCB_UNLOCK(stcb); 2702 SCTP_SOCKET_LOCK(so, 1); 2703 SCTP_TCB_LOCK(stcb); 2704 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2705 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 2706 SCTP_SOCKET_UNLOCK(so, 1); 2707 return; 2708 } 2709 } 2710 #endif 2711 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2712 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2713 ((state == 
SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) { 2714 socantrcvmore(stcb->sctp_socket); 2715 } 2716 sorwakeup(stcb->sctp_socket); 2717 sowwakeup(stcb->sctp_socket); 2718 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2719 if (!so_locked) { 2720 SCTP_SOCKET_UNLOCK(so, 1); 2721 } 2722 #endif 2723 } 2724 2725 static void 2726 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 2727 struct sockaddr *sa, uint32_t error) 2728 { 2729 struct mbuf *m_notify; 2730 struct sctp_paddr_change *spc; 2731 struct sctp_queued_to_read *control; 2732 2733 if ((stcb == NULL) || 2734 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) { 2735 /* event not enabled */ 2736 return; 2737 } 2738 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA); 2739 if (m_notify == NULL) 2740 return; 2741 SCTP_BUF_LEN(m_notify) = 0; 2742 spc = mtod(m_notify, struct sctp_paddr_change *); 2743 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 2744 spc->spc_flags = 0; 2745 spc->spc_length = sizeof(struct sctp_paddr_change); 2746 switch (sa->sa_family) { 2747 #ifdef INET 2748 case AF_INET: 2749 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 2750 break; 2751 #endif 2752 #ifdef INET6 2753 case AF_INET6: 2754 { 2755 struct sockaddr_in6 *sin6; 2756 2757 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 2758 2759 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 2760 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 2761 if (sin6->sin6_scope_id == 0) { 2762 /* recover scope_id for user */ 2763 (void)sa6_recoverscope(sin6); 2764 } else { 2765 /* clear embedded scope_id for user */ 2766 in6_clearscope(&sin6->sin6_addr); 2767 } 2768 } 2769 break; 2770 } 2771 #endif 2772 default: 2773 /* TSNH */ 2774 break; 2775 } 2776 spc->spc_state = state; 2777 spc->spc_error = error; 2778 spc->spc_assoc_id = sctp_get_associd(stcb); 2779 2780 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 2781 SCTP_BUF_NEXT(m_notify) = 
NULL; 2782 2783 /* append to socket */ 2784 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2785 0, 0, stcb->asoc.context, 0, 0, 0, 2786 m_notify); 2787 if (control == NULL) { 2788 /* no memory */ 2789 sctp_m_freem(m_notify); 2790 return; 2791 } 2792 control->length = SCTP_BUF_LEN(m_notify); 2793 control->spec_flags = M_NOTIFICATION; 2794 /* not that we need this */ 2795 control->tail_mbuf = m_notify; 2796 sctp_add_to_readq(stcb->sctp_ep, stcb, 2797 control, 2798 &stcb->sctp_socket->so_rcv, 1, 2799 SCTP_READ_LOCK_NOT_HELD, 2800 SCTP_SO_NOT_LOCKED); 2801 } 2802 2803 2804 static void 2805 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 2806 struct sctp_tmit_chunk *chk, int so_locked 2807 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 2808 SCTP_UNUSED 2809 #endif 2810 ) 2811 { 2812 struct mbuf *m_notify; 2813 struct sctp_send_failed *ssf; 2814 struct sctp_send_failed_event *ssfe; 2815 struct sctp_queued_to_read *control; 2816 int length; 2817 2818 if ((stcb == NULL) || 2819 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 2820 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 2821 /* event not enabled */ 2822 return; 2823 } 2824 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 2825 length = sizeof(struct sctp_send_failed_event); 2826 } else { 2827 length = sizeof(struct sctp_send_failed); 2828 } 2829 m_notify = sctp_get_mbuf_for_msg(length, 0, M_DONTWAIT, 1, MT_DATA); 2830 if (m_notify == NULL) 2831 /* no space left */ 2832 return; 2833 length += chk->send_size; 2834 length -= sizeof(struct sctp_data_chunk); 2835 SCTP_BUF_LEN(m_notify) = 0; 2836 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 2837 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 2838 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 2839 if (sent) { 2840 ssfe->ssfe_flags = SCTP_DATA_SENT; 2841 } else 
{ 2842 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 2843 } 2844 ssfe->ssfe_length = length; 2845 ssfe->ssfe_error = error; 2846 /* not exactly what the user sent in, but should be close :) */ 2847 bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info)); 2848 ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number; 2849 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 2850 ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype; 2851 ssfe->ssfe_info.snd_context = chk->rec.data.context; 2852 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 2853 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 2854 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event); 2855 } else { 2856 ssf = mtod(m_notify, struct sctp_send_failed *); 2857 ssf->ssf_type = SCTP_SEND_FAILED; 2858 if (sent) { 2859 ssf->ssf_flags = SCTP_DATA_SENT; 2860 } else { 2861 ssf->ssf_flags = SCTP_DATA_UNSENT; 2862 } 2863 ssf->ssf_length = length; 2864 ssf->ssf_error = error; 2865 /* not exactly what the user sent in, but should be close :) */ 2866 bzero(&ssf->ssf_info, sizeof(ssf->ssf_info)); 2867 ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number; 2868 ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq; 2869 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 2870 ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype; 2871 ssf->ssf_info.sinfo_context = chk->rec.data.context; 2872 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2873 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2874 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed); 2875 } 2876 if (chk->data) { 2877 /* 2878 * trim off the sctp chunk header(it should be there) 2879 */ 2880 if (chk->send_size >= sizeof(struct sctp_data_chunk)) { 2881 m_adj(chk->data, sizeof(struct sctp_data_chunk)); 2882 sctp_mbuf_crush(chk->data); 2883 chk->send_size -= sizeof(struct sctp_data_chunk); 2884 } 2885 } 2886 SCTP_BUF_NEXT(m_notify) = chk->data; 2887 /* Steal off the mbuf */ 2888 chk->data = NULL; 2889 /* 2890 * For this case, we check the actual socket 
buffer, since the assoc 2891 * is going away we don't want to overfill the socket buffer for a 2892 * non-reader 2893 */ 2894 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 2895 sctp_m_freem(m_notify); 2896 return; 2897 } 2898 /* append to socket */ 2899 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2900 0, 0, stcb->asoc.context, 0, 0, 0, 2901 m_notify); 2902 if (control == NULL) { 2903 /* no memory */ 2904 sctp_m_freem(m_notify); 2905 return; 2906 } 2907 control->spec_flags = M_NOTIFICATION; 2908 sctp_add_to_readq(stcb->sctp_ep, stcb, 2909 control, 2910 &stcb->sctp_socket->so_rcv, 1, 2911 SCTP_READ_LOCK_NOT_HELD, 2912 so_locked); 2913 } 2914 2915 2916 static void 2917 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 2918 struct sctp_stream_queue_pending *sp, int so_locked 2919 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 2920 SCTP_UNUSED 2921 #endif 2922 ) 2923 { 2924 struct mbuf *m_notify; 2925 struct sctp_send_failed *ssf; 2926 struct sctp_send_failed_event *ssfe; 2927 struct sctp_queued_to_read *control; 2928 int length; 2929 2930 if ((stcb == NULL) || 2931 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 2932 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 2933 /* event not enabled */ 2934 return; 2935 } 2936 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 2937 length = sizeof(struct sctp_send_failed_event); 2938 } else { 2939 length = sizeof(struct sctp_send_failed); 2940 } 2941 m_notify = sctp_get_mbuf_for_msg(length, 0, M_DONTWAIT, 1, MT_DATA); 2942 if (m_notify == NULL) { 2943 /* no space left */ 2944 return; 2945 } 2946 length += sp->length; 2947 SCTP_BUF_LEN(m_notify) = 0; 2948 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 2949 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 2950 ssfe->ssfe_type = 
SCTP_SEND_FAILED_EVENT; 2951 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 2952 ssfe->ssfe_length = length; 2953 ssfe->ssfe_error = error; 2954 /* not exactly what the user sent in, but should be close :) */ 2955 bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info)); 2956 ssfe->ssfe_info.snd_sid = sp->stream; 2957 if (sp->some_taken) { 2958 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG; 2959 } else { 2960 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG; 2961 } 2962 ssfe->ssfe_info.snd_ppid = sp->ppid; 2963 ssfe->ssfe_info.snd_context = sp->context; 2964 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 2965 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 2966 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event); 2967 } else { 2968 ssf = mtod(m_notify, struct sctp_send_failed *); 2969 ssf->ssf_type = SCTP_SEND_FAILED; 2970 ssf->ssf_flags = SCTP_DATA_UNSENT; 2971 ssf->ssf_length = length; 2972 ssf->ssf_error = error; 2973 /* not exactly what the user sent in, but should be close :) */ 2974 bzero(&ssf->ssf_info, sizeof(ssf->ssf_info)); 2975 ssf->ssf_info.sinfo_stream = sp->stream; 2976 ssf->ssf_info.sinfo_ssn = 0; 2977 if (sp->some_taken) { 2978 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG; 2979 } else { 2980 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG; 2981 } 2982 ssf->ssf_info.sinfo_ppid = sp->ppid; 2983 ssf->ssf_info.sinfo_context = sp->context; 2984 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2985 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2986 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed); 2987 } 2988 SCTP_BUF_NEXT(m_notify) = sp->data; 2989 2990 /* Steal off the mbuf */ 2991 sp->data = NULL; 2992 /* 2993 * For this case, we check the actual socket buffer, since the assoc 2994 * is going away we don't want to overfill the socket buffer for a 2995 * non-reader 2996 */ 2997 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 2998 sctp_m_freem(m_notify); 2999 return; 3000 } 3001 /* append to socket */ 3002 
control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3003 0, 0, stcb->asoc.context, 0, 0, 0, 3004 m_notify); 3005 if (control == NULL) { 3006 /* no memory */ 3007 sctp_m_freem(m_notify); 3008 return; 3009 } 3010 control->spec_flags = M_NOTIFICATION; 3011 sctp_add_to_readq(stcb->sctp_ep, stcb, 3012 control, 3013 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); 3014 } 3015 3016 3017 3018 static void 3019 sctp_notify_adaptation_layer(struct sctp_tcb *stcb) 3020 { 3021 struct mbuf *m_notify; 3022 struct sctp_adaptation_event *sai; 3023 struct sctp_queued_to_read *control; 3024 3025 if ((stcb == NULL) || 3026 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) { 3027 /* event not enabled */ 3028 return; 3029 } 3030 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA); 3031 if (m_notify == NULL) 3032 /* no space left */ 3033 return; 3034 SCTP_BUF_LEN(m_notify) = 0; 3035 sai = mtod(m_notify, struct sctp_adaptation_event *); 3036 sai->sai_type = SCTP_ADAPTATION_INDICATION; 3037 sai->sai_flags = 0; 3038 sai->sai_length = sizeof(struct sctp_adaptation_event); 3039 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation; 3040 sai->sai_assoc_id = sctp_get_associd(stcb); 3041 3042 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 3043 SCTP_BUF_NEXT(m_notify) = NULL; 3044 3045 /* append to socket */ 3046 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3047 0, 0, stcb->asoc.context, 0, 0, 0, 3048 m_notify); 3049 if (control == NULL) { 3050 /* no memory */ 3051 sctp_m_freem(m_notify); 3052 return; 3053 } 3054 control->length = SCTP_BUF_LEN(m_notify); 3055 control->spec_flags = M_NOTIFICATION; 3056 /* not that we need this */ 3057 control->tail_mbuf = m_notify; 3058 sctp_add_to_readq(stcb->sctp_ep, stcb, 3059 control, 3060 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3061 } 3062 3063 /* This always must be called 
with the read-queue LOCKED in the INP */ 3064 static void 3065 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, 3066 uint32_t val, int so_locked 3067 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3068 SCTP_UNUSED 3069 #endif 3070 ) 3071 { 3072 struct mbuf *m_notify; 3073 struct sctp_pdapi_event *pdapi; 3074 struct sctp_queued_to_read *control; 3075 struct sockbuf *sb; 3076 3077 if ((stcb == NULL) || 3078 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3079 /* event not enabled */ 3080 return; 3081 } 3082 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) { 3083 return; 3084 } 3085 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA); 3086 if (m_notify == NULL) 3087 /* no space left */ 3088 return; 3089 SCTP_BUF_LEN(m_notify) = 0; 3090 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3091 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3092 pdapi->pdapi_flags = 0; 3093 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3094 pdapi->pdapi_indication = error; 3095 pdapi->pdapi_stream = (val >> 16); 3096 pdapi->pdapi_seq = (val & 0x0000ffff); 3097 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3098 3099 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3100 SCTP_BUF_NEXT(m_notify) = NULL; 3101 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3102 0, 0, stcb->asoc.context, 0, 0, 0, 3103 m_notify); 3104 if (control == NULL) { 3105 /* no memory */ 3106 sctp_m_freem(m_notify); 3107 return; 3108 } 3109 control->spec_flags = M_NOTIFICATION; 3110 control->length = SCTP_BUF_LEN(m_notify); 3111 /* not that we need this */ 3112 control->tail_mbuf = m_notify; 3113 control->held_length = 0; 3114 control->length = 0; 3115 sb = &stcb->sctp_socket->so_rcv; 3116 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3117 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3118 } 3119 sctp_sballoc(stcb, sb, m_notify); 3120 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 3121 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3122 } 3123 atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify)); 3124 control->end_added = 1; 3125 if (stcb->asoc.control_pdapi) 3126 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3127 else { 3128 /* we really should not see this case */ 3129 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3130 } 3131 if (stcb->sctp_ep && stcb->sctp_socket) { 3132 /* This should always be the case */ 3133 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3134 struct socket *so; 3135 3136 so = SCTP_INP_SO(stcb->sctp_ep); 3137 if (!so_locked) { 3138 atomic_add_int(&stcb->asoc.refcnt, 1); 3139 SCTP_TCB_UNLOCK(stcb); 3140 SCTP_SOCKET_LOCK(so, 1); 3141 SCTP_TCB_LOCK(stcb); 3142 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3143 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3144 SCTP_SOCKET_UNLOCK(so, 1); 3145 return; 3146 } 3147 } 3148 #endif 3149 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3150 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3151 if (!so_locked) { 3152 SCTP_SOCKET_UNLOCK(so, 1); 3153 } 3154 #endif 3155 } 3156 } 3157 3158 static void 3159 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3160 { 3161 struct mbuf *m_notify; 3162 struct sctp_shutdown_event *sse; 3163 struct sctp_queued_to_read *control; 3164 3165 /* 3166 * For TCP model AND UDP connected sockets we will send an error up 3167 * when an SHUTDOWN completes 3168 */ 3169 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3170 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3171 /* mark socket closed for read/write and wakeup! 
*/ 3172 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3173 struct socket *so; 3174 3175 so = SCTP_INP_SO(stcb->sctp_ep); 3176 atomic_add_int(&stcb->asoc.refcnt, 1); 3177 SCTP_TCB_UNLOCK(stcb); 3178 SCTP_SOCKET_LOCK(so, 1); 3179 SCTP_TCB_LOCK(stcb); 3180 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3181 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3182 SCTP_SOCKET_UNLOCK(so, 1); 3183 return; 3184 } 3185 #endif 3186 socantsendmore(stcb->sctp_socket); 3187 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3188 SCTP_SOCKET_UNLOCK(so, 1); 3189 #endif 3190 } 3191 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) { 3192 /* event not enabled */ 3193 return; 3194 } 3195 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA); 3196 if (m_notify == NULL) 3197 /* no space left */ 3198 return; 3199 sse = mtod(m_notify, struct sctp_shutdown_event *); 3200 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3201 sse->sse_flags = 0; 3202 sse->sse_length = sizeof(struct sctp_shutdown_event); 3203 sse->sse_assoc_id = sctp_get_associd(stcb); 3204 3205 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3206 SCTP_BUF_NEXT(m_notify) = NULL; 3207 3208 /* append to socket */ 3209 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3210 0, 0, stcb->asoc.context, 0, 0, 0, 3211 m_notify); 3212 if (control == NULL) { 3213 /* no memory */ 3214 sctp_m_freem(m_notify); 3215 return; 3216 } 3217 control->spec_flags = M_NOTIFICATION; 3218 control->length = SCTP_BUF_LEN(m_notify); 3219 /* not that we need this */ 3220 control->tail_mbuf = m_notify; 3221 sctp_add_to_readq(stcb->sctp_ep, stcb, 3222 control, 3223 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3224 } 3225 3226 static void 3227 sctp_notify_sender_dry_event(struct sctp_tcb *stcb, 3228 int so_locked 3229 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3230 SCTP_UNUSED 3231 #endif 3232 ) 3233 { 
	struct mbuf *m_notify;
	struct sctp_sender_dry_event *event;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	/* Fill in the SCTP_SENDER_DRY_EVENT notification in-place in the mbuf. */
	event = mtod(m_notify, struct sctp_sender_dry_event *);
	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
	event->sender_dry_flags = 0;
	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
	event->sender_dry_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}


/*
 * Deliver a SCTP_STREAM_CHANGE_EVENT notification (streams added) to the
 * application, carrying the new inbound/outbound stream counts.  Suppressed
 * when the event is not subscribed, or when the peer made the request
 * (peer_req_out set together with 'flag').
 */
void
sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_change_event *stradd;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
		/* event not enabled */
		return;
	}
	if ((stcb->asoc.peer_req_out) && flag) {
		/* Peer made the request, don't tell the local user */
		stcb->asoc.peer_req_out = 0;
3292 return; 3293 } 3294 stcb->asoc.peer_req_out = 0; 3295 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 3296 if (m_notify == NULL) 3297 /* no space left */ 3298 return; 3299 SCTP_BUF_LEN(m_notify) = 0; 3300 len = sizeof(struct sctp_stream_change_event); 3301 if (len > M_TRAILINGSPACE(m_notify)) { 3302 /* never enough room */ 3303 sctp_m_freem(m_notify); 3304 return; 3305 } 3306 stradd = mtod(m_notify, struct sctp_stream_change_event *); 3307 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT; 3308 stradd->strchange_flags = flag; 3309 stradd->strchange_length = len; 3310 stradd->strchange_assoc_id = sctp_get_associd(stcb); 3311 stradd->strchange_instrms = numberin; 3312 stradd->strchange_outstrms = numberout; 3313 SCTP_BUF_LEN(m_notify) = len; 3314 SCTP_BUF_NEXT(m_notify) = NULL; 3315 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3316 /* no space */ 3317 sctp_m_freem(m_notify); 3318 return; 3319 } 3320 /* append to socket */ 3321 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3322 0, 0, stcb->asoc.context, 0, 0, 0, 3323 m_notify); 3324 if (control == NULL) { 3325 /* no memory */ 3326 sctp_m_freem(m_notify); 3327 return; 3328 } 3329 control->spec_flags = M_NOTIFICATION; 3330 control->length = SCTP_BUF_LEN(m_notify); 3331 /* not that we need this */ 3332 control->tail_mbuf = m_notify; 3333 sctp_add_to_readq(stcb->sctp_ep, stcb, 3334 control, 3335 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3336 } 3337 3338 void 3339 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag) 3340 { 3341 struct mbuf *m_notify; 3342 struct sctp_queued_to_read *control; 3343 struct sctp_assoc_reset_event *strasoc; 3344 int len; 3345 3346 if ((stcb == NULL) || 3347 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) { 3348 /* event not enabled */ 3349 return; 3350 } 3351 m_notify = 
	    sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	len = sizeof(struct sctp_assoc_reset_event);
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
	strasoc->assocreset_flags = flag;
	strasoc->assocreset_length = len;
	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
	strasoc->assocreset_local_tsn = sending_tsn;
	strasoc->assocreset_remote_tsn = recv_tsn;
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space in the receive buffer, drop the notification */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}



/*
 * Deliver a SCTP_STREAM_RESET_EVENT notification listing the stream ids
 * affected by a stream reset, with 'flag' indicating direction/outcome.
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1,
	    MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	/* event header plus one uint16_t per affected stream */
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	strreset->strreset_flags = flag;
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		/*
		 * NOTE(review): entries are converted with ntohs() here,
		 * which assumes the caller passes the list in network byte
		 * order — confirm against the callers in sctp_ulp_notify().
		 */
		for (i = 0; i < number_entries; i++) {
			strreset->strreset_stream_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space in the receive buffer, drop the notification */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}


/*
 * Deliver a SCTP_REMOTE_ERROR notification carrying the peer's ERROR
 * chunk (if any) as trailing data.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	size_t notif_len, chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		return;
	}
	if (chunk != NULL) {
		chunk_len =
htons(chunk->ch.chunk_length); 3473 } else { 3474 chunk_len = 0; 3475 } 3476 notif_len = sizeof(struct sctp_remote_error) + chunk_len; 3477 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA); 3478 if (m_notify == NULL) { 3479 /* Retry with smaller value. */ 3480 notif_len = sizeof(struct sctp_remote_error); 3481 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA); 3482 if (m_notify == NULL) { 3483 return; 3484 } 3485 } 3486 SCTP_BUF_NEXT(m_notify) = NULL; 3487 sre = mtod(m_notify, struct sctp_remote_error *); 3488 sre->sre_type = SCTP_REMOTE_ERROR; 3489 sre->sre_flags = 0; 3490 sre->sre_length = sizeof(struct sctp_remote_error); 3491 sre->sre_error = error; 3492 sre->sre_assoc_id = sctp_get_associd(stcb); 3493 if (notif_len > sizeof(struct sctp_remote_error)) { 3494 memcpy(sre->sre_data, chunk, chunk_len); 3495 sre->sre_length += chunk_len; 3496 } 3497 SCTP_BUF_LEN(m_notify) = sre->sre_length; 3498 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3499 0, 0, stcb->asoc.context, 0, 0, 0, 3500 m_notify); 3501 if (control != NULL) { 3502 control->length = SCTP_BUF_LEN(m_notify); 3503 /* not that we need this */ 3504 control->tail_mbuf = m_notify; 3505 control->spec_flags = M_NOTIFICATION; 3506 sctp_add_to_readq(stcb->sctp_ep, stcb, 3507 control, 3508 &stcb->sctp_socket->so_rcv, 1, 3509 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3510 } else { 3511 sctp_m_freem(m_notify); 3512 } 3513 } 3514 3515 3516 void 3517 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 3518 uint32_t error, void *data, int so_locked 3519 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3520 SCTP_UNUSED 3521 #endif 3522 ) 3523 { 3524 if ((stcb == NULL) || 3525 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3526 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3527 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3528 /* If the socket is gone we are out of here */ 3529 return; 3530 } 3531 
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* receive side already shut down; nobody to notify */
		return;
	}
	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			/* only report COMM_UP once per association */
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		/* datagram had been sent at least once (sent == 1) */
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		/* datagram was never put on the wire (sent == 0) */
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* aborted locally; CANT_STR_ASSOC when still in a front state */
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		/* aborted by the peer (last argument 1 = from peer) */
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
break; 3641 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 3642 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), 3643 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 3644 break; 3645 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 3646 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), 3647 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 3648 break; 3649 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 3650 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), 3651 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 3652 break; 3653 case SCTP_NOTIFY_ASCONF_ADD_IP: 3654 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 3655 error); 3656 break; 3657 case SCTP_NOTIFY_ASCONF_DELETE_IP: 3658 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 3659 error); 3660 break; 3661 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 3662 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 3663 error); 3664 break; 3665 case SCTP_NOTIFY_PEER_SHUTDOWN: 3666 sctp_notify_shutdown_event(stcb); 3667 break; 3668 case SCTP_NOTIFY_AUTH_NEW_KEY: 3669 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 3670 (uint16_t) (uintptr_t) data, 3671 so_locked); 3672 break; 3673 case SCTP_NOTIFY_AUTH_FREE_KEY: 3674 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 3675 (uint16_t) (uintptr_t) data, 3676 so_locked); 3677 break; 3678 case SCTP_NOTIFY_NO_PEER_AUTH: 3679 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 3680 (uint16_t) (uintptr_t) data, 3681 so_locked); 3682 break; 3683 case SCTP_NOTIFY_SENDER_DRY: 3684 sctp_notify_sender_dry_event(stcb, so_locked); 3685 break; 3686 case SCTP_NOTIFY_REMOTE_ERROR: 3687 sctp_notify_remote_error(stcb, error, data); 3688 break; 3689 default: 3690 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 3691 __FUNCTION__, notification, notification); 3692 break; 3693 } /* end switch */ 3694 } 3695 3696 void 3697 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, 
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_MARKED) {
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notify path may have consumed the data */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notify path may have consumed the data */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}

/*
 * Notify the ULP that the association was aborted: fail all outbound data
 * and raise the appropriate (local/remote) abort notification.
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		/* remember the abort for one-to-one style sockets */
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 1, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}

/*
 * Abort an association: notify the ULP (if a TCB exists), send an ABORT to
 * the peer, and free the TCB.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    use_mflowid, mflowid,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock ordering: hold a refcount, drop the TCB lock, take
		 * the socket lock, then re-take the TCB lock.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}

#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the in/out TSN logs of an association.  Entire body
 * is compiled out unless NOSIY_PRINTS is defined.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* log wrapped: older entries live from tsn_in_at to the end */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif

/*
 * Abort an existing association from our side: notify the ULP, send an
 * ABORT chunk to the peer, and free the TCB.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/* take the socket lock (unless caller holds it) before freeing */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}

/*
 * Handle an out-of-the-blue packet (no matching association): walk its
 * chunks and either stay silent, send SHUTDOWN-COMPLETE, or send an ABORT,
 * subject to the sctp_blackhole sysctl.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			contains_init_chunk = 1;
			break;
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    use_mflowid, mflowid,
			    vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/* blackhole sysctl: 1 suppresses ABORTs for INIT, 2 for everything */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m,
		    iphlen, src, dst, sh, 0, NULL,
		    use_mflowid, mflowid,
		    vrf_id, port);
	}
}

/*
 * check the inbound datagram to make sure there is not an abort inside it,
 * if there is return 1, else return 0.
 */
int
sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
{
	struct sctp_chunkhdr *ch;
	struct sctp_init_chunk *init_chk, chunk_buf;
	int offset;
	unsigned int chk_length;

	offset = iphlen + sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
	    (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* packet is probably corrupt */
			break;
		}
		/* we seem to be ok, is it an abort? */
		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
			/* yep, tell them */
			return (1);
		}
		if (ch->chunk_type == SCTP_INITIATION) {
			/* need to update the Vtag */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
			if (init_chk != NULL) {
				*vtagfill = ntohl(init_chk->init.initiate_tag);
			}
		}
		/* Nope, move to the next chunk */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	return (0);
}

/*
 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
 * set (i.e.
 * it's 0) so, create this function to compare link local scopes
 */
#ifdef INET6
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	struct sockaddr_in6 a, b;

	/* save copies */
	a = *addr1;
	b = *addr2;

	/* recover an embedded scope id before comparing, if it is zero */
	if (a.sin6_scope_id == 0)
		if (sa6_recoverscope(&a)) {
			/* can't get scope, so can't match */
			return (0);
		}
	if (b.sin6_scope_id == 0)
		if (sa6_recoverscope(&b)) {
			/* can't get scope, so can't match */
			return (0);
		}
	if (a.sin6_scope_id != b.sin6_scope_id)
		return (0);

	return (1);
}

/*
 * returns a sockaddr_in6 with embedded scope recovered and removed
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}

#endif

/*
 * are the two addresses the same?
 * currently a "scopeless" check returns: 1
 * if same, 0 if not
 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{

	/* must be valid */
	if (sa1 == NULL || sa2 == NULL)
		return (0);

	/* must be the same family */
	if (sa1->sa_family != sa2->sa_family)
		return (0);

	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			/* IPv6 addresses */
			struct sockaddr_in6 *sin6_1, *sin6_2;

			sin6_1 = (struct sockaddr_in6 *)sa1;
			sin6_2 = (struct sockaddr_in6 *)sa2;
			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
			    sin6_2));
		}
#endif
#ifdef INET
	case AF_INET:
		{
			/* IPv4 addresses */
			struct sockaddr_in *sin_1, *sin_2;

			sin_1 = (struct sockaddr_in *)sa1;
			sin_2 = (struct sockaddr_in *)sa2;
			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
		}
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}

/*
 * Debug helper: print a sockaddr (IPv4 or IPv6) in human-readable form.
 */
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];

#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
			break;
		}
#endif
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;
			unsigned char *p;

			sin = (struct sockaddr_in *)sa;
			p = (unsigned char *)&sin->sin_addr;
			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
			break;
		}
#endif
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}

/*
 * Move all queued-to-read control structures belonging to 'stcb' from the
 * old inp's socket receive buffer to the new inp's (peeloff/accept path).
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb
    *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* serialize against concurrent readers of the old socket buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/*
			 * Un-account every mbuf in the chain from the old
			 * socket's receive buffer counters.
			 */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ?
					    NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* account the same chain to the new socket's receive buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}

/*
 * Queue a fully-built control (with its mbuf chain) onto the endpoint's
 * read queue and account the data to sb so that select()/poll() wake up.
 * 'end' marks the message complete; 'inp_read_lock_held' tells us whether
 * the caller already holds the INP read lock.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!!
		 */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* socket can no longer be read from: discard the data */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* walk the chain: drop zero-length mbufs, account the rest to sb */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out??
		 */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* wake up any reader sleeping on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Hold a refcnt across the TCB unlock so the
				 * assoc can't be freed while we take the
				 * socket lock (lock-order: socket before TCB).
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}


/*
 * Append mbuf chain m to an existing control on the read queue (partial
 * delivery / reassembly).  Returns 0 on success, -1 if there is nothing
 * valid to append to.  sb may be NULL when no socket-buffer accounting
 * is wanted.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* walk the new chain: drop zero-length mbufs, account the rest */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left?
			 */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake up any reader sleeping on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Hold a refcnt across the TCB unlock so the assoc
			 * can't be freed while we take the socket lock.
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}



/*************HOLD THIS COMMENT FOR PATCH FILE OF
 *************ALTERNATE ROUTING CODE
 */

/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
 *************ALTERNATE ROUTING CODE
 */

/*
 * Build and return an mbuf holding an "invalid mandatory parameter"
 * error cause header of type 'err'; NULL if no mbuf was available.
 */
struct mbuf *
sctp_generate_invmanparam(int err)
{
	/* Return a MBUF with a invalid mandatory parameter */
	struct mbuf *m;

	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
	if (m) {
		struct sctp_paramhdr *ph;

		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
		ph = mtod(m, struct sctp_paramhdr *);
		ph->param_length = htons(sizeof(struct sctp_paramhdr));
		ph->param_type = htons(err);
	}
	return (m);
}

#ifdef SCTP_MBCNT_LOGGING
/*
 * Release chk_cnt chunks' worth of accounting for tp1 against the
 * association's output-queue counters and (for 1-to-1 style sockets)
 * the socket send buffer, with MBCNT logging.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if
	    (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* never let the queue-size accounting go negative */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}

#endif

/*
 * Abandon the (possibly fragmented) PR-SCTP message that tp1 belongs to:
 * mark every fragment on the sent queue as FORWARD_TSN_SKIP, notify the
 * ULP, free the data, and if the message's last fragment has not been
 * queued yet, also discard the remainder from the send queue and stream
 * out queue (inserting a synthetic LAST_FRAG chunk when needed).
 * Returns the total book_size released.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint16_t stream = 0, seq = 0;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	stream = tp1->rec.data.stream_number;
	seq = tp1->rec.data.stream_seq;
	/* Pass 1: walk forward over the sent queue from tp1. */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we ae done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.stream_number != stream) ||
			    (tp1->rec.data.stream_seq != seq)) {
				break;
			}
			/*
			 * save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one we
			 * don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/*
			 * on to the sent queue so we can wait for it to be
			 * passed by.
			 */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there is stuff left on the
		 * stream out queue.. yuck.
		 */
		SCTP_TCB_SEND_LOCK(stcb);
		strq = &stcb->asoc.strmout[stream];
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp != NULL) {
			sp->discard_rest = 1;
			/*
			 * We may need to put a chunk on the queue that
			 * holds the TSN that would have been sent with the
			 * LAST bit.
			 */
			if (chk == NULL) {
				/* Yep, we have to */
				sctp_alloc_a_chunk(stcb, chk);
				if (chk == NULL) {
					/*
					 * we are hosed. All we can do is
					 * nothing.. which will cause an
					 * abort if the peer is paying
					 * attention.
					 */
					goto oh_well;
				}
				memset(chk, 0, sizeof(*chk));
				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
				chk->sent = SCTP_FORWARD_TSN_SKIP;
				chk->asoc = &stcb->asoc;
				chk->rec.data.stream_seq = strq->next_sequence_send;
				chk->rec.data.stream_number = sp->stream;
				chk->rec.data.payloadtype = sp->ppid;
				chk->rec.data.context = sp->context;
				chk->flags = sp->act_flags;
				if (sp->net)
					chk->whoTo = sp->net;
				else
					chk->whoTo = stcb->asoc.primary_destination;
				atomic_add_int(&chk->whoTo->ref_count, 1);
				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
				/*
				 * NOTE(review): pr_sctp_cnt is incremented
				 * twice on this path (here and three lines
				 * below) -- verify this double count is
				 * intentional.
				 */
				stcb->asoc.pr_sctp_cnt++;
				chk->pr_sctp_on = 1;
				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
				stcb->asoc.sent_queue_cnt++;
				stcb->asoc.pr_sctp_cnt++;
			} else {
				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
			}
			strq->next_sequence_send++;
	oh_well:
			if (sp->data) {
				/*
				 * Pull any data to free up the SB and allow
				 * sender to "add more" while we will throw
				 * away :-)
				 */
				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
				ret_sz += sp->length;
				do_wakeup_routine = 1;
				sp->some_taken = 1;
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
				sp->length = 0;
			}
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* hold a refcnt across the TCB unlock / socket lock */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}

/*
 * checks to see if the given address, sa, is one that is currently known by
 * the kernel note: can't distinguish the same address on multiple interfaces
 * and doesn't handle multiple addresses with different zone/scope id's note:
 * ifa_ifwithaddr() compares the entire sockaddr struct
 */
struct sctp_ifa *
sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
    int holds_lock)
{
	struct sctp_laddr *laddr;

	if (holds_lock == 0) {
		SCTP_INP_RLOCK(inp);
	}
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == NULL)
			continue;
		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    laddr->ifa->address.sin.sin_addr.s_addr) {
				/* found him.
*/ 4911 if (holds_lock == 0) { 4912 SCTP_INP_RUNLOCK(inp); 4913 } 4914 return (laddr->ifa); 4915 break; 4916 } 4917 } 4918 #endif 4919 #ifdef INET6 4920 if (addr->sa_family == AF_INET6) { 4921 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, 4922 &laddr->ifa->address.sin6)) { 4923 /* found him. */ 4924 if (holds_lock == 0) { 4925 SCTP_INP_RUNLOCK(inp); 4926 } 4927 return (laddr->ifa); 4928 break; 4929 } 4930 } 4931 #endif 4932 } 4933 if (holds_lock == 0) { 4934 SCTP_INP_RUNLOCK(inp); 4935 } 4936 return (NULL); 4937 } 4938 4939 uint32_t 4940 sctp_get_ifa_hash_val(struct sockaddr *addr) 4941 { 4942 switch (addr->sa_family) { 4943 #ifdef INET 4944 case AF_INET: 4945 { 4946 struct sockaddr_in *sin; 4947 4948 sin = (struct sockaddr_in *)addr; 4949 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 4950 } 4951 #endif 4952 #ifdef INET6 4953 case INET6: 4954 { 4955 struct sockaddr_in6 *sin6; 4956 uint32_t hash_of_addr; 4957 4958 sin6 = (struct sockaddr_in6 *)addr; 4959 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 4960 sin6->sin6_addr.s6_addr32[1] + 4961 sin6->sin6_addr.s6_addr32[2] + 4962 sin6->sin6_addr.s6_addr32[3]); 4963 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 4964 return (hash_of_addr); 4965 } 4966 #endif 4967 default: 4968 break; 4969 } 4970 return (0); 4971 } 4972 4973 struct sctp_ifa * 4974 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 4975 { 4976 struct sctp_ifa *sctp_ifap; 4977 struct sctp_vrf *vrf; 4978 struct sctp_ifalist *hash_head; 4979 uint32_t hash_of_addr; 4980 4981 if (holds_lock == 0) 4982 SCTP_IPI_ADDR_RLOCK(); 4983 4984 vrf = sctp_find_vrf(vrf_id); 4985 if (vrf == NULL) { 4986 stage_right: 4987 if (holds_lock == 0) 4988 SCTP_IPI_ADDR_RUNLOCK(); 4989 return (NULL); 4990 } 4991 hash_of_addr = sctp_get_ifa_hash_val(addr); 4992 4993 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 4994 if (hash_head == NULL) { 4995 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ", 4996 
		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
		sctp_print_address(addr);
		SCTP_PRINTF("No such bucket for address\n");
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();

		return (NULL);
	}
	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
		if (sctp_ifap == NULL) {
#ifdef INVARIANTS
			panic("Huh LIST_FOREACH corrupt");
			goto stage_right;
#else
			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
			goto stage_right;
#endif
		}
		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    sctp_ifap->address.sin.sin_addr.s_addr) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &sctp_ifap->address.sin6)) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				break;
			}
		}
#endif
	}
	if (holds_lock == 0)
		SCTP_IPI_ADDR_RUNLOCK();
	return (NULL);
}

/*
 * Called after the user has pulled *freed_so_far bytes off the socket:
 * decide whether the receive window has opened enough (>= rwnd_req) to
 * be worth sending a window-update SACK right away, and if so send it.
 * Otherwise just remember the pending amount in freed_by_sorcv_sincelast.
 * hold_rlock says whether the caller holds the INP read lock (it is
 * dropped around the SACK send and reacquired before returning).
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update?
	 */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* pin the assoc so it cannot be freed under us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window opened enough: send an immediate window update */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* reacquire the read lock if we dropped it for the caller */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}

int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5143 * On the way out we may send out any combination of: 5144 * MSG_NOTIFICATION MSG_EOR 5145 * 5146 */ 5147 struct sctp_inpcb *inp = NULL; 5148 int my_len = 0; 5149 int cp_len = 0, error = 0; 5150 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5151 struct mbuf *m = NULL; 5152 struct sctp_tcb *stcb = NULL; 5153 int wakeup_read_socket = 0; 5154 int freecnt_applied = 0; 5155 int out_flags = 0, in_flags = 0; 5156 int block_allowed = 1; 5157 uint32_t freed_so_far = 0; 5158 uint32_t copied_so_far = 0; 5159 int in_eeor_mode = 0; 5160 int no_rcv_needed = 0; 5161 uint32_t rwnd_req = 0; 5162 int hold_sblock = 0; 5163 int hold_rlock = 0; 5164 int slen = 0; 5165 uint32_t held_length = 0; 5166 int sockbuf_lock = 0; 5167 5168 if (uio == NULL) { 5169 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5170 return (EINVAL); 5171 } 5172 if (msg_flags) { 5173 in_flags = *msg_flags; 5174 if (in_flags & MSG_PEEK) 5175 SCTP_STAT_INCR(sctps_read_peeks); 5176 } else { 5177 in_flags = 0; 5178 } 5179 slen = uio->uio_resid; 5180 5181 /* Pull in and set up our int flags */ 5182 if (in_flags & MSG_OOB) { 5183 /* Out of band's NOT supported */ 5184 return (EOPNOTSUPP); 5185 } 5186 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5187 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5188 return (EINVAL); 5189 } 5190 if ((in_flags & (MSG_DONTWAIT 5191 | MSG_NBIO 5192 )) || 5193 SCTP_SO_IS_NBIO(so)) { 5194 block_allowed = 0; 5195 } 5196 /* setup the endpoint */ 5197 inp = (struct sctp_inpcb *)so->so_pcb; 5198 if (inp == NULL) { 5199 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5200 return (EFAULT); 5201 } 5202 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5203 /* Must be at least a MTU's worth */ 5204 if (rwnd_req < SCTP_MIN_RWND) 5205 rwnd_req = SCTP_MIN_RWND; 5206 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5207 if (SCTP_BASE_SYSCTL(sctp_logging_level) & 
SCTP_RECV_RWND_LOGGING_ENABLE) { 5208 sctp_misc_ints(SCTP_SORECV_ENTER, 5209 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid); 5210 } 5211 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5212 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5213 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid); 5214 } 5215 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0)); 5216 if (error) { 5217 goto release_unlocked; 5218 } 5219 sockbuf_lock = 1; 5220 restart: 5221 5222 5223 restart_nosblocks: 5224 if (hold_sblock == 0) { 5225 SOCKBUF_LOCK(&so->so_rcv); 5226 hold_sblock = 1; 5227 } 5228 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5229 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5230 goto out; 5231 } 5232 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5233 if (so->so_error) { 5234 error = so->so_error; 5235 if ((in_flags & MSG_PEEK) == 0) 5236 so->so_error = 0; 5237 goto out; 5238 } else { 5239 if (so->so_rcv.sb_cc == 0) { 5240 /* indicate EOF */ 5241 error = 0; 5242 goto out; 5243 } 5244 } 5245 } 5246 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) { 5247 /* we need to wait for data */ 5248 if ((so->so_rcv.sb_cc == 0) && 5249 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5250 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5251 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5252 /* 5253 * For active open side clear flags for 5254 * re-use passive open is blocked by 5255 * connect. 
5256 */ 5257 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5258 /* 5259 * You were aborted, passive side 5260 * always hits here 5261 */ 5262 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5263 error = ECONNRESET; 5264 } 5265 so->so_state &= ~(SS_ISCONNECTING | 5266 SS_ISDISCONNECTING | 5267 SS_ISCONFIRMING | 5268 SS_ISCONNECTED); 5269 if (error == 0) { 5270 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5271 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5272 error = ENOTCONN; 5273 } 5274 } 5275 goto out; 5276 } 5277 } 5278 error = sbwait(&so->so_rcv); 5279 if (error) { 5280 goto out; 5281 } 5282 held_length = 0; 5283 goto restart_nosblocks; 5284 } else if (so->so_rcv.sb_cc == 0) { 5285 if (so->so_error) { 5286 error = so->so_error; 5287 if ((in_flags & MSG_PEEK) == 0) 5288 so->so_error = 0; 5289 } else { 5290 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5291 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 5292 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5293 /* 5294 * For active open side clear flags 5295 * for re-use passive open is 5296 * blocked by connect. 
5297 */ 5298 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5299 /* 5300 * You were aborted, passive 5301 * side always hits here 5302 */ 5303 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5304 error = ECONNRESET; 5305 } 5306 so->so_state &= ~(SS_ISCONNECTING | 5307 SS_ISDISCONNECTING | 5308 SS_ISCONFIRMING | 5309 SS_ISCONNECTED); 5310 if (error == 0) { 5311 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5312 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5313 error = ENOTCONN; 5314 } 5315 } 5316 goto out; 5317 } 5318 } 5319 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5320 error = EWOULDBLOCK; 5321 } 5322 goto out; 5323 } 5324 if (hold_sblock == 1) { 5325 SOCKBUF_UNLOCK(&so->so_rcv); 5326 hold_sblock = 0; 5327 } 5328 /* we possibly have data we can read */ 5329 /* sa_ignore FREED_MEMORY */ 5330 control = TAILQ_FIRST(&inp->read_queue); 5331 if (control == NULL) { 5332 /* 5333 * This could be happening since the appender did the 5334 * increment but as not yet did the tailq insert onto the 5335 * read_queue 5336 */ 5337 if (hold_rlock == 0) { 5338 SCTP_INP_READ_LOCK(inp); 5339 } 5340 control = TAILQ_FIRST(&inp->read_queue); 5341 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5342 #ifdef INVARIANTS 5343 panic("Huh, its non zero and nothing on control?"); 5344 #endif 5345 so->so_rcv.sb_cc = 0; 5346 } 5347 SCTP_INP_READ_UNLOCK(inp); 5348 hold_rlock = 0; 5349 goto restart; 5350 } 5351 if ((control->length == 0) && 5352 (control->do_not_ref_stcb)) { 5353 /* 5354 * Clean up code for freeing assoc that left behind a 5355 * pdapi.. maybe a peer in EEOR that just closed after 5356 * sending and never indicated a EOR. 5357 */ 5358 if (hold_rlock == 0) { 5359 hold_rlock = 1; 5360 SCTP_INP_READ_LOCK(inp); 5361 } 5362 control->held_length = 0; 5363 if (control->data) { 5364 /* Hmm there is data here .. 
fix */ 5365 struct mbuf *m_tmp; 5366 int cnt = 0; 5367 5368 m_tmp = control->data; 5369 while (m_tmp) { 5370 cnt += SCTP_BUF_LEN(m_tmp); 5371 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5372 control->tail_mbuf = m_tmp; 5373 control->end_added = 1; 5374 } 5375 m_tmp = SCTP_BUF_NEXT(m_tmp); 5376 } 5377 control->length = cnt; 5378 } else { 5379 /* remove it */ 5380 TAILQ_REMOVE(&inp->read_queue, control, next); 5381 /* Add back any hiddend data */ 5382 sctp_free_remote_addr(control->whoFrom); 5383 sctp_free_a_readq(stcb, control); 5384 } 5385 if (hold_rlock) { 5386 hold_rlock = 0; 5387 SCTP_INP_READ_UNLOCK(inp); 5388 } 5389 goto restart; 5390 } 5391 if ((control->length == 0) && 5392 (control->end_added == 1)) { 5393 /* 5394 * Do we also need to check for (control->pdapi_aborted == 5395 * 1)? 5396 */ 5397 if (hold_rlock == 0) { 5398 hold_rlock = 1; 5399 SCTP_INP_READ_LOCK(inp); 5400 } 5401 TAILQ_REMOVE(&inp->read_queue, control, next); 5402 if (control->data) { 5403 #ifdef INVARIANTS 5404 panic("control->data not null but control->length == 0"); 5405 #else 5406 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5407 sctp_m_freem(control->data); 5408 control->data = NULL; 5409 #endif 5410 } 5411 if (control->aux_data) { 5412 sctp_m_free(control->aux_data); 5413 control->aux_data = NULL; 5414 } 5415 sctp_free_remote_addr(control->whoFrom); 5416 sctp_free_a_readq(stcb, control); 5417 if (hold_rlock) { 5418 hold_rlock = 0; 5419 SCTP_INP_READ_UNLOCK(inp); 5420 } 5421 goto restart; 5422 } 5423 if (control->length == 0) { 5424 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5425 (filling_sinfo)) { 5426 /* find a more suitable one then this */ 5427 ctl = TAILQ_NEXT(control, next); 5428 while (ctl) { 5429 if ((ctl->stcb != control->stcb) && (ctl->length) && 5430 (ctl->some_taken || 5431 (ctl->spec_flags & M_NOTIFICATION) || 5432 ((ctl->do_not_ref_stcb == 0) && 5433 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5434 ) { 5435 /*- 5436 * If we have a different TCB next, and there is data 5437 * present. If we have already taken some (pdapi), OR we can 5438 * ref the tcb and no delivery as started on this stream, we 5439 * take it. Note we allow a notification on a different 5440 * assoc to be delivered.. 5441 */ 5442 control = ctl; 5443 goto found_one; 5444 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5445 (ctl->length) && 5446 ((ctl->some_taken) || 5447 ((ctl->do_not_ref_stcb == 0) && 5448 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5449 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5450 /*- 5451 * If we have the same tcb, and there is data present, and we 5452 * have the strm interleave feature present. Then if we have 5453 * taken some (pdapi) or we can refer to tht tcb AND we have 5454 * not started a delivery for this stream, we can take it. 5455 * Note we do NOT allow a notificaiton on the same assoc to 5456 * be delivered. 
5457 */ 5458 control = ctl; 5459 goto found_one; 5460 } 5461 ctl = TAILQ_NEXT(ctl, next); 5462 } 5463 } 5464 /* 5465 * if we reach here, not suitable replacement is available 5466 * <or> fragment interleave is NOT on. So stuff the sb_cc 5467 * into the our held count, and its time to sleep again. 5468 */ 5469 held_length = so->so_rcv.sb_cc; 5470 control->held_length = so->so_rcv.sb_cc; 5471 goto restart; 5472 } 5473 /* Clear the held length since there is something to read */ 5474 control->held_length = 0; 5475 if (hold_rlock) { 5476 SCTP_INP_READ_UNLOCK(inp); 5477 hold_rlock = 0; 5478 } 5479 found_one: 5480 /* 5481 * If we reach here, control has a some data for us to read off. 5482 * Note that stcb COULD be NULL. 5483 */ 5484 control->some_taken++; 5485 if (hold_sblock) { 5486 SOCKBUF_UNLOCK(&so->so_rcv); 5487 hold_sblock = 0; 5488 } 5489 stcb = control->stcb; 5490 if (stcb) { 5491 if ((control->do_not_ref_stcb == 0) && 5492 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5493 if (freecnt_applied == 0) 5494 stcb = NULL; 5495 } else if (control->do_not_ref_stcb == 0) { 5496 /* you can't free it on me please */ 5497 /* 5498 * The lock on the socket buffer protects us so the 5499 * free code will stop. But since we used the 5500 * socketbuf lock and the sender uses the tcb_lock 5501 * to increment, we need to use the atomic add to 5502 * the refcnt 5503 */ 5504 if (freecnt_applied) { 5505 #ifdef INVARIANTS 5506 panic("refcnt already incremented"); 5507 #else 5508 SCTP_PRINTF("refcnt already incremented?\n"); 5509 #endif 5510 } else { 5511 atomic_add_int(&stcb->asoc.refcnt, 1); 5512 freecnt_applied = 1; 5513 } 5514 /* 5515 * Setup to remember how much we have not yet told 5516 * the peer our rwnd has opened up. Note we grab the 5517 * value from the tcb from last time. Note too that 5518 * sack sending clears this when a sack is sent, 5519 * which is fine. 
Once we hit the rwnd_req, we then 5520 * will go to the sctp_user_rcvd() that will not 5521 * lock until it KNOWs it MUST send a WUP-SACK. 5522 */ 5523 freed_so_far = stcb->freed_by_sorcv_sincelast; 5524 stcb->freed_by_sorcv_sincelast = 0; 5525 } 5526 } 5527 if (stcb && 5528 ((control->spec_flags & M_NOTIFICATION) == 0) && 5529 control->do_not_ref_stcb == 0) { 5530 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5531 } 5532 /* First lets get off the sinfo and sockaddr info */ 5533 if ((sinfo) && filling_sinfo) { 5534 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo)); 5535 nxt = TAILQ_NEXT(control, next); 5536 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5537 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5538 struct sctp_extrcvinfo *s_extra; 5539 5540 s_extra = (struct sctp_extrcvinfo *)sinfo; 5541 if ((nxt) && 5542 (nxt->length)) { 5543 s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5544 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5545 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5546 } 5547 if (nxt->spec_flags & M_NOTIFICATION) { 5548 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5549 } 5550 s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id; 5551 s_extra->sreinfo_next_length = nxt->length; 5552 s_extra->sreinfo_next_ppid = nxt->sinfo_ppid; 5553 s_extra->sreinfo_next_stream = nxt->sinfo_stream; 5554 if (nxt->tail_mbuf != NULL) { 5555 if (nxt->end_added) { 5556 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5557 } 5558 } 5559 } else { 5560 /* 5561 * we explicitly 0 this, since the memcpy 5562 * got some other things beyond the older 5563 * sinfo_ that is on the control's structure 5564 * :-D 5565 */ 5566 nxt = NULL; 5567 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG; 5568 s_extra->sreinfo_next_aid = 0; 5569 s_extra->sreinfo_next_length = 0; 5570 s_extra->sreinfo_next_ppid = 0; 5571 s_extra->sreinfo_next_stream = 0; 5572 } 5573 } 5574 /* 5575 * update off the real current 
cum-ack, if we have an stcb. 5576 */ 5577 if ((control->do_not_ref_stcb == 0) && stcb) 5578 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5579 /* 5580 * mask off the high bits, we keep the actual chunk bits in 5581 * there. 5582 */ 5583 sinfo->sinfo_flags &= 0x00ff; 5584 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5585 sinfo->sinfo_flags |= SCTP_UNORDERED; 5586 } 5587 } 5588 #ifdef SCTP_ASOCLOG_OF_TSNS 5589 { 5590 int index, newindex; 5591 struct sctp_pcbtsn_rlog *entry; 5592 5593 do { 5594 index = inp->readlog_index; 5595 newindex = index + 1; 5596 if (newindex >= SCTP_READ_LOG_SIZE) { 5597 newindex = 0; 5598 } 5599 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5600 entry = &inp->readlog[index]; 5601 entry->vtag = control->sinfo_assoc_id; 5602 entry->strm = control->sinfo_stream; 5603 entry->seq = control->sinfo_ssn; 5604 entry->sz = control->length; 5605 entry->flgs = control->sinfo_flags; 5606 } 5607 #endif 5608 if (fromlen && from) { 5609 cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len); 5610 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5611 #ifdef INET6 5612 case AF_INET6: 5613 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from; 5614 break; 5615 #endif 5616 #ifdef INET 5617 case AF_INET: 5618 ((struct sockaddr_in *)from)->sin_port = control->port_from; 5619 break; 5620 #endif 5621 default: 5622 break; 5623 } 5624 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 5625 5626 #if defined(INET) && defined(INET6) 5627 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) && 5628 (from->sa_family == AF_INET) && 5629 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) { 5630 struct sockaddr_in *sin; 5631 struct sockaddr_in6 sin6; 5632 5633 sin = (struct sockaddr_in *)from; 5634 bzero(&sin6, sizeof(sin6)); 5635 sin6.sin6_family = AF_INET6; 5636 sin6.sin6_len = sizeof(struct sockaddr_in6); 5637 sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); 5638 bcopy(&sin->sin_addr, 5639 
&sin6.sin6_addr.s6_addr32[3], 5640 sizeof(sin6.sin6_addr.s6_addr32[3])); 5641 sin6.sin6_port = sin->sin_port; 5642 memcpy(from, &sin6, sizeof(struct sockaddr_in6)); 5643 } 5644 #endif 5645 #ifdef INET6 5646 { 5647 struct sockaddr_in6 lsa6, *from6; 5648 5649 from6 = (struct sockaddr_in6 *)from; 5650 sctp_recover_scope_mac(from6, (&lsa6)); 5651 } 5652 #endif 5653 } 5654 /* now copy out what data we can */ 5655 if (mp == NULL) { 5656 /* copy out each mbuf in the chain up to length */ 5657 get_more_data: 5658 m = control->data; 5659 while (m) { 5660 /* Move out all we can */ 5661 cp_len = (int)uio->uio_resid; 5662 my_len = (int)SCTP_BUF_LEN(m); 5663 if (cp_len > my_len) { 5664 /* not enough in this buf */ 5665 cp_len = my_len; 5666 } 5667 if (hold_rlock) { 5668 SCTP_INP_READ_UNLOCK(inp); 5669 hold_rlock = 0; 5670 } 5671 if (cp_len > 0) 5672 error = uiomove(mtod(m, char *), cp_len, uio); 5673 /* re-read */ 5674 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5675 goto release; 5676 } 5677 if ((control->do_not_ref_stcb == 0) && stcb && 5678 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5679 no_rcv_needed = 1; 5680 } 5681 if (error) { 5682 /* error we are out of here */ 5683 goto release; 5684 } 5685 if ((SCTP_BUF_NEXT(m) == NULL) && 5686 (cp_len >= SCTP_BUF_LEN(m)) && 5687 ((control->end_added == 0) || 5688 (control->end_added && 5689 (TAILQ_NEXT(control, next) == NULL))) 5690 ) { 5691 SCTP_INP_READ_LOCK(inp); 5692 hold_rlock = 1; 5693 } 5694 if (cp_len == SCTP_BUF_LEN(m)) { 5695 if ((SCTP_BUF_NEXT(m) == NULL) && 5696 (control->end_added)) { 5697 out_flags |= MSG_EOR; 5698 if ((control->do_not_ref_stcb == 0) && 5699 (control->stcb != NULL) && 5700 ((control->spec_flags & M_NOTIFICATION) == 0)) 5701 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5702 } 5703 if (control->spec_flags & M_NOTIFICATION) { 5704 out_flags |= MSG_NOTIFICATION; 5705 } 5706 /* we ate up the mbuf */ 5707 if (in_flags & MSG_PEEK) { 5708 /* just looking */ 5709 m = 
SCTP_BUF_NEXT(m); 5710 copied_so_far += cp_len; 5711 } else { 5712 /* dispose of the mbuf */ 5713 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5714 sctp_sblog(&so->so_rcv, 5715 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5716 } 5717 sctp_sbfree(control, stcb, &so->so_rcv, m); 5718 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5719 sctp_sblog(&so->so_rcv, 5720 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5721 } 5722 copied_so_far += cp_len; 5723 freed_so_far += cp_len; 5724 freed_so_far += MSIZE; 5725 atomic_subtract_int(&control->length, cp_len); 5726 control->data = sctp_m_free(m); 5727 m = control->data; 5728 /* 5729 * been through it all, must hold sb 5730 * lock ok to null tail 5731 */ 5732 if (control->data == NULL) { 5733 #ifdef INVARIANTS 5734 if ((control->end_added == 0) || 5735 (TAILQ_NEXT(control, next) == NULL)) { 5736 /* 5737 * If the end is not 5738 * added, OR the 5739 * next is NOT null 5740 * we MUST have the 5741 * lock. 5742 */ 5743 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 5744 panic("Hmm we don't own the lock?"); 5745 } 5746 } 5747 #endif 5748 control->tail_mbuf = NULL; 5749 #ifdef INVARIANTS 5750 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 5751 panic("end_added, nothing left and no MSG_EOR"); 5752 } 5753 #endif 5754 } 5755 } 5756 } else { 5757 /* Do we need to trim the mbuf? */ 5758 if (control->spec_flags & M_NOTIFICATION) { 5759 out_flags |= MSG_NOTIFICATION; 5760 } 5761 if ((in_flags & MSG_PEEK) == 0) { 5762 SCTP_BUF_RESV_UF(m, cp_len); 5763 SCTP_BUF_LEN(m) -= cp_len; 5764 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5765 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5766 } 5767 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5768 if ((control->do_not_ref_stcb == 0) && 5769 stcb) { 5770 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5771 } 5772 copied_so_far += cp_len; 5773 freed_so_far += cp_len; 5774 freed_so_far += MSIZE; 5775 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5776 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5777 SCTP_LOG_SBRESULT, 0); 5778 } 5779 atomic_subtract_int(&control->length, cp_len); 5780 } else { 5781 copied_so_far += cp_len; 5782 } 5783 } 5784 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 5785 break; 5786 } 5787 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5788 (control->do_not_ref_stcb == 0) && 5789 (freed_so_far >= rwnd_req)) { 5790 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5791 } 5792 } /* end while(m) */ 5793 /* 5794 * At this point we have looked at it all and we either have 5795 * a MSG_EOR/or read all the user wants... <OR> 5796 * control->length == 0. 5797 */ 5798 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 5799 /* we are done with this control */ 5800 if (control->length == 0) { 5801 if (control->data) { 5802 #ifdef INVARIANTS 5803 panic("control->data not null at read eor?"); 5804 #else 5805 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n"); 5806 sctp_m_freem(control->data); 5807 control->data = NULL; 5808 #endif 5809 } 5810 done_with_control: 5811 if (TAILQ_NEXT(control, next) == NULL) { 5812 /* 5813 * If we don't have a next we need a 5814 * lock, if there is a next 5815 * interrupt is filling ahead of us 5816 * and we don't need a lock to 5817 * remove this guy (which is the 5818 * head of the queue). 
5819 */ 5820 if (hold_rlock == 0) { 5821 SCTP_INP_READ_LOCK(inp); 5822 hold_rlock = 1; 5823 } 5824 } 5825 TAILQ_REMOVE(&inp->read_queue, control, next); 5826 /* Add back any hiddend data */ 5827 if (control->held_length) { 5828 held_length = 0; 5829 control->held_length = 0; 5830 wakeup_read_socket = 1; 5831 } 5832 if (control->aux_data) { 5833 sctp_m_free(control->aux_data); 5834 control->aux_data = NULL; 5835 } 5836 no_rcv_needed = control->do_not_ref_stcb; 5837 sctp_free_remote_addr(control->whoFrom); 5838 control->data = NULL; 5839 sctp_free_a_readq(stcb, control); 5840 control = NULL; 5841 if ((freed_so_far >= rwnd_req) && 5842 (no_rcv_needed == 0)) 5843 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5844 5845 } else { 5846 /* 5847 * The user did not read all of this 5848 * message, turn off the returned MSG_EOR 5849 * since we are leaving more behind on the 5850 * control to read. 5851 */ 5852 #ifdef INVARIANTS 5853 if (control->end_added && 5854 (control->data == NULL) && 5855 (control->tail_mbuf == NULL)) { 5856 panic("Gak, control->length is corrupt?"); 5857 } 5858 #endif 5859 no_rcv_needed = control->do_not_ref_stcb; 5860 out_flags &= ~MSG_EOR; 5861 } 5862 } 5863 if (out_flags & MSG_EOR) { 5864 goto release; 5865 } 5866 if ((uio->uio_resid == 0) || 5867 ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1))) 5868 ) { 5869 goto release; 5870 } 5871 /* 5872 * If I hit here the receiver wants more and this message is 5873 * NOT done (pd-api). So two questions. Can we block? if not 5874 * we are done. Did the user NOT set MSG_WAITALL? 5875 */ 5876 if (block_allowed == 0) { 5877 goto release; 5878 } 5879 /* 5880 * We need to wait for more data a few things: - We don't 5881 * sbunlock() so we don't get someone else reading. - We 5882 * must be sure to account for the case where what is added 5883 * is NOT to our control when we wakeup. 
5884 */ 5885 5886 /* 5887 * Do we need to tell the transport a rwnd update might be 5888 * needed before we go to sleep? 5889 */ 5890 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5891 ((freed_so_far >= rwnd_req) && 5892 (control->do_not_ref_stcb == 0) && 5893 (no_rcv_needed == 0))) { 5894 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5895 } 5896 wait_some_more: 5897 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5898 goto release; 5899 } 5900 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5901 goto release; 5902 5903 if (hold_rlock == 1) { 5904 SCTP_INP_READ_UNLOCK(inp); 5905 hold_rlock = 0; 5906 } 5907 if (hold_sblock == 0) { 5908 SOCKBUF_LOCK(&so->so_rcv); 5909 hold_sblock = 1; 5910 } 5911 if ((copied_so_far) && (control->length == 0) && 5912 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 5913 goto release; 5914 } 5915 if (so->so_rcv.sb_cc <= control->held_length) { 5916 error = sbwait(&so->so_rcv); 5917 if (error) { 5918 goto release; 5919 } 5920 control->held_length = 0; 5921 } 5922 if (hold_sblock) { 5923 SOCKBUF_UNLOCK(&so->so_rcv); 5924 hold_sblock = 0; 5925 } 5926 if (control->length == 0) { 5927 /* still nothing here */ 5928 if (control->end_added == 1) { 5929 /* he aborted, or is done i.e.did a shutdown */ 5930 out_flags |= MSG_EOR; 5931 if (control->pdapi_aborted) { 5932 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5933 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5934 5935 out_flags |= MSG_TRUNC; 5936 } else { 5937 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5938 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5939 } 5940 goto done_with_control; 5941 } 5942 if (so->so_rcv.sb_cc > held_length) { 5943 control->held_length = so->so_rcv.sb_cc; 5944 held_length = 0; 5945 } 5946 goto wait_some_more; 5947 } else if (control->data == NULL) { 5948 /* 5949 * we must re-sync since data is probably being 
5950 * added 5951 */ 5952 SCTP_INP_READ_LOCK(inp); 5953 if ((control->length > 0) && (control->data == NULL)) { 5954 /* 5955 * big trouble.. we have the lock and its 5956 * corrupt? 5957 */ 5958 #ifdef INVARIANTS 5959 panic("Impossible data==NULL length !=0"); 5960 #endif 5961 out_flags |= MSG_EOR; 5962 out_flags |= MSG_TRUNC; 5963 control->length = 0; 5964 SCTP_INP_READ_UNLOCK(inp); 5965 goto done_with_control; 5966 } 5967 SCTP_INP_READ_UNLOCK(inp); 5968 /* We will fall around to get more data */ 5969 } 5970 goto get_more_data; 5971 } else { 5972 /*- 5973 * Give caller back the mbuf chain, 5974 * store in uio_resid the length 5975 */ 5976 wakeup_read_socket = 0; 5977 if ((control->end_added == 0) || 5978 (TAILQ_NEXT(control, next) == NULL)) { 5979 /* Need to get rlock */ 5980 if (hold_rlock == 0) { 5981 SCTP_INP_READ_LOCK(inp); 5982 hold_rlock = 1; 5983 } 5984 } 5985 if (control->end_added) { 5986 out_flags |= MSG_EOR; 5987 if ((control->do_not_ref_stcb == 0) && 5988 (control->stcb != NULL) && 5989 ((control->spec_flags & M_NOTIFICATION) == 0)) 5990 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5991 } 5992 if (control->spec_flags & M_NOTIFICATION) { 5993 out_flags |= MSG_NOTIFICATION; 5994 } 5995 uio->uio_resid = control->length; 5996 *mp = control->data; 5997 m = control->data; 5998 while (m) { 5999 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6000 sctp_sblog(&so->so_rcv, 6001 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6002 } 6003 sctp_sbfree(control, stcb, &so->so_rcv, m); 6004 freed_so_far += SCTP_BUF_LEN(m); 6005 freed_so_far += MSIZE; 6006 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6007 sctp_sblog(&so->so_rcv, 6008 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 6009 } 6010 m = SCTP_BUF_NEXT(m); 6011 } 6012 control->data = control->tail_mbuf = NULL; 6013 control->length = 0; 6014 if (out_flags & MSG_EOR) { 6015 /* Done with this control */ 6016 goto done_with_control; 6017 } 6018 } 6019 release: 6020 if (hold_rlock == 1) { 6021 SCTP_INP_READ_UNLOCK(inp); 6022 hold_rlock = 0; 6023 } 6024 if (hold_sblock == 1) { 6025 SOCKBUF_UNLOCK(&so->so_rcv); 6026 hold_sblock = 0; 6027 } 6028 sbunlock(&so->so_rcv); 6029 sockbuf_lock = 0; 6030 6031 release_unlocked: 6032 if (hold_sblock) { 6033 SOCKBUF_UNLOCK(&so->so_rcv); 6034 hold_sblock = 0; 6035 } 6036 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6037 if ((freed_so_far >= rwnd_req) && 6038 (control && (control->do_not_ref_stcb == 0)) && 6039 (no_rcv_needed == 0)) 6040 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6041 } 6042 out: 6043 if (msg_flags) { 6044 *msg_flags = out_flags; 6045 } 6046 if (((out_flags & MSG_EOR) == 0) && 6047 ((in_flags & MSG_PEEK) == 0) && 6048 (sinfo) && 6049 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6050 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6051 struct sctp_extrcvinfo *s_extra; 6052 6053 s_extra = (struct sctp_extrcvinfo *)sinfo; 6054 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG; 6055 } 6056 if (hold_rlock == 1) { 6057 SCTP_INP_READ_UNLOCK(inp); 6058 } 6059 if (hold_sblock) { 6060 SOCKBUF_UNLOCK(&so->so_rcv); 6061 } 6062 if (sockbuf_lock) { 6063 sbunlock(&so->so_rcv); 6064 } 6065 if (freecnt_applied) { 6066 /* 6067 * The lock on the socket buffer protects us so the free 6068 * code will stop. But since we used the socketbuf lock and 6069 * the sender uses the tcb_lock to increment, we need to use 6070 * the atomic add to the refcnt. 
		 */
		if (stcb == NULL) {
#ifdef INVARIANTS
			panic("stcb for refcnt has gone NULL?");
			goto stage_left;
#else
			goto stage_left;
#endif
		}
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			/* No stcb available: log 0 in the rwnd slot. */
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
stage_left:
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}


#ifdef SCTP_MBUF_LOGGING
/*
 * Free a single mbuf, logging the free first when mbuf logging is
 * enabled and the mbuf has external storage.  Returns whatever
 * m_free() returns (the next mbuf in the chain), so callers can walk
 * a chain with it -- see sctp_m_freem() below.
 */
struct mbuf *
sctp_m_free(struct mbuf *m)
{
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		if (SCTP_BUF_IS_EXTENDED(m)) {
			sctp_log_mb(m, SCTP_MBUF_IFREE);
		}
	}
	return (m_free(m));
}

/* Free an entire mbuf chain one mbuf at a time via sctp_m_free(). */
void
sctp_m_freem(struct mbuf *mb)
{
	while (mb != NULL)
		mb = sctp_m_free(mb);
}

#endif

/*
 * Queue a SCTP_SET_PRIM_ADDR work item for the given local address so
 * the address work-queue iterator will request peer-set-primary for
 * every association holding that address.  Returns 0 on success or an
 * errno (EADDRNOTAVAIL if the address is unknown, ENOMEM if no work
 * item could be allocated).
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa for the queued work item. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}


/*
 * soreceive entry point for SCTP sockets: wraps sctp_sorecvmsg() and
 * then, as requested by the caller, converts the returned sinfo into
 * a control-message mbuf (*controlp) and duplicates the peer address
 * (*psa).  Returns 0 or an errno.
 */
int
sctp_soreceive(struct socket *so,
    struct sockaddr **psa,
    struct uio *uio,
    struct mbuf **mp0,
    struct mbuf **controlp,
    int *flagsp)
{
	int error, fromlen;
	uint8_t sockbuf[256];	/* scratch space for the peer address */
	struct sockaddr *from;
	struct sctp_extrcvinfo sinfo;
	int filling_sinfo = 1;
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pickup the assoc we are reading from */
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
	    (controlp == NULL)) {
		/* user does not want the sndrcv ctl */
		filling_sinfo = 0;
	}
	if (psa) {
		from = (struct sockaddr *)sockbuf;
		fromlen = sizeof(sockbuf);
		/* zero sa_len so we can tell later whether it was filled */
		from->sa_len = 0;
	} else {
		from = NULL;
		fromlen = 0;
	}

	error = sctp_sorecvmsg(so, uio, mp0, from,
	    fromlen, flagsp,
	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
	if ((controlp) && (filling_sinfo)) {
		/* copy back the sinfo in a CMSG format */
		/*
		 * NOTE(review): the inner test repeats the outer
		 * filling_sinfo condition, so the else arm below is
		 * unreachable; *controlp is only ever set when sinfo
		 * was filled.
		 */
		if (filling_sinfo)
			*controlp = sctp_build_ctl_nchunk(inp,
			    (struct sctp_sndrcvinfo *)&sinfo);
		else
			*controlp = NULL;
	}
	if (psa) {
		/* copy back the address info */
		if (from && from->sa_len) {
			*psa = sodupsockaddr(from, M_NOWAIT);
		} else {
			*psa = NULL;
		}
	}
	return (error);
}




/*
 * Add the totaddr packed addresses starting at 'addr' to the assoc as
 * confirmed remote addresses.  On any invalid address or add failure
 * the assoc is freed and *error is set -- the caller must not touch
 * stcb afterwards in that case.  Returns the number of addresses
 * actually added.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast v6 addresses */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * Unknown family: skipped, and 'incr' keeps the
			 * previous entry's size (initially 0), so 'sa' is
			 * advanced by that stale amount.  NOTE(review):
			 * verify callers never pass unknown families.
			 */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}

/*
 * Walk the packed address list counting v4/v6 entries and validating
 * each sa_len.  If some address already maps to an existing assoc,
 * return that assoc (the inp reference taken for the lookup is kept);
 * otherwise return NULL.  On a malformed address *error and *bad_addr
 * are set and NULL is returned.  *totaddr is trimmed if the scan stops
 * early (unknown family or 'limit' reached).
 */
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    int *totaddr, int *num_v4, int *num_v6, int *error,
    int limit, int *bad_addr)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	size_t incr, at, i;

	at = incr = 0;
	sa = addr;

	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < (size_t)*totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			(*num_v4) += 1;
			incr = sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			break;
#endif
#ifdef INET6
		case AF_INET6:
			{
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)sa;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					/* Must be non-mapped for connectx */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				(*num_v6) +=
				    1;
				incr = sizeof(struct sockaddr_in6);
				if (sa->sa_len != incr) {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				break;
			}
#endif
		default:
			/* Unknown family: stop the scan here. */
			*totaddr = i;
			/* we are done */
			break;
		}
		if (i == (size_t)*totaddr) {
			break;
		}
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* Already have or am bring up an association */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		/*
		 * NOTE(review): 'at' is never advanced inside this loop,
		 * so this bound only ever compares one sockaddr size
		 * against 'limit' -- confirm against upstream whether an
		 * 'at += incr' is missing here.
		 */
		if ((at + incr) > (size_t)limit) {
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}

/*
 * sctp_bindx(ADD) for one address.
 * assumes all arguments are valid/checked by caller.
 * On failure *error is set to an errno; on success it is untouched.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;	/* scratch for v4-mapped conversion */

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* unwrap the v4-mapped address into plain v4 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		/*
		 * NOTE(review): addr_touse may still be AF_INET6 here;
		 * this cast relies on sin_port and sin6_port living at
		 * the same offset -- confirm.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}

/*
 * sctp_bindx(DELETE) for one address.
 * assumes all arguments are valid/checked by caller.
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;	/* scratch for v4-mapped conversion */

#endif

	/* see if we're bound all already!
*/ 6536 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 6537 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6538 *error = EINVAL; 6539 return; 6540 } 6541 addr_touse = sa; 6542 #ifdef INET6 6543 if (sa->sa_family == AF_INET6) { 6544 struct sockaddr_in6 *sin6; 6545 6546 if (sa->sa_len != sizeof(struct sockaddr_in6)) { 6547 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6548 *error = EINVAL; 6549 return; 6550 } 6551 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 6552 /* can only bind v6 on PF_INET6 sockets */ 6553 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6554 *error = EINVAL; 6555 return; 6556 } 6557 sin6 = (struct sockaddr_in6 *)addr_touse; 6558 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6559 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6560 SCTP_IPV6_V6ONLY(inp)) { 6561 /* can't bind mapped-v4 on PF_INET sockets */ 6562 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6563 *error = EINVAL; 6564 return; 6565 } 6566 in6_sin6_2_sin(&sin, sin6); 6567 addr_touse = (struct sockaddr *)&sin; 6568 } 6569 } 6570 #endif 6571 #ifdef INET 6572 if (sa->sa_family == AF_INET) { 6573 if (sa->sa_len != sizeof(struct sockaddr_in)) { 6574 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6575 *error = EINVAL; 6576 return; 6577 } 6578 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 6579 SCTP_IPV6_V6ONLY(inp)) { 6580 /* can't bind v4 on PF_INET sockets */ 6581 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6582 *error = EINVAL; 6583 return; 6584 } 6585 } 6586 #endif 6587 /* 6588 * No lock required mgmt_ep_sa does its own locking. If the FIX: 6589 * below is ever changed we may need to lock before calling 6590 * association level binding. 
 */
	if (assoc_id == 0) {
		/* delete the address from the endpoint's bound-address list */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}

/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;		/* running total of usable addresses */

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/*
	 * Which families can this endpoint use?  A PF_INET6 socket may
	 * also carry v4 addresses unless it is v6-only.
	 */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			/* skip loopback interfaces when out of scope */
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* honour private-address scope */
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link
									 * local
									 * address
									 */
									continue;
							}
						}
						/* honour site-local scope */
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case: only the addresses explicitly bound
		 * to the endpoint are candidates
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}

#if defined(SCTP_LOCAL_TRACE_BUF)

/*
 * Append one entry to the global SCTP trace ring buffer.  A CAS loop
 * atomically reserves the next slot, so concurrent callers never share
 * an index; the index wraps back to 1 once it reaches
 * SCTP_MAX_LOGGING_SIZE.  'str' is accepted for the caller's benefit
 * but not stored (SCTP_UNUSED).
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* reserve a slot: retry until the CAS on the shared index wins */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if
(saveindex >= SCTP_MAX_LOGGING_SIZE) {
		/* reserved index was at/past the end: wrapped, use slot 0 */
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}

#endif
/* XXX: Remove the #ifdef after tunneling over IPv6 works also on FreeBSD. */
#ifdef INET
/* We will need to add support
 * to bind the ports and such here
 * so we can do UDP tunneling. In
 * the mean-time, we return error
 */
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif

/*
 * Input hook for the UDP tunneling socket: strip the encapsulating UDP
 * header from the packet in 'm' (whose IP header starts at offset 0 and
 * whose UDP header starts at 'off') and feed the remaining SCTP packet,
 * together with the UDP source port, into the SCTP input path.  The
 * mbuf chain is consumed (freed) on every error path.  'ignored' is
 * not used by this function.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
{
	struct ip *iph;
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_DONTWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain: append sp after the IP header */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* shrink ip_len by the UDP header we removed */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		/* Not yet supported. */
		goto out;
		break;

#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}

/*
 * Tear down the UDP tunneling socket, if any, and clear the global
 * reference to it.
 */
void
sctp_over_udp_stop(void)
{
	struct socket *sop;

	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
		/* Nothing to do */
		return;
	}
	sop = SCTP_BASE_INFO(udp_tun_socket);
	soclose(sop);
	SCTP_BASE_INFO(udp_tun_socket) = NULL;
}

/*
 * Create the UDP tunneling socket, register
 * sctp_recv_udp_tunneled_packet() as its kernel tunneling hook and
 * bind it to the sysctl-configured port.  Returns 0 on success or an
 * errno value (EINVAL if no port is configured, EALREADY if a
 * tunneling socket already exists, otherwise the socket-layer error).
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up, we can't get the port */
exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_udp_tunneled_packet().
	 */
	return (0);
}

#endif