1 /*- 2 * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 37 #include "opt_ipsec.h" 38 #include "opt_compat.h" 39 #include "opt_inet6.h" 40 #include "opt_inet.h" 41 #include "opt_sctp.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/fcntl.h> 46 #include <sys/lock.h> 47 #include <sys/malloc.h> 48 #include <sys/mbuf.h> 49 #include <sys/domain.h> 50 #include <sys/file.h> /* for struct knote */ 51 #include <sys/kernel.h> 52 #include <sys/event.h> 53 #include <sys/poll.h> 54 55 #include <sys/protosw.h> 56 #include <sys/socket.h> 57 #include <sys/socketvar.h> 58 #include <sys/proc.h> 59 #include <sys/kernel.h> 60 #include <sys/resourcevar.h> 61 #include <sys/signalvar.h> 62 #include <sys/sysctl.h> 63 #include <sys/uio.h> 64 #include <sys/jail.h> 65 66 #include <sys/callout.h> 67 68 #include <net/radix.h> 69 #include <net/route.h> 70 71 #ifdef INET6 72 #include <sys/domain.h> 73 #endif 74 75 #include <sys/limits.h> 76 #include <sys/mac.h> 77 #include <sys/mutex.h> 78 79 #include <net/if.h> 80 #include <net/if_types.h> 81 #include <net/route.h> 82 83 #include <netinet/in.h> 84 #include <netinet/in_systm.h> 85 #include <netinet/ip.h> 86 #include <netinet/in_pcb.h> 87 #include <netinet/in_var.h> 88 #include <netinet/ip_var.h> 89 90 #ifdef INET6 91 #include <netinet/ip6.h> 92 #include <netinet6/ip6_var.h> 93 94 #include <netinet6/in6_pcb.h> 95 96 #include <netinet6/scope6_var.h> 97 #endif /* INET6 */ 98 99 #ifdef IPSEC 100 #include <netinet6/ipsec.h> 101 #include <netkey/key.h> 102 #endif /* IPSEC */ 103 104 #include <netinet/sctp_os.h> 105 #include <netinet/sctp_pcb.h> 106 #include <netinet/sctputil.h> 107 #include <netinet/sctp_var.h> 108 #ifdef INET6 109 #include <netinet6/sctp6_var.h> 110 #endif 111 #include <netinet/sctp_header.h> 112 #include <netinet/sctp_output.h> 113 #include <netinet/sctp_uio.h> 
114 #include <netinet/sctp_timer.h> 115 #include <netinet/sctp_crc32.h> 116 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */ 117 #include <netinet/sctp_auth.h> 118 #include <netinet/sctp_asconf.h> 119 120 extern int sctp_warm_the_crc32_table; 121 122 #define NUMBER_OF_MTU_SIZES 18 123 124 #ifdef SCTP_DEBUG 125 extern uint32_t sctp_debug_on; 126 127 #endif 128 129 130 #ifdef SCTP_STAT_LOGGING 131 int global_sctp_cwnd_log_at = 0; 132 int global_sctp_cwnd_log_rolled = 0; 133 struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE]; 134 135 static uint32_t 136 sctp_get_time_of_event(void) 137 { 138 struct timeval now; 139 uint32_t timeval; 140 141 SCTP_GETPTIME_TIMEVAL(&now); 142 timeval = (now.tv_sec % 0x00000fff); 143 timeval <<= 20; 144 timeval |= now.tv_usec & 0xfffff; 145 return (timeval); 146 } 147 148 149 void 150 sctp_clr_stat_log(void) 151 { 152 global_sctp_cwnd_log_at = 0; 153 global_sctp_cwnd_log_rolled = 0; 154 } 155 156 157 void 158 sctp_sblog(struct sockbuf *sb, 159 struct sctp_tcb *stcb, int from, int incr) 160 { 161 int sctp_cwnd_log_at; 162 163 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 164 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 165 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 166 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SB; 167 sctp_clog[sctp_cwnd_log_at].x.sb.stcb = stcb; 168 sctp_clog[sctp_cwnd_log_at].x.sb.so_sbcc = sb->sb_cc; 169 if (stcb) 170 sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = stcb->asoc.sb_cc; 171 else 172 sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = 0; 173 sctp_clog[sctp_cwnd_log_at].x.sb.incr = incr; 174 } 175 176 void 177 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc) 178 { 179 int sctp_cwnd_log_at; 180 181 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 182 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 183 sctp_clog[sctp_cwnd_log_at].from = 0; 184 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CLOSE; 185 sctp_clog[sctp_cwnd_log_at].x.close.inp = (void *)inp; 186 sctp_clog[sctp_cwnd_log_at].x.close.sctp_flags = inp->sctp_flags; 187 if (stcb) { 188 sctp_clog[sctp_cwnd_log_at].x.close.stcb = (void *)stcb; 189 sctp_clog[sctp_cwnd_log_at].x.close.state = (uint16_t) stcb->asoc.state; 190 } else { 191 sctp_clog[sctp_cwnd_log_at].x.close.stcb = 0; 192 sctp_clog[sctp_cwnd_log_at].x.close.state = 0; 193 } 194 sctp_clog[sctp_cwnd_log_at].x.close.loc = loc; 195 } 196 197 198 void 199 rto_logging(struct sctp_nets *net, int from) 200 { 201 int sctp_cwnd_log_at; 202 203 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 204 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 205 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 206 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RTT; 207 sctp_clog[sctp_cwnd_log_at].x.rto.net = (void *)net; 208 sctp_clog[sctp_cwnd_log_at].x.rto.rtt = net->prev_rtt; 209 sctp_clog[sctp_cwnd_log_at].x.rto.rttvar = net->rtt_variance; 210 sctp_clog[sctp_cwnd_log_at].x.rto.direction = net->rto_variance_dir; 211 } 212 213 void 214 sctp_log_strm_del_alt(uint32_t tsn, uint16_t sseq, int from) 215 { 216 int sctp_cwnd_log_at; 217 218 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 219 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 220 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 221 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM; 222 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn; 223 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq; 224 
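	/*
	 * This variant logs a single TSN/SSN pair; there is no end-of-range
	 * entry to report, so the e_tsn and e_sseq fields are simply zeroed
	 * below.
	 */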
sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0; 225 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0; 226 } 227 228 void 229 sctp_log_nagle_event(struct sctp_tcb *stcb, int action) 230 { 231 int sctp_cwnd_log_at; 232 233 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 234 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 235 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) action; 236 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_NAGLE; 237 sctp_clog[sctp_cwnd_log_at].x.nagle.stcb = (void *)stcb; 238 sctp_clog[sctp_cwnd_log_at].x.nagle.total_flight = stcb->asoc.total_flight; 239 sctp_clog[sctp_cwnd_log_at].x.nagle.total_in_queue = stcb->asoc.total_output_queue_size; 240 sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue; 241 sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_flight = stcb->asoc.total_flight_count; 242 } 243 244 245 void 246 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from) 247 { 248 int sctp_cwnd_log_at; 249 250 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 251 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 252 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 253 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SACK; 254 sctp_clog[sctp_cwnd_log_at].x.sack.cumack = cumack; 255 sctp_clog[sctp_cwnd_log_at].x.sack.oldcumack = old_cumack; 256 sctp_clog[sctp_cwnd_log_at].x.sack.tsn = tsn; 257 sctp_clog[sctp_cwnd_log_at].x.sack.numGaps = gaps; 258 sctp_clog[sctp_cwnd_log_at].x.sack.numDups = dups; 259 } 260 261 void 262 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from) 263 { 264 int sctp_cwnd_log_at; 265 266 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 267 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 268 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 269 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAP; 270 sctp_clog[sctp_cwnd_log_at].x.map.base = map; 271 sctp_clog[sctp_cwnd_log_at].x.map.cum = cum; 272 sctp_clog[sctp_cwnd_log_at].x.map.high = high; 273 } 274 275 void 276 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, 277 int from) 278 { 279 int sctp_cwnd_log_at; 280 281 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 282 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 283 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 284 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_FR; 285 sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn; 286 sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn; 287 sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn; 288 } 289 290 291 void 292 sctp_log_mb(struct mbuf *m, int from) 293 { 294 int sctp_cwnd_log_at; 295 296 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 297 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 298 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 299 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBUF; 300 sctp_clog[sctp_cwnd_log_at].x.mb.mp = m; 301 sctp_clog[sctp_cwnd_log_at].x.mb.mbuf_flags = (uint8_t) (m->m_flags); 302 sctp_clog[sctp_cwnd_log_at].x.mb.size = (uint16_t) (m->m_len); 303 sctp_clog[sctp_cwnd_log_at].x.mb.data = m->m_data; 304 if (m->m_flags & M_EXT) { 305 sctp_clog[sctp_cwnd_log_at].x.mb.ext = m->m_ext.ext_buf; 306 sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = (uint8_t) (*m->m_ext.ref_cnt); 307 } else { 308 sctp_clog[sctp_cwnd_log_at].x.mb.ext = 0; 309 sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = 0; 310 } 311 } 312 313 314 void 
315 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, 316 int from) 317 { 318 int sctp_cwnd_log_at; 319 320 if (control == NULL) { 321 printf("Gak log of NULL?\n"); 322 return; 323 } 324 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 325 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 326 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 327 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM; 328 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = control->sinfo_tsn; 329 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = control->sinfo_ssn; 330 if (poschk != NULL) { 331 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = poschk->sinfo_tsn; 332 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = poschk->sinfo_ssn; 333 } else { 334 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0; 335 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0; 336 } 337 } 338 339 void 340 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from) 341 { 342 int sctp_cwnd_log_at; 343 344 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 345 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 346 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 347 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CWND; 348 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net; 349 if (stcb->asoc.send_queue_cnt > 255) 350 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255; 351 else 352 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 353 if (stcb->asoc.stream_queue_cnt > 255) 354 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255; 355 else 356 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 357 358 if (net) { 359 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd; 360 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size; 361 sctp_clog[sctp_cwnd_log_at].x.cwnd.pseudo_cumack = net->pseudo_cumack; 362 sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack; 363 sctp_clog[sctp_cwnd_log_at].x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack; 364 } 365 if (SCTP_CWNDLOG_PRESEND == from) { 366 sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd; 367 } 368 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment; 369 } 370 371 void 372 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from) 373 { 374 int sctp_cwnd_log_at; 375 376 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 377 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 378 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 379 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_LOCK_EVENT; 380 sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)inp->sctp_socket; 381 sctp_clog[sctp_cwnd_log_at].x.lock.inp = (void *)inp; 382 if (stcb) { 383 sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx); 384 } else { 385 sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = SCTP_LOCK_UNKNOWN; 386 } 387 if (inp) { 388 sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = mtx_owned(&inp->inp_mtx); 389 sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = mtx_owned(&inp->inp_create_mtx); 390 } else { 391 sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = SCTP_LOCK_UNKNOWN; 392 sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = SCTP_LOCK_UNKNOWN; 393 } 394 sctp_clog[sctp_cwnd_log_at].x.lock.info_lock = mtx_owned(&sctppcbinfo.ipi_ep_mtx); 395 if (inp->sctp_socket) { 396 sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = 
mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 397 sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 398 sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx)); 399 } else { 400 sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = SCTP_LOCK_UNKNOWN; 401 sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN; 402 sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN; 403 } 404 } 405 406 void 407 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from) 408 { 409 int sctp_cwnd_log_at; 410 411 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 412 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 413 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 414 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAXBURST; 415 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net; 416 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error; 417 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size; 418 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst; 419 if (stcb->asoc.send_queue_cnt > 255) 420 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255; 421 else 422 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 423 if (stcb->asoc.stream_queue_cnt > 255) 424 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255; 425 else 426 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 427 } 428 429 void 430 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead) 431 { 432 int sctp_cwnd_log_at; 433 434 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 435 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 436 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 437 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND; 438 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd; 439 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size; 440 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead; 441 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0; 442 } 443 444 void 445 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval) 446 { 447 int sctp_cwnd_log_at; 448 449 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 450 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 451 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 452 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND; 453 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd; 454 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size; 455 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead; 456 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval; 457 } 458 459 void 460 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt) 461 { 462 int sctp_cwnd_log_at; 463 464 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 465 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 466 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 467 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBCNT; 468 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq; 469 sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change = book; 470 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q; 471 sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt; 472 } 473 474 void 475 sctp_misc_ints(uint8_t from, uint32_t a, 
uint32_t b, uint32_t c, uint32_t d) 476 { 477 int sctp_cwnd_log_at; 478 479 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 480 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 481 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 482 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_MISC_EVENT; 483 sctp_clog[sctp_cwnd_log_at].x.misc.log1 = a; 484 sctp_clog[sctp_cwnd_log_at].x.misc.log2 = b; 485 sctp_clog[sctp_cwnd_log_at].x.misc.log3 = c; 486 sctp_clog[sctp_cwnd_log_at].x.misc.log4 = d; 487 } 488 489 void 490 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from) 491 { 492 int sctp_cwnd_log_at; 493 494 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 495 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 496 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 497 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_WAKE; 498 sctp_clog[sctp_cwnd_log_at].x.wake.stcb = (void *)stcb; 499 sctp_clog[sctp_cwnd_log_at].x.wake.wake_cnt = wake_cnt; 500 sctp_clog[sctp_cwnd_log_at].x.wake.flight = stcb->asoc.total_flight_count; 501 sctp_clog[sctp_cwnd_log_at].x.wake.send_q = stcb->asoc.send_queue_cnt; 502 sctp_clog[sctp_cwnd_log_at].x.wake.sent_q = stcb->asoc.sent_queue_cnt; 503 504 if (stcb->asoc.stream_queue_cnt < 0xff) 505 sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt; 506 else 507 sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = 0xff; 508 509 if (stcb->asoc.chunks_on_out_queue < 0xff) 510 sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue; 511 else 512 sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = 0xff; 513 514 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags = 0; 515 /* set in the defered mode stuff */ 516 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) 517 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 1; 518 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) 519 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 2; 520 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) 521 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 4; 522 /* what about the sb */ 523 if (stcb->sctp_socket) { 524 struct socket *so = stcb->sctp_socket; 525 526 sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff)); 527 } else { 528 sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = 0xff; 529 } 530 } 531 532 void 533 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen) 534 { 535 int sctp_cwnd_log_at; 536 537 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 538 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 539 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 540 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_BLOCK; 541 sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size; 542 sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt); 543 sctp_clog[sctp_cwnd_log_at].x.blk.peer_rwnd = asoc->peers_rwnd; 544 sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt; 545 sctp_clog[sctp_cwnd_log_at].x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue; 546 sctp_clog[sctp_cwnd_log_at].x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024); 547 sctp_clog[sctp_cwnd_log_at].x.blk.sndlen = sendlen; 548 } 549 550 int 551 sctp_fill_stat_log(struct mbuf *m) 552 { 553 int sctp_cwnd_log_at; 554 struct sctp_cwnd_log_req *req; 555 size_t size_limit; 556 int num, i, at, 
    cnt_out = 0;

	if (m == NULL)
		return (EINVAL);

	size_limit = (m->m_len - sizeof(struct sctp_cwnd_log_req));
	if (size_limit < sizeof(struct sctp_cwnd_log)) {
		return (EINVAL);
	}
	sctp_cwnd_log_at = global_sctp_cwnd_log_at;
	req = mtod(m, struct sctp_cwnd_log_req *);
	num = size_limit / sizeof(struct sctp_cwnd_log);
	if (global_sctp_cwnd_log_rolled) {
		req->num_in_log = SCTP_STAT_LOG_SIZE;
	} else {
		req->num_in_log = sctp_cwnd_log_at;
		/*
		 * if the log has not rolled, we don't let you have old
		 * data.
		 */
		if (req->end_at > sctp_cwnd_log_at) {
			req->end_at = sctp_cwnd_log_at;
		}
	}
	if ((num < SCTP_STAT_LOG_SIZE) &&
	    ((global_sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) {
		/* we can't return all of it */
		if (((req->start_at == 0) && (req->end_at == 0)) ||
		    (req->start_at >= SCTP_STAT_LOG_SIZE) ||
		    (req->end_at >= SCTP_STAT_LOG_SIZE)) {
			/* No user request or user is whacked. */
			req->num_ret = num;
			req->end_at = sctp_cwnd_log_at - 1;
			if ((sctp_cwnd_log_at - num) < 0) {
				int cc;

				cc = num - sctp_cwnd_log_at;
				req->start_at = SCTP_STAT_LOG_SIZE - cc;
			} else {
				req->start_at = sctp_cwnd_log_at - num;
			}
		} else {
			/* a user request */
			int cc;

			if (req->start_at > req->end_at) {
				cc = (SCTP_STAT_LOG_SIZE - req->start_at) +
				    (req->end_at + 1);
			} else {
				cc = (req->end_at - req->start_at) + 1;
			}
			if (cc < num) {
				num = cc;
			}
			req->num_ret = num;
		}
	} else {
		/* We can return all of it */
		req->start_at = 0;
		req->end_at = sctp_cwnd_log_at - 1;
		req->num_ret = sctp_cwnd_log_at;
	}
#ifdef INVARIANTS
	if (req->num_ret > num) {
		panic("Bad statlog get?");
	}
#endif
	for (i = 0, at = req->start_at; i < req->num_ret; i++) {
		req->log[i] = sctp_clog[at];
		cnt_out++;
		at++;
		if (at >= SCTP_STAT_LOG_SIZE)
			at = 0;
	}
	m->m_len = (cnt_out * sizeof(struct sctp_cwnd_log)) + sizeof(struct sctp_cwnd_log_req);
	return (0);
}

#endif

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			printf("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			printf("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			printf("\n");
			cnt = 0;
		}
		printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
		    (uint32_t) sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			printf("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			printf("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			printf("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			printf("\n");
			cnt = 0;
		}
		printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
		    (uint32_t) sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			printf("\n");
	}
	printf("\n");
}

void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		printf("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		printf("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		printf("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		printf("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				printf("net:%x flight was %d corrected to %d\n",
				    (uint32_t) lnet, lnet->flight_size, tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{
	int s;

	s = splnet();
	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	splx(s);
}

#endif

/*
 * a list of sizes based on typical MTUs, used only if the next hop size is
 * not returned.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	callout_stop(&asoc->hb_timer.timer);
	callout_stop(&asoc->dack_timer.timer);
	callout_stop(&asoc->strreset_timer.timer);
	callout_stop(&asoc->asconf_timer.timer);
	callout_stop(&asoc->autoclose_timer.timer);
	callout_stop(&asoc->delayed_event_timer.timer);
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		callout_stop(&net->fr_timer.timer);
		callout_stop(&net->pmtu_timer.timer);
	}
}

int
find_next_best_mtu(int totsz)
{
	int i, prefer;

	/*
	 * if we are in here we must find the next best fit based on the
	 * size of the datagram that failed to be sent.
	 */
	prefer = 0;
	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
		if (totsz < sctp_mtu_sizes[i]) {
			prefer = i - 1;
			if (prefer < 0)
				prefer = 0;
			break;
		}
	}
	return (sctp_mtu_sizes[prefer]);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then set up to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbledygook in the random store, which is what we
	 * want.
There is a danger that two guys will use the same random 912 * numbers, but thats ok too since that is random as well :-> 913 */ 914 m->store_at = 0; 915 sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers, 916 sizeof(m->random_numbers), (uint8_t *) & m->random_counter, 917 sizeof(m->random_counter), (uint8_t *) m->random_store); 918 m->random_counter++; 919 } 920 921 uint32_t 922 sctp_select_initial_TSN(struct sctp_pcb *m) 923 { 924 /* 925 * A true implementation should use random selection process to get 926 * the initial stream sequence number, using RFC1750 as a good 927 * guideline 928 */ 929 u_long x, *xp; 930 uint8_t *p; 931 932 if (m->initial_sequence_debug != 0) { 933 uint32_t ret; 934 935 ret = m->initial_sequence_debug; 936 m->initial_sequence_debug++; 937 return (ret); 938 } 939 if ((m->store_at + sizeof(u_long)) > SCTP_SIGNATURE_SIZE) { 940 /* Refill the random store */ 941 sctp_fill_random_store(m); 942 } 943 p = &m->random_store[(int)m->store_at]; 944 xp = (u_long *)p; 945 x = *xp; 946 m->store_at += sizeof(u_long); 947 return (x); 948 } 949 950 uint32_t 951 sctp_select_a_tag(struct sctp_inpcb *m) 952 { 953 u_long x, not_done; 954 struct timeval now; 955 956 SCTP_GETTIME_TIMEVAL(&now); 957 not_done = 1; 958 while (not_done) { 959 x = sctp_select_initial_TSN(&m->sctp_ep); 960 if (x == 0) { 961 /* we never use 0 */ 962 continue; 963 } 964 if (sctp_is_vtag_good(m, x, &now)) { 965 not_done = 0; 966 } 967 } 968 return (x); 969 } 970 971 972 int 973 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc, 974 int for_a_init, uint32_t override_tag) 975 { 976 /* 977 * Anything set to zero is taken care of by the allocation routine's 978 * bzero 979 */ 980 981 /* 982 * Up front select what scoping to apply on addresses I tell my peer 983 * Not sure what to do with these right now, we will need to come up 984 * with a way to set them. We may need to pass them through from the 985 * caller in the sctp_aloc_assoc() function. 986 */ 987 int i; 988 989 /* init all variables to a known value. 
*/ 990 asoc->state = SCTP_STATE_INUSE; 991 asoc->max_burst = m->sctp_ep.max_burst; 992 asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 993 asoc->cookie_life = m->sctp_ep.def_cookie_life; 994 asoc->sctp_cmt_on_off = (uint8_t) sctp_cmt_on_off; 995 #ifdef AF_INET 996 asoc->default_tos = m->ip_inp.inp.inp_ip_tos; 997 #else 998 asoc->default_tos = 0; 999 #endif 1000 1001 #ifdef AF_INET6 1002 asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo; 1003 #else 1004 asoc->default_flowlabel = 0; 1005 #endif 1006 if (override_tag) { 1007 struct timeval now; 1008 1009 if (sctp_is_vtag_good(m, override_tag, &now)) { 1010 asoc->my_vtag = override_tag; 1011 } else { 1012 return (ENOMEM); 1013 } 1014 1015 } else { 1016 asoc->my_vtag = sctp_select_a_tag(m); 1017 } 1018 if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) 1019 asoc->hb_is_disabled = 1; 1020 else 1021 asoc->hb_is_disabled = 0; 1022 1023 asoc->refcnt = 0; 1024 asoc->assoc_up_sent = 0; 1025 asoc->assoc_id = asoc->my_vtag; 1026 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq = 1027 sctp_select_initial_TSN(&m->sctp_ep); 1028 /* we are optimisitic here */ 1029 asoc->peer_supports_pktdrop = 1; 1030 1031 asoc->sent_queue_retran_cnt = 0; 1032 1033 /* for CMT */ 1034 asoc->last_net_data_came_from = NULL; 1035 1036 /* This will need to be adjusted */ 1037 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 1038 asoc->last_acked_seq = asoc->init_seq_number - 1; 1039 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1040 asoc->asconf_seq_in = asoc->last_acked_seq; 1041 1042 /* here we are different, we hold the next one we expect */ 1043 asoc->str_reset_seq_in = asoc->last_acked_seq + 1; 1044 1045 asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max; 1046 asoc->initial_rto = m->sctp_ep.initial_rto; 1047 1048 asoc->max_init_times = m->sctp_ep.max_init_times; 1049 asoc->max_send_times = m->sctp_ep.max_send_times; 1050 asoc->def_net_failure = m->sctp_ep.def_net_failure; 1051 asoc->free_chunk_cnt = 0; 1052 1053 asoc->iam_blocking = 0; 1054 /* ECN Nonce initialization */ 1055 asoc->context = m->sctp_context; 1056 asoc->def_send = m->def_send; 1057 asoc->ecn_nonce_allowed = 0; 1058 asoc->receiver_nonce_sum = 1; 1059 asoc->nonce_sum_expect_base = 1; 1060 asoc->nonce_sum_check = 1; 1061 asoc->nonce_resync_tsn = 0; 1062 asoc->nonce_wait_for_ecne = 0; 1063 asoc->nonce_wait_tsn = 0; 1064 asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 1065 asoc->pr_sctp_cnt = 0; 1066 asoc->total_output_queue_size = 0; 1067 1068 if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1069 struct in6pcb *inp6; 1070 1071 1072 /* Its a V6 socket */ 1073 inp6 = (struct in6pcb *)m; 1074 asoc->ipv6_addr_legal = 1; 1075 /* Now look at the binding flag to see if V4 will be legal */ 1076 if ( 1077 (inp6->inp_flags & IN6P_IPV6_V6ONLY) 1078 == 0) { 1079 asoc->ipv4_addr_legal = 1; 1080 } else { 1081 /* V4 addresses are NOT legal on the association */ 1082 asoc->ipv4_addr_legal = 0; 1083 } 1084 } else { 1085 /* Its a V4 socket, no - V6 */ 1086 asoc->ipv4_addr_legal = 1; 1087 asoc->ipv6_addr_legal = 0; 1088 } 1089 1090 1091 asoc->my_rwnd = max(m->sctp_socket->so_rcv.sb_hiwat, SCTP_MINIMAL_RWND); 1092 asoc->peers_rwnd = m->sctp_socket->so_rcv.sb_hiwat; 1093 1094 asoc->smallest_mtu = m->sctp_frag_point; 1095 asoc->minrto = m->sctp_ep.sctp_minrto; 1096 asoc->maxrto = m->sctp_ep.sctp_maxrto; 1097 1098 asoc->locked_on_sending = NULL; 1099 asoc->stream_locked_on = 0; 
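	/*
	 * The remaining counters start at zero; the queue heads and the
	 * per-stream output array (sized by the endpoint's
	 * pre_open_stream_count) are set up below.
	 */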
1100 asoc->ecn_echo_cnt_onq = 0; 1101 asoc->stream_locked = 0; 1102 1103 LIST_INIT(&asoc->sctp_local_addr_list); 1104 TAILQ_INIT(&asoc->nets); 1105 TAILQ_INIT(&asoc->pending_reply_queue); 1106 asoc->last_asconf_ack_sent = NULL; 1107 /* Setup to fill the hb random cache at first HB */ 1108 asoc->hb_random_idx = 4; 1109 1110 asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time; 1111 1112 /* 1113 * Now the stream parameters, here we allocate space for all streams 1114 * that we request by default. 1115 */ 1116 asoc->streamoutcnt = asoc->pre_open_streams = 1117 m->sctp_ep.pre_open_stream_count; 1118 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *, 1119 asoc->streamoutcnt * sizeof(struct sctp_stream_out), 1120 "StreamsOut"); 1121 if (asoc->strmout == NULL) { 1122 /* big trouble no memory */ 1123 return (ENOMEM); 1124 } 1125 for (i = 0; i < asoc->streamoutcnt; i++) { 1126 /* 1127 * inbound side must be set to 0xffff, also NOTE when we get 1128 * the INIT-ACK back (for INIT sender) we MUST reduce the 1129 * count (streamoutcnt) but first check if we sent to any of 1130 * the upper streams that were dropped (if some were). Those 1131 * that were dropped must be notified to the upper layer as 1132 * failed to send. 1133 */ 1134 asoc->strmout[i].next_sequence_sent = 0x0; 1135 TAILQ_INIT(&asoc->strmout[i].outqueue); 1136 asoc->strmout[i].stream_no = i; 1137 asoc->strmout[i].last_msg_incomplete = 0; 1138 asoc->strmout[i].next_spoke.tqe_next = 0; 1139 asoc->strmout[i].next_spoke.tqe_prev = 0; 1140 } 1141 /* Now the mapping array */ 1142 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY; 1143 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size, 1144 "MappingArray"); 1145 if (asoc->mapping_array == NULL) { 1146 SCTP_FREE(asoc->strmout); 1147 return (ENOMEM); 1148 } 1149 memset(asoc->mapping_array, 0, asoc->mapping_array_size); 1150 /* Now the init of the other outqueues */ 1151 TAILQ_INIT(&asoc->free_chunks); 1152 TAILQ_INIT(&asoc->free_strmoq); 1153 TAILQ_INIT(&asoc->out_wheel); 1154 TAILQ_INIT(&asoc->control_send_queue); 1155 TAILQ_INIT(&asoc->send_queue); 1156 TAILQ_INIT(&asoc->sent_queue); 1157 TAILQ_INIT(&asoc->reasmqueue); 1158 TAILQ_INIT(&asoc->resetHead); 1159 asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome; 1160 TAILQ_INIT(&asoc->asconf_queue); 1161 /* authentication fields */ 1162 asoc->authinfo.random = NULL; 1163 asoc->authinfo.assoc_key = NULL; 1164 asoc->authinfo.assoc_keyid = 0; 1165 asoc->authinfo.recv_key = NULL; 1166 asoc->authinfo.recv_keyid = 0; 1167 LIST_INIT(&asoc->shared_keys); 1168 1169 return (0); 1170 } 1171 1172 int 1173 sctp_expand_mapping_array(struct sctp_association *asoc) 1174 { 1175 /* mapping array needs to grow */ 1176 uint8_t *new_array; 1177 uint16_t new_size; 1178 1179 new_size = asoc->mapping_array_size + SCTP_MAPPING_ARRAY_INCR; 1180 SCTP_MALLOC(new_array, uint8_t *, new_size, "MappingArray"); 1181 if (new_array == NULL) { 1182 /* can't get more, forget it */ 1183 printf("No memory for expansion of SCTP mapping array %d\n", 1184 new_size); 1185 return (-1); 1186 } 1187 memset(new_array, 0, new_size); 1188 memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size); 1189 SCTP_FREE(asoc->mapping_array); 1190 asoc->mapping_array = new_array; 1191 asoc->mapping_array_size = new_size; 1192 return (0); 1193 } 1194 1195 extern unsigned int sctp_early_fr_msec; 1196 1197 static void 1198 sctp_handle_addr_wq(void) 1199 { 1200 /* deal with the ADDR wq from the rtsock calls */ 1201 struct sctp_laddr *wi; 1202 1203 
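	/*
	 * Dequeue a single entry while holding the address lock, re-arm the
	 * ADDR_WQ timer if more work remains, and only then apply the
	 * RTM_ADD/RTM_DELETE action outside the lock.
	 */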
SCTP_IPI_ADDR_LOCK(); 1204 wi = LIST_FIRST(&sctppcbinfo.addr_wq); 1205 if (wi == NULL) { 1206 SCTP_IPI_ADDR_UNLOCK(); 1207 return; 1208 } 1209 LIST_REMOVE(wi, sctp_nxt_addr); 1210 if (!LIST_EMPTY(&sctppcbinfo.addr_wq)) { 1211 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 1212 (struct sctp_inpcb *)NULL, 1213 (struct sctp_tcb *)NULL, 1214 (struct sctp_nets *)NULL); 1215 } 1216 SCTP_IPI_ADDR_UNLOCK(); 1217 if (wi->action == RTM_ADD) { 1218 sctp_add_ip_address(wi->ifa); 1219 } else if (wi->action == RTM_DELETE) { 1220 sctp_delete_ip_address(wi->ifa); 1221 } 1222 IFAFREE(wi->ifa); 1223 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, wi); 1224 SCTP_DECR_LADDR_COUNT(); 1225 } 1226 1227 void 1228 sctp_timeout_handler(void *t) 1229 { 1230 struct sctp_inpcb *inp; 1231 struct sctp_tcb *stcb; 1232 struct sctp_nets *net; 1233 struct sctp_timer *tmr; 1234 int s, did_output; 1235 struct sctp_iterator *it = NULL; 1236 1237 1238 s = splnet(); 1239 tmr = (struct sctp_timer *)t; 1240 inp = (struct sctp_inpcb *)tmr->ep; 1241 stcb = (struct sctp_tcb *)tmr->tcb; 1242 net = (struct sctp_nets *)tmr->net; 1243 did_output = 1; 1244 1245 #ifdef SCTP_AUDITING_ENABLED 1246 sctp_audit_log(0xF0, (uint8_t) tmr->type); 1247 sctp_auditing(3, inp, stcb, net); 1248 #endif 1249 1250 /* sanity checks... */ 1251 if (tmr->self != (void *)tmr) { 1252 /* 1253 * printf("Stale SCTP timer fired (%p), ignoring...\n", 1254 * tmr); 1255 */ 1256 splx(s); 1257 return; 1258 } 1259 if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) { 1260 /* 1261 * printf("SCTP timer fired with invalid type: 0x%x\n", 1262 * tmr->type); 1263 */ 1264 splx(s); 1265 return; 1266 } 1267 if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) { 1268 splx(s); 1269 return; 1270 } 1271 /* if this is an iterator timeout, get the struct and clear inp */ 1272 if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) { 1273 it = (struct sctp_iterator *)inp; 1274 inp = NULL; 1275 } 1276 if (inp) { 1277 SCTP_INP_INCR_REF(inp); 1278 if ((inp->sctp_socket == 0) && 1279 ((tmr->type != SCTP_TIMER_TYPE_INPKILL) && 1280 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) && 1281 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) && 1282 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) && 1283 (tmr->type != SCTP_TIMER_TYPE_ASOCKILL)) 1284 ) { 1285 splx(s); 1286 SCTP_INP_DECR_REF(inp); 1287 return; 1288 } 1289 } 1290 if (stcb) { 1291 if (stcb->asoc.state == 0) { 1292 splx(s); 1293 if (inp) { 1294 SCTP_INP_DECR_REF(inp); 1295 } 1296 return; 1297 } 1298 } 1299 #ifdef SCTP_DEBUG 1300 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1301 printf("Timer type %d goes off\n", tmr->type); 1302 } 1303 #endif /* SCTP_DEBUG */ 1304 if (!callout_active(&tmr->timer)) { 1305 splx(s); 1306 if (inp) { 1307 SCTP_INP_DECR_REF(inp); 1308 } 1309 return; 1310 } 1311 if (stcb) { 1312 atomic_add_int(&stcb->asoc.refcnt, 1); 1313 SCTP_TCB_LOCK(stcb); 1314 atomic_add_int(&stcb->asoc.refcnt, -1); 1315 } 1316 /* mark as being serviced now */ 1317 callout_deactivate(&tmr->timer); 1318 1319 /* call the handler for the appropriate timer type */ 1320 switch (tmr->type) { 1321 case SCTP_TIMER_TYPE_ADDR_WQ: 1322 sctp_handle_addr_wq(); 1323 break; 1324 case SCTP_TIMER_TYPE_ITERATOR: 1325 SCTP_STAT_INCR(sctps_timoiterator); 1326 sctp_iterator_timer(it); 1327 break; 1328 case SCTP_TIMER_TYPE_SEND: 1329 SCTP_STAT_INCR(sctps_timodata); 1330 stcb->asoc.num_send_timers_up--; 1331 if (stcb->asoc.num_send_timers_up < 0) { 1332 stcb->asoc.num_send_timers_up = 0; 1333 } 1334 if (sctp_t3rxt_timer(inp, stcb, net)) { 1335 /* no need to unlock on tcb its gone */ 1336 1337 goto out_decr; 
1338 } 1339 #ifdef SCTP_AUDITING_ENABLED 1340 sctp_auditing(4, inp, stcb, net); 1341 #endif 1342 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3); 1343 if ((stcb->asoc.num_send_timers_up == 0) && 1344 (stcb->asoc.sent_queue_cnt > 0) 1345 ) { 1346 struct sctp_tmit_chunk *chk; 1347 1348 /* 1349 * safeguard. If there on some on the sent queue 1350 * somewhere but no timers running something is 1351 * wrong... so we start a timer on the first chunk 1352 * on the send queue on whatever net it is sent to. 1353 */ 1354 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 1355 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, 1356 chk->whoTo); 1357 } 1358 break; 1359 case SCTP_TIMER_TYPE_INIT: 1360 SCTP_STAT_INCR(sctps_timoinit); 1361 if (sctp_t1init_timer(inp, stcb, net)) { 1362 /* no need to unlock on tcb its gone */ 1363 goto out_decr; 1364 } 1365 /* We do output but not here */ 1366 did_output = 0; 1367 break; 1368 case SCTP_TIMER_TYPE_RECV: 1369 SCTP_STAT_INCR(sctps_timosack); 1370 sctp_send_sack(stcb); 1371 #ifdef SCTP_AUDITING_ENABLED 1372 sctp_auditing(4, inp, stcb, net); 1373 #endif 1374 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR); 1375 break; 1376 case SCTP_TIMER_TYPE_SHUTDOWN: 1377 if (sctp_shutdown_timer(inp, stcb, net)) { 1378 /* no need to unlock on tcb its gone */ 1379 goto out_decr; 1380 } 1381 SCTP_STAT_INCR(sctps_timoshutdown); 1382 #ifdef SCTP_AUDITING_ENABLED 1383 sctp_auditing(4, inp, stcb, net); 1384 #endif 1385 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR); 1386 break; 1387 case SCTP_TIMER_TYPE_HEARTBEAT: 1388 { 1389 struct sctp_nets *net; 1390 int cnt_of_unconf = 0; 1391 1392 SCTP_STAT_INCR(sctps_timoheartbeat); 1393 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1394 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 1395 (net->dest_state & SCTP_ADDR_REACHABLE)) { 1396 cnt_of_unconf++; 1397 } 1398 } 1399 if (cnt_of_unconf == 0) { 1400 if (sctp_heartbeat_timer(inp, stcb, net, cnt_of_unconf)) { 1401 /* no need to unlock on tcb its gone */ 1402 goto out_decr; 1403 } 1404 } 1405 #ifdef SCTP_AUDITING_ENABLED 1406 sctp_auditing(4, inp, stcb, net); 1407 #endif 1408 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, 1409 stcb, net); 1410 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR); 1411 } 1412 break; 1413 case SCTP_TIMER_TYPE_COOKIE: 1414 if (sctp_cookie_timer(inp, stcb, net)) { 1415 /* no need to unlock on tcb its gone */ 1416 goto out_decr; 1417 } 1418 SCTP_STAT_INCR(sctps_timocookie); 1419 #ifdef SCTP_AUDITING_ENABLED 1420 sctp_auditing(4, inp, stcb, net); 1421 #endif 1422 /* 1423 * We consider T3 and Cookie timer pretty much the same with 1424 * respect to where from in chunk_output. 
1425 */ 1426 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3); 1427 break; 1428 case SCTP_TIMER_TYPE_NEWCOOKIE: 1429 { 1430 struct timeval tv; 1431 int i, secret; 1432 1433 SCTP_STAT_INCR(sctps_timosecret); 1434 SCTP_GETTIME_TIMEVAL(&tv); 1435 SCTP_INP_WLOCK(inp); 1436 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1437 inp->sctp_ep.last_secret_number = 1438 inp->sctp_ep.current_secret_number; 1439 inp->sctp_ep.current_secret_number++; 1440 if (inp->sctp_ep.current_secret_number >= 1441 SCTP_HOW_MANY_SECRETS) { 1442 inp->sctp_ep.current_secret_number = 0; 1443 } 1444 secret = (int)inp->sctp_ep.current_secret_number; 1445 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1446 inp->sctp_ep.secret_key[secret][i] = 1447 sctp_select_initial_TSN(&inp->sctp_ep); 1448 } 1449 SCTP_INP_WUNLOCK(inp); 1450 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net); 1451 } 1452 did_output = 0; 1453 break; 1454 case SCTP_TIMER_TYPE_PATHMTURAISE: 1455 SCTP_STAT_INCR(sctps_timopathmtu); 1456 sctp_pathmtu_timer(inp, stcb, net); 1457 did_output = 0; 1458 break; 1459 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1460 if (sctp_shutdownack_timer(inp, stcb, net)) { 1461 /* no need to unlock on tcb its gone */ 1462 goto out_decr; 1463 } 1464 SCTP_STAT_INCR(sctps_timoshutdownack); 1465 #ifdef SCTP_AUDITING_ENABLED 1466 sctp_auditing(4, inp, stcb, net); 1467 #endif 1468 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR); 1469 break; 1470 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1471 SCTP_STAT_INCR(sctps_timoshutdownguard); 1472 sctp_abort_an_association(inp, stcb, 1473 SCTP_SHUTDOWN_GUARD_EXPIRES, NULL); 1474 /* no need to unlock on tcb its gone */ 1475 goto out_decr; 1476 break; 1477 1478 case SCTP_TIMER_TYPE_STRRESET: 1479 if (sctp_strreset_timer(inp, stcb, net)) { 1480 /* no need to unlock on tcb its gone */ 1481 goto out_decr; 1482 } 1483 SCTP_STAT_INCR(sctps_timostrmrst); 1484 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR); 1485 break; 1486 case SCTP_TIMER_TYPE_EARLYFR: 1487 /* Need to do FR of things for net */ 1488 SCTP_STAT_INCR(sctps_timoearlyfr); 1489 sctp_early_fr_timer(inp, stcb, net); 1490 break; 1491 case SCTP_TIMER_TYPE_ASCONF: 1492 if (sctp_asconf_timer(inp, stcb, net)) { 1493 /* no need to unlock on tcb its gone */ 1494 goto out_decr; 1495 } 1496 SCTP_STAT_INCR(sctps_timoasconf); 1497 #ifdef SCTP_AUDITING_ENABLED 1498 sctp_auditing(4, inp, stcb, net); 1499 #endif 1500 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR); 1501 break; 1502 1503 case SCTP_TIMER_TYPE_AUTOCLOSE: 1504 SCTP_STAT_INCR(sctps_timoautoclose); 1505 sctp_autoclose_timer(inp, stcb, net); 1506 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR); 1507 did_output = 0; 1508 break; 1509 case SCTP_TIMER_TYPE_ASOCKILL: 1510 SCTP_STAT_INCR(sctps_timoassockill); 1511 /* Can we free it yet? 
*/ 1512 SCTP_INP_DECR_REF(inp); 1513 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); 1514 sctp_free_assoc(inp, stcb, 0); 1515 /* 1516 * free asoc, always unlocks (or destroy's) so prevent 1517 * duplicate unlock or unlock of a free mtx :-0 1518 */ 1519 stcb = NULL; 1520 goto out_no_decr; 1521 break; 1522 case SCTP_TIMER_TYPE_INPKILL: 1523 SCTP_STAT_INCR(sctps_timoinpkill); 1524 /* 1525 * special case, take away our increment since WE are the 1526 * killer 1527 */ 1528 SCTP_INP_DECR_REF(inp); 1529 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL); 1530 sctp_inpcb_free(inp, 1, 0); 1531 goto out_no_decr; 1532 break; 1533 default: 1534 #ifdef SCTP_DEBUG 1535 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1536 printf("sctp_timeout_handler:unknown timer %d\n", 1537 tmr->type); 1538 } 1539 #endif /* SCTP_DEBUG */ 1540 break; 1541 }; 1542 #ifdef SCTP_AUDITING_ENABLED 1543 sctp_audit_log(0xF1, (uint8_t) tmr->type); 1544 if (inp) 1545 sctp_auditing(5, inp, stcb, net); 1546 #endif 1547 if ((did_output) && stcb) { 1548 /* 1549 * Now we need to clean up the control chunk chain if an 1550 * ECNE is on it. It must be marked as UNSENT again so next 1551 * call will continue to send it until such time that we get 1552 * a CWR, to remove it. It is, however, less likely that we 1553 * will find a ecn echo on the chain though. 1554 */ 1555 sctp_fix_ecn_echo(&stcb->asoc); 1556 } 1557 if (stcb) { 1558 SCTP_TCB_UNLOCK(stcb); 1559 } 1560 out_decr: 1561 if (inp) { 1562 SCTP_INP_DECR_REF(inp); 1563 } 1564 out_no_decr: 1565 1566 #ifdef SCTP_DEBUG 1567 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1568 printf("Timer now complete (type %d)\n", tmr->type); 1569 } 1570 #endif /* SCTP_DEBUG */ 1571 splx(s); 1572 if (inp) { 1573 } 1574 } 1575 1576 int 1577 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1578 struct sctp_nets *net) 1579 { 1580 int to_ticks; 1581 struct sctp_timer *tmr; 1582 1583 1584 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && 1585 (inp == NULL)) 1586 return (EFAULT); 1587 1588 to_ticks = 0; 1589 1590 tmr = NULL; 1591 if (stcb) { 1592 SCTP_TCB_LOCK_ASSERT(stcb); 1593 } 1594 switch (t_type) { 1595 case SCTP_TIMER_TYPE_ADDR_WQ: 1596 /* Only 1 tick away :-) */ 1597 tmr = &sctppcbinfo.addr_wq_timer; 1598 to_ticks = 1; 1599 break; 1600 case SCTP_TIMER_TYPE_ITERATOR: 1601 { 1602 struct sctp_iterator *it; 1603 1604 it = (struct sctp_iterator *)inp; 1605 tmr = &it->tmr; 1606 to_ticks = SCTP_ITERATOR_TICKS; 1607 } 1608 break; 1609 case SCTP_TIMER_TYPE_SEND: 1610 /* Here we use the RTO timer */ 1611 { 1612 int rto_val; 1613 1614 if ((stcb == NULL) || (net == NULL)) { 1615 return (EFAULT); 1616 } 1617 tmr = &net->rxt_timer; 1618 if (net->RTO == 0) { 1619 rto_val = stcb->asoc.initial_rto; 1620 } else { 1621 rto_val = net->RTO; 1622 } 1623 to_ticks = MSEC_TO_TICKS(rto_val); 1624 } 1625 break; 1626 case SCTP_TIMER_TYPE_INIT: 1627 /* 1628 * Here we use the INIT timer default usually about 1 1629 * minute. 1630 */ 1631 if ((stcb == NULL) || (net == NULL)) { 1632 return (EFAULT); 1633 } 1634 tmr = &net->rxt_timer; 1635 if (net->RTO == 0) { 1636 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1637 } else { 1638 to_ticks = MSEC_TO_TICKS(net->RTO); 1639 } 1640 break; 1641 case SCTP_TIMER_TYPE_RECV: 1642 /* 1643 * Here we use the Delayed-Ack timer value from the inp 1644 * ususually about 200ms. 
1645 */ 1646 if (stcb == NULL) { 1647 return (EFAULT); 1648 } 1649 tmr = &stcb->asoc.dack_timer; 1650 to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack); 1651 break; 1652 case SCTP_TIMER_TYPE_SHUTDOWN: 1653 /* Here we use the RTO of the destination. */ 1654 if ((stcb == NULL) || (net == NULL)) { 1655 return (EFAULT); 1656 } 1657 if (net->RTO == 0) { 1658 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1659 } else { 1660 to_ticks = MSEC_TO_TICKS(net->RTO); 1661 } 1662 tmr = &net->rxt_timer; 1663 break; 1664 case SCTP_TIMER_TYPE_HEARTBEAT: 1665 /* 1666 * the net is used here so that we can add in the RTO. Even 1667 * though we use a different timer. We also add the HB timer 1668 * PLUS a random jitter. 1669 */ 1670 if (stcb == NULL) { 1671 return (EFAULT); 1672 } { 1673 uint32_t rndval; 1674 uint8_t this_random; 1675 int cnt_of_unconf = 0; 1676 struct sctp_nets *lnet; 1677 1678 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 1679 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) && 1680 (lnet->dest_state & SCTP_ADDR_REACHABLE)) { 1681 cnt_of_unconf++; 1682 } 1683 } 1684 if (cnt_of_unconf) { 1685 lnet = NULL; 1686 sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf); 1687 } 1688 if (stcb->asoc.hb_random_idx > 3) { 1689 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 1690 memcpy(stcb->asoc.hb_random_values, &rndval, 1691 sizeof(stcb->asoc.hb_random_values)); 1692 this_random = stcb->asoc.hb_random_values[0]; 1693 stcb->asoc.hb_random_idx = 0; 1694 stcb->asoc.hb_ect_randombit = 0; 1695 } else { 1696 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 1697 stcb->asoc.hb_random_idx++; 1698 stcb->asoc.hb_ect_randombit = 0; 1699 } 1700 /* 1701 * this_random will be 0 - 256 ms RTO is in ms. 1702 */ 1703 if ((stcb->asoc.hb_is_disabled) && 1704 (cnt_of_unconf == 0)) { 1705 return (0); 1706 } 1707 if (net) { 1708 struct sctp_nets *lnet; 1709 int delay; 1710 1711 delay = stcb->asoc.heart_beat_delay; 1712 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 1713 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) && 1714 ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) && 1715 (lnet->dest_state & SCTP_ADDR_REACHABLE)) { 1716 delay = 0; 1717 } 1718 } 1719 if (net->RTO == 0) { 1720 /* Never been checked */ 1721 to_ticks = this_random + stcb->asoc.initial_rto + delay; 1722 } else { 1723 /* set rto_val to the ms */ 1724 to_ticks = delay + net->RTO + this_random; 1725 } 1726 } else { 1727 if (cnt_of_unconf) { 1728 to_ticks = this_random + stcb->asoc.initial_rto; 1729 } else { 1730 to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto; 1731 } 1732 } 1733 /* 1734 * Now we must convert the to_ticks that are now in 1735 * ms to ticks. 1736 */ 1737 to_ticks = MSEC_TO_TICKS(to_ticks); 1738 tmr = &stcb->asoc.hb_timer; 1739 } 1740 break; 1741 case SCTP_TIMER_TYPE_COOKIE: 1742 /* 1743 * Here we can use the RTO timer from the network since one 1744 * RTT was compelete. If a retran happened then we will be 1745 * using the RTO initial value. 1746 */ 1747 if ((stcb == NULL) || (net == NULL)) { 1748 return (EFAULT); 1749 } 1750 if (net->RTO == 0) { 1751 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1752 } else { 1753 to_ticks = MSEC_TO_TICKS(net->RTO); 1754 } 1755 tmr = &net->rxt_timer; 1756 break; 1757 case SCTP_TIMER_TYPE_NEWCOOKIE: 1758 /* 1759 * nothing needed but the endpoint here ususually about 60 1760 * minutes. 
1761 */ 1762 tmr = &inp->sctp_ep.signature_change; 1763 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 1764 break; 1765 case SCTP_TIMER_TYPE_ASOCKILL: 1766 if (stcb == NULL) { 1767 return (EFAULT); 1768 } 1769 tmr = &stcb->asoc.strreset_timer; 1770 to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT); 1771 break; 1772 case SCTP_TIMER_TYPE_INPKILL: 1773 /* 1774 * The inp is setup to die. We re-use the signature_chage 1775 * timer since that has stopped and we are in the GONE 1776 * state. 1777 */ 1778 tmr = &inp->sctp_ep.signature_change; 1779 to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT); 1780 break; 1781 case SCTP_TIMER_TYPE_PATHMTURAISE: 1782 /* 1783 * Here we use the value found in the EP for PMTU ususually 1784 * about 10 minutes. 1785 */ 1786 if (stcb == NULL) { 1787 return (EFAULT); 1788 } 1789 if (net == NULL) { 1790 return (EFAULT); 1791 } 1792 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 1793 tmr = &net->pmtu_timer; 1794 break; 1795 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1796 /* Here we use the RTO of the destination */ 1797 if ((stcb == NULL) || (net == NULL)) { 1798 return (EFAULT); 1799 } 1800 if (net->RTO == 0) { 1801 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1802 } else { 1803 to_ticks = MSEC_TO_TICKS(net->RTO); 1804 } 1805 tmr = &net->rxt_timer; 1806 break; 1807 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1808 /* 1809 * Here we use the endpoints shutdown guard timer usually 1810 * about 3 minutes. 1811 */ 1812 if (stcb == NULL) { 1813 return (EFAULT); 1814 } 1815 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 1816 tmr = &stcb->asoc.shut_guard_timer; 1817 break; 1818 case SCTP_TIMER_TYPE_STRRESET: 1819 /* 1820 * Here the timer comes from the inp but its value is from 1821 * the RTO. 1822 */ 1823 if ((stcb == NULL) || (net == NULL)) { 1824 return (EFAULT); 1825 } 1826 if (net->RTO == 0) { 1827 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1828 } else { 1829 to_ticks = MSEC_TO_TICKS(net->RTO); 1830 } 1831 tmr = &stcb->asoc.strreset_timer; 1832 break; 1833 1834 case SCTP_TIMER_TYPE_EARLYFR: 1835 { 1836 unsigned int msec; 1837 1838 if ((stcb == NULL) || (net == NULL)) { 1839 return (EFAULT); 1840 } 1841 if (net->flight_size > net->cwnd) { 1842 /* no need to start */ 1843 return (0); 1844 } 1845 SCTP_STAT_INCR(sctps_earlyfrstart); 1846 if (net->lastsa == 0) { 1847 /* Hmm no rtt estimate yet? */ 1848 msec = stcb->asoc.initial_rto >> 2; 1849 } else { 1850 msec = ((net->lastsa >> 2) + net->lastsv) >> 1; 1851 } 1852 if (msec < sctp_early_fr_msec) { 1853 msec = sctp_early_fr_msec; 1854 if (msec < SCTP_MINFR_MSEC_FLOOR) { 1855 msec = SCTP_MINFR_MSEC_FLOOR; 1856 } 1857 } 1858 to_ticks = MSEC_TO_TICKS(msec); 1859 tmr = &net->fr_timer; 1860 } 1861 break; 1862 case SCTP_TIMER_TYPE_ASCONF: 1863 /* 1864 * Here the timer comes from the inp but its value is from 1865 * the RTO. 
1866 */ 1867 if ((stcb == NULL) || (net == NULL)) { 1868 return (EFAULT); 1869 } 1870 if (net->RTO == 0) { 1871 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1872 } else { 1873 to_ticks = MSEC_TO_TICKS(net->RTO); 1874 } 1875 tmr = &stcb->asoc.asconf_timer; 1876 break; 1877 case SCTP_TIMER_TYPE_AUTOCLOSE: 1878 if (stcb == NULL) { 1879 return (EFAULT); 1880 } 1881 if (stcb->asoc.sctp_autoclose_ticks == 0) { 1882 /* 1883 * Really an error since stcb is NOT set to 1884 * autoclose 1885 */ 1886 return (0); 1887 } 1888 to_ticks = stcb->asoc.sctp_autoclose_ticks; 1889 tmr = &stcb->asoc.autoclose_timer; 1890 break; 1891 default: 1892 #ifdef SCTP_DEBUG 1893 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1894 printf("sctp_timer_start:Unknown timer type %d\n", 1895 t_type); 1896 } 1897 #endif /* SCTP_DEBUG */ 1898 return (EFAULT); 1899 break; 1900 }; 1901 if ((to_ticks <= 0) || (tmr == NULL)) { 1902 #ifdef SCTP_DEBUG 1903 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1904 printf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n", 1905 t_type, to_ticks, tmr); 1906 } 1907 #endif /* SCTP_DEBUG */ 1908 return (EFAULT); 1909 } 1910 if (callout_pending(&tmr->timer)) { 1911 /* 1912 * we do NOT allow you to have it already running. if it is 1913 * we leave the current one up unchanged 1914 */ 1915 return (EALREADY); 1916 } 1917 /* At this point we can proceed */ 1918 if (t_type == SCTP_TIMER_TYPE_SEND) { 1919 stcb->asoc.num_send_timers_up++; 1920 } 1921 tmr->type = t_type; 1922 tmr->ep = (void *)inp; 1923 tmr->tcb = (void *)stcb; 1924 tmr->net = (void *)net; 1925 tmr->self = (void *)tmr; 1926 tmr->ticks = ticks; 1927 callout_reset(&tmr->timer, to_ticks, sctp_timeout_handler, tmr); 1928 return (0); 1929 } 1930 1931 int 1932 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1933 struct sctp_nets *net) 1934 { 1935 struct sctp_timer *tmr; 1936 1937 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && 1938 (inp == NULL)) 1939 return (EFAULT); 1940 1941 tmr = NULL; 1942 if (stcb) { 1943 SCTP_TCB_LOCK_ASSERT(stcb); 1944 } 1945 switch (t_type) { 1946 case SCTP_TIMER_TYPE_ADDR_WQ: 1947 tmr = &sctppcbinfo.addr_wq_timer; 1948 break; 1949 case SCTP_TIMER_TYPE_EARLYFR: 1950 if ((stcb == NULL) || (net == NULL)) { 1951 return (EFAULT); 1952 } 1953 tmr = &net->fr_timer; 1954 SCTP_STAT_INCR(sctps_earlyfrstop); 1955 break; 1956 case SCTP_TIMER_TYPE_ITERATOR: 1957 { 1958 struct sctp_iterator *it; 1959 1960 it = (struct sctp_iterator *)inp; 1961 tmr = &it->tmr; 1962 } 1963 break; 1964 case SCTP_TIMER_TYPE_SEND: 1965 if ((stcb == NULL) || (net == NULL)) { 1966 return (EFAULT); 1967 } 1968 tmr = &net->rxt_timer; 1969 break; 1970 case SCTP_TIMER_TYPE_INIT: 1971 if ((stcb == NULL) || (net == NULL)) { 1972 return (EFAULT); 1973 } 1974 tmr = &net->rxt_timer; 1975 break; 1976 case SCTP_TIMER_TYPE_RECV: 1977 if (stcb == NULL) { 1978 return (EFAULT); 1979 } 1980 tmr = &stcb->asoc.dack_timer; 1981 break; 1982 case SCTP_TIMER_TYPE_SHUTDOWN: 1983 if ((stcb == NULL) || (net == NULL)) { 1984 return (EFAULT); 1985 } 1986 tmr = &net->rxt_timer; 1987 break; 1988 case SCTP_TIMER_TYPE_HEARTBEAT: 1989 if (stcb == NULL) { 1990 return (EFAULT); 1991 } 1992 tmr = &stcb->asoc.hb_timer; 1993 break; 1994 case SCTP_TIMER_TYPE_COOKIE: 1995 if ((stcb == NULL) || (net == NULL)) { 1996 return (EFAULT); 1997 } 1998 tmr = &net->rxt_timer; 1999 break; 2000 case SCTP_TIMER_TYPE_NEWCOOKIE: 2001 /* nothing needed but the endpoint here */ 2002 tmr = &inp->sctp_ep.signature_change; 2003 /* 2004 * We re-use the newcookie timer for the INP 
kill timer. We 2005 * must assure that we do not kill it by accident. 2006 */ 2007 break; 2008 case SCTP_TIMER_TYPE_ASOCKILL: 2009 /* 2010 * Stop the asoc kill timer. 2011 */ 2012 if (stcb == NULL) { 2013 return (EFAULT); 2014 } 2015 tmr = &stcb->asoc.strreset_timer; 2016 break; 2017 2018 case SCTP_TIMER_TYPE_INPKILL: 2019 /* 2020 * The inp is setup to die. We re-use the signature_chage 2021 * timer since that has stopped and we are in the GONE 2022 * state. 2023 */ 2024 tmr = &inp->sctp_ep.signature_change; 2025 break; 2026 case SCTP_TIMER_TYPE_PATHMTURAISE: 2027 if ((stcb == NULL) || (net == NULL)) { 2028 return (EFAULT); 2029 } 2030 tmr = &net->pmtu_timer; 2031 break; 2032 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2033 if ((stcb == NULL) || (net == NULL)) { 2034 return (EFAULT); 2035 } 2036 tmr = &net->rxt_timer; 2037 break; 2038 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2039 if (stcb == NULL) { 2040 return (EFAULT); 2041 } 2042 tmr = &stcb->asoc.shut_guard_timer; 2043 break; 2044 case SCTP_TIMER_TYPE_STRRESET: 2045 if (stcb == NULL) { 2046 return (EFAULT); 2047 } 2048 tmr = &stcb->asoc.strreset_timer; 2049 break; 2050 case SCTP_TIMER_TYPE_ASCONF: 2051 if (stcb == NULL) { 2052 return (EFAULT); 2053 } 2054 tmr = &stcb->asoc.asconf_timer; 2055 break; 2056 case SCTP_TIMER_TYPE_AUTOCLOSE: 2057 if (stcb == NULL) { 2058 return (EFAULT); 2059 } 2060 tmr = &stcb->asoc.autoclose_timer; 2061 break; 2062 default: 2063 #ifdef SCTP_DEBUG 2064 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 2065 printf("sctp_timer_stop:Unknown timer type %d\n", 2066 t_type); 2067 } 2068 #endif /* SCTP_DEBUG */ 2069 break; 2070 }; 2071 if (tmr == NULL) { 2072 return (EFAULT); 2073 } 2074 if ((tmr->type != t_type) && tmr->type) { 2075 /* 2076 * Ok we have a timer that is under joint use. Cookie timer 2077 * per chance with the SEND timer. We therefore are NOT 2078 * running the timer that the caller wants stopped. So just 2079 * return. 2080 */ 2081 return (0); 2082 } 2083 if (t_type == SCTP_TIMER_TYPE_SEND) { 2084 stcb->asoc.num_send_timers_up--; 2085 if (stcb->asoc.num_send_timers_up < 0) { 2086 stcb->asoc.num_send_timers_up = 0; 2087 } 2088 } 2089 tmr->self = NULL; 2090 callout_stop(&tmr->timer); 2091 return (0); 2092 } 2093 2094 #ifdef SCTP_USE_ADLER32 2095 static uint32_t 2096 update_adler32(uint32_t adler, uint8_t * buf, int32_t len) 2097 { 2098 uint32_t s1 = adler & 0xffff; 2099 uint32_t s2 = (adler >> 16) & 0xffff; 2100 int n; 2101 2102 for (n = 0; n < len; n++, buf++) { 2103 /* s1 = (s1 + buf[n]) % BASE */ 2104 /* first we add */ 2105 s1 = (s1 + *buf); 2106 /* 2107 * now if we need to, we do a mod by subtracting. It seems a 2108 * bit faster since I really will only ever do one subtract 2109 * at the MOST, since buf[n] is a max of 255. 2110 */ 2111 if (s1 >= SCTP_ADLER32_BASE) { 2112 s1 -= SCTP_ADLER32_BASE; 2113 } 2114 /* s2 = (s2 + s1) % BASE */ 2115 /* first we add */ 2116 s2 = (s2 + s1); 2117 /* 2118 * again, it is more efficent (it seems) to subtract since 2119 * the most s2 will ever be is (BASE-1 + BASE-1) in the 2120 * worse case. This would then be (2 * BASE) - 2, which will 2121 * still only do one subtract. On Intel this is much better 2122 * to do this way and avoid the divide. Have not -pg'd on 2123 * sparc. 
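 * For reference: with the Adler-32 modulus of 65521, s1 is at most
 * 65520 + 255 after the add and s2 at most 65520 + 65520, both below
 * twice the modulus, so a single conditional subtract always suffices.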
2124 */ 2125 if (s2 >= SCTP_ADLER32_BASE) { 2126 s2 -= SCTP_ADLER32_BASE; 2127 } 2128 } 2129 /* Return the adler32 of the bytes buf[0..len-1] */ 2130 return ((s2 << 16) + s1); 2131 } 2132 2133 #endif 2134 2135 2136 uint32_t 2137 sctp_calculate_len(struct mbuf *m) 2138 { 2139 uint32_t tlen = 0; 2140 struct mbuf *at; 2141 2142 at = m; 2143 while (at) { 2144 tlen += at->m_len; 2145 at = at->m_next; 2146 } 2147 return (tlen); 2148 } 2149 2150 #if defined(SCTP_WITH_NO_CSUM) 2151 2152 uint32_t 2153 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset) 2154 { 2155 /* 2156 * given a mbuf chain with a packetheader offset by 'offset' 2157 * pointing at a sctphdr (with csum set to 0) go through the chain 2158 * of m_next's and calculate the SCTP checksum. This is currently 2159 * Adler32 but will change to CRC32x soon. Also has a side bonus 2160 * calculate the total length of the mbuf chain. Note: if offset is 2161 * greater than the total mbuf length, checksum=1, pktlen=0 is 2162 * returned (ie. no real error code) 2163 */ 2164 if (pktlen == NULL) 2165 return (0); 2166 *pktlen = sctp_calculate_len(m); 2167 return (0); 2168 } 2169 2170 #elif defined(SCTP_USE_INCHKSUM) 2171 2172 #include <machine/in_cksum.h> 2173 2174 uint32_t 2175 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset) 2176 { 2177 /* 2178 * given a mbuf chain with a packetheader offset by 'offset' 2179 * pointing at a sctphdr (with csum set to 0) go through the chain 2180 * of m_next's and calculate the SCTP checksum. This is currently 2181 * Adler32 but will change to CRC32x soon. Also has a side bonus 2182 * calculate the total length of the mbuf chain. Note: if offset is 2183 * greater than the total mbuf length, checksum=1, pktlen=0 is 2184 * returned (ie. no real error code) 2185 */ 2186 int32_t tlen = 0; 2187 struct mbuf *at; 2188 uint32_t the_sum, retsum; 2189 2190 at = m; 2191 while (at) { 2192 tlen += at->m_len; 2193 at = at->m_next; 2194 } 2195 the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset)); 2196 if (pktlen != NULL) 2197 *pktlen = (tlen - offset); 2198 retsum = htons(the_sum); 2199 return (the_sum); 2200 } 2201 2202 #else 2203 2204 uint32_t 2205 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset) 2206 { 2207 /* 2208 * given a mbuf chain with a packetheader offset by 'offset' 2209 * pointing at a sctphdr (with csum set to 0) go through the chain 2210 * of m_next's and calculate the SCTP checksum. This is currently 2211 * Adler32 but will change to CRC32x soon. Also has a side bonus 2212 * calculate the total length of the mbuf chain. Note: if offset is 2213 * greater than the total mbuf length, checksum=1, pktlen=0 is 2214 * returned (ie. 
no real error code) 2215 */ 2216 int32_t tlen = 0; 2217 2218 #ifdef SCTP_USE_ADLER32 2219 uint32_t base = 1L; 2220 2221 #else 2222 uint32_t base = 0xffffffff; 2223 2224 #endif /* SCTP_USE_ADLER32 */ 2225 struct mbuf *at; 2226 2227 at = m; 2228 /* find the correct mbuf and offset into mbuf */ 2229 while ((at != NULL) && (offset > (uint32_t) at->m_len)) { 2230 offset -= at->m_len; /* update remaining offset left */ 2231 at = at->m_next; 2232 } 2233 while (at != NULL) { 2234 if ((at->m_len - offset) > 0) { 2235 #ifdef SCTP_USE_ADLER32 2236 base = update_adler32(base, 2237 (unsigned char *)(at->m_data + offset), 2238 (unsigned int)(at->m_len - offset)); 2239 #else 2240 if ((at->m_len - offset) < 4) { 2241 /* Use old method if less than 4 bytes */ 2242 base = old_update_crc32(base, 2243 (unsigned char *)(at->m_data + offset), 2244 (unsigned int)(at->m_len - offset)); 2245 } else { 2246 base = update_crc32(base, 2247 (unsigned char *)(at->m_data + offset), 2248 (unsigned int)(at->m_len - offset)); 2249 } 2250 #endif /* SCTP_USE_ADLER32 */ 2251 tlen += at->m_len - offset; 2252 /* we only offset once into the first mbuf */ 2253 } 2254 if (offset) { 2255 if (offset < at->m_len) 2256 offset = 0; 2257 else 2258 offset -= at->m_len; 2259 } 2260 at = at->m_next; 2261 } 2262 if (pktlen != NULL) { 2263 *pktlen = tlen; 2264 } 2265 #ifdef SCTP_USE_ADLER32 2266 /* Adler32 */ 2267 base = htonl(base); 2268 #else 2269 /* CRC-32c */ 2270 base = sctp_csum_finalize(base); 2271 #endif 2272 return (base); 2273 } 2274 2275 2276 #endif 2277 2278 void 2279 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2280 struct sctp_association *asoc, u_long mtu) 2281 { 2282 /* 2283 * Reset the P-MTU size on this association, this involves changing 2284 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2285 * allow the DF flag to be cleared. 2286 */ 2287 struct sctp_tmit_chunk *chk; 2288 unsigned int eff_mtu, ovh; 2289 2290 asoc->smallest_mtu = mtu; 2291 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2292 ovh = SCTP_MIN_OVERHEAD; 2293 } else { 2294 ovh = SCTP_MIN_V4_OVERHEAD; 2295 } 2296 eff_mtu = mtu - ovh; 2297 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2298 2299 if (chk->send_size > eff_mtu) { 2300 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2301 } 2302 } 2303 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2304 if (chk->send_size > eff_mtu) { 2305 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2306 } 2307 } 2308 } 2309 2310 2311 /* 2312 * given an association and starting time of the current RTT period return 2313 * RTO in number of usecs net should point to the current network 2314 */ 2315 uint32_t 2316 sctp_calculate_rto(struct sctp_tcb *stcb, 2317 struct sctp_association *asoc, 2318 struct sctp_nets *net, 2319 struct timeval *old) 2320 { 2321 /* 2322 * given an association and the starting time of the current RTT 2323 * period (in value1/value2) return RTO in number of usecs. 2324 */ 2325 int calc_time = 0; 2326 int o_calctime; 2327 unsigned int new_rto = 0; 2328 int first_measure = 0; 2329 struct timeval now; 2330 2331 /************************/ 2332 /* 1. 
calculate new RTT */ 2333 /************************/ 2334 /* get the current time */ 2335 SCTP_GETTIME_TIMEVAL(&now); 2336 /* compute the RTT value */ 2337 if ((u_long)now.tv_sec > (u_long)old->tv_sec) { 2338 calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000; 2339 if ((u_long)now.tv_usec > (u_long)old->tv_usec) { 2340 calc_time += (((u_long)now.tv_usec - 2341 (u_long)old->tv_usec) / 1000); 2342 } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) { 2343 /* Borrow 1,000ms from current calculation */ 2344 calc_time -= 1000; 2345 /* Add in the slop over */ 2346 calc_time += ((int)now.tv_usec / 1000); 2347 /* Add in the pre-second ms's */ 2348 calc_time += (((int)1000000 - (int)old->tv_usec) / 1000); 2349 } 2350 } else if ((u_long)now.tv_sec == (u_long)old->tv_sec) { 2351 if ((u_long)now.tv_usec > (u_long)old->tv_usec) { 2352 calc_time = ((u_long)now.tv_usec - 2353 (u_long)old->tv_usec) / 1000; 2354 } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) { 2355 /* impossible .. garbage in nothing out */ 2356 return (((net->lastsa >> 2) + net->lastsv) >> 1); 2357 } else { 2358 /* impossible .. garbage in nothing out */ 2359 return (((net->lastsa >> 2) + net->lastsv) >> 1); 2360 } 2361 } else { 2362 /* Clock wrapped? */ 2363 return (((net->lastsa >> 2) + net->lastsv) >> 1); 2364 } 2365 /***************************/ 2366 /* 2. update RTTVAR & SRTT */ 2367 /***************************/ 2368 #if 0 2369 /* if (net->lastsv || net->lastsa) { */ 2370 /* per Section 5.3.1 C3 in SCTP */ 2371 /* net->lastsv = (int) *//* RTTVAR */ 2372 /* 2373 * (((double)(1.0 - 0.25) * (double)net->lastsv) + (double)(0.25 * 2374 * (double)abs(net->lastsa - calc_time))); net->lastsa = (int) 2375 *//* SRTT */ 2376 /* 2377 * (((double)(1.0 - 0.125) * (double)net->lastsa) + (double)(0.125 * 2378 * (double)calc_time)); } else { 2379 *//* the first RTT calculation, per C2 Section 5.3.1 */ 2380 /* net->lastsa = calc_time; *//* SRTT */ 2381 /* net->lastsv = calc_time / 2; *//* RTTVAR */ 2382 /* } */ 2383 /* if RTTVAR goes to 0 you set to clock grainularity */ 2384 /* 2385 * if (net->lastsv == 0) { net->lastsv = SCTP_CLOCK_GRANULARITY; } 2386 * new_rto = net->lastsa + 4 * net->lastsv; 2387 */ 2388 #endif 2389 o_calctime = calc_time; 2390 /* this is Van Jacobson's integer version */ 2391 if (net->RTO) { 2392 calc_time -= (net->lastsa >> 3); 2393 if ((int)net->prev_rtt > o_calctime) { 2394 net->rtt_variance = net->prev_rtt - o_calctime; 2395 /* decreasing */ 2396 net->rto_variance_dir = 0; 2397 } else { 2398 /* increasing */ 2399 net->rtt_variance = o_calctime - net->prev_rtt; 2400 net->rto_variance_dir = 1; 2401 } 2402 #ifdef SCTP_RTTVAR_LOGGING 2403 rto_logging(net, SCTP_LOG_RTTVAR); 2404 #endif 2405 net->prev_rtt = o_calctime; 2406 net->lastsa += calc_time; 2407 if (calc_time < 0) { 2408 calc_time = -calc_time; 2409 } 2410 calc_time -= (net->lastsv >> 2); 2411 net->lastsv += calc_time; 2412 if (net->lastsv == 0) { 2413 net->lastsv = SCTP_CLOCK_GRANULARITY; 2414 } 2415 } else { 2416 /* First RTO measurment */ 2417 net->lastsa = calc_time; 2418 net->lastsv = calc_time >> 1; 2419 first_measure = 1; 2420 net->rto_variance_dir = 1; 2421 net->prev_rtt = o_calctime; 2422 net->rtt_variance = 0; 2423 #ifdef SCTP_RTTVAR_LOGGING 2424 rto_logging(net, SCTP_LOG_INITIAL_RTT); 2425 #endif 2426 } 2427 new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1; 2428 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 2429 (stcb->asoc.sat_network_lockout == 0)) { 2430 stcb->asoc.sat_network = 1; 2431 } else if ((!first_measure) && stcb->asoc.sat_network) 
{ 2432 stcb->asoc.sat_network = 0; 2433 stcb->asoc.sat_network_lockout = 1; 2434 } 2435 /* bound it, per C6/C7 in Section 5.3.1 */ 2436 if (new_rto < stcb->asoc.minrto) { 2437 new_rto = stcb->asoc.minrto; 2438 } 2439 if (new_rto > stcb->asoc.maxrto) { 2440 new_rto = stcb->asoc.maxrto; 2441 } 2442 /* we are now returning the RTT Smoothed */ 2443 return ((uint32_t) new_rto); 2444 } 2445 2446 2447 /* 2448 * return a pointer to a contiguous piece of data from the given mbuf chain 2449 * starting at 'off' for 'len' bytes. If the desired piece spans more than 2450 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 2451 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 2452 */ 2453 __inline caddr_t 2454 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr) 2455 { 2456 uint32_t count; 2457 uint8_t *ptr; 2458 2459 ptr = in_ptr; 2460 if ((off < 0) || (len <= 0)) 2461 return (NULL); 2462 2463 /* find the desired start location */ 2464 while ((m != NULL) && (off > 0)) { 2465 if (off < m->m_len) 2466 break; 2467 off -= m->m_len; 2468 m = m->m_next; 2469 } 2470 if (m == NULL) 2471 return (NULL); 2472 2473 /* is the current mbuf large enough (eg. contiguous)? */ 2474 if ((m->m_len - off) >= len) { 2475 return (mtod(m, caddr_t)+off); 2476 } else { 2477 /* else, it spans more than one mbuf, so save a temp copy... */ 2478 while ((m != NULL) && (len > 0)) { 2479 count = min(m->m_len - off, len); 2480 bcopy(mtod(m, caddr_t)+off, ptr, count); 2481 len -= count; 2482 ptr += count; 2483 off = 0; 2484 m = m->m_next; 2485 } 2486 if ((m == NULL) && (len > 0)) 2487 return (NULL); 2488 else 2489 return ((caddr_t)in_ptr); 2490 } 2491 } 2492 2493 2494 struct sctp_paramhdr * 2495 sctp_get_next_param(struct mbuf *m, 2496 int offset, 2497 struct sctp_paramhdr *pull, 2498 int pull_limit) 2499 { 2500 /* This just provides a typed signature to Peter's Pull routine */ 2501 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 2502 (uint8_t *) pull)); 2503 } 2504 2505 2506 int 2507 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 2508 { 2509 /* 2510 * add padlen bytes of 0 filled padding to the end of the mbuf. If 2511 * padlen is > 3 this routine will fail. 2512 */ 2513 uint8_t *dp; 2514 int i; 2515 2516 if (padlen > 3) { 2517 return (ENOBUFS); 2518 } 2519 if (M_TRAILINGSPACE(m)) { 2520 /* 2521 * The easy way. We hope the majority of the time we hit 2522 * here :) 2523 */ 2524 dp = (uint8_t *) (mtod(m, caddr_t)+m->m_len); 2525 m->m_len += padlen; 2526 } else { 2527 /* Hard way we must grow the mbuf */ 2528 struct mbuf *tmp; 2529 2530 tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA); 2531 if (tmp == NULL) { 2532 /* Out of space GAK! we are in big trouble. 
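 * This should be rare, since the pad is at most 3 bytes, but if even a
 * small mbuf cannot be allocated all we can do is report ENOSPC.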
*/ 2533 return (ENOSPC); 2534 } 2535 /* setup and insert in middle */ 2536 tmp->m_next = m->m_next; 2537 tmp->m_len = padlen; 2538 m->m_next = tmp; 2539 dp = mtod(tmp, uint8_t *); 2540 } 2541 /* zero out the pad */ 2542 for (i = 0; i < padlen; i++) { 2543 *dp = 0; 2544 dp++; 2545 } 2546 return (0); 2547 } 2548 2549 int 2550 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 2551 { 2552 /* find the last mbuf in chain and pad it */ 2553 struct mbuf *m_at; 2554 2555 m_at = m; 2556 if (last_mbuf) { 2557 return (sctp_add_pad_tombuf(last_mbuf, padval)); 2558 } else { 2559 while (m_at) { 2560 if (m_at->m_next == NULL) { 2561 return (sctp_add_pad_tombuf(m_at, padval)); 2562 } 2563 m_at = m_at->m_next; 2564 } 2565 } 2566 return (EFAULT); 2567 } 2568 2569 int sctp_asoc_change_wake = 0; 2570 2571 static void 2572 sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb, 2573 uint32_t error, void *data) 2574 { 2575 struct mbuf *m_notify; 2576 struct sctp_assoc_change *sac; 2577 struct sctp_queued_to_read *control; 2578 int locked = 0; 2579 2580 /* 2581 * First if we are are going down dump everything we can to the 2582 * socket rcv queue. 2583 */ 2584 2585 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 2586 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 2587 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) 2588 ) { 2589 /* If the socket is gone we are out of here */ 2590 return; 2591 } 2592 if ((event == SCTP_COMM_LOST) || (event == SCTP_SHUTDOWN_COMP)) { 2593 if (stcb->asoc.control_pdapi) { 2594 /* 2595 * we were in the middle of a PD-API verify its 2596 * there. 2597 */ 2598 SCTP_INP_READ_LOCK(stcb->sctp_ep); 2599 locked = 1; 2600 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) { 2601 if (control == stcb->asoc.control_pdapi) { 2602 /* Yep its here, notify them */ 2603 if (event == SCTP_COMM_LOST) { 2604 /* 2605 * Abort/broken we had a 2606 * real PD-API aborted 2607 */ 2608 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) { 2609 /* 2610 * hmm.. don't want 2611 * a notify if 2612 * held_lenght is 2613 * set,they may be 2614 * stuck. clear and 2615 * wake. 2616 */ 2617 if (control->held_length) { 2618 control->held_length = 0; 2619 control->end_added = 1; 2620 } 2621 } else { 2622 sctp_notify_partial_delivery_indication(stcb, event, 1); 2623 2624 } 2625 } else { 2626 /* implicit EOR on EOF */ 2627 control->held_length = 0; 2628 control->end_added = 1; 2629 } 2630 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 2631 locked = 0; 2632 /* wake him up */ 2633 control->do_not_ref_stcb = 1; 2634 stcb->asoc.control_pdapi = NULL; 2635 sorwakeup(stcb->sctp_socket); 2636 break; 2637 } 2638 } 2639 if (locked) 2640 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 2641 2642 } 2643 } 2644 /* 2645 * For TCP model AND UDP connected sockets we will send an error up 2646 * when an ABORT comes in. 
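 * Note that so_error is set only when the read queue is already empty,
 * so a reader can still drain data that arrived before the ABORT.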
2647 */ 2648 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2649 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2650 (event == SCTP_COMM_LOST)) { 2651 if (TAILQ_EMPTY(&stcb->sctp_ep->read_queue)) { 2652 stcb->sctp_socket->so_error = ECONNRESET; 2653 } 2654 /* Wake ANY sleepers */ 2655 sorwakeup(stcb->sctp_socket); 2656 sowwakeup(stcb->sctp_socket); 2657 sctp_asoc_change_wake++; 2658 } 2659 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 2660 /* event not enabled */ 2661 return; 2662 } 2663 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 1, M_DONTWAIT, 1, MT_DATA); 2664 if (m_notify == NULL) 2665 /* no space left */ 2666 return; 2667 m_notify->m_len = 0; 2668 2669 sac = mtod(m_notify, struct sctp_assoc_change *); 2670 sac->sac_type = SCTP_ASSOC_CHANGE; 2671 sac->sac_flags = 0; 2672 sac->sac_length = sizeof(struct sctp_assoc_change); 2673 sac->sac_state = event; 2674 sac->sac_error = error; 2675 /* XXX verify these stream counts */ 2676 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 2677 sac->sac_inbound_streams = stcb->asoc.streamincnt; 2678 sac->sac_assoc_id = sctp_get_associd(stcb); 2679 m_notify->m_flags |= M_EOR | M_NOTIFICATION; 2680 m_notify->m_pkthdr.len = sizeof(struct sctp_assoc_change); 2681 m_notify->m_pkthdr.rcvif = 0; 2682 m_notify->m_len = sizeof(struct sctp_assoc_change); 2683 m_notify->m_next = NULL; 2684 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2685 0, 0, 0, 0, 0, 0, 2686 m_notify); 2687 if (control == NULL) { 2688 /* no memory */ 2689 sctp_m_freem(m_notify); 2690 return; 2691 } 2692 control->length = m_notify->m_len; 2693 /* not that we need this */ 2694 control->tail_mbuf = m_notify; 2695 sctp_add_to_readq(stcb->sctp_ep, stcb, 2696 control, 2697 &stcb->sctp_socket->so_rcv, 1); 2698 if (event == SCTP_COMM_LOST) { 2699 /* Wake up any sleeper */ 2700 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 2701 } 2702 } 2703 2704 static void 2705 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 2706 struct sockaddr *sa, uint32_t error) 2707 { 2708 struct mbuf *m_notify; 2709 struct sctp_paddr_change *spc; 2710 struct sctp_queued_to_read *control; 2711 2712 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) 2713 /* event not enabled */ 2714 return; 2715 2716 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 1, M_DONTWAIT, 1, MT_DATA); 2717 if (m_notify == NULL) 2718 return; 2719 m_notify->m_len = 0; 2720 spc = mtod(m_notify, struct sctp_paddr_change *); 2721 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 2722 spc->spc_flags = 0; 2723 spc->spc_length = sizeof(struct sctp_paddr_change); 2724 if (sa->sa_family == AF_INET) { 2725 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 2726 } else { 2727 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 2728 } 2729 spc->spc_state = state; 2730 spc->spc_error = error; 2731 spc->spc_assoc_id = sctp_get_associd(stcb); 2732 2733 m_notify->m_flags |= M_EOR | M_NOTIFICATION; 2734 m_notify->m_pkthdr.len = sizeof(struct sctp_paddr_change); 2735 m_notify->m_pkthdr.rcvif = 0; 2736 m_notify->m_len = sizeof(struct sctp_paddr_change); 2737 m_notify->m_next = NULL; 2738 2739 /* append to socket */ 2740 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2741 0, 0, 0, 0, 0, 0, 2742 m_notify); 2743 if (control == NULL) { 2744 /* no memory */ 2745 sctp_m_freem(m_notify); 2746 return; 2747 } 2748 control->length = m_notify->m_len; 2749 /* not that we need this */ 2750 
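/*
 * Like the other *_notify routines, delivery happens by queueing a read
 * entry on the socket receive buffer; sctp_add_to_readq() charges the
 * notification mbuf to sb_cc and wakes any sleeping reader.
 */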
control->tail_mbuf = m_notify; 2751 sctp_add_to_readq(stcb->sctp_ep, stcb, 2752 control, 2753 &stcb->sctp_socket->so_rcv, 1); 2754 } 2755 2756 2757 static void 2758 sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error, 2759 struct sctp_tmit_chunk *chk) 2760 { 2761 struct mbuf *m_notify; 2762 struct sctp_send_failed *ssf; 2763 struct sctp_queued_to_read *control; 2764 int length; 2765 2766 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 2767 /* event not enabled */ 2768 return; 2769 2770 length = sizeof(struct sctp_send_failed) + chk->send_size; 2771 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 1, M_DONTWAIT, 1, MT_DATA); 2772 if (m_notify == NULL) 2773 /* no space left */ 2774 return; 2775 m_notify->m_len = 0; 2776 ssf = mtod(m_notify, struct sctp_send_failed *); 2777 ssf->ssf_type = SCTP_SEND_FAILED; 2778 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT) 2779 ssf->ssf_flags = SCTP_DATA_UNSENT; 2780 else 2781 ssf->ssf_flags = SCTP_DATA_SENT; 2782 ssf->ssf_length = length; 2783 ssf->ssf_error = error; 2784 /* not exactly what the user sent in, but should be close :) */ 2785 ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number; 2786 ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq; 2787 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 2788 ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype; 2789 ssf->ssf_info.sinfo_context = chk->rec.data.context; 2790 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2791 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2792 m_notify->m_next = chk->data; 2793 m_notify->m_flags |= M_NOTIFICATION; 2794 m_notify->m_pkthdr.len = length; 2795 m_notify->m_pkthdr.rcvif = 0; 2796 m_notify->m_len = sizeof(struct sctp_send_failed); 2797 2798 /* Steal off the mbuf */ 2799 chk->data = NULL; 2800 /* 2801 * For this case, we check the actual socket buffer, since the assoc 2802 * is going away we don't want to overfill the socket buffer for a 2803 * non-reader 2804 */ 2805 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < m_notify->m_len) { 2806 sctp_m_freem(m_notify); 2807 return; 2808 } 2809 /* append to socket */ 2810 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2811 0, 0, 0, 0, 0, 0, 2812 m_notify); 2813 if (control == NULL) { 2814 /* no memory */ 2815 sctp_m_freem(m_notify); 2816 return; 2817 } 2818 sctp_add_to_readq(stcb->sctp_ep, stcb, 2819 control, 2820 &stcb->sctp_socket->so_rcv, 1); 2821 } 2822 2823 2824 static void 2825 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 2826 struct sctp_stream_queue_pending *sp) 2827 { 2828 struct mbuf *m_notify; 2829 struct sctp_send_failed *ssf; 2830 struct sctp_queued_to_read *control; 2831 int length; 2832 2833 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 2834 /* event not enabled */ 2835 return; 2836 2837 length = sizeof(struct sctp_send_failed) + sp->length; 2838 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 1, M_DONTWAIT, 1, MT_DATA); 2839 if (m_notify == NULL) 2840 /* no space left */ 2841 return; 2842 m_notify->m_len = 0; 2843 ssf = mtod(m_notify, struct sctp_send_failed *); 2844 ssf->ssf_type = SCTP_SEND_FAILED; 2845 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT) 2846 ssf->ssf_flags = SCTP_DATA_UNSENT; 2847 else 2848 ssf->ssf_flags = SCTP_DATA_SENT; 2849 ssf->ssf_length = length; 2850 ssf->ssf_error = error; 2851 /* not exactly what the user sent in, but should be close :) */ 2852 ssf->ssf_info.sinfo_stream = sp->stream; 2853 ssf->ssf_info.sinfo_ssn = sp->strseq; 2854 
ssf->ssf_info.sinfo_flags = sp->sinfo_flags; 2855 ssf->ssf_info.sinfo_ppid = sp->ppid; 2856 ssf->ssf_info.sinfo_context = sp->context; 2857 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2858 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2859 m_notify->m_next = sp->data; 2860 m_notify->m_flags |= M_NOTIFICATION; 2861 m_notify->m_pkthdr.len = length; 2862 m_notify->m_pkthdr.rcvif = 0; 2863 m_notify->m_len = sizeof(struct sctp_send_failed); 2864 2865 /* Steal off the mbuf */ 2866 sp->data = NULL; 2867 /* 2868 * For this case, we check the actual socket buffer, since the assoc 2869 * is going away we don't want to overfill the socket buffer for a 2870 * non-reader 2871 */ 2872 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < m_notify->m_len) { 2873 sctp_m_freem(m_notify); 2874 return; 2875 } 2876 /* append to socket */ 2877 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2878 0, 0, 0, 0, 0, 0, 2879 m_notify); 2880 if (control == NULL) { 2881 /* no memory */ 2882 sctp_m_freem(m_notify); 2883 return; 2884 } 2885 sctp_add_to_readq(stcb->sctp_ep, stcb, 2886 control, 2887 &stcb->sctp_socket->so_rcv, 1); 2888 } 2889 2890 2891 2892 static void 2893 sctp_notify_adaptation_layer(struct sctp_tcb *stcb, 2894 uint32_t error) 2895 { 2896 struct mbuf *m_notify; 2897 struct sctp_adaptation_event *sai; 2898 struct sctp_queued_to_read *control; 2899 2900 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) 2901 /* event not enabled */ 2902 return; 2903 2904 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 1, M_DONTWAIT, 1, MT_DATA); 2905 if (m_notify == NULL) 2906 /* no space left */ 2907 return; 2908 m_notify->m_len = 0; 2909 sai = mtod(m_notify, struct sctp_adaptation_event *); 2910 sai->sai_type = SCTP_ADAPTATION_INDICATION; 2911 sai->sai_flags = 0; 2912 sai->sai_length = sizeof(struct sctp_adaptation_event); 2913 sai->sai_adaptation_ind = error; 2914 sai->sai_assoc_id = sctp_get_associd(stcb); 2915 2916 m_notify->m_flags |= M_EOR | M_NOTIFICATION; 2917 m_notify->m_pkthdr.len = sizeof(struct sctp_adaptation_event); 2918 m_notify->m_pkthdr.rcvif = 0; 2919 m_notify->m_len = sizeof(struct sctp_adaptation_event); 2920 m_notify->m_next = NULL; 2921 2922 /* append to socket */ 2923 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2924 0, 0, 0, 0, 0, 0, 2925 m_notify); 2926 if (control == NULL) { 2927 /* no memory */ 2928 sctp_m_freem(m_notify); 2929 return; 2930 } 2931 control->length = m_notify->m_len; 2932 /* not that we need this */ 2933 control->tail_mbuf = m_notify; 2934 sctp_add_to_readq(stcb->sctp_ep, stcb, 2935 control, 2936 &stcb->sctp_socket->so_rcv, 1); 2937 } 2938 2939 void 2940 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, 2941 uint32_t error, int no_lock) 2942 { 2943 struct mbuf *m_notify; 2944 struct sctp_pdapi_event *pdapi; 2945 struct sctp_queued_to_read *control; 2946 2947 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) 2948 /* event not enabled */ 2949 return; 2950 2951 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 1, M_DONTWAIT, 1, MT_DATA); 2952 if (m_notify == NULL) 2953 /* no space left */ 2954 return; 2955 m_notify->m_len = 0; 2956 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 2957 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 2958 pdapi->pdapi_flags = 0; 2959 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 2960 pdapi->pdapi_indication = error; 2961 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 2962 2963 m_notify->m_flags 
|= M_EOR | M_NOTIFICATION; 2964 m_notify->m_pkthdr.len = sizeof(struct sctp_pdapi_event); 2965 m_notify->m_pkthdr.rcvif = 0; 2966 m_notify->m_len = sizeof(struct sctp_pdapi_event); 2967 m_notify->m_next = NULL; 2968
2969 if (stcb->asoc.control_pdapi != NULL) { 2970 /* we will do some substitution */ 2971 control = stcb->asoc.control_pdapi; 2972 if (no_lock == 0) 2973 SCTP_INP_READ_LOCK(stcb->sctp_ep); 2974
2975 if (control->data == NULL) { 2976 control->data = control->tail_mbuf = m_notify; 2977 control->held_length = 0; 2978 control->length = m_notify->m_len; 2979 control->end_added = 1; 2980 sctp_sballoc(stcb, 2981 &stcb->sctp_socket->so_rcv, 2982 m_notify);
2983 } else if (control->end_added == 0) { 2984 struct mbuf *m = NULL; 2985 2986 m = control->data; 2987 while (m) { 2988 sctp_sbfree(control, stcb, 2989 &stcb->sctp_socket->so_rcv, m); 2990 m = sctp_m_free(m); 2991 } 2992 control->data = NULL; 2993 control->length = m_notify->m_len; 2994 control->data = control->tail_mbuf = m_notify; 2995 control->held_length = 0; 2996 control->end_added = 1; 2997 /* account for the notification mbuf, not the chain just freed */ sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m_notify);
2998 } else { 2999 /* Hmm .. should not happen */ 3000 control->end_added = 1; 3001 stcb->asoc.control_pdapi = NULL; 3002 goto add_to_end; 3003 } 3004 if (no_lock == 0) 3005 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 3006 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3007 } else { 3008 /* append to socket */ 3009 add_to_end: 3010 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3011 0, 0, 0, 0, 0, 0, 3012 m_notify); 3013 if (control == NULL) { 3014 /* no memory */ 3015 sctp_m_freem(m_notify); 3016 return; 3017 } 3018 control->length = m_notify->m_len; 3019 /* not that we need this */ 3020 control->tail_mbuf = m_notify; 3021 sctp_add_to_readq(stcb->sctp_ep, stcb, 3022 control, 3023 &stcb->sctp_socket->so_rcv, 1); 3024 } 3025 } 3026
3027 static void 3028 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3029 { 3030 struct mbuf *m_notify; 3031 struct sctp_shutdown_event *sse; 3032 struct sctp_queued_to_read *control; 3033
3034 /* 3035 * For the TCP model AND UDP connected sockets we close the write 3036 * side of the socket when a SHUTDOWN completes 3037 */ 3038 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3039 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3040 /* mark the socket closed for writing and wake up any writer
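 * (socantsendmore() flags the send buffer as shut down, so later writes
 * on this socket will typically fail with EPIPE.)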
*/ 3041 socantsendmore(stcb->sctp_socket); 3042 } 3043 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) 3044 /* event not enabled */ 3045 return; 3046 3047 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 1, M_DONTWAIT, 1, MT_DATA); 3048 if (m_notify == NULL) 3049 /* no space left */ 3050 return; 3051 m_notify->m_len = 0; 3052 sse = mtod(m_notify, struct sctp_shutdown_event *); 3053 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3054 sse->sse_flags = 0; 3055 sse->sse_length = sizeof(struct sctp_shutdown_event); 3056 sse->sse_assoc_id = sctp_get_associd(stcb); 3057 3058 m_notify->m_flags |= M_EOR | M_NOTIFICATION; 3059 m_notify->m_pkthdr.len = sizeof(struct sctp_shutdown_event); 3060 m_notify->m_pkthdr.rcvif = 0; 3061 m_notify->m_len = sizeof(struct sctp_shutdown_event); 3062 m_notify->m_next = NULL; 3063 3064 /* append to socket */ 3065 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3066 0, 0, 0, 0, 0, 0, 3067 m_notify); 3068 if (control == NULL) { 3069 /* no memory */ 3070 sctp_m_freem(m_notify); 3071 return; 3072 } 3073 control->length = m_notify->m_len; 3074 /* not that we need this */ 3075 control->tail_mbuf = m_notify; 3076 sctp_add_to_readq(stcb->sctp_ep, stcb, 3077 control, 3078 &stcb->sctp_socket->so_rcv, 1); 3079 } 3080 3081 static void 3082 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3083 int number_entries, uint16_t * list, int flag) 3084 { 3085 struct mbuf *m_notify; 3086 struct sctp_queued_to_read *control; 3087 struct sctp_stream_reset_event *strreset; 3088 int len; 3089 3090 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) 3091 /* event not enabled */ 3092 return; 3093 3094 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA); 3095 if (m_notify == NULL) 3096 /* no space left */ 3097 return; 3098 m_notify->m_len = 0; 3099 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3100 if (len > M_TRAILINGSPACE(m_notify)) { 3101 /* never enough room */ 3102 sctp_m_freem(m_notify); 3103 return; 3104 } 3105 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3106 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3107 if (number_entries == 0) { 3108 strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS; 3109 } else { 3110 strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST; 3111 } 3112 strreset->strreset_length = len; 3113 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3114 if (number_entries) { 3115 int i; 3116 3117 for (i = 0; i < number_entries; i++) { 3118 strreset->strreset_list[i] = ntohs(list[i]); 3119 } 3120 } 3121 m_notify->m_flags |= M_EOR | M_NOTIFICATION; 3122 m_notify->m_pkthdr.len = len; 3123 m_notify->m_pkthdr.rcvif = 0; 3124 m_notify->m_len = len; 3125 m_notify->m_next = NULL; 3126 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < m_notify->m_len) { 3127 /* no space */ 3128 sctp_m_freem(m_notify); 3129 return; 3130 } 3131 /* append to socket */ 3132 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3133 0, 0, 0, 0, 0, 0, 3134 m_notify); 3135 if (control == NULL) { 3136 /* no memory */ 3137 sctp_m_freem(m_notify); 3138 return; 3139 } 3140 control->length = m_notify->m_len; 3141 /* not that we need this */ 3142 control->tail_mbuf = m_notify; 3143 sctp_add_to_readq(stcb->sctp_ep, stcb, 3144 control, 3145 &stcb->sctp_socket->so_rcv, 1); 3146 } 3147 3148 3149 void 3150 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 3151 uint32_t error, void *data) 3152 { 3153 if 
((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3154 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3155 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) 3156 ) { 3157 /* No notifications up when we are in a no socket state */ 3158 return; 3159 } 3160 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3161 /* Can't send up to a closed socket any notifications */ 3162 return; 3163 } 3164 if (stcb && (stcb->asoc.assoc_up_sent == 0) && (notification != SCTP_NOTIFY_ASSOC_UP)) { 3165 if ((notification != SCTP_NOTIFY_ASSOC_DOWN) && 3166 (notification != SCTP_NOTIFY_ASSOC_ABORTED) && 3167 (notification != SCTP_NOTIFY_SPECIAL_SP_FAIL) && 3168 (notification != SCTP_NOTIFY_DG_FAIL) && 3169 (notification != SCTP_NOTIFY_PEER_SHUTDOWN)) { 3170 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, 0, NULL); 3171 stcb->asoc.assoc_up_sent = 1; 3172 } 3173 } 3174 switch (notification) { 3175 case SCTP_NOTIFY_ASSOC_UP: 3176 if (stcb->asoc.assoc_up_sent == 0) { 3177 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL); 3178 stcb->asoc.assoc_up_sent = 1; 3179 } 3180 break; 3181 case SCTP_NOTIFY_ASSOC_DOWN: 3182 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL); 3183 break; 3184 case SCTP_NOTIFY_INTERFACE_DOWN: 3185 { 3186 struct sctp_nets *net; 3187 3188 net = (struct sctp_nets *)data; 3189 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 3190 (struct sockaddr *)&net->ro._l_addr, error); 3191 break; 3192 } 3193 case SCTP_NOTIFY_INTERFACE_UP: 3194 { 3195 struct sctp_nets *net; 3196 3197 net = (struct sctp_nets *)data; 3198 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 3199 (struct sockaddr *)&net->ro._l_addr, error); 3200 break; 3201 } 3202 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 3203 { 3204 struct sctp_nets *net; 3205 3206 net = (struct sctp_nets *)data; 3207 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 3208 (struct sockaddr *)&net->ro._l_addr, error); 3209 break; 3210 } 3211 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 3212 sctp_notify_send_failed2(stcb, error, 3213 (struct sctp_stream_queue_pending *)data); 3214 break; 3215 case SCTP_NOTIFY_DG_FAIL: 3216 sctp_notify_send_failed(stcb, error, 3217 (struct sctp_tmit_chunk *)data); 3218 break; 3219 case SCTP_NOTIFY_ADAPTATION_INDICATION: 3220 /* Here the error is the adaptation indication */ 3221 sctp_notify_adaptation_layer(stcb, error); 3222 break; 3223 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 3224 sctp_notify_partial_delivery_indication(stcb, error, 0); 3225 break; 3226 case SCTP_NOTIFY_STRDATA_ERR: 3227 break; 3228 case SCTP_NOTIFY_ASSOC_ABORTED: 3229 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL); 3230 break; 3231 case SCTP_NOTIFY_PEER_OPENED_STREAM: 3232 break; 3233 case SCTP_NOTIFY_STREAM_OPENED_OK: 3234 break; 3235 case SCTP_NOTIFY_ASSOC_RESTART: 3236 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data); 3237 break; 3238 case SCTP_NOTIFY_HB_RESP: 3239 break; 3240 case SCTP_NOTIFY_STR_RESET_SEND: 3241 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR); 3242 break; 3243 case SCTP_NOTIFY_STR_RESET_RECV: 3244 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR); 3245 break; 3246 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 3247 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_INBOUND_STR)); 3248 break; 3249 3250 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 3251 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | 
SCTP_STRRESET_INBOUND_STR)); 3252 break; 3253 3254 case SCTP_NOTIFY_ASCONF_ADD_IP: 3255 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 3256 error); 3257 break; 3258 case SCTP_NOTIFY_ASCONF_DELETE_IP: 3259 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 3260 error); 3261 break; 3262 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 3263 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 3264 error); 3265 break; 3266 case SCTP_NOTIFY_ASCONF_SUCCESS: 3267 break; 3268 case SCTP_NOTIFY_ASCONF_FAILED: 3269 break; 3270 case SCTP_NOTIFY_PEER_SHUTDOWN: 3271 sctp_notify_shutdown_event(stcb); 3272 break; 3273 case SCTP_NOTIFY_AUTH_NEW_KEY: 3274 sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error, 3275 (uint16_t) (uintptr_t) data); 3276 break; 3277 #if 0 3278 case SCTP_NOTIFY_AUTH_KEY_CONFLICT: 3279 sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT, 3280 error, (uint16_t) (uintptr_t) data); 3281 break; 3282 #endif /* not yet? remove? */ 3283 3284 3285 default: 3286 #ifdef SCTP_DEBUG 3287 if (sctp_debug_on & SCTP_DEBUG_UTIL1) { 3288 printf("NOTIFY: unknown notification %xh (%u)\n", 3289 notification, notification); 3290 } 3291 #endif /* SCTP_DEBUG */ 3292 break; 3293 } /* end switch */ 3294 } 3295 3296 void 3297 sctp_report_all_outbound(struct sctp_tcb *stcb) 3298 { 3299 struct sctp_association *asoc; 3300 struct sctp_stream_out *outs; 3301 struct sctp_tmit_chunk *chk; 3302 struct sctp_stream_queue_pending *sp; 3303 3304 asoc = &stcb->asoc; 3305 3306 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3307 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3308 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3309 return; 3310 } 3311 /* now through all the gunk freeing chunks */ 3312 3313 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) { 3314 /* now clean up any chunks here */ 3315 stcb->asoc.locked_on_sending = NULL; 3316 sp = TAILQ_FIRST(&outs->outqueue); 3317 while (sp) { 3318 stcb->asoc.stream_queue_cnt--; 3319 TAILQ_REMOVE(&outs->outqueue, sp, next); 3320 sctp_free_spbufspace(stcb, asoc, sp); 3321 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 3322 SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp); 3323 if (sp->data) { 3324 sctp_m_freem(sp->data); 3325 sp->data = NULL; 3326 } 3327 if (sp->net) 3328 sctp_free_remote_addr(sp->net); 3329 sp->net = NULL; 3330 /* Free the chunk */ 3331 sctp_free_a_strmoq(stcb, sp); 3332 sp = TAILQ_FIRST(&outs->outqueue); 3333 } 3334 } 3335 3336 /* pending send queue SHOULD be empty */ 3337 if (!TAILQ_EMPTY(&asoc->send_queue)) { 3338 chk = TAILQ_FIRST(&asoc->send_queue); 3339 while (chk) { 3340 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 3341 if (chk->data) { 3342 /* 3343 * trim off the sctp chunk header(it should 3344 * be there) 3345 */ 3346 if (chk->send_size >= sizeof(struct sctp_data_chunk)) { 3347 m_adj(chk->data, sizeof(struct sctp_data_chunk)); 3348 sctp_mbuf_crush(chk->data); 3349 } 3350 } 3351 sctp_free_bufspace(stcb, asoc, chk, 1); 3352 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk); 3353 if (chk->data) { 3354 sctp_m_freem(chk->data); 3355 chk->data = NULL; 3356 } 3357 if (chk->whoTo) 3358 sctp_free_remote_addr(chk->whoTo); 3359 chk->whoTo = NULL; 3360 sctp_free_a_chunk(stcb, chk); 3361 chk = TAILQ_FIRST(&asoc->send_queue); 3362 } 3363 } 3364 /* sent queue SHOULD be empty */ 3365 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3366 chk = TAILQ_FIRST(&asoc->sent_queue); 3367 while (chk) { 3368 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 3369 if (chk->data) { 3370 /* 3371 * trim 
off the sctp chunk header(it should 3372 * be there) 3373 */ 3374 if (chk->send_size >= sizeof(struct sctp_data_chunk)) { 3375 m_adj(chk->data, sizeof(struct sctp_data_chunk)); 3376 sctp_mbuf_crush(chk->data); 3377 } 3378 } 3379 sctp_free_bufspace(stcb, asoc, chk, 1); 3380 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 3381 SCTP_NOTIFY_DATAGRAM_SENT, chk); 3382 if (chk->data) { 3383 sctp_m_freem(chk->data); 3384 chk->data = NULL; 3385 } 3386 if (chk->whoTo) 3387 sctp_free_remote_addr(chk->whoTo); 3388 chk->whoTo = NULL; 3389 sctp_free_a_chunk(stcb, chk); 3390 chk = TAILQ_FIRST(&asoc->sent_queue); 3391 } 3392 } 3393 } 3394 3395 void 3396 sctp_abort_notification(struct sctp_tcb *stcb, int error) 3397 { 3398 3399 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3400 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3401 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3402 return; 3403 } 3404 /* Tell them we lost the asoc */ 3405 sctp_report_all_outbound(stcb); 3406 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 3407 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 3408 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 3409 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 3410 } 3411 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL); 3412 } 3413 3414 void 3415 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3416 struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err) 3417 { 3418 uint32_t vtag; 3419 3420 vtag = 0; 3421 if (stcb != NULL) { 3422 /* We have a TCB to abort, send notification too */ 3423 vtag = stcb->asoc.peer_vtag; 3424 sctp_abort_notification(stcb, 0); 3425 } 3426 sctp_send_abort(m, iphlen, sh, vtag, op_err); 3427 if (stcb != NULL) { 3428 /* Ok, now lets free it */ 3429 sctp_free_assoc(inp, stcb, 0); 3430 } else { 3431 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3432 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3433 sctp_inpcb_free(inp, 1, 0); 3434 } 3435 } 3436 } 3437 } 3438 3439 void 3440 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3441 int error, struct mbuf *op_err) 3442 { 3443 uint32_t vtag; 3444 3445 if (stcb == NULL) { 3446 /* Got to have a TCB */ 3447 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3448 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3449 sctp_inpcb_free(inp, 1, 0); 3450 } 3451 } 3452 return; 3453 } 3454 vtag = stcb->asoc.peer_vtag; 3455 /* notify the ulp */ 3456 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) 3457 sctp_abort_notification(stcb, error); 3458 /* notify the peer */ 3459 sctp_send_abort_tcb(stcb, op_err); 3460 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 3461 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 3462 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 3463 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 3464 } 3465 /* now free the asoc */ 3466 sctp_free_assoc(inp, stcb, 0); 3467 } 3468 3469 void 3470 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh, 3471 struct sctp_inpcb *inp, struct mbuf *op_err) 3472 { 3473 struct sctp_chunkhdr *ch, chunk_buf; 3474 unsigned int chk_length; 3475 3476 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 3477 /* Generate a TO address for future reference */ 3478 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 3479 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3480 sctp_inpcb_free(inp, 1, 0); 3481 } 3482 } 3483 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3484 sizeof(*ch), (uint8_t *) 
& chunk_buf); 3485 while (ch != NULL) { 3486 chk_length = ntohs(ch->chunk_length); 3487 if (chk_length < sizeof(*ch)) { 3488 /* break to abort land */ 3489 break; 3490 } 3491 switch (ch->chunk_type) { 3492 case SCTP_PACKET_DROPPED: 3493 /* we don't respond to pkt-dropped */ 3494 return; 3495 case SCTP_ABORT_ASSOCIATION: 3496 /* we don't respond with an ABORT to an ABORT */ 3497 return; 3498 case SCTP_SHUTDOWN_COMPLETE: 3499 /* 3500 * we ignore it since we are not waiting for it and 3501 * peer is gone 3502 */ 3503 return; 3504 case SCTP_SHUTDOWN_ACK: 3505 sctp_send_shutdown_complete2(m, iphlen, sh); 3506 return; 3507 default: 3508 break; 3509 } 3510 offset += SCTP_SIZE32(chk_length); 3511 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3512 sizeof(*ch), (uint8_t *) & chunk_buf); 3513 } 3514 sctp_send_abort(m, iphlen, sh, 0, op_err); 3515 } 3516 3517 /* 3518 * check the inbound datagram to make sure there is not an abort inside it, 3519 * if there is return 1, else return 0. 3520 */ 3521 int 3522 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill) 3523 { 3524 struct sctp_chunkhdr *ch; 3525 struct sctp_init_chunk *init_chk, chunk_buf; 3526 int offset; 3527 unsigned int chk_length; 3528 3529 offset = iphlen + sizeof(struct sctphdr); 3530 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 3531 (uint8_t *) & chunk_buf); 3532 while (ch != NULL) { 3533 chk_length = ntohs(ch->chunk_length); 3534 if (chk_length < sizeof(*ch)) { 3535 /* packet is probably corrupt */ 3536 break; 3537 } 3538 /* we seem to be ok, is it an abort? */ 3539 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 3540 /* yep, tell them */ 3541 return (1); 3542 } 3543 if (ch->chunk_type == SCTP_INITIATION) { 3544 /* need to update the Vtag */ 3545 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 3546 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf); 3547 if (init_chk != NULL) { 3548 *vtagfill = ntohl(init_chk->init.initiate_tag); 3549 } 3550 } 3551 /* Nope, move to the next chunk */ 3552 offset += SCTP_SIZE32(chk_length); 3553 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3554 sizeof(*ch), (uint8_t *) & chunk_buf); 3555 } 3556 return (0); 3557 } 3558 3559 /* 3560 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 3561 * set (i.e. 
it's 0) so, create this function to compare link local scopes 3562 */ 3563 uint32_t 3564 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 3565 { 3566 struct sockaddr_in6 a, b; 3567 3568 /* save copies */ 3569 a = *addr1; 3570 b = *addr2; 3571 3572 if (a.sin6_scope_id == 0) 3573 if (sa6_recoverscope(&a)) { 3574 /* can't get scope, so can't match */ 3575 return (0); 3576 } 3577 if (b.sin6_scope_id == 0) 3578 if (sa6_recoverscope(&b)) { 3579 /* can't get scope, so can't match */ 3580 return (0); 3581 } 3582 if (a.sin6_scope_id != b.sin6_scope_id) 3583 return (0); 3584 3585 return (1); 3586 } 3587 3588 /* 3589 * returns a sockaddr_in6 with embedded scope recovered and removed 3590 */ 3591 struct sockaddr_in6 * 3592 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 3593 { 3594 3595 /* check and strip embedded scope junk */ 3596 if (addr->sin6_family == AF_INET6) { 3597 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 3598 if (addr->sin6_scope_id == 0) { 3599 *store = *addr; 3600 if (!sa6_recoverscope(store)) { 3601 /* use the recovered scope */ 3602 addr = store; 3603 } 3604 /* else, return the original "to" addr */ 3605 } 3606 } 3607 } 3608 return (addr); 3609 } 3610 3611 /* 3612 * are the two addresses the same? currently a "scopeless" check returns: 1 3613 * if same, 0 if not 3614 */ 3615 __inline int 3616 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 3617 { 3618 3619 /* must be valid */ 3620 if (sa1 == NULL || sa2 == NULL) 3621 return (0); 3622 3623 /* must be the same family */ 3624 if (sa1->sa_family != sa2->sa_family) 3625 return (0); 3626 3627 if (sa1->sa_family == AF_INET6) { 3628 /* IPv6 addresses */ 3629 struct sockaddr_in6 *sin6_1, *sin6_2; 3630 3631 sin6_1 = (struct sockaddr_in6 *)sa1; 3632 sin6_2 = (struct sockaddr_in6 *)sa2; 3633 return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr, 3634 &sin6_2->sin6_addr)); 3635 } else if (sa1->sa_family == AF_INET) { 3636 /* IPv4 addresses */ 3637 struct sockaddr_in *sin_1, *sin_2; 3638 3639 sin_1 = (struct sockaddr_in *)sa1; 3640 sin_2 = (struct sockaddr_in *)sa2; 3641 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 3642 } else { 3643 /* we don't do these... 
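 * only AF_INET and AF_INET6 are compared; any other address family is
 * simply treated as not equal.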
*/ 3644 return (0); 3645 } 3646 } 3647 3648 void 3649 sctp_print_address(struct sockaddr *sa) 3650 { 3651 3652 if (sa->sa_family == AF_INET6) { 3653 struct sockaddr_in6 *sin6; 3654 3655 sin6 = (struct sockaddr_in6 *)sa; 3656 printf("IPv6 address: %s:%d scope:%u\n", 3657 ip6_sprintf(&sin6->sin6_addr), ntohs(sin6->sin6_port), 3658 sin6->sin6_scope_id); 3659 } else if (sa->sa_family == AF_INET) { 3660 struct sockaddr_in *sin; 3661 unsigned char *p; 3662 3663 sin = (struct sockaddr_in *)sa; 3664 p = (unsigned char *)&sin->sin_addr; 3665 printf("IPv4 address: %u.%u.%u.%u:%d\n", 3666 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 3667 } else { 3668 printf("?\n"); 3669 } 3670 } 3671 3672 void 3673 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh) 3674 { 3675 if (iph->ip_v == IPVERSION) { 3676 struct sockaddr_in lsa, fsa; 3677 3678 bzero(&lsa, sizeof(lsa)); 3679 lsa.sin_len = sizeof(lsa); 3680 lsa.sin_family = AF_INET; 3681 lsa.sin_addr = iph->ip_src; 3682 lsa.sin_port = sh->src_port; 3683 bzero(&fsa, sizeof(fsa)); 3684 fsa.sin_len = sizeof(fsa); 3685 fsa.sin_family = AF_INET; 3686 fsa.sin_addr = iph->ip_dst; 3687 fsa.sin_port = sh->dest_port; 3688 printf("src: "); 3689 sctp_print_address((struct sockaddr *)&lsa); 3690 printf("dest: "); 3691 sctp_print_address((struct sockaddr *)&fsa); 3692 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 3693 struct ip6_hdr *ip6; 3694 struct sockaddr_in6 lsa6, fsa6; 3695 3696 ip6 = (struct ip6_hdr *)iph; 3697 bzero(&lsa6, sizeof(lsa6)); 3698 lsa6.sin6_len = sizeof(lsa6); 3699 lsa6.sin6_family = AF_INET6; 3700 lsa6.sin6_addr = ip6->ip6_src; 3701 lsa6.sin6_port = sh->src_port; 3702 bzero(&fsa6, sizeof(fsa6)); 3703 fsa6.sin6_len = sizeof(fsa6); 3704 fsa6.sin6_family = AF_INET6; 3705 fsa6.sin6_addr = ip6->ip6_dst; 3706 fsa6.sin6_port = sh->dest_port; 3707 printf("src: "); 3708 sctp_print_address((struct sockaddr *)&lsa6); 3709 printf("dest: "); 3710 sctp_print_address((struct sockaddr *)&fsa6); 3711 } 3712 } 3713 3714 #if defined(HAVE_SCTP_SO_LASTRECORD) 3715 3716 /* cloned from uipc_socket.c */ 3717 3718 #define SCTP_SBLINKRECORD(sb, m0) do { \ 3719 if ((sb)->sb_lastrecord != NULL) \ 3720 (sb)->sb_lastrecord->m_nextpkt = (m0); \ 3721 else \ 3722 (sb)->sb_mb = (m0); \ 3723 (sb)->sb_lastrecord = (m0); \ 3724 } while (/*CONSTCOND*/0) 3725 #endif 3726 3727 void 3728 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 3729 struct sctp_inpcb *new_inp, 3730 struct sctp_tcb *stcb) 3731 { 3732 /* 3733 * go through our old INP and pull off any control structures that 3734 * belong to stcb and move then to the new inp. 3735 */ 3736 struct socket *old_so, *new_so; 3737 struct sctp_queued_to_read *control, *nctl; 3738 struct sctp_readhead tmp_queue; 3739 struct mbuf *m; 3740 int error; 3741 3742 old_so = old_inp->sctp_socket; 3743 new_so = new_inp->sctp_socket; 3744 TAILQ_INIT(&tmp_queue); 3745 3746 SOCKBUF_LOCK(&(old_so->so_rcv)); 3747 3748 error = sblock(&old_so->so_rcv, 0); 3749 3750 SOCKBUF_UNLOCK(&(old_so->so_rcv)); 3751 if (error) { 3752 /* 3753 * Gak, can't get sblock, we have a problem. data will be 3754 * left stranded.. and we don't dare look at it since the 3755 * other thread may be reading something. Oh well, its a 3756 * screwed up app that does a peeloff OR a accept while 3757 * reading from the main socket... actually its only the 3758 * peeloff() case, since I think read will fail on a 3759 * listening socket.. 
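 * (A failed sblock() here most likely means another thread is in
 * soreceive() on the old socket; we return without moving anything.)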
3760 */ 3761 return; 3762 } 3763 /* lock the socket buffers */ 3764 SCTP_INP_READ_LOCK(old_inp); 3765 control = TAILQ_FIRST(&old_inp->read_queue); 3766 /* Pull off all for out target stcb */ 3767 while (control) { 3768 nctl = TAILQ_NEXT(control, next); 3769 if (control->stcb == stcb) { 3770 /* remove it we want it */ 3771 TAILQ_REMOVE(&old_inp->read_queue, control, next); 3772 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 3773 m = control->data; 3774 while (m) { 3775 #ifdef SCTP_SB_LOGGING 3776 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, m->m_len); 3777 #endif 3778 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 3779 #ifdef SCTP_SB_LOGGING 3780 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3781 #endif 3782 m = m->m_next; 3783 } 3784 } 3785 control = nctl; 3786 } 3787 SCTP_INP_READ_UNLOCK(old_inp); 3788 3789 /* Remove the sb-lock on the old socket */ 3790 SOCKBUF_LOCK(&(old_so->so_rcv)); 3791 3792 sbunlock(&old_so->so_rcv); 3793 SOCKBUF_UNLOCK(&(old_so->so_rcv)); 3794 3795 /* Now we move them over to the new socket buffer */ 3796 control = TAILQ_FIRST(&tmp_queue); 3797 SCTP_INP_READ_LOCK(new_inp); 3798 while (control) { 3799 nctl = TAILQ_NEXT(control, next); 3800 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 3801 m = control->data; 3802 while (m) { 3803 #ifdef SCTP_SB_LOGGING 3804 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, m->m_len); 3805 #endif 3806 sctp_sballoc(stcb, &new_so->so_rcv, m); 3807 #ifdef SCTP_SB_LOGGING 3808 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3809 #endif 3810 m = m->m_next; 3811 } 3812 control = nctl; 3813 } 3814 SCTP_INP_READ_UNLOCK(new_inp); 3815 } 3816 3817 3818 void 3819 sctp_add_to_readq(struct sctp_inpcb *inp, 3820 struct sctp_tcb *stcb, 3821 struct sctp_queued_to_read *control, 3822 struct sockbuf *sb, 3823 int end) 3824 { 3825 /* 3826 * Here we must place the control on the end of the socket read 3827 * queue AND increment sb_cc so that select will work properly on 3828 * read. 3829 */ 3830 struct mbuf *m, *prev = NULL; 3831 3832 SCTP_INP_READ_LOCK(inp); 3833 m = control->data; 3834 control->held_length = 0; 3835 control->length = 0; 3836 while (m) { 3837 if (m->m_len == 0) { 3838 /* Skip mbufs with NO length */ 3839 if (prev == NULL) { 3840 /* First one */ 3841 control->data = sctp_m_free(m); 3842 m = control->data; 3843 } else { 3844 prev->m_next = sctp_m_free(m); 3845 m = prev->m_next; 3846 } 3847 if (m == NULL) { 3848 control->tail_mbuf = prev;; 3849 } 3850 continue; 3851 } 3852 prev = m; 3853 #ifdef SCTP_SB_LOGGING 3854 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, m->m_len); 3855 #endif 3856 sctp_sballoc(stcb, sb, m); 3857 #ifdef SCTP_SB_LOGGING 3858 sctp_sblog(sb, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 3859 #endif 3860 atomic_add_int(&control->length, m->m_len); 3861 m = m->m_next; 3862 } 3863 if (prev != NULL) { 3864 control->tail_mbuf = prev; 3865 if (end) { 3866 prev->m_flags |= M_EOR; 3867 } 3868 } else { 3869 return; 3870 } 3871 if (end) { 3872 control->end_added = 1; 3873 } 3874 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 3875 SCTP_INP_READ_UNLOCK(inp); 3876 if (inp && inp->sctp_socket) { 3877 sctp_sorwakeup(inp, inp->sctp_socket); 3878 } 3879 } 3880 3881 3882 int 3883 sctp_append_to_readq(struct sctp_inpcb *inp, 3884 struct sctp_tcb *stcb, 3885 struct sctp_queued_to_read *control, 3886 struct mbuf *m, 3887 int end, 3888 int ctls_cumack, 3889 struct sockbuf *sb) 3890 { 3891 /* 3892 * A partial delivery API event is underway. OR we are appending on 3893 * the reassembly queue. 3894 * 3895 * If PDAPI this means we need to add m to the end of the data. 3896 * Increase the length in the control AND increment the sb_cc. 3897 * Otherwise sb is NULL and all we need to do is put it at the end 3898 * of the mbuf chain. 3899 */ 3900 int len = 0; 3901 struct mbuf *mm, *tail = NULL, *prev = NULL; 3902 3903 if (inp) { 3904 SCTP_INP_READ_LOCK(inp); 3905 } 3906 if (control == NULL) { 3907 get_out: 3908 if (inp) { 3909 SCTP_INP_READ_UNLOCK(inp); 3910 } 3911 return (-1); 3912 } 3913 if ((control->tail_mbuf) && 3914 (control->tail_mbuf->m_flags & M_EOR)) { 3915 /* huh this one is complete? */ 3916 goto get_out; 3917 } 3918 mm = m; 3919 if (mm == NULL) { 3920 goto get_out; 3921 } 3922 while (mm) { 3923 if (mm->m_len == 0) { 3924 /* Skip mbufs with NO lenght */ 3925 if (prev == NULL) { 3926 /* First one */ 3927 m = sctp_m_free(mm); 3928 mm = m; 3929 } else { 3930 prev->m_next = sctp_m_free(mm); 3931 mm = prev->m_next; 3932 } 3933 continue; 3934 } 3935 prev = mm; 3936 len += mm->m_len; 3937 if (sb) { 3938 #ifdef SCTP_SB_LOGGING 3939 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, mm->m_len); 3940 #endif 3941 sctp_sballoc(stcb, sb, mm); 3942 #ifdef SCTP_SB_LOGGING 3943 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3944 #endif 3945 } 3946 mm = mm->m_next; 3947 } 3948 if (prev) { 3949 tail = prev; 3950 } else { 3951 /* Really there should always be a prev */ 3952 if (m == NULL) { 3953 /* Huh nothing left? */ 3954 #ifdef INVARIENTS 3955 panic("Nothing left to add?"); 3956 #else 3957 goto get_out; 3958 #endif 3959 } 3960 tail = m; 3961 } 3962 if (end) { 3963 /* message is complete */ 3964 tail->m_flags |= M_EOR; 3965 if (control == stcb->asoc.control_pdapi) { 3966 stcb->asoc.control_pdapi = NULL; 3967 } 3968 control->held_length = 0; 3969 control->end_added = 1; 3970 } 3971 atomic_add_int(&control->length, len); 3972 if (control->tail_mbuf) { 3973 /* append */ 3974 control->tail_mbuf->m_next = m; 3975 control->tail_mbuf = tail; 3976 } else { 3977 /* nothing there */ 3978 #ifdef INVARIENTS 3979 if (control->data != NULL) { 3980 panic("This should NOT happen"); 3981 } 3982 #endif 3983 control->data = m; 3984 control->tail_mbuf = tail; 3985 } 3986 /* 3987 * When we are appending in partial delivery, the cum-ack is used 3988 * for the actual pd-api highest tsn on this mbuf. The true cum-ack 3989 * is populated in the outbound sinfo structure from the true cumack 3990 * if the association exists... 
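 * (below we simply overwrite both sinfo_tsn and sinfo_cumtsn on the
 * control with the caller supplied ctls_cumack).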
3991 */ 3992 control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack; 3993 if (inp) { 3994 SCTP_INP_READ_UNLOCK(inp); 3995 } 3996 if (inp && inp->sctp_socket) { 3997 sctp_sorwakeup(inp, inp->sctp_socket); 3998 } 3999 return (0); 4000 } 4001 4002 4003 4004 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4005 *************ALTERNATE ROUTING CODE 4006 */ 4007 4008 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4009 *************ALTERNATE ROUTING CODE 4010 */ 4011 4012 struct mbuf * 4013 sctp_generate_invmanparam(int err) 4014 { 4015 /* Return a MBUF with a invalid mandatory parameter */ 4016 struct mbuf *m; 4017 4018 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA); 4019 if (m) { 4020 struct sctp_paramhdr *ph; 4021 4022 m->m_len = sizeof(struct sctp_paramhdr); 4023 ph = mtod(m, struct sctp_paramhdr *); 4024 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 4025 ph->param_type = htons(err); 4026 } 4027 return (m); 4028 } 4029 4030 #ifdef SCTP_MBCNT_LOGGING 4031 void 4032 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4033 struct sctp_tmit_chunk *tp1, int chk_cnt) 4034 { 4035 if (tp1->data == NULL) { 4036 return; 4037 } 4038 asoc->chunks_on_out_queue -= chk_cnt; 4039 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4040 asoc->total_output_queue_size, 4041 tp1->book_size, 4042 0, 4043 tp1->mbcnt); 4044 if (asoc->total_output_queue_size >= tp1->book_size) { 4045 asoc->total_output_queue_size -= tp1->book_size; 4046 } else { 4047 asoc->total_output_queue_size = 0; 4048 } 4049 4050 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4051 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4052 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 4053 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 4054 } else { 4055 stcb->sctp_socket->so_snd.sb_cc = 0; 4056 4057 } 4058 } 4059 } 4060 4061 #endif 4062 4063 int 4064 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 4065 int reason, struct sctpchunk_listhead *queue) 4066 { 4067 int ret_sz = 0; 4068 int notdone; 4069 uint8_t foundeom = 0; 4070 4071 do { 4072 ret_sz += tp1->book_size; 4073 tp1->sent = SCTP_FORWARD_TSN_SKIP; 4074 if (tp1->data) { 4075 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 4076 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1); 4077 sctp_m_freem(tp1->data); 4078 tp1->data = NULL; 4079 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 4080 } 4081 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4082 stcb->asoc.sent_queue_cnt_removeable--; 4083 } 4084 if (queue == &stcb->asoc.send_queue) { 4085 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 4086 /* on to the sent queue */ 4087 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 4088 sctp_next); 4089 stcb->asoc.sent_queue_cnt++; 4090 } 4091 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 4092 SCTP_DATA_NOT_FRAG) { 4093 /* not frag'ed we ae done */ 4094 notdone = 0; 4095 foundeom = 1; 4096 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 4097 /* end of frag, we are done */ 4098 notdone = 0; 4099 foundeom = 1; 4100 } else { 4101 /* 4102 * Its a begin or middle piece, we must mark all of 4103 * it 4104 */ 4105 notdone = 1; 4106 tp1 = TAILQ_NEXT(tp1, sctp_next); 4107 } 4108 } while (tp1 && notdone); 4109 if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) { 4110 /* 4111 * The multi-part message was scattered across the send and 4112 * sent queue. 
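 * Restart at the head of the send_queue so the remaining fragments
 * of the message get marked for skipping as well.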
4113 */ 4114 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 4115 /* 4116 * recurse throught the send_queue too, starting at the 4117 * beginning. 4118 */ 4119 if (tp1) { 4120 ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason, 4121 &stcb->asoc.send_queue); 4122 } else { 4123 printf("hmm, nothing on the send queue and no EOM?\n"); 4124 } 4125 } 4126 return (ret_sz); 4127 } 4128 4129 /* 4130 * checks to see if the given address, sa, is one that is currently known by 4131 * the kernel note: can't distinguish the same address on multiple interfaces 4132 * and doesn't handle multiple addresses with different zone/scope id's note: 4133 * ifa_ifwithaddr() compares the entire sockaddr struct 4134 */ 4135 struct ifaddr * 4136 sctp_find_ifa_by_addr(struct sockaddr *sa) 4137 { 4138 struct ifnet *ifn; 4139 struct ifaddr *ifa; 4140 4141 /* go through all our known interfaces */ 4142 TAILQ_FOREACH(ifn, &ifnet, if_list) { 4143 /* go through each interface addresses */ 4144 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) { 4145 /* correct family? */ 4146 if (ifa->ifa_addr->sa_family != sa->sa_family) 4147 continue; 4148 4149 #ifdef INET6 4150 if (ifa->ifa_addr->sa_family == AF_INET6) { 4151 /* IPv6 address */ 4152 struct sockaddr_in6 *sin1, *sin2, sin6_tmp; 4153 4154 sin1 = (struct sockaddr_in6 *)ifa->ifa_addr; 4155 if (IN6_IS_SCOPE_LINKLOCAL(&sin1->sin6_addr)) { 4156 /* create a copy and clear scope */ 4157 memcpy(&sin6_tmp, sin1, 4158 sizeof(struct sockaddr_in6)); 4159 sin1 = &sin6_tmp; 4160 in6_clearscope(&sin1->sin6_addr); 4161 } 4162 sin2 = (struct sockaddr_in6 *)sa; 4163 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr, 4164 sizeof(struct in6_addr)) == 0) { 4165 /* found it */ 4166 return (ifa); 4167 } 4168 } else 4169 #endif 4170 if (ifa->ifa_addr->sa_family == AF_INET) { 4171 /* IPv4 address */ 4172 struct sockaddr_in *sin1, *sin2; 4173 4174 sin1 = (struct sockaddr_in *)ifa->ifa_addr; 4175 sin2 = (struct sockaddr_in *)sa; 4176 if (sin1->sin_addr.s_addr == 4177 sin2->sin_addr.s_addr) { 4178 /* found it */ 4179 return (ifa); 4180 } 4181 } 4182 /* else, not AF_INET or AF_INET6, so skip */ 4183 } /* end foreach ifa */ 4184 } /* end foreach ifn */ 4185 /* not found! */ 4186 return (NULL); 4187 } 4188 4189 4190 4191 4192 4193 4194 4195 4196 static void 4197 sctp_user_rcvd(struct sctp_tcb *stcb, int *freed_so_far, int hold_rlock, 4198 uint32_t rwnd_req) 4199 { 4200 /* User pulled some data, do we need a rwnd update? 
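 * The caller passes how many bytes it has freed from the receive
 * socket buffer. If the recomputed rwnd has grown by at least
 * rwnd_req over the last value we reported, send a window-update
 * SACK now; otherwise just remember the difference in
 * freed_by_sorcv_sincelast for next time.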
*/ 4201 int r_unlocked = 0; 4202 uint32_t dif, rwnd; 4203 struct socket *so = NULL; 4204 4205 if (stcb == NULL) 4206 return; 4207 4208 atomic_add_int(&stcb->asoc.refcnt, 1); 4209 4210 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4211 /* Pre-check If we are freeing no update */ 4212 goto no_lock; 4213 } 4214 SCTP_INP_INCR_REF(stcb->sctp_ep); 4215 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4216 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 4217 goto out; 4218 } 4219 so = stcb->sctp_socket; 4220 if (so == NULL) { 4221 goto out; 4222 } 4223 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 4224 /* Have you have freed enough to look */ 4225 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4226 sctp_misc_ints(SCTP_ENTER_USER_RECV, 4227 (stcb->asoc.my_rwnd - stcb->asoc.my_last_reported_rwnd), 4228 *freed_so_far, 4229 stcb->freed_by_sorcv_sincelast, 4230 rwnd_req); 4231 #endif 4232 *freed_so_far = 0; 4233 /* Yep, its worth a look and the lock overhead */ 4234 4235 /* Figure out what the rwnd would be */ 4236 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 4237 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 4238 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 4239 } else { 4240 dif = 0; 4241 } 4242 if (dif >= rwnd_req) { 4243 if (hold_rlock) { 4244 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 4245 r_unlocked = 1; 4246 } 4247 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4248 /* 4249 * One last check before we allow the guy possibly 4250 * to get in. There is a race, where the guy has not 4251 * reached the gate. In that case 4252 */ 4253 goto out; 4254 } 4255 SCTP_TCB_LOCK(stcb); 4256 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4257 /* No reports here */ 4258 SCTP_TCB_UNLOCK(stcb); 4259 goto out; 4260 } 4261 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4262 sctp_misc_ints(SCTP_USER_RECV_SACKS, 4263 stcb->asoc.my_rwnd, 4264 stcb->asoc.my_last_reported_rwnd, 4265 stcb->freed_by_sorcv_sincelast, 4266 dif); 4267 #endif 4268 SCTP_STAT_INCR(sctps_wu_sacks_sent); 4269 sctp_send_sack(stcb); 4270 sctp_chunk_output(stcb->sctp_ep, stcb, 4271 SCTP_OUTPUT_FROM_USR_RCVD); 4272 /* make sure no timer is running */ 4273 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL); 4274 SCTP_TCB_UNLOCK(stcb); 4275 } else { 4276 /* Update how much we have pending */ 4277 stcb->freed_by_sorcv_sincelast = dif; 4278 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4279 sctp_misc_ints(SCTP_USER_RECV_SACKS, 4280 stcb->asoc.my_rwnd, 4281 stcb->asoc.my_last_reported_rwnd, 4282 stcb->freed_by_sorcv_sincelast, 4283 0); 4284 #endif 4285 } 4286 out: 4287 if (so && r_unlocked && hold_rlock) { 4288 SCTP_STAT_INCR(sctps_locks_in_rcv); 4289 SCTP_INP_READ_LOCK(stcb->sctp_ep); 4290 } 4291 SCTP_INP_DECR_REF(stcb->sctp_ep); 4292 no_lock: 4293 atomic_add_int(&stcb->asoc.refcnt, -1); 4294 return; 4295 } 4296 4297 int 4298 sctp_sorecvmsg(struct socket *so, 4299 struct uio *uio, 4300 struct mbuf **mp, 4301 struct sockaddr *from, 4302 int fromlen, 4303 int *msg_flags, 4304 struct sctp_sndrcvinfo *sinfo, 4305 int filling_sinfo) 4306 { 4307 /* 4308 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 4309 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 4310 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
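 * (MSG_DONTWAIT, MSG_NBIO or a socket in non-blocking state clears
 * block_allowed, so an empty receive buffer returns EWOULDBLOCK
 * instead of sleeping in sbwait()).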
4311 * On the way out we may send out any combination of: 4312 * MSG_NOTIFICATION MSG_EOR 4313 * 4314 */ 4315 struct sctp_inpcb *inp = NULL; 4316 int my_len = 0; 4317 int cp_len = 0, error = 0; 4318 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 4319 struct mbuf *m = NULL, *embuf = NULL; 4320 struct sctp_tcb *stcb = NULL; 4321 int wakeup_read_socket = 0; 4322 int freecnt_applied = 0; 4323 int out_flags = 0, in_flags = 0; 4324 int block_allowed = 1; 4325 int freed_so_far = 0; 4326 int copied_so_far = 0; 4327 int s, in_eeor_mode = 0; 4328 int no_rcv_needed = 0; 4329 uint32_t rwnd_req = 0; 4330 int hold_sblock = 0; 4331 int hold_rlock = 0; 4332 int alen = 0, slen = 0; 4333 int held_length = 0; 4334 4335 if (msg_flags) { 4336 in_flags = *msg_flags; 4337 } else { 4338 in_flags = 0; 4339 } 4340 slen = uio->uio_resid; 4341 /* Pull in and set up our int flags */ 4342 if (in_flags & MSG_OOB) { 4343 /* Out of band's NOT supported */ 4344 return (EOPNOTSUPP); 4345 } 4346 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 4347 return (EINVAL); 4348 } 4349 if ((in_flags & (MSG_DONTWAIT 4350 | MSG_NBIO 4351 )) || 4352 (so->so_state & SS_NBIO)) { 4353 block_allowed = 0; 4354 } 4355 /* setup the endpoint */ 4356 inp = (struct sctp_inpcb *)so->so_pcb; 4357 if (inp == NULL) { 4358 return (EFAULT); 4359 } 4360 s = splnet(); 4361 rwnd_req = (so->so_rcv.sb_hiwat >> SCTP_RWND_HIWAT_SHIFT); 4362 /* Must be at least a MTU's worth */ 4363 if (rwnd_req < SCTP_MIN_RWND) 4364 rwnd_req = SCTP_MIN_RWND; 4365 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 4366 #ifdef SCTP_RECV_RWND_LOGGING 4367 sctp_misc_ints(SCTP_SORECV_ENTER, 4368 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid); 4369 #endif 4370 SOCKBUF_LOCK(&so->so_rcv); 4371 hold_sblock = 1; 4372 #ifdef SCTP_RECV_RWND_LOGGING 4373 sctp_misc_ints(SCTP_SORECV_ENTERPL, 4374 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid); 4375 #endif 4376 4377 4378 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0)); 4379 if (error) { 4380 goto release_unlocked; 4381 } 4382 restart: 4383 if (hold_sblock == 0) { 4384 SOCKBUF_LOCK(&so->so_rcv); 4385 hold_sblock = 1; 4386 } 4387 sbunlock(&so->so_rcv); 4388 4389 restart_nosblocks: 4390 if (hold_sblock == 0) { 4391 SOCKBUF_LOCK(&so->so_rcv); 4392 hold_sblock = 1; 4393 } 4394 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4395 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 4396 goto out; 4397 } 4398 if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE) { 4399 if (so->so_error) { 4400 error = so->so_error; 4401 } else { 4402 error = ENOTCONN; 4403 } 4404 goto out; 4405 } 4406 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) { 4407 /* we need to wait for data */ 4408 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4409 sctp_misc_ints(SCTP_SORECV_BLOCKSA, 4410 0, 0, so->so_rcv.sb_cc, uio->uio_resid); 4411 #endif 4412 if ((so->so_rcv.sb_cc == 0) && 4413 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 4414 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 4415 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 4416 /* 4417 * For active open side clear flags for 4418 * re-use passive open is blocked by 4419 * connect. 
4420 */ 4421 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 4422 /* 4423 * You were aborted, passive side 4424 * always hits here 4425 */ 4426 error = ECONNRESET; 4427 /* 4428 * You get this once if you are 4429 * active open side 4430 */ 4431 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4432 /* 4433 * Remove flag if on the 4434 * active open side 4435 */ 4436 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED; 4437 } 4438 } 4439 so->so_state &= ~(SS_ISCONNECTING | 4440 SS_ISDISCONNECTING | 4441 SS_ISCONFIRMING | 4442 SS_ISCONNECTED); 4443 if (error == 0) { 4444 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 4445 error = ENOTCONN; 4446 } else { 4447 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED; 4448 } 4449 } 4450 goto out; 4451 } 4452 } 4453 error = sbwait(&so->so_rcv); 4454 if (error) { 4455 goto out; 4456 } 4457 held_length = 0; 4458 goto restart_nosblocks; 4459 } else if (so->so_rcv.sb_cc == 0) { 4460 error = EWOULDBLOCK; 4461 goto out; 4462 } 4463 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0)); 4464 /* we possibly have data we can read */ 4465 control = TAILQ_FIRST(&inp->read_queue); 4466 if (control == NULL) { 4467 /* 4468 * This could be happening since the appender did the 4469 * increment but as not yet did the tailq insert onto the 4470 * read_queue 4471 */ 4472 if (hold_rlock == 0) { 4473 SCTP_INP_READ_LOCK(inp); 4474 hold_rlock = 1; 4475 } 4476 control = TAILQ_FIRST(&inp->read_queue); 4477 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 4478 #ifdef INVARIENTS 4479 panic("Huh, its non zero and nothing on control?"); 4480 #endif 4481 so->so_rcv.sb_cc = 0; 4482 } 4483 SCTP_INP_READ_UNLOCK(inp); 4484 hold_rlock = 0; 4485 goto restart; 4486 } 4487 if ((control->length == 0) && 4488 (control->do_not_ref_stcb)) { 4489 /* 4490 * Clean up code for freeing assoc that left behind a 4491 * pdapi.. maybe a peer in EEOR that just closed after 4492 * sending and never indicated a EOR. 4493 */ 4494 SCTP_STAT_INCR(sctps_locks_in_rcva); 4495 if (hold_rlock == 0) { 4496 hold_rlock = 1; 4497 SCTP_INP_READ_LOCK(inp); 4498 } 4499 control->held_length = 0; 4500 if (control->data) { 4501 /* Hmm there is data here .. fix */ 4502 struct mbuf *m; 4503 int cnt = 0; 4504 4505 m = control->data; 4506 while (m) { 4507 cnt += m->m_len; 4508 if (m->m_next == NULL) { 4509 control->tail_mbuf = m; 4510 m->m_flags |= M_EOR; 4511 control->end_added = 1; 4512 } 4513 m = m->m_next; 4514 } 4515 control->length = cnt; 4516 } else { 4517 /* remove it */ 4518 TAILQ_REMOVE(&inp->read_queue, control, next); 4519 /* Add back any hiddend data */ 4520 sctp_free_remote_addr(control->whoFrom); 4521 sctp_free_a_readq(stcb, control); 4522 } 4523 if (hold_rlock) { 4524 hold_rlock = 0; 4525 SCTP_INP_READ_UNLOCK(inp); 4526 } 4527 goto restart; 4528 } 4529 if (control->length == 0) { 4530 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 4531 (filling_sinfo)) { 4532 /* find a more suitable one then this */ 4533 ctl = TAILQ_NEXT(control, next); 4534 while (ctl) { 4535 if ((ctl->stcb != control->stcb) && (ctl->length)) { 4536 /* found one */ 4537 control = ctl; 4538 goto found_one; 4539 } 4540 ctl = TAILQ_NEXT(ctl, next); 4541 } 4542 } 4543 /* 4544 * if we reach here, not suitable replacement is available 4545 * <or> fragment interleave is NOT on. So stuff the sb_cc 4546 * into the our held count, and its time to sleep again. 
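 * (held_length makes the blocking check above wait until sb_cc grows
 * beyond what is already queued, i.e. until new data arrives).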
4547 */ 4548 held_length = so->so_rcv.sb_cc; 4549 control->held_length = so->so_rcv.sb_cc; 4550 goto restart; 4551 } 4552 /* Clear the held length since there is something to read */ 4553 control->held_length = 0; 4554 if (hold_rlock) { 4555 SCTP_INP_READ_UNLOCK(inp); 4556 hold_rlock = 0; 4557 } 4558 found_one: 4559 /* 4560 * If we reach here, control has a some data for us to read off. 4561 * Note that stcb COULD be NULL. 4562 */ 4563 if (hold_sblock) { 4564 SOCKBUF_UNLOCK(&so->so_rcv); 4565 hold_sblock = 0; 4566 } 4567 stcb = control->stcb; 4568 if (stcb) { 4569 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 4570 (control->do_not_ref_stcb == 0)) { 4571 if (freecnt_applied == 0) 4572 stcb = NULL; 4573 } else if (control->do_not_ref_stcb == 0) { 4574 /* you can't free it on me please */ 4575 /* 4576 * The lock on the socket buffer protects us so the 4577 * free code will stop. But since we used the 4578 * socketbuf lock and the sender uses the tcb_lock 4579 * to increment, we need to use the atomic add to 4580 * the refcnt 4581 */ 4582 atomic_add_int(&stcb->asoc.refcnt, 1); 4583 freecnt_applied = 1; 4584 /* 4585 * Setup to remember how much we have not yet told 4586 * the peer our rwnd has opened up. Note we grab the 4587 * value from the tcb from last time. Note too that 4588 * sack sending clears this when a sack is sent.. 4589 * which is fine. Once we hit the rwnd_req, we then 4590 * will go to the sctp_user_rcvd() that will not 4591 * lock until it KNOWs it MUST send a WUP-SACK. 4592 * 4593 */ 4594 freed_so_far = stcb->freed_by_sorcv_sincelast; 4595 stcb->freed_by_sorcv_sincelast = 0; 4596 } 4597 } 4598 /* First lets get off the sinfo and sockaddr info */ 4599 if ((sinfo) && filling_sinfo) { 4600 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo)); 4601 nxt = TAILQ_NEXT(control, next); 4602 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) { 4603 struct sctp_extrcvinfo *s_extra; 4604 4605 s_extra = (struct sctp_extrcvinfo *)sinfo; 4606 if (nxt) { 4607 s_extra->next_flags = SCTP_NEXT_MSG_AVAIL; 4608 if (nxt->sinfo_flags & SCTP_UNORDERED) { 4609 s_extra->next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 4610 } 4611 s_extra->next_asocid = nxt->sinfo_assoc_id; 4612 s_extra->next_length = nxt->length; 4613 s_extra->next_ppid = nxt->sinfo_ppid; 4614 s_extra->next_stream = nxt->sinfo_stream; 4615 if (nxt->tail_mbuf != NULL) { 4616 if (nxt->tail_mbuf->m_flags & M_EOR) { 4617 s_extra->next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 4618 } 4619 } 4620 } else { 4621 /* 4622 * we explicitly 0 this, since the memcpy 4623 * got some other things beyond the older 4624 * sinfo_ that is on the control's structure 4625 * :-D 4626 */ 4627 s_extra->next_flags = SCTP_NO_NEXT_MSG; 4628 s_extra->next_asocid = 0; 4629 s_extra->next_length = 0; 4630 s_extra->next_ppid = 0; 4631 s_extra->next_stream = 0; 4632 } 4633 } 4634 /* 4635 * update off the real current cum-ack, if we have an stcb. 4636 */ 4637 if (stcb) 4638 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 4639 /* 4640 * mask off the high bits, we keep the actual chunk bits in 4641 * there. 
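 * Only the low byte of sinfo_flags is handed to the user; the high
 * byte carries the chunk flags internally.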
4642 */ 4643 sinfo->sinfo_flags &= 0x00ff; 4644 } 4645 if (fromlen && from) { 4646 struct sockaddr *to; 4647 4648 #ifdef AF_INET 4649 cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin.sin_len); 4650 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 4651 ((struct sockaddr_in *)from)->sin_port = control->port_from; 4652 #else 4653 /* No AF_INET use AF_INET6 */ 4654 cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin6.sin6_len); 4655 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 4656 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from; 4657 #endif 4658 4659 to = from; 4660 #if defined(AF_INET) && defined(AF_INET6) 4661 if ((inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) && 4662 (to->sa_family == AF_INET) && 4663 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) { 4664 struct sockaddr_in *sin; 4665 struct sockaddr_in6 sin6; 4666 4667 sin = (struct sockaddr_in *)to; 4668 bzero(&sin6, sizeof(sin6)); 4669 sin6.sin6_family = AF_INET6; 4670 sin6.sin6_len = sizeof(struct sockaddr_in6); 4671 sin6.sin6_addr.s6_addr16[2] = 0xffff; 4672 bcopy(&sin->sin_addr, 4673 &sin6.sin6_addr.s6_addr16[3], 4674 sizeof(sin6.sin6_addr.s6_addr16[3])); 4675 sin6.sin6_port = sin->sin_port; 4676 memcpy(from, (caddr_t)&sin6, sizeof(sin6)); 4677 } 4678 #endif 4679 #if defined(AF_INET6) 4680 { 4681 struct sockaddr_in6 lsa6, *to6; 4682 4683 to6 = (struct sockaddr_in6 *)to; 4684 sctp_recover_scope_mac(to6, (&lsa6)); 4685 4686 } 4687 #endif 4688 } 4689 /* now copy out what data we can */ 4690 if (mp == NULL) { 4691 /* copy out each mbuf in the chain up to length */ 4692 get_more_data: 4693 m = control->data; 4694 while (m) { 4695 /* Move out all we can */ 4696 cp_len = (int)uio->uio_resid; 4697 my_len = (int)m->m_len; 4698 if (cp_len > my_len) { 4699 /* not enough in this buf */ 4700 cp_len = my_len; 4701 } 4702 if (hold_rlock) { 4703 SCTP_INP_READ_UNLOCK(inp); 4704 hold_rlock = 0; 4705 } 4706 splx(s); 4707 if (cp_len > 0) 4708 error = uiomove(mtod(m, char *), cp_len, uio); 4709 s = splnet(); 4710 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4711 sctp_misc_ints(SCTP_SORCV_DOESCPY, 4712 so->so_rcv.sb_cc, 4713 cp_len, 4714 0, 4715 0); 4716 #endif 4717 /* re-read */ 4718 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4719 goto release; 4720 } 4721 if (stcb && 4722 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4723 no_rcv_needed = 1; 4724 } 4725 if (error) { 4726 /* error we are out of here */ 4727 goto release; 4728 } 4729 if ((m->m_next == NULL) && 4730 (cp_len >= m->m_len) && 4731 ((control->end_added == 0) || 4732 (control->end_added && (TAILQ_NEXT(control, next) == NULL))) 4733 ) { 4734 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4735 sctp_misc_ints(SCTP_SORCV_DOESLCK, 4736 so->so_rcv.sb_cc, 4737 cp_len, 4738 m->m_len, 4739 control->length); 4740 #endif 4741 SCTP_STAT_INCR(sctps_locks_in_rcvb); 4742 SCTP_INP_READ_LOCK(inp); 4743 hold_rlock = 1; 4744 } 4745 if (cp_len == m->m_len) { 4746 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4747 sctp_misc_ints(SCTP_SORCV_DOESADJ, 4748 so->so_rcv.sb_cc, 4749 control->length, 4750 cp_len, 4751 0); 4752 #endif 4753 if (m->m_flags & M_EOR) { 4754 out_flags |= MSG_EOR; 4755 } 4756 if (m->m_flags & M_NOTIFICATION) { 4757 out_flags |= MSG_NOTIFICATION; 4758 } 4759 /* we ate up the mbuf */ 4760 if (in_flags & MSG_PEEK) { 4761 /* just looking */ 4762 m = m->m_next; 4763 copied_so_far += cp_len; 4764 } else { 4765 /* dispose of the mbuf */ 4766 #ifdef SCTP_SB_LOGGING 4767 sctp_sblog(&so->so_rcv, 4768 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, m->m_len); 4769 #endif 4770 sctp_sbfree(control, stcb, &so->so_rcv, m); 4771 #ifdef SCTP_SB_LOGGING 4772 sctp_sblog(&so->so_rcv, 4773 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4774 #endif 4775 embuf = m; 4776 copied_so_far += cp_len; 4777 freed_so_far += cp_len; 4778 alen = atomic_fetchadd_int(&control->length, -(cp_len)); 4779 if (alen < cp_len) { 4780 panic("Control length goes negative?"); 4781 } 4782 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4783 sctp_misc_ints(SCTP_SORCV_PASSBF, 4784 so->so_rcv.sb_cc, 4785 control->length, 4786 0, 4787 0); 4788 #endif 4789 control->data = sctp_m_free(m); 4790 m = control->data; 4791 /* 4792 * been through it all, must hold sb 4793 * lock ok to null tail 4794 */ 4795 if (control->data == NULL) { 4796 #ifdef INVARIENTS 4797 if ((control->end_added == 0) || 4798 (TAILQ_NEXT(control, next) == NULL)) { 4799 /* 4800 * If the end is not 4801 * added, OR the 4802 * next is NOT null 4803 * we MUST have the 4804 * lock. 4805 */ 4806 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 4807 panic("Hmm we don't own the lock?"); 4808 } 4809 } 4810 #endif 4811 control->tail_mbuf = NULL; 4812 #ifdef INVARIENTS 4813 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 4814 panic("end_added, nothing left and no MSG_EOR"); 4815 } 4816 #endif 4817 } 4818 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4819 sctp_misc_ints(SCTP_SORCV_ADJD, 4820 so->so_rcv.sb_cc, 4821 control->length, 4822 0, 4823 0); 4824 #endif 4825 } 4826 } else { 4827 /* Do we need to trim the mbuf? */ 4828 if (m->m_flags & M_NOTIFICATION) { 4829 out_flags |= MSG_NOTIFICATION; 4830 } 4831 if ((in_flags & MSG_PEEK) == 0) { 4832 if (out_flags & MSG_NOTIFICATION) { 4833 /* 4834 * remark this one with the 4835 * notify flag, they read 4836 * only part of the 4837 * notification. 4838 */ 4839 m->m_flags |= M_NOTIFICATION; 4840 } 4841 m->m_data += cp_len; 4842 m->m_len -= cp_len; 4843 #ifdef SCTP_SB_LOGGING 4844 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len); 4845 #endif 4846 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 4847 if (stcb) { 4848 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 4849 } 4850 copied_so_far += cp_len; 4851 embuf = m; 4852 freed_so_far += cp_len; 4853 #ifdef SCTP_SB_LOGGING 4854 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 4855 SCTP_LOG_SBRESULT, 0); 4856 #endif 4857 alen = atomic_fetchadd_int(&control->length, -(cp_len)); 4858 if (alen < cp_len) { 4859 panic("Control length goes negative2?"); 4860 } 4861 } else { 4862 copied_so_far += cp_len; 4863 } 4864 } 4865 if ((out_flags & MSG_EOR) || 4866 (uio->uio_resid == 0) 4867 ) { 4868 break; 4869 } 4870 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 4871 (control->do_not_ref_stcb == 0) && 4872 (freed_so_far >= rwnd_req)) { 4873 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 4874 } 4875 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4876 sctp_misc_ints(SCTP_SORCV_BOTWHILE, 4877 so->so_rcv.sb_cc, 4878 control->length, 4879 0, 4880 0); 4881 #endif 4882 4883 } /* end while(m) */ 4884 /* 4885 * At this point we have looked at it all and we either have 4886 * a MSG_EOR/or read all the user wants... <OR> 4887 * control->length == 0. 4888 */ 4889 if ((out_flags & MSG_EOR) && 4890 ((in_flags & MSG_PEEK) == 0)) { 4891 /* we are done with this control */ 4892 if (control->length == 0) { 4893 if (control->data) { 4894 #ifdef INVARIENTS 4895 panic("control->data not null at read eor?"); 4896 #else 4897 printf("Strange, data left in the control buffer .. 
invarients would panic?\n"); 4898 sctp_m_freem(control->data); 4899 control->data = NULL; 4900 #endif 4901 } 4902 done_with_control: 4903 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4904 sctp_misc_ints(SCTP_SORCV_FREECTL, 4905 so->so_rcv.sb_cc, 4906 0, 4907 0, 4908 0); 4909 #endif 4910 if (TAILQ_NEXT(control, next) == NULL) { 4911 /* 4912 * If we don't have a next we need a 4913 * lock, if there is a next interupt 4914 * is filling ahead of us and we 4915 * don't need a lock to remove this 4916 * guy (which is the head of the 4917 * queue). 4918 */ 4919 if (hold_rlock == 0) { 4920 SCTP_STAT_INCR(sctps_locks_in_rcvc); 4921 SCTP_INP_READ_LOCK(inp); 4922 hold_rlock = 1; 4923 } 4924 } 4925 TAILQ_REMOVE(&inp->read_queue, control, next); 4926 /* Add back any hiddend data */ 4927 if (control->held_length) { 4928 held_length = 0; 4929 control->held_length = 0; 4930 wakeup_read_socket = 1; 4931 } 4932 no_rcv_needed = control->do_not_ref_stcb; 4933 sctp_free_remote_addr(control->whoFrom); 4934 control->data = NULL; 4935 sctp_free_a_readq(stcb, control); 4936 control = NULL; 4937 if ((freed_so_far >= rwnd_req) && (no_rcv_needed == 0)) 4938 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 4939 4940 } else { 4941 /* 4942 * The user did not read all of this 4943 * message, turn off the returned MSG_EOR 4944 * since we are leaving more behind on the 4945 * control to read. 4946 */ 4947 #ifdef INVARIENTS 4948 if (control->end_added && (control->data == NULL) && 4949 (control->tail_mbuf == NULL)) { 4950 panic("Gak, control->length is corrupt?"); 4951 } 4952 #endif 4953 no_rcv_needed = control->do_not_ref_stcb; 4954 out_flags &= ~MSG_EOR; 4955 } 4956 } 4957 if (out_flags & MSG_EOR) { 4958 goto release; 4959 } 4960 if ((uio->uio_resid == 0) || 4961 ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1))) 4962 ) { 4963 goto release; 4964 } 4965 /* 4966 * If I hit here the receiver wants more and this message is 4967 * NOT done (pd-api). So two questions. Can we block? if not 4968 * we are done. Did the user NOT set MSG_WAITALL? 4969 */ 4970 if (block_allowed == 0) { 4971 goto release; 4972 } 4973 /* 4974 * We need to wait for more data a few things: - We don't 4975 * sbunlock() so we don't get someone else reading. - We 4976 * must be sure to account for the case where what is added 4977 * is NOT to our control when we wakeup. 4978 */ 4979 4980 /* 4981 * Do we need to tell the transport a rwnd update might be 4982 * needed before we go to sleep? 
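 * Only when we have freed at least rwnd_req bytes and are not just
 * peeking; sctp_user_rcvd() then decides whether a window-update
 * SACK really goes out.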
4983 */ 4984 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 4985 ((freed_so_far >= rwnd_req) && 4986 (control->do_not_ref_stcb == 0) && 4987 (no_rcv_needed == 0))) { 4988 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 4989 } 4990 wait_some_more: 4991 if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE) { 4992 goto release; 4993 } 4994 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 4995 goto release; 4996 4997 if (hold_rlock == 1) { 4998 SCTP_INP_READ_UNLOCK(inp); 4999 hold_rlock = 0; 5000 } 5001 if (hold_sblock == 0) { 5002 SOCKBUF_LOCK(&so->so_rcv); 5003 hold_sblock = 1; 5004 } 5005 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 5006 if (stcb) 5007 sctp_misc_ints(SCTP_SORECV_BLOCKSB, 5008 freed_so_far, 5009 stcb->asoc.my_rwnd, 5010 so->so_rcv.sb_cc, 5011 uio->uio_resid); 5012 else 5013 sctp_misc_ints(SCTP_SORECV_BLOCKSB, 5014 freed_so_far, 5015 0, 5016 so->so_rcv.sb_cc, 5017 uio->uio_resid); 5018 #endif 5019 if (so->so_rcv.sb_cc <= control->held_length) { 5020 error = sbwait(&so->so_rcv); 5021 if (error) { 5022 goto release; 5023 } 5024 control->held_length = 0; 5025 } 5026 if (hold_sblock) { 5027 SOCKBUF_UNLOCK(&so->so_rcv); 5028 hold_sblock = 0; 5029 } 5030 if (control->length == 0) { 5031 /* still nothing here */ 5032 if (control->end_added == 1) { 5033 /* he aborted, or is done i.e.did a shutdown */ 5034 out_flags |= MSG_EOR; 5035 goto done_with_control; 5036 } 5037 if (so->so_rcv.sb_cc > held_length) { 5038 SCTP_STAT_INCR(sctps_locks_in_rcvf); 5039 control->held_length = so->so_rcv.sb_cc; 5040 held_length = 0; 5041 } 5042 goto wait_some_more; 5043 } else if (control->data == NULL) { 5044 /* 5045 * we must re-sync since data is probably being 5046 * added 5047 */ 5048 SCTP_INP_READ_LOCK(inp); 5049 if ((control->length > 0) && (control->data == NULL)) { 5050 /* 5051 * big trouble.. we have the lock and its 5052 * corrupt? 5053 */ 5054 panic("Impossible data==NULL length !=0"); 5055 } 5056 SCTP_INP_READ_UNLOCK(inp); 5057 /* We will fall around to get more data */ 5058 } 5059 goto get_more_data; 5060 } else { 5061 /* copy out the mbuf chain */ 5062 get_more_data2: 5063 /* 5064 * Do we have a uio, I doubt it if so we grab the size from 5065 * it, if not you get it all 5066 */ 5067 if (uio) 5068 cp_len = uio->uio_resid; 5069 else 5070 cp_len = control->length; 5071 5072 if ((uint32_t) cp_len >= control->length) { 5073 /* easy way */ 5074 if ((control->end_added == 0) || 5075 (TAILQ_NEXT(control, next) == NULL)) { 5076 /* Need to get rlock */ 5077 if (hold_rlock == 0) { 5078 SCTP_INP_READ_LOCK(inp); 5079 hold_rlock = 1; 5080 } 5081 } 5082 if (control->tail_mbuf->m_flags & M_EOR) { 5083 out_flags |= MSG_EOR; 5084 } 5085 if (control->data->m_flags & M_NOTIFICATION) { 5086 out_flags |= MSG_NOTIFICATION; 5087 } 5088 if (uio) 5089 uio->uio_resid -= control->length; 5090 *mp = control->data; 5091 m = control->data; 5092 while (m) { 5093 #ifdef SCTP_SB_LOGGING 5094 sctp_sblog(&so->so_rcv, 5095 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, m->m_len); 5096 #endif 5097 sctp_sbfree(control, stcb, &so->so_rcv, m); 5098 freed_so_far += m->m_len; 5099 #ifdef SCTP_SB_LOGGING 5100 sctp_sblog(&so->so_rcv, 5101 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5102 #endif 5103 m = m->m_next; 5104 } 5105 control->data = control->tail_mbuf = NULL; 5106 control->length = 0; 5107 if (out_flags & MSG_EOR) { 5108 /* Done with this control */ 5109 goto done_with_control; 5110 } 5111 /* still more to do with this conntrol */ 5112 /* do we really support msg_waitall here? 
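 * We only keep waiting when blocking is allowed and the caller set
 * MSG_WAITALL; otherwise release and return what we have.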
*/ 5113 if ((block_allowed == 0) || 5114 ((in_flags & MSG_WAITALL) == 0)) { 5115 goto release; 5116 } 5117 wait_some_more2: 5118 if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE) 5119 goto release; 5120 if (hold_rlock == 1) { 5121 SCTP_INP_READ_UNLOCK(inp); 5122 hold_rlock = 0; 5123 } 5124 if (hold_sblock == 0) { 5125 SOCKBUF_LOCK(&so->so_rcv); 5126 hold_sblock = 1; 5127 } 5128 if (so->so_rcv.sb_cc <= control->held_length) { 5129 error = sbwait(&so->so_rcv); 5130 if (error) { 5131 goto release; 5132 } 5133 } 5134 if (hold_sblock) { 5135 SOCKBUF_UNLOCK(&so->so_rcv); 5136 hold_sblock = 0; 5137 } 5138 if (control->length == 0) { 5139 /* still nothing here */ 5140 if (control->end_added == 1) { 5141 /* 5142 * he aborted, or is done i.e. 5143 * shutdown 5144 */ 5145 out_flags |= MSG_EOR; 5146 goto done_with_control; 5147 } 5148 if (so->so_rcv.sb_cc > held_length) { 5149 control->held_length = so->so_rcv.sb_cc; 5150 /* 5151 * We don't use held_length while 5152 * getting a message 5153 */ 5154 held_length = 0; 5155 } 5156 goto wait_some_more2; 5157 } 5158 goto get_more_data2; 5159 } else { 5160 /* hard way mbuf by mbuf */ 5161 m = control->data; 5162 if (control->end_added == 0) { 5163 /* need the rlock */ 5164 if (hold_rlock == 0) { 5165 SCTP_INP_READ_LOCK(inp); 5166 hold_rlock = 1; 5167 } 5168 } 5169 if (m->m_flags & M_NOTIFICATION) { 5170 out_flags |= MSG_NOTIFICATION; 5171 } 5172 while ((m) && (cp_len > 0)) { 5173 if (cp_len >= m->m_len) { 5174 *mp = m; 5175 atomic_subtract_int(&control->length, m->m_len); 5176 if (uio) 5177 uio->uio_resid -= m->m_len; 5178 cp_len -= m->m_len; 5179 control->data = m->m_next; 5180 m->m_next = NULL; 5181 #ifdef SCTP_SB_LOGGING 5182 sctp_sblog(&so->so_rcv, 5183 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, m->m_len); 5184 #endif 5185 sctp_sbfree(control, stcb, &so->so_rcv, m); 5186 freed_so_far += m->m_len; 5187 #ifdef SCTP_SB_LOGGING 5188 sctp_sblog(&so->so_rcv, 5189 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5190 #endif 5191 mp = &m->m_next; 5192 m = control->data; 5193 } else { 5194 /* 5195 * got all he wants and its part of 5196 * this mbuf only. 5197 */ 5198 if (uio) 5199 uio->uio_resid -= m->m_len; 5200 cp_len -= m->m_len; 5201 if (hold_rlock) { 5202 SCTP_INP_READ_UNLOCK(inp); 5203 hold_rlock = 0; 5204 } 5205 if (hold_sblock) { 5206 SOCKBUF_UNLOCK(&so->so_rcv); 5207 hold_sblock = 0; 5208 } 5209 splx(s); 5210 *mp = sctp_m_copym(m, 0, cp_len, 5211 M_TRYWAIT 5212 ); 5213 s = splnet(); 5214 #ifdef SCTP_LOCK_LOGGING 5215 sctp_log_lock(inp, stcb, SCTP_LOG_LOCK_SOCKBUF_R); 5216 #endif 5217 if (hold_sblock == 0) { 5218 SOCKBUF_LOCK(&so->so_rcv); 5219 hold_sblock = 1; 5220 } 5221 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5222 goto release; 5223 5224 if (stcb && 5225 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5226 no_rcv_needed = 1; 5227 } 5228 m->m_data += cp_len; 5229 m->m_len -= cp_len; 5230 #ifdef SCTP_SB_LOGGING 5231 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5232 #endif 5233 freed_so_far += cp_len; 5234 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5235 if (stcb) { 5236 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5237 if ((freed_so_far >= rwnd_req) && 5238 (control->do_not_ref_stcb == 0) && 5239 (no_rcv_needed == 0)) 5240 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5241 } 5242 #ifdef SCTP_SB_LOGGING 5243 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, 5244 SCTP_LOG_SBRESULT, 0); 5245 #endif 5246 if (out_flags & MSG_NOTIFICATION) { 5247 /* 5248 * remark the first mbuf if 5249 * they took a partial read. 5250 */ 5251 control->data->m_flags |= M_NOTIFICATION; 5252 } 5253 goto release; 5254 } 5255 } 5256 } 5257 } 5258 release: 5259 if (hold_rlock == 1) { 5260 SCTP_INP_READ_UNLOCK(inp); 5261 hold_rlock = 0; 5262 } 5263 if (hold_sblock == 0) { 5264 SOCKBUF_LOCK(&so->so_rcv); 5265 hold_sblock = 1; 5266 } 5267 sbunlock(&so->so_rcv); 5268 5269 release_unlocked: 5270 if (hold_sblock) { 5271 SOCKBUF_UNLOCK(&so->so_rcv); 5272 hold_sblock = 0; 5273 } 5274 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 5275 if ((freed_so_far >= rwnd_req) && 5276 (control && (control->do_not_ref_stcb == 0)) && 5277 (no_rcv_needed == 0)) 5278 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5279 } 5280 if (msg_flags) 5281 *msg_flags |= out_flags; 5282 out: 5283 if (hold_rlock == 1) { 5284 SCTP_INP_READ_UNLOCK(inp); 5285 hold_rlock = 0; 5286 } 5287 if (hold_sblock) { 5288 SOCKBUF_UNLOCK(&so->so_rcv); 5289 hold_sblock = 0; 5290 } 5291 if (freecnt_applied) { 5292 /* 5293 * The lock on the socket buffer protects us so the free 5294 * code will stop. But since we used the socketbuf lock and 5295 * the sender uses the tcb_lock to increment, we need to use 5296 * the atomic add to the refcnt. 5297 */ 5298 if (stcb == NULL) { 5299 panic("stcb for refcnt has gone NULL?"); 5300 } 5301 atomic_add_int(&stcb->asoc.refcnt, -1); 5302 freecnt_applied = 0; 5303 /* Save the value back for next time */ 5304 stcb->freed_by_sorcv_sincelast = freed_so_far; 5305 } 5306 splx(s); 5307 #ifdef SCTP_RECV_RWND_LOGGING 5308 if (stcb) { 5309 sctp_misc_ints(SCTP_SORECV_DONE, 5310 freed_so_far, 5311 ((uio) ? (slen - uio->uio_resid) : slen), 5312 stcb->asoc.my_rwnd, 5313 so->so_rcv.sb_cc); 5314 } else { 5315 sctp_misc_ints(SCTP_SORECV_DONE, 5316 freed_so_far, 5317 ((uio) ? 
(slen - uio->uio_resid) : slen), 5318 0, 5319 so->so_rcv.sb_cc); 5320 } 5321 #endif 5322 if (wakeup_read_socket) { 5323 sctp_sorwakeup(inp, so); 5324 } 5325 return (error); 5326 } 5327 5328 5329 #ifdef SCTP_MBUF_LOGGING 5330 struct mbuf * 5331 sctp_m_free(struct mbuf *m) 5332 { 5333 if (m->m_flags & M_EXT) { 5334 sctp_log_mb(m, SCTP_MBUF_IFREE); 5335 } 5336 return (m_free(m)); 5337 } 5338 5339 void 5340 sctp_m_freem(struct mbuf *mb) 5341 { 5342 while (mb != NULL) 5343 mb = sctp_m_free(mb); 5344 } 5345 5346 #endif 5347 5348 5349 int 5350 sctp_soreceive(so, psa, uio, mp0, controlp, flagsp) 5351 struct socket *so; 5352 struct sockaddr **psa; 5353 struct uio *uio; 5354 struct mbuf **mp0; 5355 struct mbuf **controlp; 5356 int *flagsp; 5357 { 5358 int error, fromlen; 5359 uint8_t sockbuf[256]; 5360 struct sockaddr *from; 5361 struct sctp_extrcvinfo sinfo; 5362 int filling_sinfo = 1; 5363 struct sctp_inpcb *inp; 5364 5365 inp = (struct sctp_inpcb *)so->so_pcb; 5366 /* pickup the assoc we are reading from */ 5367 if (inp == NULL) { 5368 return (EINVAL); 5369 } 5370 if ((sctp_is_feature_off(inp, 5371 SCTP_PCB_FLAGS_RECVDATAIOEVNT)) || 5372 (controlp == NULL)) { 5373 /* user does not want the sndrcv ctl */ 5374 filling_sinfo = 0; 5375 } 5376 if (psa) { 5377 from = (struct sockaddr *)sockbuf; 5378 fromlen = sizeof(sockbuf); 5379 from->sa_len = 0; 5380 } else { 5381 from = NULL; 5382 fromlen = 0; 5383 } 5384 5385 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp, 5386 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 5387 if ((controlp) && (filling_sinfo)) { 5388 /* copy back the sinfo in a CMSG format */ 5389 if (filling_sinfo) 5390 *controlp = sctp_build_ctl_nchunk(inp, 5391 (struct sctp_sndrcvinfo *)&sinfo); 5392 else 5393 *controlp = NULL; 5394 } 5395 if (psa) { 5396 /* copy back the address info */ 5397 if (from && from->sa_len) { 5398 *psa = sodupsockaddr(from, M_NOWAIT); 5399 } else { 5400 *psa = NULL; 5401 } 5402 } 5403 return (error); 5404 } 5405
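/*
 * Illustrative sketch (not part of the original file, kept under "#if 0" so
 * it is never compiled into the kernel): shows the intended behaviour of the
 * address helpers defined earlier in this file.  The function name and the
 * port numbers below are made up for the example only.
 */
#if 0
static void
sctp_addr_helpers_example(void)
{
	struct sockaddr_in a, b;

	bzero(&a, sizeof(a));
	a.sin_len = sizeof(a);
	a.sin_family = AF_INET;
	a.sin_port = htons(5001);
	a.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	b = a;
	b.sin_port = htons(5002);	/* same address, different port */

	/* prints "IPv4 address: 127.0.0.1:5001" */
	sctp_print_address((struct sockaddr *)&a);

	/*
	 * sctp_cmpaddr() is a "scopeless", address-only compare: the ports
	 * are ignored, so these two sockaddrs compare as equal (returns 1).
	 */
	if (sctp_cmpaddr((struct sockaddr *)&a, (struct sockaddr *)&b))
		printf("addresses match\n");
}
#endif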