1 /*- 2 * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <netinet/sctp_pcb.h> 38 #include <netinet/sctputil.h> 39 #include <netinet/sctp_var.h> 40 #ifdef INET6 41 #include <netinet6/sctp6_var.h> 42 #endif 43 #include <netinet/sctp_header.h> 44 #include <netinet/sctp_output.h> 45 #include <netinet/sctp_uio.h> 46 #include <netinet/sctp_timer.h> 47 #include <netinet/sctp_crc32.h> 48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */ 49 #include <netinet/sctp_auth.h> 50 #include <netinet/sctp_asconf.h> 51 52 extern int sctp_warm_the_crc32_table; 53 54 #define NUMBER_OF_MTU_SIZES 18 55 56 #ifdef SCTP_DEBUG 57 extern uint32_t sctp_debug_on; 58 59 #endif 60 61 62 #ifdef SCTP_STAT_LOGGING 63 int global_sctp_cwnd_log_at = 0; 64 int global_sctp_cwnd_log_rolled = 0; 65 struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE]; 66 67 static uint32_t 68 sctp_get_time_of_event(void) 69 { 70 struct timeval now; 71 uint32_t timeval; 72 73 SCTP_GETPTIME_TIMEVAL(&now); 74 timeval = (now.tv_sec % 0x00000fff); 75 timeval <<= 20; 76 timeval |= now.tv_usec & 0xfffff; 77 return (timeval); 78 } 79 80 81 void 82 sctp_clr_stat_log(void) 83 { 84 global_sctp_cwnd_log_at = 0; 85 global_sctp_cwnd_log_rolled = 0; 86 } 87 88 89 void 90 sctp_sblog(struct sockbuf *sb, 91 struct sctp_tcb *stcb, int from, int incr) 92 { 93 int sctp_cwnd_log_at; 94 95 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 96 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 97 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 98 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SB; 99 sctp_clog[sctp_cwnd_log_at].x.sb.stcb = stcb; 100 sctp_clog[sctp_cwnd_log_at].x.sb.so_sbcc = sb->sb_cc; 101 if (stcb) 102 sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = stcb->asoc.sb_cc; 103 else 104 
sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = 0; 105 sctp_clog[sctp_cwnd_log_at].x.sb.incr = incr; 106 } 107 108 void 109 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc) 110 { 111 int sctp_cwnd_log_at; 112 113 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 114 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 115 sctp_clog[sctp_cwnd_log_at].from = 0; 116 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CLOSE; 117 sctp_clog[sctp_cwnd_log_at].x.close.inp = (void *)inp; 118 sctp_clog[sctp_cwnd_log_at].x.close.sctp_flags = inp->sctp_flags; 119 if (stcb) { 120 sctp_clog[sctp_cwnd_log_at].x.close.stcb = (void *)stcb; 121 sctp_clog[sctp_cwnd_log_at].x.close.state = (uint16_t) stcb->asoc.state; 122 } else { 123 sctp_clog[sctp_cwnd_log_at].x.close.stcb = 0; 124 sctp_clog[sctp_cwnd_log_at].x.close.state = 0; 125 } 126 sctp_clog[sctp_cwnd_log_at].x.close.loc = loc; 127 } 128 129 130 void 131 rto_logging(struct sctp_nets *net, int from) 132 { 133 int sctp_cwnd_log_at; 134 135 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 136 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 137 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 138 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RTT; 139 sctp_clog[sctp_cwnd_log_at].x.rto.net = (void *)net; 140 sctp_clog[sctp_cwnd_log_at].x.rto.rtt = net->prev_rtt; 141 sctp_clog[sctp_cwnd_log_at].x.rto.rttvar = net->rtt_variance; 142 sctp_clog[sctp_cwnd_log_at].x.rto.direction = net->rto_variance_dir; 143 } 144 145 void 146 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from) 147 { 148 int sctp_cwnd_log_at; 149 150 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 151 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 152 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 153 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM; 154 sctp_clog[sctp_cwnd_log_at].x.strlog.stcb = stcb; 155 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn; 156 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq; 157 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0; 158 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0; 159 sctp_clog[sctp_cwnd_log_at].x.strlog.strm = stream; 160 } 161 162 void 163 sctp_log_nagle_event(struct sctp_tcb *stcb, int action) 164 { 165 int sctp_cwnd_log_at; 166 167 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 168 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 169 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) action; 170 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_NAGLE; 171 sctp_clog[sctp_cwnd_log_at].x.nagle.stcb = (void *)stcb; 172 sctp_clog[sctp_cwnd_log_at].x.nagle.total_flight = stcb->asoc.total_flight; 173 sctp_clog[sctp_cwnd_log_at].x.nagle.total_in_queue = stcb->asoc.total_output_queue_size; 174 sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue; 175 sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_flight = stcb->asoc.total_flight_count; 176 } 177 178 179 void 180 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from) 181 { 182 int sctp_cwnd_log_at; 183 184 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 185 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 186 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 187 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SACK; 188 sctp_clog[sctp_cwnd_log_at].x.sack.cumack = cumack; 189 sctp_clog[sctp_cwnd_log_at].x.sack.oldcumack = 
old_cumack; 190 sctp_clog[sctp_cwnd_log_at].x.sack.tsn = tsn; 191 sctp_clog[sctp_cwnd_log_at].x.sack.numGaps = gaps; 192 sctp_clog[sctp_cwnd_log_at].x.sack.numDups = dups; 193 } 194 195 void 196 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from) 197 { 198 int sctp_cwnd_log_at; 199 200 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 201 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 202 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 203 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAP; 204 sctp_clog[sctp_cwnd_log_at].x.map.base = map; 205 sctp_clog[sctp_cwnd_log_at].x.map.cum = cum; 206 sctp_clog[sctp_cwnd_log_at].x.map.high = high; 207 } 208 209 void 210 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, 211 int from) 212 { 213 int sctp_cwnd_log_at; 214 215 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 216 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 217 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 218 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_FR; 219 sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn; 220 sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn; 221 sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn; 222 } 223 224 225 void 226 sctp_log_mb(struct mbuf *m, int from) 227 { 228 int sctp_cwnd_log_at; 229 230 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 231 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 232 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 233 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBUF; 234 sctp_clog[sctp_cwnd_log_at].x.mb.mp = m; 235 sctp_clog[sctp_cwnd_log_at].x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m)); 236 sctp_clog[sctp_cwnd_log_at].x.mb.size = (uint16_t) (SCTP_BUF_LEN(m)); 237 sctp_clog[sctp_cwnd_log_at].x.mb.data = SCTP_BUF_AT(m, 0); 238 if (SCTP_BUF_IS_EXTENDED(m)) { 239 sctp_clog[sctp_cwnd_log_at].x.mb.ext = SCTP_BUF_EXTEND_BASE(m); 240 sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m)); 241 } else { 242 sctp_clog[sctp_cwnd_log_at].x.mb.ext = 0; 243 sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = 0; 244 } 245 } 246 247 248 void 249 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, 250 int from) 251 { 252 int sctp_cwnd_log_at; 253 254 if (control == NULL) { 255 printf("Gak log of NULL?\n"); 256 return; 257 } 258 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 259 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 260 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 261 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM; 262 sctp_clog[sctp_cwnd_log_at].x.strlog.stcb = control->stcb; 263 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = control->sinfo_tsn; 264 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = control->sinfo_ssn; 265 sctp_clog[sctp_cwnd_log_at].x.strlog.strm = control->sinfo_stream; 266 if (poschk != NULL) { 267 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = poschk->sinfo_tsn; 268 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = poschk->sinfo_ssn; 269 } else { 270 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0; 271 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0; 272 } 273 } 274 275 void 276 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from) 277 { 278 int sctp_cwnd_log_at; 279 280 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 281 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 282 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 283 
sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CWND; 284 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net; 285 if (stcb->asoc.send_queue_cnt > 255) 286 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255; 287 else 288 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 289 if (stcb->asoc.stream_queue_cnt > 255) 290 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255; 291 else 292 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 293 294 if (net) { 295 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd; 296 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size; 297 sctp_clog[sctp_cwnd_log_at].x.cwnd.pseudo_cumack = net->pseudo_cumack; 298 sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack; 299 sctp_clog[sctp_cwnd_log_at].x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack; 300 } 301 if (SCTP_CWNDLOG_PRESEND == from) { 302 sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd; 303 } 304 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment; 305 } 306 307 void 308 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from) 309 { 310 int sctp_cwnd_log_at; 311 312 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 313 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 314 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 315 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_LOCK_EVENT; 316 if (inp) { 317 sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)inp->sctp_socket; 318 319 } else { 320 sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)NULL; 321 } 322 sctp_clog[sctp_cwnd_log_at].x.lock.inp = (void *)inp; 323 if (stcb) { 324 sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx); 325 } else { 326 sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = SCTP_LOCK_UNKNOWN; 327 } 328 if (inp) { 329 sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = mtx_owned(&inp->inp_mtx); 330 sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = mtx_owned(&inp->inp_create_mtx); 331 } else { 332 sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = SCTP_LOCK_UNKNOWN; 333 sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = SCTP_LOCK_UNKNOWN; 334 } 335 sctp_clog[sctp_cwnd_log_at].x.lock.info_lock = mtx_owned(&sctppcbinfo.ipi_ep_mtx); 336 if (inp->sctp_socket) { 337 sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 338 sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 339 sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx)); 340 } else { 341 sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = SCTP_LOCK_UNKNOWN; 342 sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN; 343 sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN; 344 } 345 } 346 347 void 348 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from) 349 { 350 int sctp_cwnd_log_at; 351 352 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 353 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 354 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 355 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAXBURST; 356 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net; 357 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error; 358 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size; 359 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst; 360 if 
(stcb->asoc.send_queue_cnt > 255) 361 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255; 362 else 363 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 364 if (stcb->asoc.stream_queue_cnt > 255) 365 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255; 366 else 367 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 368 } 369 370 void 371 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead) 372 { 373 int sctp_cwnd_log_at; 374 375 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 376 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 377 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 378 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND; 379 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd; 380 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size; 381 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead; 382 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0; 383 } 384 385 void 386 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval) 387 { 388 int sctp_cwnd_log_at; 389 390 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 391 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 392 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 393 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND; 394 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd; 395 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size; 396 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead; 397 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval; 398 } 399 400 void 401 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt) 402 { 403 int sctp_cwnd_log_at; 404 405 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 406 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 407 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 408 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBCNT; 409 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq; 410 sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change = book; 411 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q; 412 sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt; 413 } 414 415 void 416 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d) 417 { 418 int sctp_cwnd_log_at; 419 420 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 421 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 422 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 423 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_MISC_EVENT; 424 sctp_clog[sctp_cwnd_log_at].x.misc.log1 = a; 425 sctp_clog[sctp_cwnd_log_at].x.misc.log2 = b; 426 sctp_clog[sctp_cwnd_log_at].x.misc.log3 = c; 427 sctp_clog[sctp_cwnd_log_at].x.misc.log4 = d; 428 } 429 430 void 431 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from) 432 { 433 int sctp_cwnd_log_at; 434 435 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 436 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 437 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 438 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_WAKE; 439 sctp_clog[sctp_cwnd_log_at].x.wake.stcb = (void *)stcb; 440 sctp_clog[sctp_cwnd_log_at].x.wake.wake_cnt = wake_cnt; 441 sctp_clog[sctp_cwnd_log_at].x.wake.flight = stcb->asoc.total_flight_count; 442 sctp_clog[sctp_cwnd_log_at].x.wake.send_q = 
stcb->asoc.send_queue_cnt; 443 sctp_clog[sctp_cwnd_log_at].x.wake.sent_q = stcb->asoc.sent_queue_cnt; 444 445 if (stcb->asoc.stream_queue_cnt < 0xff) 446 sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt; 447 else 448 sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = 0xff; 449 450 if (stcb->asoc.chunks_on_out_queue < 0xff) 451 sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue; 452 else 453 sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = 0xff; 454 455 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags = 0; 456 /* set in the defered mode stuff */ 457 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) 458 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 1; 459 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) 460 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 2; 461 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) 462 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 4; 463 /* what about the sb */ 464 if (stcb->sctp_socket) { 465 struct socket *so = stcb->sctp_socket; 466 467 sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff)); 468 } else { 469 sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = 0xff; 470 } 471 } 472 473 void 474 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen) 475 { 476 int sctp_cwnd_log_at; 477 478 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 479 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 480 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 481 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_BLOCK; 482 sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size; 483 sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt); 484 sctp_clog[sctp_cwnd_log_at].x.blk.peer_rwnd = asoc->peers_rwnd; 485 sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt; 486 sctp_clog[sctp_cwnd_log_at].x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue; 487 sctp_clog[sctp_cwnd_log_at].x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024); 488 sctp_clog[sctp_cwnd_log_at].x.blk.sndlen = sendlen; 489 } 490 491 int 492 sctp_fill_stat_log(struct mbuf *m) 493 { 494 int sctp_cwnd_log_at; 495 struct sctp_cwnd_log_req *req; 496 size_t size_limit; 497 int num, i, at, cnt_out = 0; 498 499 if (m == NULL) 500 return (EINVAL); 501 502 size_limit = (SCTP_BUF_LEN(m) - sizeof(struct sctp_cwnd_log_req)); 503 if (size_limit < sizeof(struct sctp_cwnd_log)) { 504 return (EINVAL); 505 } 506 sctp_cwnd_log_at = global_sctp_cwnd_log_at; 507 req = mtod(m, struct sctp_cwnd_log_req *); 508 num = size_limit / sizeof(struct sctp_cwnd_log); 509 if (global_sctp_cwnd_log_rolled) { 510 req->num_in_log = SCTP_STAT_LOG_SIZE; 511 } else { 512 req->num_in_log = sctp_cwnd_log_at; 513 /* 514 * if the log has not rolled, we don't let you have old 515 * data. 516 */ 517 if (req->end_at > sctp_cwnd_log_at) { 518 req->end_at = sctp_cwnd_log_at; 519 } 520 } 521 if ((num < SCTP_STAT_LOG_SIZE) && 522 ((global_sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) { 523 /* we can't return all of it */ 524 if (((req->start_at == 0) && (req->end_at == 0)) || 525 (req->start_at >= SCTP_STAT_LOG_SIZE) || 526 (req->end_at >= SCTP_STAT_LOG_SIZE)) { 527 /* No user request or user is wacked. 
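 * In that case hand back the most recent 'num' entries: end_at is set
 * to the newest slot and start_at is computed by wrapping backwards
 * around the SCTP_STAT_LOG_SIZE circular buffer.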
*/ 528 req->num_ret = num; 529 req->end_at = sctp_cwnd_log_at - 1; 530 if ((sctp_cwnd_log_at - num) < 0) { 531 int cc; 532 533 cc = num - sctp_cwnd_log_at; 534 req->start_at = SCTP_STAT_LOG_SIZE - cc; 535 } else { 536 req->start_at = sctp_cwnd_log_at - num; 537 } 538 } else { 539 /* a user request */ 540 int cc; 541 542 if (req->start_at > req->end_at) { 543 cc = (SCTP_STAT_LOG_SIZE - req->start_at) + 544 (req->end_at + 1); 545 } else { 546 547 cc = (req->end_at - req->start_at) + 1; 548 } 549 if (cc < num) { 550 num = cc; 551 } 552 req->num_ret = num; 553 } 554 } else { 555 /* We can return all of it */ 556 req->start_at = 0; 557 req->end_at = sctp_cwnd_log_at - 1; 558 req->num_ret = sctp_cwnd_log_at; 559 } 560 #ifdef INVARIANTS 561 if (req->num_ret > num) { 562 panic("Bad statlog get?"); 563 } 564 #endif 565 for (i = 0, at = req->start_at; i < req->num_ret; i++) { 566 req->log[i] = sctp_clog[at]; 567 cnt_out++; 568 at++; 569 if (at >= SCTP_STAT_LOG_SIZE) 570 at = 0; 571 } 572 SCTP_BUF_LEN(m) = (cnt_out * sizeof(struct sctp_cwnd_log)) + sizeof(struct sctp_cwnd_log_req); 573 return (0); 574 } 575 576 #endif 577 578 #ifdef SCTP_AUDITING_ENABLED 579 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2]; 580 static int sctp_audit_indx = 0; 581 582 static 583 void 584 sctp_print_audit_report(void) 585 { 586 int i; 587 int cnt; 588 589 cnt = 0; 590 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) { 591 if ((sctp_audit_data[i][0] == 0xe0) && 592 (sctp_audit_data[i][1] == 0x01)) { 593 cnt = 0; 594 printf("\n"); 595 } else if (sctp_audit_data[i][0] == 0xf0) { 596 cnt = 0; 597 printf("\n"); 598 } else if ((sctp_audit_data[i][0] == 0xc0) && 599 (sctp_audit_data[i][1] == 0x01)) { 600 printf("\n"); 601 cnt = 0; 602 } 603 printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 604 (uint32_t) sctp_audit_data[i][1]); 605 cnt++; 606 if ((cnt % 14) == 0) 607 printf("\n"); 608 } 609 for (i = 0; i < sctp_audit_indx; i++) { 610 if ((sctp_audit_data[i][0] == 0xe0) && 611 (sctp_audit_data[i][1] == 0x01)) { 612 cnt = 0; 613 printf("\n"); 614 } else if (sctp_audit_data[i][0] == 0xf0) { 615 cnt = 0; 616 printf("\n"); 617 } else if ((sctp_audit_data[i][0] == 0xc0) && 618 (sctp_audit_data[i][1] == 0x01)) { 619 printf("\n"); 620 cnt = 0; 621 } 622 printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 623 (uint32_t) sctp_audit_data[i][1]); 624 cnt++; 625 if ((cnt % 14) == 0) 626 printf("\n"); 627 } 628 printf("\n"); 629 } 630 631 void 632 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 633 struct sctp_nets *net) 634 { 635 int resend_cnt, tot_out, rep, tot_book_cnt; 636 struct sctp_nets *lnet; 637 struct sctp_tmit_chunk *chk; 638 639 sctp_audit_data[sctp_audit_indx][0] = 0xAA; 640 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from; 641 sctp_audit_indx++; 642 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 643 sctp_audit_indx = 0; 644 } 645 if (inp == NULL) { 646 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 647 sctp_audit_data[sctp_audit_indx][1] = 0x01; 648 sctp_audit_indx++; 649 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 650 sctp_audit_indx = 0; 651 } 652 return; 653 } 654 if (stcb == NULL) { 655 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 656 sctp_audit_data[sctp_audit_indx][1] = 0x02; 657 sctp_audit_indx++; 658 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 659 sctp_audit_indx = 0; 660 } 661 return; 662 } 663 sctp_audit_data[sctp_audit_indx][0] = 0xA1; 664 sctp_audit_data[sctp_audit_indx][1] = 665 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 666 sctp_audit_indx++; 667 if (sctp_audit_indx >= 
SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		printf("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		printf("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		printf("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		printf("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				printf("net:%x flight was %d corrected to %d\n",
				    (uint32_t) lnet, lnet->flight_size, tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * a list of sizes based on typical mtu's, used only if next hop size not
 * returned.
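 * find_next_best_mtu() below uses this table to pick the next smaller
 * "standard" MTU when a datagram of a given size could not be sent.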
777 */ 778 static int sctp_mtu_sizes[] = { 779 68, 780 296, 781 508, 782 512, 783 544, 784 576, 785 1006, 786 1492, 787 1500, 788 1536, 789 2002, 790 2048, 791 4352, 792 4464, 793 8166, 794 17914, 795 32000, 796 65535 797 }; 798 799 void 800 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb) 801 { 802 struct sctp_association *asoc; 803 struct sctp_nets *net; 804 805 asoc = &stcb->asoc; 806 807 SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer); 808 SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer); 809 SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); 810 SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer); 811 SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer); 812 SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer); 813 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 814 SCTP_OS_TIMER_STOP(&net->fr_timer.timer); 815 SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer); 816 } 817 } 818 819 int 820 find_next_best_mtu(int totsz) 821 { 822 int i, perfer; 823 824 /* 825 * if we are in here we must find the next best fit based on the 826 * size of the dg that failed to be sent. 827 */ 828 perfer = 0; 829 for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) { 830 if (totsz < sctp_mtu_sizes[i]) { 831 perfer = i - 1; 832 if (perfer < 0) 833 perfer = 0; 834 break; 835 } 836 } 837 return (sctp_mtu_sizes[perfer]); 838 } 839 840 void 841 sctp_fill_random_store(struct sctp_pcb *m) 842 { 843 /* 844 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and 845 * our counter. The result becomes our good random numbers and we 846 * then setup to give these out. Note that we do no locking to 847 * protect this. This is ok, since if competing folks call this we 848 * will get more gobbled gook in the random store whic is what we 849 * want. There is a danger that two guys will use the same random 850 * numbers, but thats ok too since that is random as well :-> 851 */ 852 m->store_at = 0; 853 sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers, 854 sizeof(m->random_numbers), (uint8_t *) & m->random_counter, 855 sizeof(m->random_counter), (uint8_t *) m->random_store); 856 m->random_counter++; 857 } 858 859 uint32_t 860 sctp_select_initial_TSN(struct sctp_pcb *m) 861 { 862 /* 863 * A true implementation should use random selection process to get 864 * the initial stream sequence number, using RFC1750 as a good 865 * guideline 866 */ 867 uint32_t x, *xp; 868 uint8_t *p; 869 870 if (m->initial_sequence_debug != 0) { 871 uint32_t ret; 872 873 ret = m->initial_sequence_debug; 874 m->initial_sequence_debug++; 875 return (ret); 876 } 877 if ((m->store_at + sizeof(u_long)) > SCTP_SIGNATURE_SIZE) { 878 /* Refill the random store */ 879 sctp_fill_random_store(m); 880 } 881 p = &m->random_store[(int)m->store_at]; 882 xp = (uint32_t *) p; 883 x = *xp; 884 m->store_at += sizeof(uint32_t); 885 return (x); 886 } 887 888 uint32_t 889 sctp_select_a_tag(struct sctp_inpcb *m) 890 { 891 u_long x, not_done; 892 struct timeval now; 893 894 SCTP_GETTIME_TIMEVAL(&now); 895 not_done = 1; 896 while (not_done) { 897 x = sctp_select_initial_TSN(&m->sctp_ep); 898 if (x == 0) { 899 /* we never use 0 */ 900 continue; 901 } 902 if (sctp_is_vtag_good(m, x, &now)) { 903 not_done = 0; 904 } 905 } 906 return (x); 907 } 908 909 910 int 911 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc, 912 int for_a_init, uint32_t override_tag) 913 { 914 /* 915 * Anything set to zero is taken care of by the allocation routine's 916 * bzero 917 */ 918 919 /* 920 * Up front select what scoping to apply on addresses I tell my peer 921 * Not sure what to do with these right now, 
we will need to come up 922 * with a way to set them. We may need to pass them through from the 923 * caller in the sctp_aloc_assoc() function. 924 */ 925 int i; 926 927 /* init all variables to a known value. */ 928 asoc->state = SCTP_STATE_INUSE; 929 asoc->max_burst = m->sctp_ep.max_burst; 930 asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 931 asoc->cookie_life = m->sctp_ep.def_cookie_life; 932 asoc->sctp_cmt_on_off = (uint8_t) sctp_cmt_on_off; 933 #ifdef AF_INET 934 asoc->default_tos = m->ip_inp.inp.inp_ip_tos; 935 #else 936 asoc->default_tos = 0; 937 #endif 938 939 #ifdef AF_INET6 940 asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo; 941 #else 942 asoc->default_flowlabel = 0; 943 #endif 944 if (override_tag) { 945 struct timeval now; 946 947 SCTP_GETTIME_TIMEVAL(&now); 948 if (sctp_is_vtag_good(m, override_tag, &now)) { 949 asoc->my_vtag = override_tag; 950 } else { 951 return (ENOMEM); 952 } 953 954 } else { 955 asoc->my_vtag = sctp_select_a_tag(m); 956 } 957 /* Get the nonce tags */ 958 asoc->my_vtag_nonce = sctp_select_a_tag(m); 959 asoc->peer_vtag_nonce = sctp_select_a_tag(m); 960 961 if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) 962 asoc->hb_is_disabled = 1; 963 else 964 asoc->hb_is_disabled = 0; 965 966 asoc->refcnt = 0; 967 asoc->assoc_up_sent = 0; 968 asoc->assoc_id = asoc->my_vtag; 969 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq = 970 sctp_select_initial_TSN(&m->sctp_ep); 971 /* we are optimisitic here */ 972 asoc->peer_supports_pktdrop = 1; 973 974 asoc->sent_queue_retran_cnt = 0; 975 976 /* for CMT */ 977 asoc->last_net_data_came_from = NULL; 978 979 /* This will need to be adjusted */ 980 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 981 asoc->last_acked_seq = asoc->init_seq_number - 1; 982 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 983 asoc->asconf_seq_in = asoc->last_acked_seq; 984 985 /* here we are different, we hold the next one we expect */ 986 asoc->str_reset_seq_in = asoc->last_acked_seq + 1; 987 988 asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max; 989 asoc->initial_rto = m->sctp_ep.initial_rto; 990 991 asoc->max_init_times = m->sctp_ep.max_init_times; 992 asoc->max_send_times = m->sctp_ep.max_send_times; 993 asoc->def_net_failure = m->sctp_ep.def_net_failure; 994 asoc->free_chunk_cnt = 0; 995 996 asoc->iam_blocking = 0; 997 /* ECN Nonce initialization */ 998 asoc->context = m->sctp_context; 999 asoc->def_send = m->def_send; 1000 asoc->ecn_nonce_allowed = 0; 1001 asoc->receiver_nonce_sum = 1; 1002 asoc->nonce_sum_expect_base = 1; 1003 asoc->nonce_sum_check = 1; 1004 asoc->nonce_resync_tsn = 0; 1005 asoc->nonce_wait_for_ecne = 0; 1006 asoc->nonce_wait_tsn = 0; 1007 asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 1008 asoc->pr_sctp_cnt = 0; 1009 asoc->total_output_queue_size = 0; 1010 1011 if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1012 struct in6pcb *inp6; 1013 1014 /* Its a V6 socket */ 1015 inp6 = (struct in6pcb *)m; 1016 asoc->ipv6_addr_legal = 1; 1017 /* Now look at the binding flag to see if V4 will be legal */ 1018 if (SCTP_IPV6_V6ONLY(inp6) == 0) { 1019 asoc->ipv4_addr_legal = 1; 1020 } else { 1021 /* V4 addresses are NOT legal on the association */ 1022 asoc->ipv4_addr_legal = 0; 1023 } 1024 } else { 1025 /* Its a V4 socket, no - V6 */ 1026 asoc->ipv4_addr_legal = 1; 1027 asoc->ipv6_addr_legal = 0; 1028 } 1029 1030 asoc->my_rwnd = max(m->sctp_socket->so_rcv.sb_hiwat, 
SCTP_MINIMAL_RWND); 1031 asoc->peers_rwnd = m->sctp_socket->so_rcv.sb_hiwat; 1032 1033 asoc->smallest_mtu = m->sctp_frag_point; 1034 asoc->minrto = m->sctp_ep.sctp_minrto; 1035 asoc->maxrto = m->sctp_ep.sctp_maxrto; 1036 1037 asoc->locked_on_sending = NULL; 1038 asoc->stream_locked_on = 0; 1039 asoc->ecn_echo_cnt_onq = 0; 1040 asoc->stream_locked = 0; 1041 1042 LIST_INIT(&asoc->sctp_local_addr_list); 1043 TAILQ_INIT(&asoc->nets); 1044 TAILQ_INIT(&asoc->pending_reply_queue); 1045 asoc->last_asconf_ack_sent = NULL; 1046 /* Setup to fill the hb random cache at first HB */ 1047 asoc->hb_random_idx = 4; 1048 1049 asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time; 1050 1051 /* 1052 * Now the stream parameters, here we allocate space for all streams 1053 * that we request by default. 1054 */ 1055 asoc->streamoutcnt = asoc->pre_open_streams = 1056 m->sctp_ep.pre_open_stream_count; 1057 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *, 1058 asoc->streamoutcnt * sizeof(struct sctp_stream_out), 1059 "StreamsOut"); 1060 if (asoc->strmout == NULL) { 1061 /* big trouble no memory */ 1062 return (ENOMEM); 1063 } 1064 for (i = 0; i < asoc->streamoutcnt; i++) { 1065 /* 1066 * inbound side must be set to 0xffff, also NOTE when we get 1067 * the INIT-ACK back (for INIT sender) we MUST reduce the 1068 * count (streamoutcnt) but first check if we sent to any of 1069 * the upper streams that were dropped (if some were). Those 1070 * that were dropped must be notified to the upper layer as 1071 * failed to send. 1072 */ 1073 asoc->strmout[i].next_sequence_sent = 0x0; 1074 TAILQ_INIT(&asoc->strmout[i].outqueue); 1075 asoc->strmout[i].stream_no = i; 1076 asoc->strmout[i].last_msg_incomplete = 0; 1077 asoc->strmout[i].next_spoke.tqe_next = 0; 1078 asoc->strmout[i].next_spoke.tqe_prev = 0; 1079 } 1080 /* Now the mapping array */ 1081 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY; 1082 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size, 1083 "MappingArray"); 1084 if (asoc->mapping_array == NULL) { 1085 SCTP_FREE(asoc->strmout); 1086 return (ENOMEM); 1087 } 1088 memset(asoc->mapping_array, 0, asoc->mapping_array_size); 1089 /* Now the init of the other outqueues */ 1090 TAILQ_INIT(&asoc->free_chunks); 1091 TAILQ_INIT(&asoc->free_strmoq); 1092 TAILQ_INIT(&asoc->out_wheel); 1093 TAILQ_INIT(&asoc->control_send_queue); 1094 TAILQ_INIT(&asoc->send_queue); 1095 TAILQ_INIT(&asoc->sent_queue); 1096 TAILQ_INIT(&asoc->reasmqueue); 1097 TAILQ_INIT(&asoc->resetHead); 1098 asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome; 1099 TAILQ_INIT(&asoc->asconf_queue); 1100 /* authentication fields */ 1101 asoc->authinfo.random = NULL; 1102 asoc->authinfo.assoc_key = NULL; 1103 asoc->authinfo.assoc_keyid = 0; 1104 asoc->authinfo.recv_key = NULL; 1105 asoc->authinfo.recv_keyid = 0; 1106 LIST_INIT(&asoc->shared_keys); 1107 asoc->marked_retrans = 0; 1108 asoc->timoinit = 0; 1109 asoc->timodata = 0; 1110 asoc->timosack = 0; 1111 asoc->timoshutdown = 0; 1112 asoc->timoheartbeat = 0; 1113 asoc->timocookie = 0; 1114 asoc->timoshutdownack = 0; 1115 SCTP_GETTIME_TIMEVAL(&asoc->start_time); 1116 SCTP_GETTIME_TIMEVAL(&asoc->discontinuity_time); 1117 1118 return (0); 1119 } 1120 1121 int 1122 sctp_expand_mapping_array(struct sctp_association *asoc) 1123 { 1124 /* mapping array needs to grow */ 1125 uint8_t *new_array; 1126 uint16_t new_size; 1127 1128 new_size = asoc->mapping_array_size + SCTP_MAPPING_ARRAY_INCR; 1129 SCTP_MALLOC(new_array, uint8_t *, new_size, "MappingArray"); 1130 if (new_array == 
NULL) { 1131 /* can't get more, forget it */ 1132 printf("No memory for expansion of SCTP mapping array %d\n", 1133 new_size); 1134 return (-1); 1135 } 1136 memset(new_array, 0, new_size); 1137 memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size); 1138 SCTP_FREE(asoc->mapping_array); 1139 asoc->mapping_array = new_array; 1140 asoc->mapping_array_size = new_size; 1141 return (0); 1142 } 1143 1144 extern unsigned int sctp_early_fr_msec; 1145 1146 static void 1147 sctp_handle_addr_wq(void) 1148 { 1149 /* deal with the ADDR wq from the rtsock calls */ 1150 struct sctp_laddr *wi; 1151 1152 SCTP_IPI_ADDR_LOCK(); 1153 wi = LIST_FIRST(&sctppcbinfo.addr_wq); 1154 if (wi == NULL) { 1155 SCTP_IPI_ADDR_UNLOCK(); 1156 return; 1157 } 1158 LIST_REMOVE(wi, sctp_nxt_addr); 1159 if (!SCTP_LIST_EMPTY(&sctppcbinfo.addr_wq)) { 1160 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 1161 (struct sctp_inpcb *)NULL, 1162 (struct sctp_tcb *)NULL, 1163 (struct sctp_nets *)NULL); 1164 } 1165 SCTP_IPI_ADDR_UNLOCK(); 1166 if (wi->action == RTM_ADD) { 1167 sctp_add_ip_address(wi->ifa); 1168 } else if (wi->action == RTM_DELETE) { 1169 sctp_delete_ip_address(wi->ifa); 1170 } 1171 IFAFREE(wi->ifa); 1172 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, wi); 1173 SCTP_DECR_LADDR_COUNT(); 1174 } 1175 1176 void 1177 sctp_timeout_handler(void *t) 1178 { 1179 struct sctp_inpcb *inp; 1180 struct sctp_tcb *stcb; 1181 struct sctp_nets *net; 1182 struct sctp_timer *tmr; 1183 int did_output; 1184 struct sctp_iterator *it = NULL; 1185 1186 1187 tmr = (struct sctp_timer *)t; 1188 inp = (struct sctp_inpcb *)tmr->ep; 1189 stcb = (struct sctp_tcb *)tmr->tcb; 1190 net = (struct sctp_nets *)tmr->net; 1191 did_output = 1; 1192 1193 #ifdef SCTP_AUDITING_ENABLED 1194 sctp_audit_log(0xF0, (uint8_t) tmr->type); 1195 sctp_auditing(3, inp, stcb, net); 1196 #endif 1197 1198 /* sanity checks... 
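 * verify that the timer still points at itself (i.e. it has not been
 * freed or reused), that its type is one we know how to service, and
 * that an inpcb is present for every type except the address
 * work-queue timer; if a check fails the handler just returns.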
*/ 1199 if (tmr->self != (void *)tmr) { 1200 /* 1201 * printf("Stale SCTP timer fired (%p), ignoring...\n", 1202 * tmr); 1203 */ 1204 return; 1205 } 1206 tmr->stopped_from = 0xa001; 1207 if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) { 1208 /* 1209 * printf("SCTP timer fired with invalid type: 0x%x\n", 1210 * tmr->type); 1211 */ 1212 return; 1213 } 1214 tmr->stopped_from = 0xa002; 1215 if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) { 1216 return; 1217 } 1218 /* if this is an iterator timeout, get the struct and clear inp */ 1219 tmr->stopped_from = 0xa003; 1220 if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) { 1221 it = (struct sctp_iterator *)inp; 1222 inp = NULL; 1223 } 1224 if (inp) { 1225 SCTP_INP_INCR_REF(inp); 1226 if ((inp->sctp_socket == 0) && 1227 ((tmr->type != SCTP_TIMER_TYPE_INPKILL) && 1228 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) && 1229 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) && 1230 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) && 1231 (tmr->type != SCTP_TIMER_TYPE_ASOCKILL)) 1232 ) { 1233 SCTP_INP_DECR_REF(inp); 1234 return; 1235 } 1236 } 1237 tmr->stopped_from = 0xa004; 1238 if (stcb) { 1239 if (stcb->asoc.state == 0) { 1240 if (inp) { 1241 SCTP_INP_DECR_REF(inp); 1242 } 1243 return; 1244 } 1245 } 1246 tmr->stopped_from = 0xa005; 1247 #ifdef SCTP_DEBUG 1248 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1249 printf("Timer type %d goes off\n", tmr->type); 1250 } 1251 #endif /* SCTP_DEBUG */ 1252 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1253 if (inp) { 1254 SCTP_INP_DECR_REF(inp); 1255 } 1256 return; 1257 } 1258 tmr->stopped_from = 0xa006; 1259 1260 if (stcb) { 1261 atomic_add_int(&stcb->asoc.refcnt, 1); 1262 SCTP_TCB_LOCK(stcb); 1263 atomic_add_int(&stcb->asoc.refcnt, -1); 1264 } 1265 /* record in stopped what t-o occured */ 1266 tmr->stopped_from = tmr->type; 1267 1268 /* mark as being serviced now */ 1269 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 1270 /* 1271 * Callout has been rescheduled. 1272 */ 1273 goto get_out; 1274 } 1275 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1276 /* 1277 * Not active, so no action. 1278 */ 1279 goto get_out; 1280 } 1281 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer); 1282 1283 /* call the handler for the appropriate timer type */ 1284 switch (tmr->type) { 1285 case SCTP_TIMER_TYPE_ADDR_WQ: 1286 sctp_handle_addr_wq(); 1287 break; 1288 case SCTP_TIMER_TYPE_ITERATOR: 1289 SCTP_STAT_INCR(sctps_timoiterator); 1290 sctp_iterator_timer(it); 1291 break; 1292 case SCTP_TIMER_TYPE_SEND: 1293 SCTP_STAT_INCR(sctps_timodata); 1294 stcb->asoc.timodata++; 1295 stcb->asoc.num_send_timers_up--; 1296 if (stcb->asoc.num_send_timers_up < 0) { 1297 stcb->asoc.num_send_timers_up = 0; 1298 } 1299 if (sctp_t3rxt_timer(inp, stcb, net)) { 1300 /* no need to unlock on tcb its gone */ 1301 1302 goto out_decr; 1303 } 1304 #ifdef SCTP_AUDITING_ENABLED 1305 sctp_auditing(4, inp, stcb, net); 1306 #endif 1307 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3); 1308 if ((stcb->asoc.num_send_timers_up == 0) && 1309 (stcb->asoc.sent_queue_cnt > 0) 1310 ) { 1311 struct sctp_tmit_chunk *chk; 1312 1313 /* 1314 * safeguard. If there on some on the sent queue 1315 * somewhere but no timers running something is 1316 * wrong... so we start a timer on the first chunk 1317 * on the send queue on whatever net it is sent to. 
1318 */ 1319 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 1320 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, 1321 chk->whoTo); 1322 } 1323 break; 1324 case SCTP_TIMER_TYPE_INIT: 1325 SCTP_STAT_INCR(sctps_timoinit); 1326 stcb->asoc.timoinit++; 1327 if (sctp_t1init_timer(inp, stcb, net)) { 1328 /* no need to unlock on tcb its gone */ 1329 goto out_decr; 1330 } 1331 /* We do output but not here */ 1332 did_output = 0; 1333 break; 1334 case SCTP_TIMER_TYPE_RECV: 1335 SCTP_STAT_INCR(sctps_timosack); 1336 stcb->asoc.timosack++; 1337 sctp_send_sack(stcb); 1338 #ifdef SCTP_AUDITING_ENABLED 1339 sctp_auditing(4, inp, stcb, net); 1340 #endif 1341 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR); 1342 break; 1343 case SCTP_TIMER_TYPE_SHUTDOWN: 1344 if (sctp_shutdown_timer(inp, stcb, net)) { 1345 /* no need to unlock on tcb its gone */ 1346 goto out_decr; 1347 } 1348 SCTP_STAT_INCR(sctps_timoshutdown); 1349 stcb->asoc.timoshutdown++; 1350 #ifdef SCTP_AUDITING_ENABLED 1351 sctp_auditing(4, inp, stcb, net); 1352 #endif 1353 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR); 1354 break; 1355 case SCTP_TIMER_TYPE_HEARTBEAT: 1356 { 1357 struct sctp_nets *net; 1358 int cnt_of_unconf = 0; 1359 1360 SCTP_STAT_INCR(sctps_timoheartbeat); 1361 stcb->asoc.timoheartbeat++; 1362 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1363 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 1364 (net->dest_state & SCTP_ADDR_REACHABLE)) { 1365 cnt_of_unconf++; 1366 } 1367 } 1368 if (cnt_of_unconf == 0) { 1369 if (sctp_heartbeat_timer(inp, stcb, net, cnt_of_unconf)) { 1370 /* no need to unlock on tcb its gone */ 1371 goto out_decr; 1372 } 1373 } 1374 #ifdef SCTP_AUDITING_ENABLED 1375 sctp_auditing(4, inp, stcb, net); 1376 #endif 1377 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, 1378 stcb, net); 1379 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR); 1380 } 1381 break; 1382 case SCTP_TIMER_TYPE_COOKIE: 1383 if (sctp_cookie_timer(inp, stcb, net)) { 1384 /* no need to unlock on tcb its gone */ 1385 goto out_decr; 1386 } 1387 SCTP_STAT_INCR(sctps_timocookie); 1388 stcb->asoc.timocookie++; 1389 #ifdef SCTP_AUDITING_ENABLED 1390 sctp_auditing(4, inp, stcb, net); 1391 #endif 1392 /* 1393 * We consider T3 and Cookie timer pretty much the same with 1394 * respect to where from in chunk_output. 
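 * Both therefore pass SCTP_OUTPUT_FROM_T3 to sctp_chunk_output() below.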
1395 */ 1396 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3); 1397 break; 1398 case SCTP_TIMER_TYPE_NEWCOOKIE: 1399 { 1400 struct timeval tv; 1401 int i, secret; 1402 1403 SCTP_STAT_INCR(sctps_timosecret); 1404 SCTP_GETTIME_TIMEVAL(&tv); 1405 SCTP_INP_WLOCK(inp); 1406 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1407 inp->sctp_ep.last_secret_number = 1408 inp->sctp_ep.current_secret_number; 1409 inp->sctp_ep.current_secret_number++; 1410 if (inp->sctp_ep.current_secret_number >= 1411 SCTP_HOW_MANY_SECRETS) { 1412 inp->sctp_ep.current_secret_number = 0; 1413 } 1414 secret = (int)inp->sctp_ep.current_secret_number; 1415 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1416 inp->sctp_ep.secret_key[secret][i] = 1417 sctp_select_initial_TSN(&inp->sctp_ep); 1418 } 1419 SCTP_INP_WUNLOCK(inp); 1420 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net); 1421 } 1422 did_output = 0; 1423 break; 1424 case SCTP_TIMER_TYPE_PATHMTURAISE: 1425 SCTP_STAT_INCR(sctps_timopathmtu); 1426 sctp_pathmtu_timer(inp, stcb, net); 1427 did_output = 0; 1428 break; 1429 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1430 if (sctp_shutdownack_timer(inp, stcb, net)) { 1431 /* no need to unlock on tcb its gone */ 1432 goto out_decr; 1433 } 1434 SCTP_STAT_INCR(sctps_timoshutdownack); 1435 stcb->asoc.timoshutdownack++; 1436 #ifdef SCTP_AUDITING_ENABLED 1437 sctp_auditing(4, inp, stcb, net); 1438 #endif 1439 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR); 1440 break; 1441 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1442 SCTP_STAT_INCR(sctps_timoshutdownguard); 1443 sctp_abort_an_association(inp, stcb, 1444 SCTP_SHUTDOWN_GUARD_EXPIRES, NULL); 1445 /* no need to unlock on tcb its gone */ 1446 goto out_decr; 1447 break; 1448 1449 case SCTP_TIMER_TYPE_STRRESET: 1450 if (sctp_strreset_timer(inp, stcb, net)) { 1451 /* no need to unlock on tcb its gone */ 1452 goto out_decr; 1453 } 1454 SCTP_STAT_INCR(sctps_timostrmrst); 1455 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR); 1456 break; 1457 case SCTP_TIMER_TYPE_EARLYFR: 1458 /* Need to do FR of things for net */ 1459 SCTP_STAT_INCR(sctps_timoearlyfr); 1460 sctp_early_fr_timer(inp, stcb, net); 1461 break; 1462 case SCTP_TIMER_TYPE_ASCONF: 1463 if (sctp_asconf_timer(inp, stcb, net)) { 1464 /* no need to unlock on tcb its gone */ 1465 goto out_decr; 1466 } 1467 SCTP_STAT_INCR(sctps_timoasconf); 1468 #ifdef SCTP_AUDITING_ENABLED 1469 sctp_auditing(4, inp, stcb, net); 1470 #endif 1471 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR); 1472 break; 1473 1474 case SCTP_TIMER_TYPE_AUTOCLOSE: 1475 SCTP_STAT_INCR(sctps_timoautoclose); 1476 sctp_autoclose_timer(inp, stcb, net); 1477 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR); 1478 did_output = 0; 1479 break; 1480 case SCTP_TIMER_TYPE_ASOCKILL: 1481 SCTP_STAT_INCR(sctps_timoassockill); 1482 /* Can we free it yet? 
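 * Drop the inp reference taken at the top of this handler (we bypass
 * the out_decr label below), stop the ASOCKILL timer and free the
 * association.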
*/ 1483 SCTP_INP_DECR_REF(inp); 1484 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 1485 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 1486 /* 1487 * free asoc, always unlocks (or destroy's) so prevent 1488 * duplicate unlock or unlock of a free mtx :-0 1489 */ 1490 stcb = NULL; 1491 goto out_no_decr; 1492 break; 1493 case SCTP_TIMER_TYPE_INPKILL: 1494 SCTP_STAT_INCR(sctps_timoinpkill); 1495 /* 1496 * special case, take away our increment since WE are the 1497 * killer 1498 */ 1499 SCTP_INP_DECR_REF(inp); 1500 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 1501 sctp_inpcb_free(inp, 1, 0); 1502 goto out_no_decr; 1503 break; 1504 default: 1505 #ifdef SCTP_DEBUG 1506 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1507 printf("sctp_timeout_handler:unknown timer %d\n", 1508 tmr->type); 1509 } 1510 #endif /* SCTP_DEBUG */ 1511 break; 1512 }; 1513 #ifdef SCTP_AUDITING_ENABLED 1514 sctp_audit_log(0xF1, (uint8_t) tmr->type); 1515 if (inp) 1516 sctp_auditing(5, inp, stcb, net); 1517 #endif 1518 if ((did_output) && stcb) { 1519 /* 1520 * Now we need to clean up the control chunk chain if an 1521 * ECNE is on it. It must be marked as UNSENT again so next 1522 * call will continue to send it until such time that we get 1523 * a CWR, to remove it. It is, however, less likely that we 1524 * will find a ecn echo on the chain though. 1525 */ 1526 sctp_fix_ecn_echo(&stcb->asoc); 1527 } 1528 get_out: 1529 if (stcb) { 1530 SCTP_TCB_UNLOCK(stcb); 1531 } 1532 out_decr: 1533 if (inp) { 1534 SCTP_INP_DECR_REF(inp); 1535 } 1536 out_no_decr: 1537 1538 #ifdef SCTP_DEBUG 1539 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1540 printf("Timer now complete (type %d)\n", tmr->type); 1541 } 1542 #endif /* SCTP_DEBUG */ 1543 if (inp) { 1544 } 1545 } 1546 1547 int 1548 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1549 struct sctp_nets *net) 1550 { 1551 int to_ticks; 1552 struct sctp_timer *tmr; 1553 1554 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) 1555 return (EFAULT); 1556 1557 to_ticks = 0; 1558 1559 tmr = NULL; 1560 if (stcb) { 1561 SCTP_TCB_LOCK_ASSERT(stcb); 1562 } 1563 switch (t_type) { 1564 case SCTP_TIMER_TYPE_ADDR_WQ: 1565 /* Only 1 tick away :-) */ 1566 tmr = &sctppcbinfo.addr_wq_timer; 1567 to_ticks = 1; 1568 break; 1569 case SCTP_TIMER_TYPE_ITERATOR: 1570 { 1571 struct sctp_iterator *it; 1572 1573 it = (struct sctp_iterator *)inp; 1574 tmr = &it->tmr; 1575 to_ticks = SCTP_ITERATOR_TICKS; 1576 } 1577 break; 1578 case SCTP_TIMER_TYPE_SEND: 1579 /* Here we use the RTO timer */ 1580 { 1581 int rto_val; 1582 1583 if ((stcb == NULL) || (net == NULL)) { 1584 return (EFAULT); 1585 } 1586 tmr = &net->rxt_timer; 1587 if (net->RTO == 0) { 1588 rto_val = stcb->asoc.initial_rto; 1589 } else { 1590 rto_val = net->RTO; 1591 } 1592 to_ticks = MSEC_TO_TICKS(rto_val); 1593 } 1594 break; 1595 case SCTP_TIMER_TYPE_INIT: 1596 /* 1597 * Here we use the INIT timer default usually about 1 1598 * minute. 1599 */ 1600 if ((stcb == NULL) || (net == NULL)) { 1601 return (EFAULT); 1602 } 1603 tmr = &net->rxt_timer; 1604 if (net->RTO == 0) { 1605 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1606 } else { 1607 to_ticks = MSEC_TO_TICKS(net->RTO); 1608 } 1609 break; 1610 case SCTP_TIMER_TYPE_RECV: 1611 /* 1612 * Here we use the Delayed-Ack timer value from the inp 1613 * ususually about 200ms. 
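 * The value lives in stcb->asoc.delayed_ack, which sctp_init_asoc()
 * seeds from the endpoint's SCTP_TIMER_RECV tick setting.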
1614 */ 1615 if (stcb == NULL) { 1616 return (EFAULT); 1617 } 1618 tmr = &stcb->asoc.dack_timer; 1619 to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack); 1620 break; 1621 case SCTP_TIMER_TYPE_SHUTDOWN: 1622 /* Here we use the RTO of the destination. */ 1623 if ((stcb == NULL) || (net == NULL)) { 1624 return (EFAULT); 1625 } 1626 if (net->RTO == 0) { 1627 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1628 } else { 1629 to_ticks = MSEC_TO_TICKS(net->RTO); 1630 } 1631 tmr = &net->rxt_timer; 1632 break; 1633 case SCTP_TIMER_TYPE_HEARTBEAT: 1634 /* 1635 * the net is used here so that we can add in the RTO. Even 1636 * though we use a different timer. We also add the HB timer 1637 * PLUS a random jitter. 1638 */ 1639 if (stcb == NULL) { 1640 return (EFAULT); 1641 } { 1642 uint32_t rndval; 1643 uint8_t this_random; 1644 int cnt_of_unconf = 0; 1645 struct sctp_nets *lnet; 1646 1647 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 1648 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) && 1649 (lnet->dest_state & SCTP_ADDR_REACHABLE)) { 1650 cnt_of_unconf++; 1651 } 1652 } 1653 if (cnt_of_unconf) { 1654 lnet = NULL; 1655 sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf); 1656 } 1657 if (stcb->asoc.hb_random_idx > 3) { 1658 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 1659 memcpy(stcb->asoc.hb_random_values, &rndval, 1660 sizeof(stcb->asoc.hb_random_values)); 1661 this_random = stcb->asoc.hb_random_values[0]; 1662 stcb->asoc.hb_random_idx = 0; 1663 stcb->asoc.hb_ect_randombit = 0; 1664 } else { 1665 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 1666 stcb->asoc.hb_random_idx++; 1667 stcb->asoc.hb_ect_randombit = 0; 1668 } 1669 /* 1670 * this_random will be 0 - 256 ms RTO is in ms. 1671 */ 1672 if ((stcb->asoc.hb_is_disabled) && 1673 (cnt_of_unconf == 0)) { 1674 return (0); 1675 } 1676 if (net) { 1677 struct sctp_nets *lnet; 1678 int delay; 1679 1680 delay = stcb->asoc.heart_beat_delay; 1681 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 1682 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) && 1683 ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) && 1684 (lnet->dest_state & SCTP_ADDR_REACHABLE)) { 1685 delay = 0; 1686 } 1687 } 1688 if (net->RTO == 0) { 1689 /* Never been checked */ 1690 to_ticks = this_random + stcb->asoc.initial_rto + delay; 1691 } else { 1692 /* set rto_val to the ms */ 1693 to_ticks = delay + net->RTO + this_random; 1694 } 1695 } else { 1696 if (cnt_of_unconf) { 1697 to_ticks = this_random + stcb->asoc.initial_rto; 1698 } else { 1699 to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto; 1700 } 1701 } 1702 /* 1703 * Now we must convert the to_ticks that are now in 1704 * ms to ticks. 1705 */ 1706 to_ticks = MSEC_TO_TICKS(to_ticks); 1707 tmr = &stcb->asoc.hb_timer; 1708 } 1709 break; 1710 case SCTP_TIMER_TYPE_COOKIE: 1711 /* 1712 * Here we can use the RTO timer from the network since one 1713 * RTT was compelete. If a retran happened then we will be 1714 * using the RTO initial value. 1715 */ 1716 if ((stcb == NULL) || (net == NULL)) { 1717 return (EFAULT); 1718 } 1719 if (net->RTO == 0) { 1720 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1721 } else { 1722 to_ticks = MSEC_TO_TICKS(net->RTO); 1723 } 1724 tmr = &net->rxt_timer; 1725 break; 1726 case SCTP_TIMER_TYPE_NEWCOOKIE: 1727 /* 1728 * nothing needed but the endpoint here ususually about 60 1729 * minutes. 
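 * When it expires, the SCTP_TIMER_TYPE_NEWCOOKIE case in
 * sctp_timeout_handler() above rotates inp->sctp_ep.current_secret_number
 * and regenerates the corresponding secret key.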
1730 */ 1731 tmr = &inp->sctp_ep.signature_change; 1732 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 1733 break; 1734 case SCTP_TIMER_TYPE_ASOCKILL: 1735 if (stcb == NULL) { 1736 return (EFAULT); 1737 } 1738 tmr = &stcb->asoc.strreset_timer; 1739 to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT); 1740 break; 1741 case SCTP_TIMER_TYPE_INPKILL: 1742 /* 1743 * The inp is setup to die. We re-use the signature_chage 1744 * timer since that has stopped and we are in the GONE 1745 * state. 1746 */ 1747 tmr = &inp->sctp_ep.signature_change; 1748 to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT); 1749 break; 1750 case SCTP_TIMER_TYPE_PATHMTURAISE: 1751 /* 1752 * Here we use the value found in the EP for PMTU ususually 1753 * about 10 minutes. 1754 */ 1755 if (stcb == NULL) { 1756 return (EFAULT); 1757 } 1758 if (net == NULL) { 1759 return (EFAULT); 1760 } 1761 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 1762 tmr = &net->pmtu_timer; 1763 break; 1764 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1765 /* Here we use the RTO of the destination */ 1766 if ((stcb == NULL) || (net == NULL)) { 1767 return (EFAULT); 1768 } 1769 if (net->RTO == 0) { 1770 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1771 } else { 1772 to_ticks = MSEC_TO_TICKS(net->RTO); 1773 } 1774 tmr = &net->rxt_timer; 1775 break; 1776 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1777 /* 1778 * Here we use the endpoints shutdown guard timer usually 1779 * about 3 minutes. 1780 */ 1781 if (stcb == NULL) { 1782 return (EFAULT); 1783 } 1784 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 1785 tmr = &stcb->asoc.shut_guard_timer; 1786 break; 1787 case SCTP_TIMER_TYPE_STRRESET: 1788 /* 1789 * Here the timer comes from the inp but its value is from 1790 * the RTO. 1791 */ 1792 if ((stcb == NULL) || (net == NULL)) { 1793 return (EFAULT); 1794 } 1795 if (net->RTO == 0) { 1796 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1797 } else { 1798 to_ticks = MSEC_TO_TICKS(net->RTO); 1799 } 1800 tmr = &stcb->asoc.strreset_timer; 1801 break; 1802 1803 case SCTP_TIMER_TYPE_EARLYFR: 1804 { 1805 unsigned int msec; 1806 1807 if ((stcb == NULL) || (net == NULL)) { 1808 return (EFAULT); 1809 } 1810 if (net->flight_size > net->cwnd) { 1811 /* no need to start */ 1812 return (0); 1813 } 1814 SCTP_STAT_INCR(sctps_earlyfrstart); 1815 if (net->lastsa == 0) { 1816 /* Hmm no rtt estimate yet? */ 1817 msec = stcb->asoc.initial_rto >> 2; 1818 } else { 1819 msec = ((net->lastsa >> 2) + net->lastsv) >> 1; 1820 } 1821 if (msec < sctp_early_fr_msec) { 1822 msec = sctp_early_fr_msec; 1823 if (msec < SCTP_MINFR_MSEC_FLOOR) { 1824 msec = SCTP_MINFR_MSEC_FLOOR; 1825 } 1826 } 1827 to_ticks = MSEC_TO_TICKS(msec); 1828 tmr = &net->fr_timer; 1829 } 1830 break; 1831 case SCTP_TIMER_TYPE_ASCONF: 1832 /* 1833 * Here the timer comes from the inp but its value is from 1834 * the RTO. 
1835 */ 1836 if ((stcb == NULL) || (net == NULL)) { 1837 return (EFAULT); 1838 } 1839 if (net->RTO == 0) { 1840 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1841 } else { 1842 to_ticks = MSEC_TO_TICKS(net->RTO); 1843 } 1844 tmr = &stcb->asoc.asconf_timer; 1845 break; 1846 case SCTP_TIMER_TYPE_AUTOCLOSE: 1847 if (stcb == NULL) { 1848 return (EFAULT); 1849 } 1850 if (stcb->asoc.sctp_autoclose_ticks == 0) { 1851 /* 1852 * Really an error since stcb is NOT set to 1853 * autoclose 1854 */ 1855 return (0); 1856 } 1857 to_ticks = stcb->asoc.sctp_autoclose_ticks; 1858 tmr = &stcb->asoc.autoclose_timer; 1859 break; 1860 default: 1861 #ifdef SCTP_DEBUG 1862 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1863 printf("sctp_timer_start:Unknown timer type %d\n", 1864 t_type); 1865 } 1866 #endif /* SCTP_DEBUG */ 1867 return (EFAULT); 1868 break; 1869 }; 1870 if ((to_ticks <= 0) || (tmr == NULL)) { 1871 #ifdef SCTP_DEBUG 1872 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1873 printf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n", 1874 t_type, to_ticks, tmr); 1875 } 1876 #endif /* SCTP_DEBUG */ 1877 return (EFAULT); 1878 } 1879 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 1880 /* 1881 * we do NOT allow you to have it already running. if it is 1882 * we leave the current one up unchanged 1883 */ 1884 return (EALREADY); 1885 } 1886 /* At this point we can proceed */ 1887 if (t_type == SCTP_TIMER_TYPE_SEND) { 1888 stcb->asoc.num_send_timers_up++; 1889 } 1890 tmr->stopped_from = 0; 1891 tmr->type = t_type; 1892 tmr->ep = (void *)inp; 1893 tmr->tcb = (void *)stcb; 1894 tmr->net = (void *)net; 1895 tmr->self = (void *)tmr; 1896 tmr->ticks = ticks; 1897 SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr); 1898 return (0); 1899 } 1900 1901 int 1902 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1903 struct sctp_nets *net, uint32_t from) 1904 { 1905 struct sctp_timer *tmr; 1906 1907 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && 1908 (inp == NULL)) 1909 return (EFAULT); 1910 1911 tmr = NULL; 1912 if (stcb) { 1913 SCTP_TCB_LOCK_ASSERT(stcb); 1914 } 1915 switch (t_type) { 1916 case SCTP_TIMER_TYPE_ADDR_WQ: 1917 tmr = &sctppcbinfo.addr_wq_timer; 1918 break; 1919 case SCTP_TIMER_TYPE_EARLYFR: 1920 if ((stcb == NULL) || (net == NULL)) { 1921 return (EFAULT); 1922 } 1923 tmr = &net->fr_timer; 1924 SCTP_STAT_INCR(sctps_earlyfrstop); 1925 break; 1926 case SCTP_TIMER_TYPE_ITERATOR: 1927 { 1928 struct sctp_iterator *it; 1929 1930 it = (struct sctp_iterator *)inp; 1931 tmr = &it->tmr; 1932 } 1933 break; 1934 case SCTP_TIMER_TYPE_SEND: 1935 if ((stcb == NULL) || (net == NULL)) { 1936 return (EFAULT); 1937 } 1938 tmr = &net->rxt_timer; 1939 break; 1940 case SCTP_TIMER_TYPE_INIT: 1941 if ((stcb == NULL) || (net == NULL)) { 1942 return (EFAULT); 1943 } 1944 tmr = &net->rxt_timer; 1945 break; 1946 case SCTP_TIMER_TYPE_RECV: 1947 if (stcb == NULL) { 1948 return (EFAULT); 1949 } 1950 tmr = &stcb->asoc.dack_timer; 1951 break; 1952 case SCTP_TIMER_TYPE_SHUTDOWN: 1953 if ((stcb == NULL) || (net == NULL)) { 1954 return (EFAULT); 1955 } 1956 tmr = &net->rxt_timer; 1957 break; 1958 case SCTP_TIMER_TYPE_HEARTBEAT: 1959 if (stcb == NULL) { 1960 return (EFAULT); 1961 } 1962 tmr = &stcb->asoc.hb_timer; 1963 break; 1964 case SCTP_TIMER_TYPE_COOKIE: 1965 if ((stcb == NULL) || (net == NULL)) { 1966 return (EFAULT); 1967 } 1968 tmr = &net->rxt_timer; 1969 break; 1970 case SCTP_TIMER_TYPE_NEWCOOKIE: 1971 /* nothing needed but the endpoint here */ 1972 tmr = &inp->sctp_ep.signature_change; 1973 
/* 1974 * We re-use the newcookie timer for the INP kill timer. We 1975 * must assure that we do not kill it by accident. 1976 */ 1977 break; 1978 case SCTP_TIMER_TYPE_ASOCKILL: 1979 /* 1980 * Stop the asoc kill timer. 1981 */ 1982 if (stcb == NULL) { 1983 return (EFAULT); 1984 } 1985 tmr = &stcb->asoc.strreset_timer; 1986 break; 1987 1988 case SCTP_TIMER_TYPE_INPKILL: 1989 /* 1990 * The inp is setup to die. We re-use the signature_chage 1991 * timer since that has stopped and we are in the GONE 1992 * state. 1993 */ 1994 tmr = &inp->sctp_ep.signature_change; 1995 break; 1996 case SCTP_TIMER_TYPE_PATHMTURAISE: 1997 if ((stcb == NULL) || (net == NULL)) { 1998 return (EFAULT); 1999 } 2000 tmr = &net->pmtu_timer; 2001 break; 2002 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2003 if ((stcb == NULL) || (net == NULL)) { 2004 return (EFAULT); 2005 } 2006 tmr = &net->rxt_timer; 2007 break; 2008 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2009 if (stcb == NULL) { 2010 return (EFAULT); 2011 } 2012 tmr = &stcb->asoc.shut_guard_timer; 2013 break; 2014 case SCTP_TIMER_TYPE_STRRESET: 2015 if (stcb == NULL) { 2016 return (EFAULT); 2017 } 2018 tmr = &stcb->asoc.strreset_timer; 2019 break; 2020 case SCTP_TIMER_TYPE_ASCONF: 2021 if (stcb == NULL) { 2022 return (EFAULT); 2023 } 2024 tmr = &stcb->asoc.asconf_timer; 2025 break; 2026 case SCTP_TIMER_TYPE_AUTOCLOSE: 2027 if (stcb == NULL) { 2028 return (EFAULT); 2029 } 2030 tmr = &stcb->asoc.autoclose_timer; 2031 break; 2032 default: 2033 #ifdef SCTP_DEBUG 2034 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 2035 printf("sctp_timer_stop:Unknown timer type %d\n", 2036 t_type); 2037 } 2038 #endif /* SCTP_DEBUG */ 2039 break; 2040 }; 2041 if (tmr == NULL) { 2042 return (EFAULT); 2043 } 2044 if ((tmr->type != t_type) && tmr->type) { 2045 /* 2046 * Ok we have a timer that is under joint use. Cookie timer 2047 * per chance with the SEND timer. We therefore are NOT 2048 * running the timer that the caller wants stopped. So just 2049 * return. 2050 */ 2051 return (0); 2052 } 2053 if (t_type == SCTP_TIMER_TYPE_SEND) { 2054 stcb->asoc.num_send_timers_up--; 2055 if (stcb->asoc.num_send_timers_up < 0) { 2056 stcb->asoc.num_send_timers_up = 0; 2057 } 2058 } 2059 tmr->self = NULL; 2060 tmr->stopped_from = from; 2061 SCTP_OS_TIMER_STOP(&tmr->timer); 2062 return (0); 2063 } 2064 2065 #ifdef SCTP_USE_ADLER32 2066 static uint32_t 2067 update_adler32(uint32_t adler, uint8_t * buf, int32_t len) 2068 { 2069 uint32_t s1 = adler & 0xffff; 2070 uint32_t s2 = (adler >> 16) & 0xffff; 2071 int n; 2072 2073 for (n = 0; n < len; n++, buf++) { 2074 /* s1 = (s1 + buf[n]) % BASE */ 2075 /* first we add */ 2076 s1 = (s1 + *buf); 2077 /* 2078 * now if we need to, we do a mod by subtracting. It seems a 2079 * bit faster since I really will only ever do one subtract 2080 * at the MOST, since buf[n] is a max of 255. 2081 */ 2082 if (s1 >= SCTP_ADLER32_BASE) { 2083 s1 -= SCTP_ADLER32_BASE; 2084 } 2085 /* s2 = (s2 + s1) % BASE */ 2086 /* first we add */ 2087 s2 = (s2 + s1); 2088 /* 2089 * again, it is more efficent (it seems) to subtract since 2090 * the most s2 will ever be is (BASE-1 + BASE-1) in the 2091 * worse case. This would then be (2 * BASE) - 2, which will 2092 * still only do one subtract. On Intel this is much better 2093 * to do this way and avoid the divide. Have not -pg'd on 2094 * sparc. 
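	 * (With the Adler-32 modulus of 65521, s1 is already reduced below
	 * the modulus at this point, so s2 + s1 <= 2 * 65520 = 131040 and
	 * one conditional subtraction is indeed always sufficient.)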
2095 */ 2096 if (s2 >= SCTP_ADLER32_BASE) { 2097 s2 -= SCTP_ADLER32_BASE; 2098 } 2099 } 2100 /* Return the adler32 of the bytes buf[0..len-1] */ 2101 return ((s2 << 16) + s1); 2102 } 2103 2104 #endif 2105 2106 2107 uint32_t 2108 sctp_calculate_len(struct mbuf *m) 2109 { 2110 uint32_t tlen = 0; 2111 struct mbuf *at; 2112 2113 at = m; 2114 while (at) { 2115 tlen += SCTP_BUF_LEN(at); 2116 at = SCTP_BUF_NEXT(at); 2117 } 2118 return (tlen); 2119 } 2120 2121 #if defined(SCTP_WITH_NO_CSUM) 2122 2123 uint32_t 2124 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset) 2125 { 2126 /* 2127 * given a mbuf chain with a packetheader offset by 'offset' 2128 * pointing at a sctphdr (with csum set to 0) go through the chain 2129 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is 2130 * currently Adler32 but will change to CRC32x soon. Also has a side 2131 * bonus calculate the total length of the mbuf chain. Note: if 2132 * offset is greater than the total mbuf length, checksum=1, 2133 * pktlen=0 is returned (ie. no real error code) 2134 */ 2135 if (pktlen == NULL) 2136 return (0); 2137 *pktlen = sctp_calculate_len(m); 2138 return (0); 2139 } 2140 2141 #elif defined(SCTP_USE_INCHKSUM) 2142 2143 #include <machine/in_cksum.h> 2144 2145 uint32_t 2146 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset) 2147 { 2148 /* 2149 * given a mbuf chain with a packetheader offset by 'offset' 2150 * pointing at a sctphdr (with csum set to 0) go through the chain 2151 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is 2152 * currently Adler32 but will change to CRC32x soon. Also has a side 2153 * bonus calculate the total length of the mbuf chain. Note: if 2154 * offset is greater than the total mbuf length, checksum=1, 2155 * pktlen=0 is returned (ie. no real error code) 2156 */ 2157 int32_t tlen = 0; 2158 struct mbuf *at; 2159 uint32_t the_sum, retsum; 2160 2161 at = m; 2162 while (at) { 2163 tlen += SCTP_BUF_LEN(at); 2164 at = SCTP_BUF_NEXT(at); 2165 } 2166 the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset)); 2167 if (pktlen != NULL) 2168 *pktlen = (tlen - offset); 2169 retsum = htons(the_sum); 2170 return (the_sum); 2171 } 2172 2173 #else 2174 2175 uint32_t 2176 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset) 2177 { 2178 /* 2179 * given a mbuf chain with a packetheader offset by 'offset' 2180 * pointing at a sctphdr (with csum set to 0) go through the chain 2181 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is 2182 * currently Adler32 but will change to CRC32x soon. Also has a side 2183 * bonus calculate the total length of the mbuf chain. Note: if 2184 * offset is greater than the total mbuf length, checksum=1, 2185 * pktlen=0 is returned (ie. 
no real error code) 2186 */ 2187 int32_t tlen = 0; 2188 2189 #ifdef SCTP_USE_ADLER32 2190 uint32_t base = 1L; 2191 2192 #else 2193 uint32_t base = 0xffffffff; 2194 2195 #endif /* SCTP_USE_ADLER32 */ 2196 struct mbuf *at; 2197 2198 at = m; 2199 /* find the correct mbuf and offset into mbuf */ 2200 while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) { 2201 offset -= SCTP_BUF_LEN(at); /* update remaining offset 2202 * left */ 2203 at = SCTP_BUF_NEXT(at); 2204 } 2205 while (at != NULL) { 2206 if ((SCTP_BUF_LEN(at) - offset) > 0) { 2207 #ifdef SCTP_USE_ADLER32 2208 base = update_adler32(base, 2209 (unsigned char *)(SCTP_BUF_AT(at, offset)), 2210 (unsigned int)(SCTP_BUF_LEN(at) - offset)); 2211 #else 2212 if ((SCTP_BUF_LEN(at) - offset) < 4) { 2213 /* Use old method if less than 4 bytes */ 2214 base = old_update_crc32(base, 2215 (unsigned char *)(SCTP_BUF_AT(at, offset)), 2216 (unsigned int)(SCTP_BUF_LEN(at) - offset)); 2217 } else { 2218 base = update_crc32(base, 2219 (unsigned char *)(SCTP_BUF_AT(at, offset)), 2220 (unsigned int)(SCTP_BUF_LEN(at) - offset)); 2221 } 2222 #endif /* SCTP_USE_ADLER32 */ 2223 tlen += SCTP_BUF_LEN(at) - offset; 2224 /* we only offset once into the first mbuf */ 2225 } 2226 if (offset) { 2227 if (offset < SCTP_BUF_LEN(at)) 2228 offset = 0; 2229 else 2230 offset -= SCTP_BUF_LEN(at); 2231 } 2232 at = SCTP_BUF_NEXT(at); 2233 } 2234 if (pktlen != NULL) { 2235 *pktlen = tlen; 2236 } 2237 #ifdef SCTP_USE_ADLER32 2238 /* Adler32 */ 2239 base = htonl(base); 2240 #else 2241 /* CRC-32c */ 2242 base = sctp_csum_finalize(base); 2243 #endif 2244 return (base); 2245 } 2246 2247 2248 #endif 2249 2250 void 2251 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2252 struct sctp_association *asoc, uint32_t mtu) 2253 { 2254 /* 2255 * Reset the P-MTU size on this association, this involves changing 2256 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2257 * allow the DF flag to be cleared. 2258 */ 2259 struct sctp_tmit_chunk *chk; 2260 unsigned int eff_mtu, ovh; 2261 2262 asoc->smallest_mtu = mtu; 2263 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2264 ovh = SCTP_MIN_OVERHEAD; 2265 } else { 2266 ovh = SCTP_MIN_V4_OVERHEAD; 2267 } 2268 eff_mtu = mtu - ovh; 2269 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2270 2271 if (chk->send_size > eff_mtu) { 2272 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2273 } 2274 } 2275 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2276 if (chk->send_size > eff_mtu) { 2277 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2278 } 2279 } 2280 } 2281 2282 2283 /* 2284 * given an association and starting time of the current RTT period return 2285 * RTO in number of msecs net should point to the current network 2286 */ 2287 uint32_t 2288 sctp_calculate_rto(struct sctp_tcb *stcb, 2289 struct sctp_association *asoc, 2290 struct sctp_nets *net, 2291 struct timeval *old) 2292 { 2293 /* 2294 * given an association and the starting time of the current RTT 2295 * period (in value1/value2) return RTO in number of msecs. 2296 */ 2297 int calc_time = 0; 2298 int o_calctime; 2299 unsigned int new_rto = 0; 2300 int first_measure = 0; 2301 struct timeval now; 2302 2303 /************************/ 2304 /* 1. 
calculate new RTT */ 2305 /************************/ 2306 /* get the current time */ 2307 SCTP_GETTIME_TIMEVAL(&now); 2308 /* compute the RTT value */ 2309 if ((u_long)now.tv_sec > (u_long)old->tv_sec) { 2310 calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000; 2311 if ((u_long)now.tv_usec > (u_long)old->tv_usec) { 2312 calc_time += (((u_long)now.tv_usec - 2313 (u_long)old->tv_usec) / 1000); 2314 } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) { 2315 /* Borrow 1,000ms from current calculation */ 2316 calc_time -= 1000; 2317 /* Add in the slop over */ 2318 calc_time += ((int)now.tv_usec / 1000); 2319 /* Add in the pre-second ms's */ 2320 calc_time += (((int)1000000 - (int)old->tv_usec) / 1000); 2321 } 2322 } else if ((u_long)now.tv_sec == (u_long)old->tv_sec) { 2323 if ((u_long)now.tv_usec > (u_long)old->tv_usec) { 2324 calc_time = ((u_long)now.tv_usec - 2325 (u_long)old->tv_usec) / 1000; 2326 } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) { 2327 /* impossible .. garbage in nothing out */ 2328 return (((net->lastsa >> 2) + net->lastsv) >> 1); 2329 } else if ((u_long)now.tv_usec == (u_long)old->tv_usec) { 2330 /* 2331 * We have to have 1 usec :-D this must be the 2332 * loopback. 2333 */ 2334 calc_time = 1; 2335 } else { 2336 /* impossible .. garbage in nothing out */ 2337 return (((net->lastsa >> 2) + net->lastsv) >> 1); 2338 } 2339 } else { 2340 /* Clock wrapped? */ 2341 return (((net->lastsa >> 2) + net->lastsv) >> 1); 2342 } 2343 /***************************/ 2344 /* 2. update RTTVAR & SRTT */ 2345 /***************************/ 2346 o_calctime = calc_time; 2347 /* this is Van Jacobson's integer version */ 2348 if (net->RTO) { 2349 calc_time -= (net->lastsa >> 3); 2350 if ((int)net->prev_rtt > o_calctime) { 2351 net->rtt_variance = net->prev_rtt - o_calctime; 2352 /* decreasing */ 2353 net->rto_variance_dir = 0; 2354 } else { 2355 /* increasing */ 2356 net->rtt_variance = o_calctime - net->prev_rtt; 2357 net->rto_variance_dir = 1; 2358 } 2359 #ifdef SCTP_RTTVAR_LOGGING 2360 rto_logging(net, SCTP_LOG_RTTVAR); 2361 #endif 2362 net->prev_rtt = o_calctime; 2363 net->lastsa += calc_time; 2364 if (calc_time < 0) { 2365 calc_time = -calc_time; 2366 } 2367 calc_time -= (net->lastsv >> 2); 2368 net->lastsv += calc_time; 2369 if (net->lastsv == 0) { 2370 net->lastsv = SCTP_CLOCK_GRANULARITY; 2371 } 2372 } else { 2373 /* First RTO measurment */ 2374 net->lastsa = calc_time; 2375 net->lastsv = calc_time >> 1; 2376 first_measure = 1; 2377 net->rto_variance_dir = 1; 2378 net->prev_rtt = o_calctime; 2379 net->rtt_variance = 0; 2380 #ifdef SCTP_RTTVAR_LOGGING 2381 rto_logging(net, SCTP_LOG_INITIAL_RTT); 2382 #endif 2383 } 2384 new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1; 2385 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 2386 (stcb->asoc.sat_network_lockout == 0)) { 2387 stcb->asoc.sat_network = 1; 2388 } else if ((!first_measure) && stcb->asoc.sat_network) { 2389 stcb->asoc.sat_network = 0; 2390 stcb->asoc.sat_network_lockout = 1; 2391 } 2392 /* bound it, per C6/C7 in Section 5.3.1 */ 2393 if (new_rto < stcb->asoc.minrto) { 2394 new_rto = stcb->asoc.minrto; 2395 } 2396 if (new_rto > stcb->asoc.maxrto) { 2397 new_rto = stcb->asoc.maxrto; 2398 } 2399 /* we are now returning the RTT Smoothed */ 2400 return ((uint32_t) new_rto); 2401 } 2402 2403 /* 2404 * return a pointer to a contiguous piece of data from the given mbuf chain 2405 * starting at 'off' for 'len' bytes. If the desired piece spans more than 2406 * one mbuf, a copy is made at 'ptr'. 
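 * (Callers in this file typically hand in a small on-stack scratch area,
 * e.g. the 'chunk_buf' used by sctp_handle_ootb() and
 * sctp_is_there_an_abort_here() below.)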
 * The caller must ensure that the buffer size is >= 'len'. Returns NULL
 * if there aren't 'len' bytes in the chain.
 */
__inline caddr_t
sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
{
	uint32_t count;
	uint8_t *ptr;

	ptr = in_ptr;
	if ((off < 0) || (len <= 0))
		return (NULL);

	/* find the desired start location */
	while ((m != NULL) && (off > 0)) {
		if (off < SCTP_BUF_LEN(m))
			break;
		off -= SCTP_BUF_LEN(m);
		m = SCTP_BUF_NEXT(m);
	}
	if (m == NULL)
		return (NULL);

	/* is the current mbuf large enough (i.e. contiguous)? */
	if ((SCTP_BUF_LEN(m) - off) >= len) {
		return (mtod(m, caddr_t)+off);
	} else {
		/* else, it spans more than one mbuf, so save a temp copy... */
		while ((m != NULL) && (len > 0)) {
			count = min(SCTP_BUF_LEN(m) - off, len);
			bcopy(mtod(m, caddr_t)+off, ptr, count);
			len -= count;
			ptr += count;
			off = 0;
			m = SCTP_BUF_NEXT(m);
		}
		if ((m == NULL) && (len > 0))
			return (NULL);
		else
			return ((caddr_t)in_ptr);
	}
}


struct sctp_paramhdr *
sctp_get_next_param(struct mbuf *m,
    int offset,
    struct sctp_paramhdr *pull,
    int pull_limit)
{
	/* This just provides a typed signature to Peter's Pull routine */
	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
	    (uint8_t *) pull));
}


int
sctp_add_pad_tombuf(struct mbuf *m, int padlen)
{
	/*
	 * Add padlen bytes of 0-filled padding to the end of the mbuf. If
	 * padlen is > 3 this routine will fail.
	 */
	uint8_t *dp;
	int i;

	if (padlen > 3) {
		return (ENOBUFS);
	}
	if (M_TRAILINGSPACE(m)) {
		/*
		 * The easy way. We hope the majority of the time we hit
		 * here :)
		 */
		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
		SCTP_BUF_LEN(m) += padlen;
	} else {
		/* Hard way, we must grow the mbuf chain */
		struct mbuf *tmp;

		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
		if (tmp == NULL) {
			/* Out of space GAK! we are in big trouble. */
			return (ENOSPC);
		}
		/* setup and insert in middle */
		SCTP_BUF_NEXT(tmp) = SCTP_BUF_NEXT(m);
		SCTP_BUF_LEN(tmp) = padlen;
		SCTP_BUF_NEXT(m) = tmp;
		dp = mtod(tmp, uint8_t *);
	}
	/* zero out the pad */
	for (i = 0; i < padlen; i++) {
		*dp = 0;
		dp++;
	}
	return (0);
}

int
sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
{
	/* find the last mbuf in the chain and pad it */
	struct mbuf *m_at;

	m_at = m;
	if (last_mbuf) {
		return (sctp_add_pad_tombuf(last_mbuf, padval));
	} else {
		while (m_at) {
			if (SCTP_BUF_NEXT(m_at) == NULL) {
				return (sctp_add_pad_tombuf(m_at, padval));
			}
			m_at = SCTP_BUF_NEXT(m_at);
		}
	}
	return (EFAULT);
}

int sctp_asoc_change_wake = 0;

static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

	/*
	 * First, if we are going down, dump everything we can to the
	 * socket rcv queue.
2539 */ 2540 2541 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 2542 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 2543 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) 2544 ) { 2545 /* If the socket is gone we are out of here */ 2546 return; 2547 } 2548 /* 2549 * For TCP model AND UDP connected sockets we will send an error up 2550 * when an ABORT comes in. 2551 */ 2552 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2553 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2554 (event == SCTP_COMM_LOST)) { 2555 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) 2556 stcb->sctp_socket->so_error = ECONNREFUSED; 2557 else 2558 stcb->sctp_socket->so_error = ECONNRESET; 2559 /* Wake ANY sleepers */ 2560 sorwakeup(stcb->sctp_socket); 2561 sowwakeup(stcb->sctp_socket); 2562 sctp_asoc_change_wake++; 2563 } 2564 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 2565 /* event not enabled */ 2566 return; 2567 } 2568 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA); 2569 if (m_notify == NULL) 2570 /* no space left */ 2571 return; 2572 SCTP_BUF_LEN(m_notify) = 0; 2573 2574 sac = mtod(m_notify, struct sctp_assoc_change *); 2575 sac->sac_type = SCTP_ASSOC_CHANGE; 2576 sac->sac_flags = 0; 2577 sac->sac_length = sizeof(struct sctp_assoc_change); 2578 sac->sac_state = event; 2579 sac->sac_error = error; 2580 /* XXX verify these stream counts */ 2581 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 2582 sac->sac_inbound_streams = stcb->asoc.streamincnt; 2583 sac->sac_assoc_id = sctp_get_associd(stcb); 2584 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change); 2585 SCTP_BUF_NEXT(m_notify) = NULL; 2586 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2587 0, 0, 0, 0, 0, 0, 2588 m_notify); 2589 if (control == NULL) { 2590 /* no memory */ 2591 sctp_m_freem(m_notify); 2592 return; 2593 } 2594 control->length = SCTP_BUF_LEN(m_notify); 2595 /* not that we need this */ 2596 control->tail_mbuf = m_notify; 2597 control->spec_flags = M_NOTIFICATION; 2598 sctp_add_to_readq(stcb->sctp_ep, stcb, 2599 control, 2600 &stcb->sctp_socket->so_rcv, 1); 2601 if (event == SCTP_COMM_LOST) { 2602 /* Wake up any sleeper */ 2603 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 2604 } 2605 } 2606 2607 static void 2608 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 2609 struct sockaddr *sa, uint32_t error) 2610 { 2611 struct mbuf *m_notify; 2612 struct sctp_paddr_change *spc; 2613 struct sctp_queued_to_read *control; 2614 2615 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) 2616 /* event not enabled */ 2617 return; 2618 2619 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA); 2620 if (m_notify == NULL) 2621 return; 2622 SCTP_BUF_LEN(m_notify) = 0; 2623 spc = mtod(m_notify, struct sctp_paddr_change *); 2624 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 2625 spc->spc_flags = 0; 2626 spc->spc_length = sizeof(struct sctp_paddr_change); 2627 if (sa->sa_family == AF_INET) { 2628 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 2629 } else { 2630 struct sockaddr_in6 *sin6; 2631 2632 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 2633 2634 /* recover scope_id for user */ 2635 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 2636 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 2637 (void)sa6_recoverscope(sin6); 2638 } 2639 } 2640 spc->spc_state = state; 2641 spc->spc_error = error; 2642 
spc->spc_assoc_id = sctp_get_associd(stcb); 2643 2644 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 2645 SCTP_BUF_NEXT(m_notify) = NULL; 2646 2647 /* append to socket */ 2648 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2649 0, 0, 0, 0, 0, 0, 2650 m_notify); 2651 if (control == NULL) { 2652 /* no memory */ 2653 sctp_m_freem(m_notify); 2654 return; 2655 } 2656 control->length = SCTP_BUF_LEN(m_notify); 2657 control->spec_flags = M_NOTIFICATION; 2658 /* not that we need this */ 2659 control->tail_mbuf = m_notify; 2660 sctp_add_to_readq(stcb->sctp_ep, stcb, 2661 control, 2662 &stcb->sctp_socket->so_rcv, 1); 2663 } 2664 2665 2666 static void 2667 sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error, 2668 struct sctp_tmit_chunk *chk) 2669 { 2670 struct mbuf *m_notify; 2671 struct sctp_send_failed *ssf; 2672 struct sctp_queued_to_read *control; 2673 int length; 2674 2675 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 2676 /* event not enabled */ 2677 return; 2678 2679 length = sizeof(struct sctp_send_failed) + chk->send_size; 2680 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA); 2681 if (m_notify == NULL) 2682 /* no space left */ 2683 return; 2684 SCTP_BUF_LEN(m_notify) = 0; 2685 ssf = mtod(m_notify, struct sctp_send_failed *); 2686 ssf->ssf_type = SCTP_SEND_FAILED; 2687 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT) 2688 ssf->ssf_flags = SCTP_DATA_UNSENT; 2689 else 2690 ssf->ssf_flags = SCTP_DATA_SENT; 2691 ssf->ssf_length = length; 2692 ssf->ssf_error = error; 2693 /* not exactly what the user sent in, but should be close :) */ 2694 ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number; 2695 ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq; 2696 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 2697 ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype; 2698 ssf->ssf_info.sinfo_context = chk->rec.data.context; 2699 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2700 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2701 SCTP_BUF_NEXT(m_notify) = chk->data; 2702 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed); 2703 2704 /* Steal off the mbuf */ 2705 chk->data = NULL; 2706 /* 2707 * For this case, we check the actual socket buffer, since the assoc 2708 * is going away we don't want to overfill the socket buffer for a 2709 * non-reader 2710 */ 2711 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 2712 sctp_m_freem(m_notify); 2713 return; 2714 } 2715 /* append to socket */ 2716 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2717 0, 0, 0, 0, 0, 0, 2718 m_notify); 2719 if (control == NULL) { 2720 /* no memory */ 2721 sctp_m_freem(m_notify); 2722 return; 2723 } 2724 control->spec_flags = M_NOTIFICATION; 2725 sctp_add_to_readq(stcb->sctp_ep, stcb, 2726 control, 2727 &stcb->sctp_socket->so_rcv, 1); 2728 } 2729 2730 2731 static void 2732 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 2733 struct sctp_stream_queue_pending *sp) 2734 { 2735 struct mbuf *m_notify; 2736 struct sctp_send_failed *ssf; 2737 struct sctp_queued_to_read *control; 2738 int length; 2739 2740 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 2741 /* event not enabled */ 2742 return; 2743 2744 length = sizeof(struct sctp_send_failed) + sp->length; 2745 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA); 2746 if (m_notify == NULL) 2747 /* no space left */ 2748 
return; 2749 SCTP_BUF_LEN(m_notify) = 0; 2750 ssf = mtod(m_notify, struct sctp_send_failed *); 2751 ssf->ssf_type = SCTP_SEND_FAILED; 2752 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT) 2753 ssf->ssf_flags = SCTP_DATA_UNSENT; 2754 else 2755 ssf->ssf_flags = SCTP_DATA_SENT; 2756 ssf->ssf_length = length; 2757 ssf->ssf_error = error; 2758 /* not exactly what the user sent in, but should be close :) */ 2759 ssf->ssf_info.sinfo_stream = sp->stream; 2760 ssf->ssf_info.sinfo_ssn = sp->strseq; 2761 ssf->ssf_info.sinfo_flags = sp->sinfo_flags; 2762 ssf->ssf_info.sinfo_ppid = sp->ppid; 2763 ssf->ssf_info.sinfo_context = sp->context; 2764 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2765 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2766 SCTP_BUF_NEXT(m_notify) = sp->data; 2767 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed); 2768 2769 /* Steal off the mbuf */ 2770 sp->data = NULL; 2771 /* 2772 * For this case, we check the actual socket buffer, since the assoc 2773 * is going away we don't want to overfill the socket buffer for a 2774 * non-reader 2775 */ 2776 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 2777 sctp_m_freem(m_notify); 2778 return; 2779 } 2780 /* append to socket */ 2781 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2782 0, 0, 0, 0, 0, 0, 2783 m_notify); 2784 if (control == NULL) { 2785 /* no memory */ 2786 sctp_m_freem(m_notify); 2787 return; 2788 } 2789 control->spec_flags = M_NOTIFICATION; 2790 sctp_add_to_readq(stcb->sctp_ep, stcb, 2791 control, 2792 &stcb->sctp_socket->so_rcv, 1); 2793 } 2794 2795 2796 2797 static void 2798 sctp_notify_adaptation_layer(struct sctp_tcb *stcb, 2799 uint32_t error) 2800 { 2801 struct mbuf *m_notify; 2802 struct sctp_adaptation_event *sai; 2803 struct sctp_queued_to_read *control; 2804 2805 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) 2806 /* event not enabled */ 2807 return; 2808 2809 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA); 2810 if (m_notify == NULL) 2811 /* no space left */ 2812 return; 2813 SCTP_BUF_LEN(m_notify) = 0; 2814 sai = mtod(m_notify, struct sctp_adaptation_event *); 2815 sai->sai_type = SCTP_ADAPTATION_INDICATION; 2816 sai->sai_flags = 0; 2817 sai->sai_length = sizeof(struct sctp_adaptation_event); 2818 sai->sai_adaptation_ind = error; 2819 sai->sai_assoc_id = sctp_get_associd(stcb); 2820 2821 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 2822 SCTP_BUF_NEXT(m_notify) = NULL; 2823 2824 /* append to socket */ 2825 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2826 0, 0, 0, 0, 0, 0, 2827 m_notify); 2828 if (control == NULL) { 2829 /* no memory */ 2830 sctp_m_freem(m_notify); 2831 return; 2832 } 2833 control->length = SCTP_BUF_LEN(m_notify); 2834 control->spec_flags = M_NOTIFICATION; 2835 /* not that we need this */ 2836 control->tail_mbuf = m_notify; 2837 sctp_add_to_readq(stcb->sctp_ep, stcb, 2838 control, 2839 &stcb->sctp_socket->so_rcv, 1); 2840 } 2841 2842 /* This always must be called with the read-queue LOCKED in the INP */ 2843 void 2844 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, 2845 uint32_t error, int nolock) 2846 { 2847 struct mbuf *m_notify; 2848 struct sctp_pdapi_event *pdapi; 2849 struct sctp_queued_to_read *control; 2850 struct sockbuf *sb; 2851 2852 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) 2853 /* event not enabled */ 2854 return; 2855 2856 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA); 2857 if (m_notify == NULL) 2858 /* no space left */ 2859 return; 2860 SCTP_BUF_LEN(m_notify) = 0; 2861 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 2862 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 2863 pdapi->pdapi_flags = 0; 2864 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 2865 pdapi->pdapi_indication = error; 2866 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 2867 2868 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 2869 SCTP_BUF_NEXT(m_notify) = NULL; 2870 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2871 0, 0, 0, 0, 0, 0, 2872 m_notify); 2873 if (control == NULL) { 2874 /* no memory */ 2875 sctp_m_freem(m_notify); 2876 return; 2877 } 2878 control->spec_flags = M_NOTIFICATION; 2879 control->length = SCTP_BUF_LEN(m_notify); 2880 /* not that we need this */ 2881 control->tail_mbuf = m_notify; 2882 control->held_length = 0; 2883 control->length = 0; 2884 if (nolock == 0) { 2885 SCTP_INP_READ_LOCK(stcb->sctp_ep); 2886 } 2887 sb = &stcb->sctp_socket->so_rcv; 2888 #ifdef SCTP_SB_LOGGING 2889 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 2890 #endif 2891 sctp_sballoc(stcb, sb, m_notify); 2892 #ifdef SCTP_SB_LOGGING 2893 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 2894 #endif 2895 atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify)); 2896 control->end_added = 1; 2897 if (stcb->asoc.control_pdapi) 2898 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 2899 else { 2900 /* we really should not see this case */ 2901 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 2902 } 2903 if (nolock == 0) { 2904 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 2905 } 2906 if (stcb->sctp_ep && stcb->sctp_socket) { 2907 /* This should always be the case */ 2908 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 2909 } 2910 } 2911 2912 static void 2913 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 2914 { 2915 struct mbuf *m_notify; 2916 struct sctp_shutdown_event *sse; 2917 struct sctp_queued_to_read *control; 2918 2919 /* 2920 * For TCP model AND UDP connected sockets we will send an error up 2921 * when an SHUTDOWN completes 2922 */ 2923 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2924 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 2925 /* mark socket closed for read/write and wakeup! 
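	 * (only the send side is actually marked here, via socantsendmore())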
*/ 2926 socantsendmore(stcb->sctp_socket); 2927 } 2928 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) 2929 /* event not enabled */ 2930 return; 2931 2932 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA); 2933 if (m_notify == NULL) 2934 /* no space left */ 2935 return; 2936 sse = mtod(m_notify, struct sctp_shutdown_event *); 2937 sse->sse_type = SCTP_SHUTDOWN_EVENT; 2938 sse->sse_flags = 0; 2939 sse->sse_length = sizeof(struct sctp_shutdown_event); 2940 sse->sse_assoc_id = sctp_get_associd(stcb); 2941 2942 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 2943 SCTP_BUF_NEXT(m_notify) = NULL; 2944 2945 /* append to socket */ 2946 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2947 0, 0, 0, 0, 0, 0, 2948 m_notify); 2949 if (control == NULL) { 2950 /* no memory */ 2951 sctp_m_freem(m_notify); 2952 return; 2953 } 2954 control->spec_flags = M_NOTIFICATION; 2955 control->length = SCTP_BUF_LEN(m_notify); 2956 /* not that we need this */ 2957 control->tail_mbuf = m_notify; 2958 sctp_add_to_readq(stcb->sctp_ep, stcb, 2959 control, 2960 &stcb->sctp_socket->so_rcv, 1); 2961 } 2962 2963 static void 2964 sctp_notify_stream_reset(struct sctp_tcb *stcb, 2965 int number_entries, uint16_t * list, int flag) 2966 { 2967 struct mbuf *m_notify; 2968 struct sctp_queued_to_read *control; 2969 struct sctp_stream_reset_event *strreset; 2970 int len; 2971 2972 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) 2973 /* event not enabled */ 2974 return; 2975 2976 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 2977 if (m_notify == NULL) 2978 /* no space left */ 2979 return; 2980 SCTP_BUF_LEN(m_notify) = 0; 2981 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 2982 if (len > M_TRAILINGSPACE(m_notify)) { 2983 /* never enough room */ 2984 sctp_m_freem(m_notify); 2985 return; 2986 } 2987 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 2988 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 2989 if (number_entries == 0) { 2990 strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS; 2991 } else { 2992 strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST; 2993 } 2994 strreset->strreset_length = len; 2995 strreset->strreset_assoc_id = sctp_get_associd(stcb); 2996 if (number_entries) { 2997 int i; 2998 2999 for (i = 0; i < number_entries; i++) { 3000 strreset->strreset_list[i] = ntohs(list[i]); 3001 } 3002 } 3003 SCTP_BUF_LEN(m_notify) = len; 3004 SCTP_BUF_NEXT(m_notify) = NULL; 3005 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3006 /* no space */ 3007 sctp_m_freem(m_notify); 3008 return; 3009 } 3010 /* append to socket */ 3011 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3012 0, 0, 0, 0, 0, 0, 3013 m_notify); 3014 if (control == NULL) { 3015 /* no memory */ 3016 sctp_m_freem(m_notify); 3017 return; 3018 } 3019 control->spec_flags = M_NOTIFICATION; 3020 control->length = SCTP_BUF_LEN(m_notify); 3021 /* not that we need this */ 3022 control->tail_mbuf = m_notify; 3023 sctp_add_to_readq(stcb->sctp_ep, stcb, 3024 control, 3025 &stcb->sctp_socket->so_rcv, 1); 3026 } 3027 3028 3029 void 3030 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 3031 uint32_t error, void *data) 3032 { 3033 if (stcb == NULL) { 3034 /* unlikely but */ 3035 return; 3036 } 3037 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3038 
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3039 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) 3040 ) { 3041 /* No notifications up when we are in a no socket state */ 3042 return; 3043 } 3044 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3045 /* Can't send up to a closed socket any notifications */ 3046 return; 3047 } 3048 if (stcb && (stcb->asoc.assoc_up_sent == 0) && (notification != SCTP_NOTIFY_ASSOC_UP)) { 3049 if ((notification != SCTP_NOTIFY_ASSOC_DOWN) && 3050 (notification != SCTP_NOTIFY_ASSOC_ABORTED) && 3051 (notification != SCTP_NOTIFY_SPECIAL_SP_FAIL) && 3052 (notification != SCTP_NOTIFY_DG_FAIL) && 3053 (notification != SCTP_NOTIFY_PEER_SHUTDOWN)) { 3054 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, 0, NULL); 3055 stcb->asoc.assoc_up_sent = 1; 3056 } 3057 } 3058 switch (notification) { 3059 case SCTP_NOTIFY_ASSOC_UP: 3060 if (stcb->asoc.assoc_up_sent == 0) { 3061 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL); 3062 stcb->asoc.assoc_up_sent = 1; 3063 } 3064 break; 3065 case SCTP_NOTIFY_ASSOC_DOWN: 3066 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL); 3067 break; 3068 case SCTP_NOTIFY_INTERFACE_DOWN: 3069 { 3070 struct sctp_nets *net; 3071 3072 net = (struct sctp_nets *)data; 3073 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 3074 (struct sockaddr *)&net->ro._l_addr, error); 3075 break; 3076 } 3077 case SCTP_NOTIFY_INTERFACE_UP: 3078 { 3079 struct sctp_nets *net; 3080 3081 net = (struct sctp_nets *)data; 3082 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 3083 (struct sockaddr *)&net->ro._l_addr, error); 3084 break; 3085 } 3086 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 3087 { 3088 struct sctp_nets *net; 3089 3090 net = (struct sctp_nets *)data; 3091 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 3092 (struct sockaddr *)&net->ro._l_addr, error); 3093 break; 3094 } 3095 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 3096 sctp_notify_send_failed2(stcb, error, 3097 (struct sctp_stream_queue_pending *)data); 3098 break; 3099 case SCTP_NOTIFY_DG_FAIL: 3100 sctp_notify_send_failed(stcb, error, 3101 (struct sctp_tmit_chunk *)data); 3102 break; 3103 case SCTP_NOTIFY_ADAPTATION_INDICATION: 3104 /* Here the error is the adaptation indication */ 3105 sctp_notify_adaptation_layer(stcb, error); 3106 break; 3107 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 3108 sctp_notify_partial_delivery_indication(stcb, error, 0); 3109 break; 3110 case SCTP_NOTIFY_STRDATA_ERR: 3111 break; 3112 case SCTP_NOTIFY_ASSOC_ABORTED: 3113 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL); 3114 break; 3115 case SCTP_NOTIFY_PEER_OPENED_STREAM: 3116 break; 3117 case SCTP_NOTIFY_STREAM_OPENED_OK: 3118 break; 3119 case SCTP_NOTIFY_ASSOC_RESTART: 3120 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data); 3121 break; 3122 case SCTP_NOTIFY_HB_RESP: 3123 break; 3124 case SCTP_NOTIFY_STR_RESET_SEND: 3125 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR); 3126 break; 3127 case SCTP_NOTIFY_STR_RESET_RECV: 3128 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR); 3129 break; 3130 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 3131 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_INBOUND_STR)); 3132 break; 3133 3134 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 3135 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_INBOUND_STR)); 3136 break; 3137 3138 case SCTP_NOTIFY_ASCONF_ADD_IP: 3139 
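		/* 'data' points at the sockaddr that was added via ASCONF */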
sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 3140 error); 3141 break; 3142 case SCTP_NOTIFY_ASCONF_DELETE_IP: 3143 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 3144 error); 3145 break; 3146 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 3147 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 3148 error); 3149 break; 3150 case SCTP_NOTIFY_ASCONF_SUCCESS: 3151 break; 3152 case SCTP_NOTIFY_ASCONF_FAILED: 3153 break; 3154 case SCTP_NOTIFY_PEER_SHUTDOWN: 3155 sctp_notify_shutdown_event(stcb); 3156 break; 3157 case SCTP_NOTIFY_AUTH_NEW_KEY: 3158 sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error, 3159 (uint16_t) (uintptr_t) data); 3160 break; 3161 #if 0 3162 case SCTP_NOTIFY_AUTH_KEY_CONFLICT: 3163 sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT, 3164 error, (uint16_t) (uintptr_t) data); 3165 break; 3166 #endif /* not yet? remove? */ 3167 3168 3169 default: 3170 #ifdef SCTP_DEBUG 3171 if (sctp_debug_on & SCTP_DEBUG_UTIL1) { 3172 printf("NOTIFY: unknown notification %xh (%u)\n", 3173 notification, notification); 3174 } 3175 #endif /* SCTP_DEBUG */ 3176 break; 3177 } /* end switch */ 3178 } 3179 3180 void 3181 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock) 3182 { 3183 struct sctp_association *asoc; 3184 struct sctp_stream_out *outs; 3185 struct sctp_tmit_chunk *chk; 3186 struct sctp_stream_queue_pending *sp; 3187 int i; 3188 3189 asoc = &stcb->asoc; 3190 3191 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3192 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3193 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3194 return; 3195 } 3196 /* now through all the gunk freeing chunks */ 3197 if (holds_lock == 0) 3198 SCTP_TCB_SEND_LOCK(stcb); 3199 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3200 /* For each stream */ 3201 outs = &stcb->asoc.strmout[i]; 3202 /* clean up any sends there */ 3203 stcb->asoc.locked_on_sending = NULL; 3204 sp = TAILQ_FIRST(&outs->outqueue); 3205 while (sp) { 3206 stcb->asoc.stream_queue_cnt--; 3207 TAILQ_REMOVE(&outs->outqueue, sp, next); 3208 sctp_free_spbufspace(stcb, asoc, sp); 3209 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 3210 SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp); 3211 if (sp->data) { 3212 sctp_m_freem(sp->data); 3213 sp->data = NULL; 3214 } 3215 if (sp->net) 3216 sctp_free_remote_addr(sp->net); 3217 sp->net = NULL; 3218 /* Free the chunk */ 3219 sctp_free_a_strmoq(stcb, sp); 3220 sp = TAILQ_FIRST(&outs->outqueue); 3221 } 3222 } 3223 3224 /* pending send queue SHOULD be empty */ 3225 if (!TAILQ_EMPTY(&asoc->send_queue)) { 3226 chk = TAILQ_FIRST(&asoc->send_queue); 3227 while (chk) { 3228 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 3229 asoc->send_queue_cnt--; 3230 if (chk->data) { 3231 /* 3232 * trim off the sctp chunk header(it should 3233 * be there) 3234 */ 3235 if (chk->send_size >= sizeof(struct sctp_data_chunk)) { 3236 m_adj(chk->data, sizeof(struct sctp_data_chunk)); 3237 sctp_mbuf_crush(chk->data); 3238 } 3239 } 3240 sctp_free_bufspace(stcb, asoc, chk, 1); 3241 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk); 3242 if (chk->data) { 3243 sctp_m_freem(chk->data); 3244 chk->data = NULL; 3245 } 3246 if (chk->whoTo) 3247 sctp_free_remote_addr(chk->whoTo); 3248 chk->whoTo = NULL; 3249 sctp_free_a_chunk(stcb, chk); 3250 chk = TAILQ_FIRST(&asoc->send_queue); 3251 } 3252 } 3253 /* sent queue SHOULD be empty */ 3254 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3255 chk = TAILQ_FIRST(&asoc->sent_queue); 3256 while (chk) { 3257 
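			/*
			 * These chunks were already transmitted at least
			 * once, so they are reported to the ULP as
			 * SCTP_NOTIFY_DATAGRAM_SENT below.
			 */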
TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 3258 asoc->sent_queue_cnt--; 3259 if (chk->data) { 3260 /* 3261 * trim off the sctp chunk header(it should 3262 * be there) 3263 */ 3264 if (chk->send_size >= sizeof(struct sctp_data_chunk)) { 3265 m_adj(chk->data, sizeof(struct sctp_data_chunk)); 3266 sctp_mbuf_crush(chk->data); 3267 } 3268 } 3269 sctp_free_bufspace(stcb, asoc, chk, 1); 3270 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 3271 SCTP_NOTIFY_DATAGRAM_SENT, chk); 3272 if (chk->data) { 3273 sctp_m_freem(chk->data); 3274 chk->data = NULL; 3275 } 3276 if (chk->whoTo) 3277 sctp_free_remote_addr(chk->whoTo); 3278 chk->whoTo = NULL; 3279 sctp_free_a_chunk(stcb, chk); 3280 chk = TAILQ_FIRST(&asoc->sent_queue); 3281 } 3282 } 3283 if (holds_lock == 0) 3284 SCTP_TCB_SEND_UNLOCK(stcb); 3285 } 3286 3287 void 3288 sctp_abort_notification(struct sctp_tcb *stcb, int error) 3289 { 3290 3291 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3292 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3293 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3294 return; 3295 } 3296 /* Tell them we lost the asoc */ 3297 sctp_report_all_outbound(stcb, 1); 3298 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 3299 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 3300 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 3301 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 3302 } 3303 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL); 3304 } 3305 3306 void 3307 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3308 struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err) 3309 { 3310 uint32_t vtag; 3311 3312 vtag = 0; 3313 if (stcb != NULL) { 3314 /* We have a TCB to abort, send notification too */ 3315 vtag = stcb->asoc.peer_vtag; 3316 sctp_abort_notification(stcb, 0); 3317 } 3318 sctp_send_abort(m, iphlen, sh, vtag, op_err); 3319 if (stcb != NULL) { 3320 /* Ok, now lets free it */ 3321 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 3322 } else { 3323 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3324 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3325 sctp_inpcb_free(inp, 1, 0); 3326 } 3327 } 3328 } 3329 } 3330 3331 void 3332 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3333 int error, struct mbuf *op_err) 3334 { 3335 uint32_t vtag; 3336 3337 if (stcb == NULL) { 3338 /* Got to have a TCB */ 3339 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3340 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3341 sctp_inpcb_free(inp, 1, 0); 3342 } 3343 } 3344 return; 3345 } 3346 vtag = stcb->asoc.peer_vtag; 3347 /* notify the ulp */ 3348 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) 3349 sctp_abort_notification(stcb, error); 3350 /* notify the peer */ 3351 sctp_send_abort_tcb(stcb, op_err); 3352 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 3353 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 3354 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 3355 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 3356 } 3357 /* now free the asoc */ 3358 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 3359 } 3360 3361 void 3362 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh, 3363 struct sctp_inpcb *inp, struct mbuf *op_err) 3364 { 3365 struct sctp_chunkhdr *ch, chunk_buf; 3366 unsigned int chk_length; 3367 3368 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 3369 /* Generate a TO address 
for future reference */ 3370 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 3371 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3372 sctp_inpcb_free(inp, 1, 0); 3373 } 3374 } 3375 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3376 sizeof(*ch), (uint8_t *) & chunk_buf); 3377 while (ch != NULL) { 3378 chk_length = ntohs(ch->chunk_length); 3379 if (chk_length < sizeof(*ch)) { 3380 /* break to abort land */ 3381 break; 3382 } 3383 switch (ch->chunk_type) { 3384 case SCTP_PACKET_DROPPED: 3385 /* we don't respond to pkt-dropped */ 3386 return; 3387 case SCTP_ABORT_ASSOCIATION: 3388 /* we don't respond with an ABORT to an ABORT */ 3389 return; 3390 case SCTP_SHUTDOWN_COMPLETE: 3391 /* 3392 * we ignore it since we are not waiting for it and 3393 * peer is gone 3394 */ 3395 return; 3396 case SCTP_SHUTDOWN_ACK: 3397 sctp_send_shutdown_complete2(m, iphlen, sh); 3398 return; 3399 default: 3400 break; 3401 } 3402 offset += SCTP_SIZE32(chk_length); 3403 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3404 sizeof(*ch), (uint8_t *) & chunk_buf); 3405 } 3406 sctp_send_abort(m, iphlen, sh, 0, op_err); 3407 } 3408 3409 /* 3410 * check the inbound datagram to make sure there is not an abort inside it, 3411 * if there is return 1, else return 0. 3412 */ 3413 int 3414 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill) 3415 { 3416 struct sctp_chunkhdr *ch; 3417 struct sctp_init_chunk *init_chk, chunk_buf; 3418 int offset; 3419 unsigned int chk_length; 3420 3421 offset = iphlen + sizeof(struct sctphdr); 3422 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 3423 (uint8_t *) & chunk_buf); 3424 while (ch != NULL) { 3425 chk_length = ntohs(ch->chunk_length); 3426 if (chk_length < sizeof(*ch)) { 3427 /* packet is probably corrupt */ 3428 break; 3429 } 3430 /* we seem to be ok, is it an abort? */ 3431 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 3432 /* yep, tell them */ 3433 return (1); 3434 } 3435 if (ch->chunk_type == SCTP_INITIATION) { 3436 /* need to update the Vtag */ 3437 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 3438 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf); 3439 if (init_chk != NULL) { 3440 *vtagfill = ntohl(init_chk->init.initiate_tag); 3441 } 3442 } 3443 /* Nope, move to the next chunk */ 3444 offset += SCTP_SIZE32(chk_length); 3445 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3446 sizeof(*ch), (uint8_t *) & chunk_buf); 3447 } 3448 return (0); 3449 } 3450 3451 /* 3452 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 3453 * set (i.e. 
it's 0) so, create this function to compare link local scopes 3454 */ 3455 uint32_t 3456 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 3457 { 3458 struct sockaddr_in6 a, b; 3459 3460 /* save copies */ 3461 a = *addr1; 3462 b = *addr2; 3463 3464 if (a.sin6_scope_id == 0) 3465 if (sa6_recoverscope(&a)) { 3466 /* can't get scope, so can't match */ 3467 return (0); 3468 } 3469 if (b.sin6_scope_id == 0) 3470 if (sa6_recoverscope(&b)) { 3471 /* can't get scope, so can't match */ 3472 return (0); 3473 } 3474 if (a.sin6_scope_id != b.sin6_scope_id) 3475 return (0); 3476 3477 return (1); 3478 } 3479 3480 /* 3481 * returns a sockaddr_in6 with embedded scope recovered and removed 3482 */ 3483 struct sockaddr_in6 * 3484 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 3485 { 3486 /* check and strip embedded scope junk */ 3487 if (addr->sin6_family == AF_INET6) { 3488 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 3489 if (addr->sin6_scope_id == 0) { 3490 *store = *addr; 3491 if (!sa6_recoverscope(store)) { 3492 /* use the recovered scope */ 3493 addr = store; 3494 } 3495 } else { 3496 /* else, return the original "to" addr */ 3497 in6_clearscope(&addr->sin6_addr); 3498 } 3499 } 3500 } 3501 return (addr); 3502 } 3503 3504 /* 3505 * are the two addresses the same? currently a "scopeless" check returns: 1 3506 * if same, 0 if not 3507 */ 3508 __inline int 3509 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 3510 { 3511 3512 /* must be valid */ 3513 if (sa1 == NULL || sa2 == NULL) 3514 return (0); 3515 3516 /* must be the same family */ 3517 if (sa1->sa_family != sa2->sa_family) 3518 return (0); 3519 3520 if (sa1->sa_family == AF_INET6) { 3521 /* IPv6 addresses */ 3522 struct sockaddr_in6 *sin6_1, *sin6_2; 3523 3524 sin6_1 = (struct sockaddr_in6 *)sa1; 3525 sin6_2 = (struct sockaddr_in6 *)sa2; 3526 return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr, 3527 &sin6_2->sin6_addr)); 3528 } else if (sa1->sa_family == AF_INET) { 3529 /* IPv4 addresses */ 3530 struct sockaddr_in *sin_1, *sin_2; 3531 3532 sin_1 = (struct sockaddr_in *)sa1; 3533 sin_2 = (struct sockaddr_in *)sa2; 3534 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 3535 } else { 3536 /* we don't do these... 
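		 * (only AF_INET and AF_INET6 are supported; any other
		 * address family never compares equal)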
*/ 3537 return (0); 3538 } 3539 } 3540 3541 void 3542 sctp_print_address(struct sockaddr *sa) 3543 { 3544 3545 if (sa->sa_family == AF_INET6) { 3546 struct sockaddr_in6 *sin6; 3547 char ip6buf[INET6_ADDRSTRLEN]; 3548 3549 sin6 = (struct sockaddr_in6 *)sa; 3550 printf("IPv6 address: %s:%d scope:%u\n", 3551 ip6_sprintf(ip6buf, &sin6->sin6_addr), 3552 ntohs(sin6->sin6_port), 3553 sin6->sin6_scope_id); 3554 } else if (sa->sa_family == AF_INET) { 3555 struct sockaddr_in *sin; 3556 unsigned char *p; 3557 3558 sin = (struct sockaddr_in *)sa; 3559 p = (unsigned char *)&sin->sin_addr; 3560 printf("IPv4 address: %u.%u.%u.%u:%d\n", 3561 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 3562 } else { 3563 printf("?\n"); 3564 } 3565 } 3566 3567 void 3568 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh) 3569 { 3570 if (iph->ip_v == IPVERSION) { 3571 struct sockaddr_in lsa, fsa; 3572 3573 bzero(&lsa, sizeof(lsa)); 3574 lsa.sin_len = sizeof(lsa); 3575 lsa.sin_family = AF_INET; 3576 lsa.sin_addr = iph->ip_src; 3577 lsa.sin_port = sh->src_port; 3578 bzero(&fsa, sizeof(fsa)); 3579 fsa.sin_len = sizeof(fsa); 3580 fsa.sin_family = AF_INET; 3581 fsa.sin_addr = iph->ip_dst; 3582 fsa.sin_port = sh->dest_port; 3583 printf("src: "); 3584 sctp_print_address((struct sockaddr *)&lsa); 3585 printf("dest: "); 3586 sctp_print_address((struct sockaddr *)&fsa); 3587 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 3588 struct ip6_hdr *ip6; 3589 struct sockaddr_in6 lsa6, fsa6; 3590 3591 ip6 = (struct ip6_hdr *)iph; 3592 bzero(&lsa6, sizeof(lsa6)); 3593 lsa6.sin6_len = sizeof(lsa6); 3594 lsa6.sin6_family = AF_INET6; 3595 lsa6.sin6_addr = ip6->ip6_src; 3596 lsa6.sin6_port = sh->src_port; 3597 bzero(&fsa6, sizeof(fsa6)); 3598 fsa6.sin6_len = sizeof(fsa6); 3599 fsa6.sin6_family = AF_INET6; 3600 fsa6.sin6_addr = ip6->ip6_dst; 3601 fsa6.sin6_port = sh->dest_port; 3602 printf("src: "); 3603 sctp_print_address((struct sockaddr *)&lsa6); 3604 printf("dest: "); 3605 sctp_print_address((struct sockaddr *)&fsa6); 3606 } 3607 } 3608 3609 void 3610 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 3611 struct sctp_inpcb *new_inp, 3612 struct sctp_tcb *stcb) 3613 { 3614 /* 3615 * go through our old INP and pull off any control structures that 3616 * belong to stcb and move then to the new inp. 3617 */ 3618 struct socket *old_so, *new_so; 3619 struct sctp_queued_to_read *control, *nctl; 3620 struct sctp_readhead tmp_queue; 3621 struct mbuf *m; 3622 int error; 3623 3624 old_so = old_inp->sctp_socket; 3625 new_so = new_inp->sctp_socket; 3626 TAILQ_INIT(&tmp_queue); 3627 3628 SOCKBUF_LOCK(&(old_so->so_rcv)); 3629 3630 error = sblock(&old_so->so_rcv, 0); 3631 3632 SOCKBUF_UNLOCK(&(old_so->so_rcv)); 3633 if (error) { 3634 /* 3635 * Gak, can't get sblock, we have a problem. data will be 3636 * left stranded.. and we don't dare look at it since the 3637 * other thread may be reading something. Oh well, its a 3638 * screwed up app that does a peeloff OR a accept while 3639 * reading from the main socket... actually its only the 3640 * peeloff() case, since I think read will fail on a 3641 * listening socket.. 
3642 */ 3643 return; 3644 } 3645 /* lock the socket buffers */ 3646 SCTP_INP_READ_LOCK(old_inp); 3647 control = TAILQ_FIRST(&old_inp->read_queue); 3648 /* Pull off all for out target stcb */ 3649 while (control) { 3650 nctl = TAILQ_NEXT(control, next); 3651 if (control->stcb == stcb) { 3652 /* remove it we want it */ 3653 TAILQ_REMOVE(&old_inp->read_queue, control, next); 3654 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 3655 m = control->data; 3656 while (m) { 3657 #ifdef SCTP_SB_LOGGING 3658 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 3659 #endif 3660 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 3661 #ifdef SCTP_SB_LOGGING 3662 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3663 #endif 3664 m = SCTP_BUF_NEXT(m); 3665 } 3666 } 3667 control = nctl; 3668 } 3669 SCTP_INP_READ_UNLOCK(old_inp); 3670 3671 /* Remove the sb-lock on the old socket */ 3672 SOCKBUF_LOCK(&(old_so->so_rcv)); 3673 3674 sbunlock(&old_so->so_rcv); 3675 SOCKBUF_UNLOCK(&(old_so->so_rcv)); 3676 3677 /* Now we move them over to the new socket buffer */ 3678 control = TAILQ_FIRST(&tmp_queue); 3679 SCTP_INP_READ_LOCK(new_inp); 3680 while (control) { 3681 nctl = TAILQ_NEXT(control, next); 3682 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 3683 m = control->data; 3684 while (m) { 3685 #ifdef SCTP_SB_LOGGING 3686 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 3687 #endif 3688 sctp_sballoc(stcb, &new_so->so_rcv, m); 3689 #ifdef SCTP_SB_LOGGING 3690 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3691 #endif 3692 m = SCTP_BUF_NEXT(m); 3693 } 3694 control = nctl; 3695 } 3696 SCTP_INP_READ_UNLOCK(new_inp); 3697 } 3698 3699 3700 void 3701 sctp_add_to_readq(struct sctp_inpcb *inp, 3702 struct sctp_tcb *stcb, 3703 struct sctp_queued_to_read *control, 3704 struct sockbuf *sb, 3705 int end) 3706 { 3707 /* 3708 * Here we must place the control on the end of the socket read 3709 * queue AND increment sb_cc so that select will work properly on 3710 * read. 3711 */ 3712 struct mbuf *m, *prev = NULL; 3713 3714 if (inp == NULL) { 3715 /* Gak, TSNH!! */ 3716 #ifdef INVARIANTS 3717 panic("Gak, inp NULL on add_to_readq"); 3718 #endif 3719 return; 3720 } 3721 SCTP_INP_READ_LOCK(inp); 3722 atomic_add_int(&inp->total_recvs, 1); 3723 atomic_add_int(&stcb->total_recvs, 1); 3724 m = control->data; 3725 control->held_length = 0; 3726 control->length = 0; 3727 while (m) { 3728 if (SCTP_BUF_LEN(m) == 0) { 3729 /* Skip mbufs with NO length */ 3730 if (prev == NULL) { 3731 /* First one */ 3732 control->data = sctp_m_free(m); 3733 m = control->data; 3734 } else { 3735 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 3736 m = SCTP_BUF_NEXT(prev); 3737 } 3738 if (m == NULL) { 3739 control->tail_mbuf = prev;; 3740 } 3741 continue; 3742 } 3743 prev = m; 3744 #ifdef SCTP_SB_LOGGING 3745 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 3746 #endif 3747 sctp_sballoc(stcb, sb, m); 3748 #ifdef SCTP_SB_LOGGING 3749 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3750 #endif 3751 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 3752 m = SCTP_BUF_NEXT(m); 3753 } 3754 if (prev != NULL) { 3755 control->tail_mbuf = prev; 3756 } else { 3757 /* Everything got collapsed out?? 
*/ 3758 return; 3759 } 3760 if (end) { 3761 control->end_added = 1; 3762 } 3763 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 3764 SCTP_INP_READ_UNLOCK(inp); 3765 if (inp && inp->sctp_socket) { 3766 sctp_sorwakeup(inp, inp->sctp_socket); 3767 } 3768 } 3769 3770 3771 int 3772 sctp_append_to_readq(struct sctp_inpcb *inp, 3773 struct sctp_tcb *stcb, 3774 struct sctp_queued_to_read *control, 3775 struct mbuf *m, 3776 int end, 3777 int ctls_cumack, 3778 struct sockbuf *sb) 3779 { 3780 /* 3781 * A partial delivery API event is underway. OR we are appending on 3782 * the reassembly queue. 3783 * 3784 * If PDAPI this means we need to add m to the end of the data. 3785 * Increase the length in the control AND increment the sb_cc. 3786 * Otherwise sb is NULL and all we need to do is put it at the end 3787 * of the mbuf chain. 3788 */ 3789 int len = 0; 3790 struct mbuf *mm, *tail = NULL, *prev = NULL; 3791 3792 if (inp) { 3793 SCTP_INP_READ_LOCK(inp); 3794 } 3795 if (control == NULL) { 3796 get_out: 3797 if (inp) { 3798 SCTP_INP_READ_UNLOCK(inp); 3799 } 3800 return (-1); 3801 } 3802 if (control->end_added) { 3803 /* huh this one is complete? */ 3804 goto get_out; 3805 } 3806 mm = m; 3807 if (mm == NULL) { 3808 goto get_out; 3809 } 3810 while (mm) { 3811 if (SCTP_BUF_LEN(mm) == 0) { 3812 /* Skip mbufs with NO lenght */ 3813 if (prev == NULL) { 3814 /* First one */ 3815 m = sctp_m_free(mm); 3816 mm = m; 3817 } else { 3818 SCTP_BUF_NEXT(prev) = sctp_m_free(mm); 3819 mm = SCTP_BUF_NEXT(prev); 3820 } 3821 continue; 3822 } 3823 prev = mm; 3824 len += SCTP_BUF_LEN(mm); 3825 if (sb) { 3826 #ifdef SCTP_SB_LOGGING 3827 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm)); 3828 #endif 3829 sctp_sballoc(stcb, sb, mm); 3830 #ifdef SCTP_SB_LOGGING 3831 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3832 #endif 3833 } 3834 mm = SCTP_BUF_NEXT(mm); 3835 } 3836 if (prev) { 3837 tail = prev; 3838 } else { 3839 /* Really there should always be a prev */ 3840 if (m == NULL) { 3841 /* Huh nothing left? */ 3842 #ifdef INVARIANTS 3843 panic("Nothing left to add?"); 3844 #else 3845 goto get_out; 3846 #endif 3847 } 3848 tail = m; 3849 } 3850 if (end) { 3851 /* message is complete */ 3852 if (control == stcb->asoc.control_pdapi) { 3853 stcb->asoc.control_pdapi = NULL; 3854 } 3855 control->held_length = 0; 3856 control->end_added = 1; 3857 } 3858 atomic_add_int(&control->length, len); 3859 if (control->tail_mbuf) { 3860 /* append */ 3861 SCTP_BUF_NEXT(control->tail_mbuf) = m; 3862 control->tail_mbuf = tail; 3863 } else { 3864 /* nothing there */ 3865 #ifdef INVARIANTS 3866 if (control->data != NULL) { 3867 panic("This should NOT happen"); 3868 } 3869 #endif 3870 control->data = m; 3871 control->tail_mbuf = tail; 3872 } 3873 /* 3874 * When we are appending in partial delivery, the cum-ack is used 3875 * for the actual pd-api highest tsn on this mbuf. The true cum-ack 3876 * is populated in the outbound sinfo structure from the true cumack 3877 * if the association exists... 
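 * (Here sinfo_tsn/sinfo_cumtsn only track the highest TSN handed to
 * this partial-delivery control; sctp_sorecvmsg() overwrites
 * sinfo_cumtsn with asoc.cumulative_tsn when it builds the sinfo
 * returned to the reader.)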
3878 */ 3879 control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack; 3880 if (inp) { 3881 SCTP_INP_READ_UNLOCK(inp); 3882 } 3883 if (inp && inp->sctp_socket) { 3884 sctp_sorwakeup(inp, inp->sctp_socket); 3885 } 3886 return (0); 3887 } 3888 3889 3890 3891 /*************HOLD THIS COMMENT FOR PATCH FILE OF 3892 *************ALTERNATE ROUTING CODE 3893 */ 3894 3895 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 3896 *************ALTERNATE ROUTING CODE 3897 */ 3898 3899 struct mbuf * 3900 sctp_generate_invmanparam(int err) 3901 { 3902 /* Return a MBUF with a invalid mandatory parameter */ 3903 struct mbuf *m; 3904 3905 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA); 3906 if (m) { 3907 struct sctp_paramhdr *ph; 3908 3909 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr); 3910 ph = mtod(m, struct sctp_paramhdr *); 3911 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 3912 ph->param_type = htons(err); 3913 } 3914 return (m); 3915 } 3916 3917 #ifdef SCTP_MBCNT_LOGGING 3918 void 3919 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 3920 struct sctp_tmit_chunk *tp1, int chk_cnt) 3921 { 3922 if (tp1->data == NULL) { 3923 return; 3924 } 3925 asoc->chunks_on_out_queue -= chk_cnt; 3926 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 3927 asoc->total_output_queue_size, 3928 tp1->book_size, 3929 0, 3930 tp1->mbcnt); 3931 if (asoc->total_output_queue_size >= tp1->book_size) { 3932 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 3933 } else { 3934 asoc->total_output_queue_size = 0; 3935 } 3936 3937 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 3938 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 3939 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 3940 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 3941 } else { 3942 stcb->sctp_socket->so_snd.sb_cc = 0; 3943 3944 } 3945 } 3946 } 3947 3948 #endif 3949 3950 int 3951 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 3952 int reason, struct sctpchunk_listhead *queue) 3953 { 3954 int ret_sz = 0; 3955 int notdone; 3956 uint8_t foundeom = 0; 3957 3958 do { 3959 ret_sz += tp1->book_size; 3960 tp1->sent = SCTP_FORWARD_TSN_SKIP; 3961 if (tp1->data) { 3962 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 3963 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1); 3964 sctp_m_freem(tp1->data); 3965 tp1->data = NULL; 3966 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 3967 } 3968 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 3969 stcb->asoc.sent_queue_cnt_removeable--; 3970 } 3971 if (queue == &stcb->asoc.send_queue) { 3972 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 3973 /* on to the sent queue */ 3974 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 3975 sctp_next); 3976 stcb->asoc.sent_queue_cnt++; 3977 } 3978 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 3979 SCTP_DATA_NOT_FRAG) { 3980 /* not frag'ed we ae done */ 3981 notdone = 0; 3982 foundeom = 1; 3983 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 3984 /* end of frag, we are done */ 3985 notdone = 0; 3986 foundeom = 1; 3987 } else { 3988 /* 3989 * Its a begin or middle piece, we must mark all of 3990 * it 3991 */ 3992 notdone = 1; 3993 tp1 = TAILQ_NEXT(tp1, sctp_next); 3994 } 3995 } while (tp1 && notdone); 3996 if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) { 3997 /* 3998 * The multi-part message was scattered across the send and 3999 * sent queue. 
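 * Since we never saw the last fragment on the sent queue, walk the
 * send queue as well so every fragment of this message gets marked
 * for FORWARD-TSN skipping.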
4000 */
4001 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4002 /*
4003 * recurse through the send_queue too, starting at the
4004 * beginning.
4005 */
4006 if (tp1) {
4007 ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
4008 &stcb->asoc.send_queue);
4009 } else {
4010 printf("hmm, nothing on the send queue and no EOM?\n");
4011 }
4012 }
4013 return (ret_sz);
4014 }
4015 
4016 /*
4017 * checks to see if the given address, sa, is one that is currently known by
4018 * the kernel. Note: can't distinguish the same address on multiple interfaces
4019 * and doesn't handle multiple addresses with different zone/scope ids. Note:
4020 * ifa_ifwithaddr() compares the entire sockaddr struct
4021 */
4022 struct ifaddr *
4023 sctp_find_ifa_by_addr(struct sockaddr *sa)
4024 {
4025 struct ifnet *ifn;
4026 struct ifaddr *ifa;
4027 
4028 /* go through all our known interfaces */
4029 TAILQ_FOREACH(ifn, &ifnet, if_list) {
4030 /* go through each interface's addresses */
4031 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
4032 /* correct family? */
4033 if (ifa->ifa_addr->sa_family != sa->sa_family)
4034 continue;
4035 
4036 #ifdef INET6
4037 if (ifa->ifa_addr->sa_family == AF_INET6) {
4038 /* IPv6 address */
4039 struct sockaddr_in6 *sin1, *sin2, sin6_tmp;
4040 
4041 sin1 = (struct sockaddr_in6 *)ifa->ifa_addr;
4042 if (IN6_IS_SCOPE_LINKLOCAL(&sin1->sin6_addr)) {
4043 /* create a copy and clear scope */
4044 memcpy(&sin6_tmp, sin1,
4045 sizeof(struct sockaddr_in6));
4046 sin1 = &sin6_tmp;
4047 in6_clearscope(&sin1->sin6_addr);
4048 }
4049 sin2 = (struct sockaddr_in6 *)sa;
4050 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
4051 sizeof(struct in6_addr)) == 0) {
4052 /* found it */
4053 return (ifa);
4054 }
4055 } else
4056 #endif
4057 if (ifa->ifa_addr->sa_family == AF_INET) {
4058 /* IPv4 address */
4059 struct sockaddr_in *sin1, *sin2;
4060 
4061 sin1 = (struct sockaddr_in *)ifa->ifa_addr;
4062 sin2 = (struct sockaddr_in *)sa;
4063 if (sin1->sin_addr.s_addr ==
4064 sin2->sin_addr.s_addr) {
4065 /* found it */
4066 return (ifa);
4067 }
4068 }
4069 /* else, not AF_INET or AF_INET6, so skip */
4070 } /* end foreach ifa */
4071 } /* end foreach ifn */
4072 /* not found! */
4073 return (NULL);
4074 }
4075 
4076 static void
4077 sctp_user_rcvd(struct sctp_tcb *stcb, int *freed_so_far, int hold_rlock,
4078 uint32_t rwnd_req)
4079 {
4080 /* User pulled some data, do we need a rwnd update?
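 * We only send a window-update SACK once the window we could re-open
 * (current rwnd minus the last rwnd we reported to the peer) reaches
 * rwnd_req; smaller amounts are just remembered in
 * freed_by_sorcv_sincelast for next time.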
*/ 4081 int r_unlocked = 0; 4082 uint32_t dif, rwnd; 4083 struct socket *so = NULL; 4084 4085 if (stcb == NULL) 4086 return; 4087 4088 atomic_add_int(&stcb->asoc.refcnt, 1); 4089 4090 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4091 /* Pre-check If we are freeing no update */ 4092 goto no_lock; 4093 } 4094 SCTP_INP_INCR_REF(stcb->sctp_ep); 4095 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4096 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 4097 goto out; 4098 } 4099 so = stcb->sctp_socket; 4100 if (so == NULL) { 4101 goto out; 4102 } 4103 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 4104 /* Have you have freed enough to look */ 4105 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4106 sctp_misc_ints(SCTP_ENTER_USER_RECV, 4107 (stcb->asoc.my_rwnd - stcb->asoc.my_last_reported_rwnd), 4108 *freed_so_far, 4109 stcb->freed_by_sorcv_sincelast, 4110 rwnd_req); 4111 #endif 4112 *freed_so_far = 0; 4113 /* Yep, its worth a look and the lock overhead */ 4114 4115 /* Figure out what the rwnd would be */ 4116 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 4117 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 4118 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 4119 } else { 4120 dif = 0; 4121 } 4122 if (dif >= rwnd_req) { 4123 if (hold_rlock) { 4124 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 4125 r_unlocked = 1; 4126 } 4127 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4128 /* 4129 * One last check before we allow the guy possibly 4130 * to get in. There is a race, where the guy has not 4131 * reached the gate. In that case 4132 */ 4133 goto out; 4134 } 4135 SCTP_TCB_LOCK(stcb); 4136 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4137 /* No reports here */ 4138 SCTP_TCB_UNLOCK(stcb); 4139 goto out; 4140 } 4141 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4142 sctp_misc_ints(SCTP_USER_RECV_SACKS, 4143 stcb->asoc.my_rwnd, 4144 stcb->asoc.my_last_reported_rwnd, 4145 stcb->freed_by_sorcv_sincelast, 4146 dif); 4147 #endif 4148 SCTP_STAT_INCR(sctps_wu_sacks_sent); 4149 sctp_send_sack(stcb); 4150 sctp_chunk_output(stcb->sctp_ep, stcb, 4151 SCTP_OUTPUT_FROM_USR_RCVD); 4152 /* make sure no timer is running */ 4153 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 4154 SCTP_TCB_UNLOCK(stcb); 4155 } else { 4156 /* Update how much we have pending */ 4157 stcb->freed_by_sorcv_sincelast = dif; 4158 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4159 sctp_misc_ints(SCTP_USER_RECV_SACKS, 4160 stcb->asoc.my_rwnd, 4161 stcb->asoc.my_last_reported_rwnd, 4162 stcb->freed_by_sorcv_sincelast, 4163 0); 4164 #endif 4165 } 4166 out: 4167 if (so && r_unlocked && hold_rlock) { 4168 SCTP_INP_READ_LOCK(stcb->sctp_ep); 4169 } 4170 SCTP_INP_DECR_REF(stcb->sctp_ep); 4171 no_lock: 4172 atomic_add_int(&stcb->asoc.refcnt, -1); 4173 return; 4174 } 4175 4176 int 4177 sctp_sorecvmsg(struct socket *so, 4178 struct uio *uio, 4179 struct mbuf **mp, 4180 struct sockaddr *from, 4181 int fromlen, 4182 int *msg_flags, 4183 struct sctp_sndrcvinfo *sinfo, 4184 int filling_sinfo) 4185 { 4186 /* 4187 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 4188 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 4189 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
4190 * On the way out we may send out any combination of: 4191 * MSG_NOTIFICATION MSG_EOR 4192 * 4193 */ 4194 struct sctp_inpcb *inp = NULL; 4195 int my_len = 0; 4196 int cp_len = 0, error = 0; 4197 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 4198 struct mbuf *m = NULL, *embuf = NULL; 4199 struct sctp_tcb *stcb = NULL; 4200 int wakeup_read_socket = 0; 4201 int freecnt_applied = 0; 4202 int out_flags = 0, in_flags = 0; 4203 int block_allowed = 1; 4204 int freed_so_far = 0; 4205 int copied_so_far = 0; 4206 int in_eeor_mode = 0; 4207 int no_rcv_needed = 0; 4208 uint32_t rwnd_req = 0; 4209 int hold_sblock = 0; 4210 int hold_rlock = 0; 4211 int alen = 0, slen = 0; 4212 int held_length = 0; 4213 4214 if (msg_flags) { 4215 in_flags = *msg_flags; 4216 } else { 4217 in_flags = 0; 4218 } 4219 slen = uio->uio_resid; 4220 /* Pull in and set up our int flags */ 4221 if (in_flags & MSG_OOB) { 4222 /* Out of band's NOT supported */ 4223 return (EOPNOTSUPP); 4224 } 4225 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 4226 return (EINVAL); 4227 } 4228 if ((in_flags & (MSG_DONTWAIT 4229 | MSG_NBIO 4230 )) || 4231 (so->so_state & SS_NBIO)) { 4232 block_allowed = 0; 4233 } 4234 /* setup the endpoint */ 4235 inp = (struct sctp_inpcb *)so->so_pcb; 4236 if (inp == NULL) { 4237 return (EFAULT); 4238 } 4239 rwnd_req = (so->so_rcv.sb_hiwat >> SCTP_RWND_HIWAT_SHIFT); 4240 /* Must be at least a MTU's worth */ 4241 if (rwnd_req < SCTP_MIN_RWND) 4242 rwnd_req = SCTP_MIN_RWND; 4243 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 4244 #ifdef SCTP_RECV_RWND_LOGGING 4245 sctp_misc_ints(SCTP_SORECV_ENTER, 4246 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid); 4247 #endif 4248 SOCKBUF_LOCK(&so->so_rcv); 4249 hold_sblock = 1; 4250 #ifdef SCTP_RECV_RWND_LOGGING 4251 sctp_misc_ints(SCTP_SORECV_ENTERPL, 4252 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid); 4253 #endif 4254 4255 4256 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0)); 4257 if (error) { 4258 goto release_unlocked; 4259 } 4260 restart: 4261 if (hold_sblock == 0) { 4262 SOCKBUF_LOCK(&so->so_rcv); 4263 hold_sblock = 1; 4264 } 4265 sbunlock(&so->so_rcv); 4266 4267 restart_nosblocks: 4268 if (hold_sblock == 0) { 4269 SOCKBUF_LOCK(&so->so_rcv); 4270 hold_sblock = 1; 4271 } 4272 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4273 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 4274 goto out; 4275 } 4276 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 4277 if (so->so_error) { 4278 error = so->so_error; 4279 if ((in_flags & MSG_PEEK) == 0) 4280 so->so_error = 0; 4281 } else { 4282 error = ENOTCONN; 4283 } 4284 goto out; 4285 } 4286 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) { 4287 /* we need to wait for data */ 4288 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4289 sctp_misc_ints(SCTP_SORECV_BLOCKSA, 4290 0, 0, so->so_rcv.sb_cc, uio->uio_resid); 4291 #endif 4292 if ((so->so_rcv.sb_cc == 0) && 4293 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 4294 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 4295 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 4296 /* 4297 * For active open side clear flags for 4298 * re-use passive open is blocked by 4299 * connect. 
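 * With no data and no connection on a one-to-one style socket we
 * either report ECONNRESET (freshly aborted) or ENOTCONN, and clear
 * the SS_ISCONNECTED-style state bits so an active opener can reuse
 * the socket.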
4300 */ 4301 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 4302 /* 4303 * You were aborted, passive side 4304 * always hits here 4305 */ 4306 error = ECONNRESET; 4307 /* 4308 * You get this once if you are 4309 * active open side 4310 */ 4311 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4312 /* 4313 * Remove flag if on the 4314 * active open side 4315 */ 4316 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED; 4317 } 4318 } 4319 so->so_state &= ~(SS_ISCONNECTING | 4320 SS_ISDISCONNECTING | 4321 SS_ISCONFIRMING | 4322 SS_ISCONNECTED); 4323 if (error == 0) { 4324 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 4325 error = ENOTCONN; 4326 } else { 4327 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED; 4328 } 4329 } 4330 goto out; 4331 } 4332 } 4333 error = sbwait(&so->so_rcv); 4334 if (error) { 4335 goto out; 4336 } 4337 held_length = 0; 4338 goto restart_nosblocks; 4339 } else if (so->so_rcv.sb_cc == 0) { 4340 if (so->so_error) { 4341 error = so->so_error; 4342 if ((in_flags & MSG_PEEK) == 0) 4343 so->so_error = 0; 4344 } else { 4345 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 4346 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4347 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 4348 /* 4349 * For active open side clear flags 4350 * for re-use passive open is 4351 * blocked by connect. 4352 */ 4353 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 4354 /* 4355 * You were aborted, passive 4356 * side always hits here 4357 */ 4358 error = ECONNRESET; 4359 /* 4360 * You get this once if you 4361 * are active open side 4362 */ 4363 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4364 /* 4365 * Remove flag if on 4366 * the active open 4367 * side 4368 */ 4369 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED; 4370 } 4371 } 4372 so->so_state &= ~(SS_ISCONNECTING | 4373 SS_ISDISCONNECTING | 4374 SS_ISCONFIRMING | 4375 SS_ISCONNECTED); 4376 if (error == 0) { 4377 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 4378 error = ENOTCONN; 4379 } else { 4380 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED; 4381 } 4382 } 4383 goto out; 4384 } 4385 } 4386 error = EWOULDBLOCK; 4387 } 4388 goto out; 4389 } 4390 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0)); 4391 /* we possibly have data we can read */ 4392 control = TAILQ_FIRST(&inp->read_queue); 4393 if (control == NULL) { 4394 /* 4395 * This could be happening since the appender did the 4396 * increment but as not yet did the tailq insert onto the 4397 * read_queue 4398 */ 4399 if (hold_rlock == 0) { 4400 SCTP_INP_READ_LOCK(inp); 4401 hold_rlock = 1; 4402 } 4403 control = TAILQ_FIRST(&inp->read_queue); 4404 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 4405 #ifdef INVARIANTS 4406 panic("Huh, its non zero and nothing on control?"); 4407 #endif 4408 so->so_rcv.sb_cc = 0; 4409 } 4410 SCTP_INP_READ_UNLOCK(inp); 4411 hold_rlock = 0; 4412 goto restart; 4413 } 4414 if ((control->length == 0) && 4415 (control->do_not_ref_stcb)) { 4416 /* 4417 * Clean up code for freeing assoc that left behind a 4418 * pdapi.. maybe a peer in EEOR that just closed after 4419 * sending and never indicated a EOR. 4420 */ 4421 if (hold_rlock == 0) { 4422 hold_rlock = 1; 4423 SCTP_INP_READ_LOCK(inp); 4424 } 4425 control->held_length = 0; 4426 if (control->data) { 4427 /* Hmm there is data here .. 
fix */ 4428 struct mbuf *m; 4429 int cnt = 0; 4430 4431 m = control->data; 4432 while (m) { 4433 cnt += SCTP_BUF_LEN(m); 4434 if (SCTP_BUF_NEXT(m) == NULL) { 4435 control->tail_mbuf = m; 4436 control->end_added = 1; 4437 } 4438 m = SCTP_BUF_NEXT(m); 4439 } 4440 control->length = cnt; 4441 } else { 4442 /* remove it */ 4443 TAILQ_REMOVE(&inp->read_queue, control, next); 4444 /* Add back any hiddend data */ 4445 sctp_free_remote_addr(control->whoFrom); 4446 sctp_free_a_readq(stcb, control); 4447 } 4448 if (hold_rlock) { 4449 hold_rlock = 0; 4450 SCTP_INP_READ_UNLOCK(inp); 4451 } 4452 goto restart; 4453 } 4454 if (control->length == 0) { 4455 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 4456 (filling_sinfo)) { 4457 /* find a more suitable one then this */ 4458 ctl = TAILQ_NEXT(control, next); 4459 while (ctl) { 4460 if ((ctl->stcb != control->stcb) && (ctl->length)) { 4461 /* found one */ 4462 control = ctl; 4463 goto found_one; 4464 } 4465 ctl = TAILQ_NEXT(ctl, next); 4466 } 4467 } 4468 /* 4469 * if we reach here, not suitable replacement is available 4470 * <or> fragment interleave is NOT on. So stuff the sb_cc 4471 * into the our held count, and its time to sleep again. 4472 */ 4473 held_length = so->so_rcv.sb_cc; 4474 control->held_length = so->so_rcv.sb_cc; 4475 goto restart; 4476 } 4477 /* Clear the held length since there is something to read */ 4478 control->held_length = 0; 4479 if (hold_rlock) { 4480 SCTP_INP_READ_UNLOCK(inp); 4481 hold_rlock = 0; 4482 } 4483 found_one: 4484 /* 4485 * If we reach here, control has a some data for us to read off. 4486 * Note that stcb COULD be NULL. 4487 */ 4488 if (hold_sblock) { 4489 SOCKBUF_UNLOCK(&so->so_rcv); 4490 hold_sblock = 0; 4491 } 4492 stcb = control->stcb; 4493 if (stcb) { 4494 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 4495 (control->do_not_ref_stcb == 0)) { 4496 if (freecnt_applied == 0) 4497 stcb = NULL; 4498 } else if (control->do_not_ref_stcb == 0) { 4499 /* you can't free it on me please */ 4500 /* 4501 * The lock on the socket buffer protects us so the 4502 * free code will stop. But since we used the 4503 * socketbuf lock and the sender uses the tcb_lock 4504 * to increment, we need to use the atomic add to 4505 * the refcnt 4506 */ 4507 atomic_add_int(&stcb->asoc.refcnt, 1); 4508 freecnt_applied = 1; 4509 /* 4510 * Setup to remember how much we have not yet told 4511 * the peer our rwnd has opened up. Note we grab the 4512 * value from the tcb from last time. Note too that 4513 * sack sending clears this when a sack is sent.. 4514 * which is fine. Once we hit the rwnd_req, we then 4515 * will go to the sctp_user_rcvd() that will not 4516 * lock until it KNOWs it MUST send a WUP-SACK. 
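 * (A WUP-SACK here is just a window-update SACK: a SACK sent purely
 * because the receive window has re-opened enough to be worth
 * advertising.)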
4517 * 4518 */ 4519 freed_so_far = stcb->freed_by_sorcv_sincelast; 4520 stcb->freed_by_sorcv_sincelast = 0; 4521 } 4522 } 4523 /* First lets get off the sinfo and sockaddr info */ 4524 if ((sinfo) && filling_sinfo) { 4525 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo)); 4526 nxt = TAILQ_NEXT(control, next); 4527 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) { 4528 struct sctp_extrcvinfo *s_extra; 4529 4530 s_extra = (struct sctp_extrcvinfo *)sinfo; 4531 if (nxt) { 4532 s_extra->next_flags = SCTP_NEXT_MSG_AVAIL; 4533 if (nxt->sinfo_flags & SCTP_UNORDERED) { 4534 s_extra->next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 4535 } 4536 if (nxt->spec_flags & M_NOTIFICATION) { 4537 s_extra->next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 4538 } 4539 s_extra->next_asocid = nxt->sinfo_assoc_id; 4540 s_extra->next_length = nxt->length; 4541 s_extra->next_ppid = nxt->sinfo_ppid; 4542 s_extra->next_stream = nxt->sinfo_stream; 4543 if (nxt->tail_mbuf != NULL) { 4544 if (nxt->end_added) { 4545 s_extra->next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 4546 } 4547 } 4548 } else { 4549 /* 4550 * we explicitly 0 this, since the memcpy 4551 * got some other things beyond the older 4552 * sinfo_ that is on the control's structure 4553 * :-D 4554 */ 4555 s_extra->next_flags = SCTP_NO_NEXT_MSG; 4556 s_extra->next_asocid = 0; 4557 s_extra->next_length = 0; 4558 s_extra->next_ppid = 0; 4559 s_extra->next_stream = 0; 4560 } 4561 } 4562 /* 4563 * update off the real current cum-ack, if we have an stcb. 4564 */ 4565 if (stcb) 4566 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 4567 /* 4568 * mask off the high bits, we keep the actual chunk bits in 4569 * there. 4570 */ 4571 sinfo->sinfo_flags &= 0x00ff; 4572 } 4573 if (fromlen && from) { 4574 struct sockaddr *to; 4575 4576 #ifdef AF_INET 4577 cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin.sin_len); 4578 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 4579 ((struct sockaddr_in *)from)->sin_port = control->port_from; 4580 #else 4581 /* No AF_INET use AF_INET6 */ 4582 cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin6.sin6_len); 4583 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 4584 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from; 4585 #endif 4586 4587 to = from; 4588 #if defined(AF_INET) && defined(AF_INET6) 4589 if ((inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) && 4590 (to->sa_family == AF_INET) && 4591 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) { 4592 struct sockaddr_in *sin; 4593 struct sockaddr_in6 sin6; 4594 4595 sin = (struct sockaddr_in *)to; 4596 bzero(&sin6, sizeof(sin6)); 4597 sin6.sin6_family = AF_INET6; 4598 sin6.sin6_len = sizeof(struct sockaddr_in6); 4599 sin6.sin6_addr.s6_addr16[2] = 0xffff; 4600 bcopy(&sin->sin_addr, 4601 &sin6.sin6_addr.s6_addr16[3], 4602 sizeof(sin6.sin6_addr.s6_addr16[3])); 4603 sin6.sin6_port = sin->sin_port; 4604 memcpy(from, (caddr_t)&sin6, sizeof(sin6)); 4605 } 4606 #endif 4607 #if defined(AF_INET6) 4608 { 4609 struct sockaddr_in6 lsa6, *to6; 4610 4611 to6 = (struct sockaddr_in6 *)to; 4612 sctp_recover_scope_mac(to6, (&lsa6)); 4613 } 4614 #endif 4615 } 4616 /* now copy out what data we can */ 4617 if (mp == NULL) { 4618 /* copy out each mbuf in the chain up to length */ 4619 get_more_data: 4620 m = control->data; 4621 while (m) { 4622 /* Move out all we can */ 4623 cp_len = (int)uio->uio_resid; 4624 my_len = (int)SCTP_BUF_LEN(m); 4625 if (cp_len > my_len) { 4626 /* not enough in this buf */ 4627 cp_len = my_len; 4628 } 4629 if (hold_rlock) { 4630 
SCTP_INP_READ_UNLOCK(inp); 4631 hold_rlock = 0; 4632 } 4633 if (cp_len > 0) 4634 error = uiomove(mtod(m, char *), cp_len, uio); 4635 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4636 sctp_misc_ints(SCTP_SORCV_DOESCPY, 4637 so->so_rcv.sb_cc, 4638 cp_len, 4639 0, 4640 0); 4641 #endif 4642 /* re-read */ 4643 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4644 goto release; 4645 } 4646 if (stcb && 4647 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4648 no_rcv_needed = 1; 4649 } 4650 if (error) { 4651 /* error we are out of here */ 4652 goto release; 4653 } 4654 if ((SCTP_BUF_NEXT(m) == NULL) && 4655 (cp_len >= SCTP_BUF_LEN(m)) && 4656 ((control->end_added == 0) || 4657 (control->end_added && (TAILQ_NEXT(control, next) == NULL))) 4658 ) { 4659 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4660 sctp_misc_ints(SCTP_SORCV_DOESLCK, 4661 so->so_rcv.sb_cc, 4662 cp_len, 4663 SCTP_BUF_LEN(m), 4664 control->length); 4665 #endif 4666 SCTP_INP_READ_LOCK(inp); 4667 hold_rlock = 1; 4668 } 4669 if (cp_len == SCTP_BUF_LEN(m)) { 4670 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4671 sctp_misc_ints(SCTP_SORCV_DOESADJ, 4672 so->so_rcv.sb_cc, 4673 control->length, 4674 cp_len, 4675 0); 4676 #endif 4677 if ((SCTP_BUF_NEXT(m) == NULL) && 4678 (control->end_added)) { 4679 out_flags |= MSG_EOR; 4680 } 4681 if (control->spec_flags & M_NOTIFICATION) { 4682 out_flags |= MSG_NOTIFICATION; 4683 } 4684 /* we ate up the mbuf */ 4685 if (in_flags & MSG_PEEK) { 4686 /* just looking */ 4687 m = SCTP_BUF_NEXT(m); 4688 copied_so_far += cp_len; 4689 } else { 4690 /* dispose of the mbuf */ 4691 #ifdef SCTP_SB_LOGGING 4692 sctp_sblog(&so->so_rcv, 4693 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4694 #endif 4695 sctp_sbfree(control, stcb, &so->so_rcv, m); 4696 #ifdef SCTP_SB_LOGGING 4697 sctp_sblog(&so->so_rcv, 4698 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4699 #endif 4700 embuf = m; 4701 copied_so_far += cp_len; 4702 freed_so_far += cp_len; 4703 alen = atomic_fetchadd_int(&control->length, -(cp_len)); 4704 if (alen < cp_len) { 4705 panic("Control length goes negative?"); 4706 } 4707 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4708 sctp_misc_ints(SCTP_SORCV_PASSBF, 4709 so->so_rcv.sb_cc, 4710 control->length, 4711 0, 4712 0); 4713 #endif 4714 control->data = sctp_m_free(m); 4715 m = control->data; 4716 /* 4717 * been through it all, must hold sb 4718 * lock ok to null tail 4719 */ 4720 if (control->data == NULL) { 4721 #ifdef INVARIANTS 4722 if ((control->end_added == 0) || 4723 (TAILQ_NEXT(control, next) == NULL)) { 4724 /* 4725 * If the end is not 4726 * added, OR the 4727 * next is NOT null 4728 * we MUST have the 4729 * lock. 4730 */ 4731 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 4732 panic("Hmm we don't own the lock?"); 4733 } 4734 } 4735 #endif 4736 control->tail_mbuf = NULL; 4737 #ifdef INVARIANTS 4738 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 4739 panic("end_added, nothing left and no MSG_EOR"); 4740 } 4741 #endif 4742 } 4743 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4744 sctp_misc_ints(SCTP_SORCV_ADJD, 4745 so->so_rcv.sb_cc, 4746 control->length, 4747 0, 4748 0); 4749 #endif 4750 } 4751 } else { 4752 /* Do we need to trim the mbuf? */ 4753 if (control->spec_flags & M_NOTIFICATION) { 4754 out_flags |= MSG_NOTIFICATION; 4755 } 4756 if ((in_flags & MSG_PEEK) == 0) { 4757 SCTP_BUF_RESV_UF(m, cp_len); 4758 SCTP_BUF_LEN(m) -= cp_len; 4759 #ifdef SCTP_SB_LOGGING 4760 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, cp_len); 4761 #endif 4762 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 4763 if (stcb) { 4764 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 4765 } 4766 copied_so_far += cp_len; 4767 embuf = m; 4768 freed_so_far += cp_len; 4769 #ifdef SCTP_SB_LOGGING 4770 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 4771 SCTP_LOG_SBRESULT, 0); 4772 #endif 4773 alen = atomic_fetchadd_int(&control->length, -(cp_len)); 4774 if (alen < cp_len) { 4775 panic("Control length goes negative2?"); 4776 } 4777 } else { 4778 copied_so_far += cp_len; 4779 } 4780 } 4781 if ((out_flags & MSG_EOR) || 4782 (uio->uio_resid == 0) 4783 ) { 4784 break; 4785 } 4786 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 4787 (control->do_not_ref_stcb == 0) && 4788 (freed_so_far >= rwnd_req)) { 4789 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 4790 } 4791 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4792 sctp_misc_ints(SCTP_SORCV_BOTWHILE, 4793 so->so_rcv.sb_cc, 4794 control->length, 4795 0, 4796 0); 4797 #endif 4798 4799 } /* end while(m) */ 4800 /* 4801 * At this point we have looked at it all and we either have 4802 * a MSG_EOR/or read all the user wants... <OR> 4803 * control->length == 0. 4804 */ 4805 if ((out_flags & MSG_EOR) && 4806 ((in_flags & MSG_PEEK) == 0)) { 4807 /* we are done with this control */ 4808 if (control->length == 0) { 4809 if (control->data) { 4810 #ifdef INVARIANTS 4811 panic("control->data not null at read eor?"); 4812 #else 4813 printf("Strange, data left in the control buffer .. invarients would panic?\n"); 4814 sctp_m_freem(control->data); 4815 control->data = NULL; 4816 #endif 4817 } 4818 done_with_control: 4819 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4820 sctp_misc_ints(SCTP_SORCV_FREECTL, 4821 so->so_rcv.sb_cc, 4822 0, 4823 0, 4824 0); 4825 #endif 4826 if (TAILQ_NEXT(control, next) == NULL) { 4827 /* 4828 * If we don't have a next we need a 4829 * lock, if there is a next interupt 4830 * is filling ahead of us and we 4831 * don't need a lock to remove this 4832 * guy (which is the head of the 4833 * queue). 4834 */ 4835 if (hold_rlock == 0) { 4836 SCTP_INP_READ_LOCK(inp); 4837 hold_rlock = 1; 4838 } 4839 } 4840 TAILQ_REMOVE(&inp->read_queue, control, next); 4841 /* Add back any hiddend data */ 4842 if (control->held_length) { 4843 held_length = 0; 4844 control->held_length = 0; 4845 wakeup_read_socket = 1; 4846 } 4847 no_rcv_needed = control->do_not_ref_stcb; 4848 sctp_free_remote_addr(control->whoFrom); 4849 control->data = NULL; 4850 sctp_free_a_readq(stcb, control); 4851 control = NULL; 4852 if ((freed_so_far >= rwnd_req) && (no_rcv_needed == 0)) 4853 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 4854 4855 } else { 4856 /* 4857 * The user did not read all of this 4858 * message, turn off the returned MSG_EOR 4859 * since we are leaving more behind on the 4860 * control to read. 4861 */ 4862 #ifdef INVARIANTS 4863 if (control->end_added && (control->data == NULL) && 4864 (control->tail_mbuf == NULL)) { 4865 panic("Gak, control->length is corrupt?"); 4866 } 4867 #endif 4868 no_rcv_needed = control->do_not_ref_stcb; 4869 out_flags &= ~MSG_EOR; 4870 } 4871 } 4872 if (out_flags & MSG_EOR) { 4873 goto release; 4874 } 4875 if ((uio->uio_resid == 0) || 4876 ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1))) 4877 ) { 4878 goto release; 4879 } 4880 /* 4881 * If I hit here the receiver wants more and this message is 4882 * NOT done (pd-api). So two questions. Can we block? if not 4883 * we are done. 
Did the user NOT set MSG_WAITALL? 4884 */ 4885 if (block_allowed == 0) { 4886 goto release; 4887 } 4888 /* 4889 * We need to wait for more data a few things: - We don't 4890 * sbunlock() so we don't get someone else reading. - We 4891 * must be sure to account for the case where what is added 4892 * is NOT to our control when we wakeup. 4893 */ 4894 4895 /* 4896 * Do we need to tell the transport a rwnd update might be 4897 * needed before we go to sleep? 4898 */ 4899 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 4900 ((freed_so_far >= rwnd_req) && 4901 (control->do_not_ref_stcb == 0) && 4902 (no_rcv_needed == 0))) { 4903 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 4904 } 4905 wait_some_more: 4906 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 4907 goto release; 4908 } 4909 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 4910 goto release; 4911 4912 if (hold_rlock == 1) { 4913 SCTP_INP_READ_UNLOCK(inp); 4914 hold_rlock = 0; 4915 } 4916 if (hold_sblock == 0) { 4917 SOCKBUF_LOCK(&so->so_rcv); 4918 hold_sblock = 1; 4919 } 4920 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4921 if (stcb) 4922 sctp_misc_ints(SCTP_SORECV_BLOCKSB, 4923 freed_so_far, 4924 stcb->asoc.my_rwnd, 4925 so->so_rcv.sb_cc, 4926 uio->uio_resid); 4927 else 4928 sctp_misc_ints(SCTP_SORECV_BLOCKSB, 4929 freed_so_far, 4930 0, 4931 so->so_rcv.sb_cc, 4932 uio->uio_resid); 4933 #endif 4934 if (so->so_rcv.sb_cc <= control->held_length) { 4935 error = sbwait(&so->so_rcv); 4936 if (error) { 4937 goto release; 4938 } 4939 control->held_length = 0; 4940 } 4941 if (hold_sblock) { 4942 SOCKBUF_UNLOCK(&so->so_rcv); 4943 hold_sblock = 0; 4944 } 4945 if (control->length == 0) { 4946 /* still nothing here */ 4947 if (control->end_added == 1) { 4948 /* he aborted, or is done i.e.did a shutdown */ 4949 out_flags |= MSG_EOR; 4950 if (control->pdapi_aborted) 4951 out_flags |= MSG_TRUNC; 4952 goto done_with_control; 4953 } 4954 if (so->so_rcv.sb_cc > held_length) { 4955 control->held_length = so->so_rcv.sb_cc; 4956 held_length = 0; 4957 } 4958 goto wait_some_more; 4959 } else if (control->data == NULL) { 4960 /* 4961 * we must re-sync since data is probably being 4962 * added 4963 */ 4964 SCTP_INP_READ_LOCK(inp); 4965 if ((control->length > 0) && (control->data == NULL)) { 4966 /* 4967 * big trouble.. we have the lock and its 4968 * corrupt? 4969 */ 4970 panic("Impossible data==NULL length !=0"); 4971 } 4972 SCTP_INP_READ_UNLOCK(inp); 4973 /* We will fall around to get more data */ 4974 } 4975 goto get_more_data; 4976 } else { 4977 /* copy out the mbuf chain */ 4978 get_more_data2: 4979 /* 4980 * Do we have a uio, I doubt it if so we grab the size from 4981 * it, if not you get it all 4982 */ 4983 if (uio) 4984 cp_len = uio->uio_resid; 4985 else 4986 cp_len = control->length; 4987 4988 if ((uint32_t) cp_len >= control->length) { 4989 /* easy way */ 4990 if ((control->end_added == 0) || 4991 (TAILQ_NEXT(control, next) == NULL)) { 4992 /* Need to get rlock */ 4993 if (hold_rlock == 0) { 4994 SCTP_INP_READ_LOCK(inp); 4995 hold_rlock = 1; 4996 } 4997 } 4998 if (control->end_added) { 4999 out_flags |= MSG_EOR; 5000 } 5001 if (control->spec_flags & M_NOTIFICATION) { 5002 out_flags |= MSG_NOTIFICATION; 5003 } 5004 if (uio) 5005 uio->uio_resid -= control->length; 5006 *mp = control->data; 5007 m = control->data; 5008 while (m) { 5009 #ifdef SCTP_SB_LOGGING 5010 sctp_sblog(&so->so_rcv, 5011 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5012 #endif 5013 sctp_sbfree(control, stcb, &so->so_rcv, m); 5014 freed_so_far += SCTP_BUF_LEN(m); 5015 #ifdef SCTP_SB_LOGGING 5016 sctp_sblog(&so->so_rcv, 5017 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5018 #endif 5019 m = SCTP_BUF_NEXT(m); 5020 } 5021 control->data = control->tail_mbuf = NULL; 5022 control->length = 0; 5023 if (out_flags & MSG_EOR) { 5024 /* Done with this control */ 5025 goto done_with_control; 5026 } 5027 /* still more to do with this conntrol */ 5028 /* do we really support msg_waitall here? */ 5029 if ((block_allowed == 0) || 5030 ((in_flags & MSG_WAITALL) == 0)) { 5031 goto release; 5032 } 5033 wait_some_more2: 5034 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 5035 goto release; 5036 if (hold_rlock == 1) { 5037 SCTP_INP_READ_UNLOCK(inp); 5038 hold_rlock = 0; 5039 } 5040 if (hold_sblock == 0) { 5041 SOCKBUF_LOCK(&so->so_rcv); 5042 hold_sblock = 1; 5043 } 5044 if (so->so_rcv.sb_cc <= control->held_length) { 5045 error = sbwait(&so->so_rcv); 5046 if (error) { 5047 goto release; 5048 } 5049 } 5050 if (hold_sblock) { 5051 SOCKBUF_UNLOCK(&so->so_rcv); 5052 hold_sblock = 0; 5053 } 5054 if (control->length == 0) { 5055 /* still nothing here */ 5056 if (control->end_added == 1) { 5057 /* 5058 * he aborted, or is done i.e. 5059 * shutdown 5060 */ 5061 out_flags |= MSG_EOR; 5062 if (control->pdapi_aborted) 5063 out_flags |= MSG_TRUNC; 5064 goto done_with_control; 5065 } 5066 if (so->so_rcv.sb_cc > held_length) { 5067 control->held_length = so->so_rcv.sb_cc; 5068 /* 5069 * We don't use held_length while 5070 * getting a message 5071 */ 5072 held_length = 0; 5073 } 5074 goto wait_some_more2; 5075 } 5076 goto get_more_data2; 5077 } else { 5078 /* hard way mbuf by mbuf */ 5079 m = control->data; 5080 if (control->end_added == 0) { 5081 /* need the rlock */ 5082 if (hold_rlock == 0) { 5083 SCTP_INP_READ_LOCK(inp); 5084 hold_rlock = 1; 5085 } 5086 } 5087 if (control->spec_flags & M_NOTIFICATION) { 5088 out_flags |= MSG_NOTIFICATION; 5089 } 5090 while ((m) && (cp_len > 0)) { 5091 if (cp_len >= SCTP_BUF_LEN(m)) { 5092 *mp = m; 5093 atomic_subtract_int(&control->length, SCTP_BUF_LEN(m)); 5094 if (uio) 5095 uio->uio_resid -= SCTP_BUF_LEN(m); 5096 cp_len -= SCTP_BUF_LEN(m); 5097 control->data = SCTP_BUF_NEXT(m); 5098 SCTP_BUF_NEXT(m) = NULL; 5099 #ifdef SCTP_SB_LOGGING 5100 sctp_sblog(&so->so_rcv, 5101 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5102 #endif 5103 sctp_sbfree(control, stcb, &so->so_rcv, m); 5104 freed_so_far += SCTP_BUF_LEN(m); 5105 #ifdef SCTP_SB_LOGGING 5106 sctp_sblog(&so->so_rcv, 5107 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5108 #endif 5109 mp = &SCTP_BUF_NEXT(m); 5110 m = control->data; 5111 } else { 5112 /* 5113 * got all he wants and its part of 5114 * this mbuf only. 
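 * Copy out just cp_len bytes (SCTP_M_COPYM into *mp) and then trim
 * those bytes off the front of this mbuf so the remainder stays
 * queued for a later read.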
5115 */ 5116 if (uio) 5117 uio->uio_resid -= SCTP_BUF_LEN(m); 5118 cp_len -= SCTP_BUF_LEN(m); 5119 if (hold_rlock) { 5120 SCTP_INP_READ_UNLOCK(inp); 5121 hold_rlock = 0; 5122 } 5123 if (hold_sblock) { 5124 SOCKBUF_UNLOCK(&so->so_rcv); 5125 hold_sblock = 0; 5126 } 5127 *mp = SCTP_M_COPYM(m, 0, cp_len, 5128 M_TRYWAIT 5129 ); 5130 #ifdef SCTP_LOCK_LOGGING 5131 sctp_log_lock(inp, stcb, SCTP_LOG_LOCK_SOCKBUF_R); 5132 #endif 5133 if (hold_sblock == 0) { 5134 SOCKBUF_LOCK(&so->so_rcv); 5135 hold_sblock = 1; 5136 } 5137 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5138 goto release; 5139 5140 if (stcb && 5141 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5142 no_rcv_needed = 1; 5143 } 5144 SCTP_BUF_RESV_UF(m, cp_len); 5145 SCTP_BUF_LEN(m) -= cp_len; 5146 #ifdef SCTP_SB_LOGGING 5147 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5148 #endif 5149 freed_so_far += cp_len; 5150 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5151 if (stcb) { 5152 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5153 if ((freed_so_far >= rwnd_req) && 5154 (control->do_not_ref_stcb == 0) && 5155 (no_rcv_needed == 0)) 5156 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5157 } 5158 #ifdef SCTP_SB_LOGGING 5159 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5160 SCTP_LOG_SBRESULT, 0); 5161 #endif 5162 goto release; 5163 } 5164 } 5165 } 5166 } 5167 release: 5168 if (hold_rlock == 1) { 5169 SCTP_INP_READ_UNLOCK(inp); 5170 hold_rlock = 0; 5171 } 5172 if (hold_sblock == 0) { 5173 SOCKBUF_LOCK(&so->so_rcv); 5174 hold_sblock = 1; 5175 } 5176 sbunlock(&so->so_rcv); 5177 5178 release_unlocked: 5179 if (hold_sblock) { 5180 SOCKBUF_UNLOCK(&so->so_rcv); 5181 hold_sblock = 0; 5182 } 5183 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 5184 if ((freed_so_far >= rwnd_req) && 5185 (control && (control->do_not_ref_stcb == 0)) && 5186 (no_rcv_needed == 0)) 5187 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5188 } 5189 if (msg_flags) 5190 *msg_flags |= out_flags; 5191 out: 5192 if (hold_rlock == 1) { 5193 SCTP_INP_READ_UNLOCK(inp); 5194 hold_rlock = 0; 5195 } 5196 if (hold_sblock) { 5197 SOCKBUF_UNLOCK(&so->so_rcv); 5198 hold_sblock = 0; 5199 } 5200 if (freecnt_applied) { 5201 /* 5202 * The lock on the socket buffer protects us so the free 5203 * code will stop. But since we used the socketbuf lock and 5204 * the sender uses the tcb_lock to increment, we need to use 5205 * the atomic add to the refcnt. 5206 */ 5207 if (stcb == NULL) { 5208 panic("stcb for refcnt has gone NULL?"); 5209 } 5210 atomic_add_int(&stcb->asoc.refcnt, -1); 5211 freecnt_applied = 0; 5212 /* Save the value back for next time */ 5213 stcb->freed_by_sorcv_sincelast = freed_so_far; 5214 } 5215 #ifdef SCTP_RECV_RWND_LOGGING 5216 if (stcb) { 5217 sctp_misc_ints(SCTP_SORECV_DONE, 5218 freed_so_far, 5219 ((uio) ? (slen - uio->uio_resid) : slen), 5220 stcb->asoc.my_rwnd, 5221 so->so_rcv.sb_cc); 5222 } else { 5223 sctp_misc_ints(SCTP_SORECV_DONE, 5224 freed_so_far, 5225 ((uio) ? 
(slen - uio->uio_resid) : slen), 5226 0, 5227 so->so_rcv.sb_cc); 5228 } 5229 #endif 5230 if (wakeup_read_socket) { 5231 sctp_sorwakeup(inp, so); 5232 } 5233 return (error); 5234 } 5235 5236 5237 #ifdef SCTP_MBUF_LOGGING 5238 struct mbuf * 5239 sctp_m_free(struct mbuf *m) 5240 { 5241 if (SCTP_BUF_IS_EXTENDED(m)) { 5242 sctp_log_mb(m, SCTP_MBUF_IFREE); 5243 } 5244 return (m_free(m)); 5245 } 5246 5247 void 5248 sctp_m_freem(struct mbuf *mb) 5249 { 5250 while (mb != NULL) 5251 mb = sctp_m_free(mb); 5252 } 5253 5254 #endif 5255 5256 5257 int 5258 sctp_soreceive(so, psa, uio, mp0, controlp, flagsp) 5259 struct socket *so; 5260 struct sockaddr **psa; 5261 struct uio *uio; 5262 struct mbuf **mp0; 5263 struct mbuf **controlp; 5264 int *flagsp; 5265 { 5266 int error, fromlen; 5267 uint8_t sockbuf[256]; 5268 struct sockaddr *from; 5269 struct sctp_extrcvinfo sinfo; 5270 int filling_sinfo = 1; 5271 struct sctp_inpcb *inp; 5272 5273 inp = (struct sctp_inpcb *)so->so_pcb; 5274 /* pickup the assoc we are reading from */ 5275 if (inp == NULL) { 5276 return (EINVAL); 5277 } 5278 if ((sctp_is_feature_off(inp, 5279 SCTP_PCB_FLAGS_RECVDATAIOEVNT)) || 5280 (controlp == NULL)) { 5281 /* user does not want the sndrcv ctl */ 5282 filling_sinfo = 0; 5283 } 5284 if (psa) { 5285 from = (struct sockaddr *)sockbuf; 5286 fromlen = sizeof(sockbuf); 5287 from->sa_len = 0; 5288 } else { 5289 from = NULL; 5290 fromlen = 0; 5291 } 5292 5293 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp, 5294 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 5295 if ((controlp) && (filling_sinfo)) { 5296 /* copy back the sinfo in a CMSG format */ 5297 if (filling_sinfo) 5298 *controlp = sctp_build_ctl_nchunk(inp, 5299 (struct sctp_sndrcvinfo *)&sinfo); 5300 else 5301 *controlp = NULL; 5302 } 5303 if (psa) { 5304 /* copy back the address info */ 5305 if (from && from->sa_len) { 5306 *psa = sodupsockaddr(from, M_NOWAIT); 5307 } else { 5308 *psa = NULL; 5309 } 5310 } 5311 return (error); 5312 } 5313
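/*
 * Illustrative note (not part of this module): the MSG_EOR and
 * MSG_NOTIFICATION flags set in sctp_sorecvmsg() above are what a
 * userland receiver ultimately observes.  A minimal sketch, assuming
 * the sctp_recvmsg() wrapper from the SCTP sockets API; "sd" and
 * "handle_notification" below are assumed names for illustration only:
 *
 *	struct sctp_sndrcvinfo sinfo;
 *	struct sockaddr_storage from;
 *	socklen_t fromlen = sizeof(from);
 *	int flags = 0;
 *	char buf[8192];
 *	ssize_t n;
 *
 *	n = sctp_recvmsg(sd, buf, sizeof(buf),
 *	    (struct sockaddr *)&from, &fromlen, &sinfo, &flags);
 *	if (n > 0 && (flags & MSG_NOTIFICATION)) {
 *		handle_notification(buf, n);
 *	} else if (n > 0 && (flags & MSG_EOR) == 0) {
 *		(keep reading: more of this message follows, e.g. a
 *		 partial delivery is in progress)
 *	}
 *
 * MSG_EOR marks the end of a complete user message and MSG_NOTIFICATION
 * marks a notification rather than user data.
 */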