/*-
 * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_indata.h>    /* for sctp_deliver_data() */
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>

extern int sctp_warm_the_crc32_table;

#define NUMBER_OF_MTU_SIZES 18

#ifdef SCTP_DEBUG
extern uint32_t sctp_debug_on;
#endif


#ifdef SCTP_STAT_LOGGING
int global_sctp_cwnd_log_at = 0;
int global_sctp_cwnd_log_rolled = 0;
struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE];
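/*
 * Event logging (SCTP_STAT_LOGGING): the helpers below record fixed-size
 * records into the sctp_clog[] ring. Each caller grabs the next slot via
 * SCTP_STATLOG_GETREF() and stamps it with sctp_get_time_of_event(), which
 * packs the current time into 32 bits: tv_sec modulo 0xfff in the upper 12
 * bits and tv_usec in the lower 20 bits.
 */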
static uint32_t
sctp_get_time_of_event(void)
{
    struct timeval now;
    uint32_t timeval;

    SCTP_GETPTIME_TIMEVAL(&now);
    timeval = (now.tv_sec % 0x00000fff);
    timeval <<= 20;
    timeval |= now.tv_usec & 0xfffff;
    return (timeval);
}


void
sctp_clr_stat_log(void)
{
    global_sctp_cwnd_log_at = 0;
    global_sctp_cwnd_log_rolled = 0;
}


void
sctp_sblog(struct sockbuf *sb,
    struct sctp_tcb *stcb, int from, int incr)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SB;
    sctp_clog[sctp_cwnd_log_at].x.sb.stcb = stcb;
    sctp_clog[sctp_cwnd_log_at].x.sb.so_sbcc = sb->sb_cc;
    if (stcb)
        sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = stcb->asoc.sb_cc;
    else
        sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = 0;
    sctp_clog[sctp_cwnd_log_at].x.sb.incr = incr;
}

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = 0;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CLOSE;
    sctp_clog[sctp_cwnd_log_at].x.close.inp = (void *)inp;
    sctp_clog[sctp_cwnd_log_at].x.close.sctp_flags = inp->sctp_flags;
    if (stcb) {
        sctp_clog[sctp_cwnd_log_at].x.close.stcb = (void *)stcb;
        sctp_clog[sctp_cwnd_log_at].x.close.state = (uint16_t) stcb->asoc.state;
    } else {
        sctp_clog[sctp_cwnd_log_at].x.close.stcb = 0;
        sctp_clog[sctp_cwnd_log_at].x.close.state = 0;
    }
    sctp_clog[sctp_cwnd_log_at].x.close.loc = loc;
}


void
rto_logging(struct sctp_nets *net, int from)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RTT;
    sctp_clog[sctp_cwnd_log_at].x.rto.net = (void *)net;
    sctp_clog[sctp_cwnd_log_at].x.rto.rtt = net->prev_rtt;
    sctp_clog[sctp_cwnd_log_at].x.rto.rttvar = net->rtt_variance;
    sctp_clog[sctp_cwnd_log_at].x.rto.direction = net->rto_variance_dir;
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM;
    sctp_clog[sctp_cwnd_log_at].x.strlog.stcb = stcb;
    sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn;
    sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq;
    sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
    sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
    sctp_clog[sctp_cwnd_log_at].x.strlog.strm = stream;
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) action;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_NAGLE;
    sctp_clog[sctp_cwnd_log_at].x.nagle.stcb = (void *)stcb;
    sctp_clog[sctp_cwnd_log_at].x.nagle.total_flight = stcb->asoc.total_flight;
    sctp_clog[sctp_cwnd_log_at].x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
    sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
    sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_flight = stcb->asoc.total_flight_count;
}


void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SACK;
    sctp_clog[sctp_cwnd_log_at].x.sack.cumack = cumack;
    sctp_clog[sctp_cwnd_log_at].x.sack.oldcumack = old_cumack;
    sctp_clog[sctp_cwnd_log_at].x.sack.tsn = tsn;
    sctp_clog[sctp_cwnd_log_at].x.sack.numGaps = gaps;
    sctp_clog[sctp_cwnd_log_at].x.sack.numDups = dups;
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAP;
    sctp_clog[sctp_cwnd_log_at].x.map.base = map;
    sctp_clog[sctp_cwnd_log_at].x.map.cum = cum;
    sctp_clog[sctp_cwnd_log_at].x.map.high = high;
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
    int from)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_FR;
    sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn;
    sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn;
    sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn;
}


void
sctp_log_mb(struct mbuf *m, int from)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBUF;
    sctp_clog[sctp_cwnd_log_at].x.mb.mp = m;
    sctp_clog[sctp_cwnd_log_at].x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
    sctp_clog[sctp_cwnd_log_at].x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
    sctp_clog[sctp_cwnd_log_at].x.mb.data = SCTP_BUF_AT(m, 0);
    if (SCTP_BUF_IS_EXTENDED(m)) {
        sctp_clog[sctp_cwnd_log_at].x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
        sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
    } else {
        sctp_clog[sctp_cwnd_log_at].x.mb.ext = 0;
        sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = 0;
    }
}


void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
    int from)
{
    int sctp_cwnd_log_at;

    if (control == NULL) {
        printf("Gak log of NULL?\n");
        return;
    }
    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM;
    sctp_clog[sctp_cwnd_log_at].x.strlog.stcb = control->stcb;
    sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = control->sinfo_tsn;
    sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = control->sinfo_ssn;
    sctp_clog[sctp_cwnd_log_at].x.strlog.strm = control->sinfo_stream;
    if (poschk != NULL) {
        sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = poschk->sinfo_tsn;
        sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = poschk->sinfo_ssn;
    } else {
        sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
        sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
    }
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CWND;
    sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
    if (stcb->asoc.send_queue_cnt > 255)
        sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255;
    else
        sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
    if (stcb->asoc.stream_queue_cnt > 255)
        sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255;
    else
        sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

    if (net) {
        sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd;
        sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
        sctp_clog[sctp_cwnd_log_at].x.cwnd.pseudo_cumack = net->pseudo_cumack;
        sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
        sctp_clog[sctp_cwnd_log_at].x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
    }
    if (SCTP_CWNDLOG_PRESEND == from) {
        sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
    }
    sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment;
}
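/*
 * sctp_log_lock() snapshots which of the relevant mutexes (tcb, inp,
 * create, ipi_ep and the socket send/receive buffer locks) are currently
 * owned, via mtx_owned(), so lock-ordering problems can be reconstructed
 * from the log afterwards.
 */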
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_LOCK_EVENT;
    if (inp) {
        sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)inp->sctp_socket;
    } else {
        sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)NULL;
    }
    sctp_clog[sctp_cwnd_log_at].x.lock.inp = (void *)inp;
    if (stcb) {
        sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
    } else {
        sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
    }
    if (inp) {
        sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
        sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
    } else {
        sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = SCTP_LOCK_UNKNOWN;
    }
    sctp_clog[sctp_cwnd_log_at].x.lock.info_lock = mtx_owned(&sctppcbinfo.ipi_ep_mtx);
    if (inp->sctp_socket) {
        sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
        sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
        sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
    } else {
        sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
    }
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAXBURST;
    sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
    sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error;
    sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
    sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst;
    if (stcb->asoc.send_queue_cnt > 255)
        sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255;
    else
        sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
    if (stcb->asoc.stream_queue_cnt > 255)
        sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255;
    else
        sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND;
    sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
    sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size;
    sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
    sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0;
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND;
    sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
    sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size;
    sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
    sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval;
}

void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBCNT;
    sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq;
    sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change = book;
    sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q;
    sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt;
}

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_MISC_EVENT;
    sctp_clog[sctp_cwnd_log_at].x.misc.log1 = a;
    sctp_clog[sctp_cwnd_log_at].x.misc.log2 = b;
    sctp_clog[sctp_cwnd_log_at].x.misc.log3 = c;
    sctp_clog[sctp_cwnd_log_at].x.misc.log4 = d;
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_WAKE;
    sctp_clog[sctp_cwnd_log_at].x.wake.stcb = (void *)stcb;
    sctp_clog[sctp_cwnd_log_at].x.wake.wake_cnt = wake_cnt;
    sctp_clog[sctp_cwnd_log_at].x.wake.flight = stcb->asoc.total_flight_count;
    sctp_clog[sctp_cwnd_log_at].x.wake.send_q = stcb->asoc.send_queue_cnt;
    sctp_clog[sctp_cwnd_log_at].x.wake.sent_q = stcb->asoc.sent_queue_cnt;

    if (stcb->asoc.stream_queue_cnt < 0xff)
        sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
    else
        sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = 0xff;

    if (stcb->asoc.chunks_on_out_queue < 0xff)
        sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
    else
        sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = 0xff;

    sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags = 0;
    /* set in the deferred mode stuff */
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
        sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 1;
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
        sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 2;
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
        sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 4;
    /* what about the sb */
    if (stcb->sctp_socket) {
        struct socket *so = stcb->sctp_socket;

        sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
    } else {
        sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = 0xff;
    }
}

void
sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
{
    int sctp_cwnd_log_at;

    SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
    sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
    sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
    sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_BLOCK;
    sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size;
    sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
    sctp_clog[sctp_cwnd_log_at].x.blk.peer_rwnd = asoc->peers_rwnd;
    sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
    sctp_clog[sctp_cwnd_log_at].x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
    sctp_clog[sctp_cwnd_log_at].x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
    sctp_clog[sctp_cwnd_log_at].x.blk.sndlen = sendlen;
}
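/*
 * sctp_fill_stat_log() copies log entries out of the sctp_clog[] ring into
 * the caller-supplied mbuf, honoring the start_at/end_at window requested
 * in the leading sctp_cwnd_log_req and wrapping around SCTP_STAT_LOG_SIZE
 * as needed.
 */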
int
sctp_fill_stat_log(struct mbuf *m)
{
    int sctp_cwnd_log_at;
    struct sctp_cwnd_log_req *req;
    size_t size_limit;
    int num, i, at, cnt_out = 0;

    if (m == NULL)
        return (EINVAL);

    size_limit = (SCTP_BUF_LEN(m) - sizeof(struct sctp_cwnd_log_req));
    if (size_limit < sizeof(struct sctp_cwnd_log)) {
        return (EINVAL);
    }
    sctp_cwnd_log_at = global_sctp_cwnd_log_at;
    req = mtod(m, struct sctp_cwnd_log_req *);
    num = size_limit / sizeof(struct sctp_cwnd_log);
    if (global_sctp_cwnd_log_rolled) {
        req->num_in_log = SCTP_STAT_LOG_SIZE;
    } else {
        req->num_in_log = sctp_cwnd_log_at;
        /*
         * if the log has not rolled, we don't let you have old
         * data.
         */
        if (req->end_at > sctp_cwnd_log_at) {
            req->end_at = sctp_cwnd_log_at;
        }
    }
    if ((num < SCTP_STAT_LOG_SIZE) &&
        ((global_sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) {
        /* we can't return all of it */
        if (((req->start_at == 0) && (req->end_at == 0)) ||
            (req->start_at >= SCTP_STAT_LOG_SIZE) ||
            (req->end_at >= SCTP_STAT_LOG_SIZE)) {
            /* No user request or user is wacked. */
            req->num_ret = num;
            req->end_at = sctp_cwnd_log_at - 1;
            if ((sctp_cwnd_log_at - num) < 0) {
                int cc;

                cc = num - sctp_cwnd_log_at;
                req->start_at = SCTP_STAT_LOG_SIZE - cc;
            } else {
                req->start_at = sctp_cwnd_log_at - num;
            }
        } else {
            /* a user request */
            int cc;

            if (req->start_at > req->end_at) {
                cc = (SCTP_STAT_LOG_SIZE - req->start_at) +
                    (req->end_at + 1);
            } else {
                cc = (req->end_at - req->start_at) + 1;
            }
            if (cc < num) {
                num = cc;
            }
            req->num_ret = num;
        }
    } else {
        /* We can return all of it */
        req->start_at = 0;
        req->end_at = sctp_cwnd_log_at - 1;
        req->num_ret = sctp_cwnd_log_at;
    }
#ifdef INVARIANTS
    if (req->num_ret > num) {
        panic("Bad statlog get?");
    }
#endif
    for (i = 0, at = req->start_at; i < req->num_ret; i++) {
        req->log[i] = sctp_clog[at];
        cnt_out++;
        at++;
        if (at >= SCTP_STAT_LOG_SIZE)
            at = 0;
    }
    SCTP_BUF_LEN(m) = (cnt_out * sizeof(struct sctp_cwnd_log)) + sizeof(struct sctp_cwnd_log_req);
    return (0);
}

#endif
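/*
 * Audit trail (SCTP_AUDITING_ENABLED): sctp_audit_data[] is a ring of
 * two-byte records written by sctp_audit_log() and by the consistency
 * checks in sctp_auditing(); sctp_print_audit_report() dumps the ring in
 * order, oldest entries first.
 */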
#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static
void
sctp_print_audit_report(void)
{
    int i;
    int cnt;

    cnt = 0;
    for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
        if ((sctp_audit_data[i][0] == 0xe0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            cnt = 0;
            printf("\n");
        } else if (sctp_audit_data[i][0] == 0xf0) {
            cnt = 0;
            printf("\n");
        } else if ((sctp_audit_data[i][0] == 0xc0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            printf("\n");
            cnt = 0;
        }
        printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
            (uint32_t) sctp_audit_data[i][1]);
        cnt++;
        if ((cnt % 14) == 0)
            printf("\n");
    }
    for (i = 0; i < sctp_audit_indx; i++) {
        if ((sctp_audit_data[i][0] == 0xe0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            cnt = 0;
            printf("\n");
        } else if (sctp_audit_data[i][0] == 0xf0) {
            cnt = 0;
            printf("\n");
        } else if ((sctp_audit_data[i][0] == 0xc0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            printf("\n");
            cnt = 0;
        }
        printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
            (uint32_t) sctp_audit_data[i][1]);
        cnt++;
        if ((cnt % 14) == 0)
            printf("\n");
    }
    printf("\n");
}
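/*
 * sctp_auditing() cross-checks the association's bookkeeping against the
 * sent queue: the retransmit count, the total bytes in flight and the
 * per-destination flight sizes. Any mismatch is recorded in the audit
 * ring, the counters are corrected in place and a report is printed.
 */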
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    int resend_cnt, tot_out, rep, tot_book_cnt;
    struct sctp_nets *lnet;
    struct sctp_tmit_chunk *chk;

    sctp_audit_data[sctp_audit_indx][0] = 0xAA;
    sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
    if (inp == NULL) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0x01;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        return;
    }
    if (stcb == NULL) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0x02;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        return;
    }
    sctp_audit_data[sctp_audit_indx][0] = 0xA1;
    sctp_audit_data[sctp_audit_indx][1] =
        (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
    rep = 0;
    tot_book_cnt = 0;
    resend_cnt = tot_out = 0;
    TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
        if (chk->sent == SCTP_DATAGRAM_RESEND) {
            resend_cnt++;
        } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
            tot_out += chk->book_size;
            tot_book_cnt++;
        }
    }
    if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA1;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        printf("resend_cnt:%d asoc-tot:%d\n",
            resend_cnt, stcb->asoc.sent_queue_retran_cnt);
        rep = 1;
        stcb->asoc.sent_queue_retran_cnt = resend_cnt;
        sctp_audit_data[sctp_audit_indx][0] = 0xA2;
        sctp_audit_data[sctp_audit_indx][1] =
            (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
    }
    if (tot_out != stcb->asoc.total_flight) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA2;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        printf("tot_flt:%d asoc_tot:%d\n", tot_out,
            (int)stcb->asoc.total_flight);
        stcb->asoc.total_flight = tot_out;
    }
    if (tot_book_cnt != stcb->asoc.total_flight_count) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA5;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        printf("tot_flt_book:%d\n", tot_book_cnt);

        stcb->asoc.total_flight_count = tot_book_cnt;
    }
    tot_out = 0;
    TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
        tot_out += lnet->flight_size;
    }
    if (tot_out != stcb->asoc.total_flight) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA3;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        printf("real flight:%d net total was %d\n",
            stcb->asoc.total_flight, tot_out);
        /* now corrective action */
        TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
            tot_out = 0;
            TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
                if ((chk->whoTo == lnet) &&
                    (chk->sent < SCTP_DATAGRAM_RESEND)) {
                    tot_out += chk->book_size;
                }
            }
            if (lnet->flight_size != tot_out) {
                printf("net:%x flight was %d corrected to %d\n",
                    (uint32_t) lnet, lnet->flight_size, tot_out);
                lnet->flight_size = tot_out;
            }
        }
    }
    if (rep) {
        sctp_print_audit_report();
    }
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

    sctp_audit_data[sctp_audit_indx][0] = ev;
    sctp_audit_data[sctp_audit_indx][1] = fd;
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
}

#endif

/*
 * A list of sizes based on typical MTUs, used only if the next hop's size
 * is not returned.
 */
static int sctp_mtu_sizes[] = {
    68,
    296,
    508,
    512,
    544,
    576,
    1006,
    1492,
    1500,
    1536,
    2002,
    2048,
    4352,
    4464,
    8166,
    17914,
    32000,
    65535
};

void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
    struct sctp_association *asoc;
    struct sctp_nets *net;

    asoc = &stcb->asoc;

    SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
    SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
    SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
    SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
    SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
    SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
        SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
    }
}
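/*
 * find_next_best_mtu() returns the largest entry in sctp_mtu_sizes[] that
 * does not exceed the size of the datagram that failed to be sent; e.g. a
 * 1400-byte datagram maps to 1006, since the next entry (1492) is already
 * too large.
 */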
int
find_next_best_mtu(int totsz)
{
    int i, perfer;

    /*
     * if we are in here we must find the next best fit based on the
     * size of the dg that failed to be sent.
     */
    perfer = 0;
    for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
        if (totsz < sctp_mtu_sizes[i]) {
            perfer = i - 1;
            if (perfer < 0)
                perfer = 0;
            break;
        }
    }
    return (sctp_mtu_sizes[perfer]);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
    /*
     * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
     * our counter. The result becomes our good random numbers and we
     * then set up to give these out. Note that we do no locking to
     * protect this. This is ok, since if competing folks call this we
     * will get more gobbledygook in the random store, which is what we
     * want. There is a danger that two guys will use the same random
     * numbers, but that's ok too since that is random as well :->
     */
    m->store_at = 0;
    sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
        sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
        sizeof(m->random_counter), (uint8_t *) m->random_store);
    m->random_counter++;
}
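/*
 * sctp_select_initial_TSN() hands out 32-bit values from the HMAC-filled
 * random store four bytes at a time, refilling the store (and bumping the
 * counter that feeds the HMAC) whenever it runs dry. When
 * initial_sequence_debug is non-zero, predictable monotonically increasing
 * values are handed out instead, which is useful for debugging.
 */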
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *m)
{
    /*
     * A true implementation should use a random selection process to
     * get the initial stream sequence number, using RFC1750 as a good
     * guideline.
     */
    uint32_t x, *xp;
    uint8_t *p;

    if (m->initial_sequence_debug != 0) {
        uint32_t ret;

        ret = m->initial_sequence_debug;
        m->initial_sequence_debug++;
        return (ret);
    }
    if ((m->store_at + sizeof(u_long)) > SCTP_SIGNATURE_SIZE) {
        /* Refill the random store */
        sctp_fill_random_store(m);
    }
    p = &m->random_store[(int)m->store_at];
    xp = (uint32_t *) p;
    x = *xp;
    m->store_at += sizeof(uint32_t);
    return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *m)
{
    u_long x, not_done;
    struct timeval now;

    SCTP_GETTIME_TIMEVAL(&now);
    not_done = 1;
    while (not_done) {
        x = sctp_select_initial_TSN(&m->sctp_ep);
        if (x == 0) {
            /* we never use 0 */
            continue;
        }
        if (sctp_is_vtag_good(m, x, &now)) {
            not_done = 0;
        }
    }
    return (x);
}
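/*
 * sctp_init_asoc() initializes a freshly allocated association: it picks
 * (or validates) the verification tag, seeds the initial TSN and the
 * sequence counters derived from it, copies the endpoint defaults (RTO
 * bounds, burst limit, cookie life, stream counts), and allocates the
 * outbound stream array and the TSN mapping array. It returns ENOMEM if
 * an allocation fails or if the requested override tag cannot be used.
 */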
int
sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
    int for_a_init, uint32_t override_tag)
{
    /*
     * Anything set to zero is taken care of by the allocation routine's
     * bzero.
     */

    /*
     * Up front select what scoping to apply on addresses I tell my
     * peer. Not sure what to do with these right now; we will need to
     * come up with a way to set them. We may need to pass them through
     * from the caller in the sctp_aloc_assoc() function.
     */
    int i;

    /* init all variables to a known value. */
    asoc->state = SCTP_STATE_INUSE;
    asoc->max_burst = m->sctp_ep.max_burst;
    asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
    asoc->cookie_life = m->sctp_ep.def_cookie_life;
    asoc->sctp_cmt_on_off = (uint8_t) sctp_cmt_on_off;
#ifdef AF_INET
    asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
#else
    asoc->default_tos = 0;
#endif

#ifdef AF_INET6
    asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
#else
    asoc->default_flowlabel = 0;
#endif
    if (override_tag) {
        struct timeval now;

        SCTP_GETTIME_TIMEVAL(&now);
        if (sctp_is_vtag_good(m, override_tag, &now)) {
            asoc->my_vtag = override_tag;
        } else {
            return (ENOMEM);
        }
    } else {
        asoc->my_vtag = sctp_select_a_tag(m);
    }
    /* Get the nonce tags */
    asoc->my_vtag_nonce = sctp_select_a_tag(m);
    asoc->peer_vtag_nonce = sctp_select_a_tag(m);

    if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
        asoc->hb_is_disabled = 1;
    else
        asoc->hb_is_disabled = 0;

    asoc->refcnt = 0;
    asoc->assoc_up_sent = 0;
    asoc->assoc_id = asoc->my_vtag;
    asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
        sctp_select_initial_TSN(&m->sctp_ep);
    /* we are optimistic here */
    asoc->peer_supports_pktdrop = 1;

    asoc->sent_queue_retran_cnt = 0;

    /* for CMT */
    asoc->last_net_data_came_from = NULL;

    /* This will need to be adjusted */
    asoc->last_cwr_tsn = asoc->init_seq_number - 1;
    asoc->last_acked_seq = asoc->init_seq_number - 1;
    asoc->advanced_peer_ack_point = asoc->last_acked_seq;
    asoc->asconf_seq_in = asoc->last_acked_seq;

    /* here we are different, we hold the next one we expect */
    asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

    asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
    asoc->initial_rto = m->sctp_ep.initial_rto;

    asoc->max_init_times = m->sctp_ep.max_init_times;
    asoc->max_send_times = m->sctp_ep.max_send_times;
    asoc->def_net_failure = m->sctp_ep.def_net_failure;
    asoc->free_chunk_cnt = 0;

    asoc->iam_blocking = 0;
    /* ECN Nonce initialization */
    asoc->context = m->sctp_context;
    asoc->def_send = m->def_send;
    asoc->ecn_nonce_allowed = 0;
    asoc->receiver_nonce_sum = 1;
    asoc->nonce_sum_expect_base = 1;
    asoc->nonce_sum_check = 1;
    asoc->nonce_resync_tsn = 0;
    asoc->nonce_wait_for_ecne = 0;
    asoc->nonce_wait_tsn = 0;
    asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
    asoc->pr_sctp_cnt = 0;
    asoc->total_output_queue_size = 0;

    if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
        struct in6pcb *inp6;

        /* It's a V6 socket */
        inp6 = (struct in6pcb *)m;
        asoc->ipv6_addr_legal = 1;
        /* Now look at the binding flag to see if V4 will be legal */
        if (SCTP_IPV6_V6ONLY(inp6) == 0) {
            asoc->ipv4_addr_legal = 1;
        } else {
            /* V4 addresses are NOT legal on the association */
            asoc->ipv4_addr_legal = 0;
        }
    } else {
        /* It's a V4 socket, no V6 */
        asoc->ipv4_addr_legal = 1;
        asoc->ipv6_addr_legal = 0;
    }

    asoc->my_rwnd = max(m->sctp_socket->so_rcv.sb_hiwat, SCTP_MINIMAL_RWND);
    asoc->peers_rwnd = m->sctp_socket->so_rcv.sb_hiwat;

    asoc->smallest_mtu = m->sctp_frag_point;
    asoc->minrto = m->sctp_ep.sctp_minrto;
    asoc->maxrto = m->sctp_ep.sctp_maxrto;

    asoc->locked_on_sending = NULL;
    asoc->stream_locked_on = 0;
    asoc->ecn_echo_cnt_onq = 0;
    asoc->stream_locked = 0;

    LIST_INIT(&asoc->sctp_local_addr_list);
    TAILQ_INIT(&asoc->nets);
    TAILQ_INIT(&asoc->pending_reply_queue);
    asoc->last_asconf_ack_sent = NULL;
    /* Set up to fill the hb random cache at first HB */
    asoc->hb_random_idx = 4;

    asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;

    /*
     * Now the stream parameters, here we allocate space for all streams
     * that we request by default.
     */
    asoc->streamoutcnt = asoc->pre_open_streams =
        m->sctp_ep.pre_open_stream_count;
    SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
        asoc->streamoutcnt * sizeof(struct sctp_stream_out),
        "StreamsOut");
    if (asoc->strmout == NULL) {
        /* big trouble no memory */
        return (ENOMEM);
    }
    for (i = 0; i < asoc->streamoutcnt; i++) {
        /*
         * inbound side must be set to 0xffff, also NOTE when we get
         * the INIT-ACK back (for INIT sender) we MUST reduce the
         * count (streamoutcnt) but first check if we sent to any of
         * the upper streams that were dropped (if some were). Those
         * that were dropped must be notified to the upper layer as
         * failed to send.
         */
        asoc->strmout[i].next_sequence_sent = 0x0;
        TAILQ_INIT(&asoc->strmout[i].outqueue);
        asoc->strmout[i].stream_no = i;
        asoc->strmout[i].last_msg_incomplete = 0;
        asoc->strmout[i].next_spoke.tqe_next = 0;
        asoc->strmout[i].next_spoke.tqe_prev = 0;
    }
    /* Now the mapping array */
    asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
    SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
        "MappingArray");
    if (asoc->mapping_array == NULL) {
        SCTP_FREE(asoc->strmout);
        return (ENOMEM);
    }
    memset(asoc->mapping_array, 0, asoc->mapping_array_size);
    /* Now the init of the other outqueues */
    TAILQ_INIT(&asoc->free_chunks);
    TAILQ_INIT(&asoc->free_strmoq);
    TAILQ_INIT(&asoc->out_wheel);
    TAILQ_INIT(&asoc->control_send_queue);
    TAILQ_INIT(&asoc->send_queue);
    TAILQ_INIT(&asoc->sent_queue);
    TAILQ_INIT(&asoc->reasmqueue);
    TAILQ_INIT(&asoc->resetHead);
    asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
    TAILQ_INIT(&asoc->asconf_queue);
    /* authentication fields */
    asoc->authinfo.random = NULL;
    asoc->authinfo.assoc_key = NULL;
    asoc->authinfo.assoc_keyid = 0;
    asoc->authinfo.recv_key = NULL;
    asoc->authinfo.recv_keyid = 0;
    LIST_INIT(&asoc->shared_keys);

    return (0);
}

int
sctp_expand_mapping_array(struct sctp_association *asoc)
{
    /* mapping array needs to grow */
    uint8_t *new_array;
    uint16_t new_size;

    new_size = asoc->mapping_array_size + SCTP_MAPPING_ARRAY_INCR;
    SCTP_MALLOC(new_array, uint8_t *, new_size, "MappingArray");
    if (new_array == NULL) {
        /* can't get more, forget it */
        printf("No memory for expansion of SCTP mapping array %d\n",
            new_size);
        return (-1);
    }
    memset(new_array, 0, new_size);
    memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
    SCTP_FREE(asoc->mapping_array);
    asoc->mapping_array = new_array;
    asoc->mapping_array_size = new_size;
    return (0);
}

extern unsigned int sctp_early_fr_msec;

static void
sctp_handle_addr_wq(void)
{
    /* deal with the ADDR wq from the rtsock calls */
    struct sctp_laddr *wi;

    SCTP_IPI_ADDR_LOCK();
    wi = LIST_FIRST(&sctppcbinfo.addr_wq);
    if (wi == NULL) {
        SCTP_IPI_ADDR_UNLOCK();
        return;
    }
    LIST_REMOVE(wi, sctp_nxt_addr);
    if (!LIST_EMPTY(&sctppcbinfo.addr_wq)) {
        sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
            (struct sctp_inpcb *)NULL,
            (struct sctp_tcb *)NULL,
            (struct sctp_nets *)NULL);
    }
    SCTP_IPI_ADDR_UNLOCK();
    if (wi->action == RTM_ADD) {
        sctp_add_ip_address(wi->ifa);
    } else if (wi->action == RTM_DELETE) {
        sctp_delete_ip_address(wi->ifa);
    }
    IFAFREE(wi->ifa);
    SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, wi);
    SCTP_DECR_LADDR_COUNT();
}
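/*
 * sctp_timeout_handler() is the common callout entry point for every SCTP
 * timer type. It validates the timer (self pointer, type, and that the
 * callout is still active), takes the inp/tcb references and locks it
 * needs, dispatches to the per-type handler, and then pushes out any
 * chunks the handler queued before dropping the references again.
 */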
void
sctp_timeout_handler(void *t)
{
    struct sctp_inpcb *inp;
    struct sctp_tcb *stcb;
    struct sctp_nets *net;
    struct sctp_timer *tmr;
    int did_output;
    struct sctp_iterator *it = NULL;

    tmr = (struct sctp_timer *)t;
    inp = (struct sctp_inpcb *)tmr->ep;
    stcb = (struct sctp_tcb *)tmr->tcb;
    net = (struct sctp_nets *)tmr->net;
    did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
    sctp_audit_log(0xF0, (uint8_t) tmr->type);
    sctp_auditing(3, inp, stcb, net);
#endif

    /* sanity checks... */
    if (tmr->self != (void *)tmr) {
        /*
         * printf("Stale SCTP timer fired (%p), ignoring...\n",
         * tmr);
         */
        return;
    }
    tmr->stopped_from = 0xa001;
    if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
        /*
         * printf("SCTP timer fired with invalid type: 0x%x\n",
         * tmr->type);
         */
        return;
    }
    tmr->stopped_from = 0xa002;
    if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
        return;
    }
    /* if this is an iterator timeout, get the struct and clear inp */
    tmr->stopped_from = 0xa003;
    if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
        it = (struct sctp_iterator *)inp;
        inp = NULL;
    }
    if (inp) {
        SCTP_INP_INCR_REF(inp);
        if ((inp->sctp_socket == 0) &&
            ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
            (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
            (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
            (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
            (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
            ) {
            SCTP_INP_DECR_REF(inp);
            return;
        }
    }
    tmr->stopped_from = 0xa004;
    if (stcb) {
        if (stcb->asoc.state == 0) {
            if (inp) {
                SCTP_INP_DECR_REF(inp);
            }
            return;
        }
    }
    tmr->stopped_from = 0xa005;
#ifdef SCTP_DEBUG
    if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
        printf("Timer type %d goes off\n", tmr->type);
    }
#endif /* SCTP_DEBUG */
    if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
        if (inp) {
            SCTP_INP_DECR_REF(inp);
        }
        return;
    }
    tmr->stopped_from = 0xa006;

    if (stcb) {
        atomic_add_int(&stcb->asoc.refcnt, 1);
        SCTP_TCB_LOCK(stcb);
        atomic_add_int(&stcb->asoc.refcnt, -1);
    }
    /* record in stopped_from what timeout occurred */
    tmr->stopped_from = tmr->type;

    /* mark as being serviced now */
    if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
        /*
         * Callout has been rescheduled.
         */
        goto get_out;
    }
    if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
        /*
         * Not active, so no action.
         */
        goto get_out;
    }
    SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

    /* call the handler for the appropriate timer type */
    switch (tmr->type) {
    case SCTP_TIMER_TYPE_ADDR_WQ:
        sctp_handle_addr_wq();
        break;
    case SCTP_TIMER_TYPE_ITERATOR:
        SCTP_STAT_INCR(sctps_timoiterator);
        sctp_iterator_timer(it);
        break;
    case SCTP_TIMER_TYPE_SEND:
        SCTP_STAT_INCR(sctps_timodata);
        stcb->asoc.num_send_timers_up--;
        if (stcb->asoc.num_send_timers_up < 0) {
            stcb->asoc.num_send_timers_up = 0;
        }
        if (sctp_t3rxt_timer(inp, stcb, net)) {
            /* no need to unlock on tcb, it's gone */
            goto out_decr;
        }
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3);
        if ((stcb->asoc.num_send_timers_up == 0) &&
            (stcb->asoc.sent_queue_cnt > 0)
            ) {
            struct sctp_tmit_chunk *chk;

            /*
             * safeguard. If there are chunks on the sent queue
             * but no timers running, something is wrong... so
             * we start a timer on the first chunk on the send
             * queue on whatever net it is sent to.
             */
            chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
            sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
                chk->whoTo);
        }
        break;
    case SCTP_TIMER_TYPE_INIT:
        SCTP_STAT_INCR(sctps_timoinit);
        if (sctp_t1init_timer(inp, stcb, net)) {
            /* no need to unlock on tcb, it's gone */
            goto out_decr;
        }
        /* We do output but not here */
        did_output = 0;
        break;
    case SCTP_TIMER_TYPE_RECV:
        SCTP_STAT_INCR(sctps_timosack);
        sctp_send_sack(stcb);
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR);
        break;
    case SCTP_TIMER_TYPE_SHUTDOWN:
        if (sctp_shutdown_timer(inp, stcb, net)) {
            /* no need to unlock on tcb, it's gone */
            goto out_decr;
        }
        SCTP_STAT_INCR(sctps_timoshutdown);
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR);
        break;
    case SCTP_TIMER_TYPE_HEARTBEAT:
        {
            struct sctp_nets *net;
            int cnt_of_unconf = 0;

            SCTP_STAT_INCR(sctps_timoheartbeat);
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
                    (net->dest_state & SCTP_ADDR_REACHABLE)) {
                    cnt_of_unconf++;
                }
            }
            if (cnt_of_unconf == 0) {
                if (sctp_heartbeat_timer(inp, stcb, net, cnt_of_unconf)) {
                    /* no need to unlock on tcb, it's gone */
                    goto out_decr;
                }
            }
#ifdef SCTP_AUDITING_ENABLED
            sctp_auditing(4, inp, stcb, net);
#endif
            sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
                stcb, net);
            sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR);
        }
        break;
    case SCTP_TIMER_TYPE_COOKIE:
        if (sctp_cookie_timer(inp, stcb, net)) {
            /* no need to unlock on tcb, it's gone */
            goto out_decr;
        }
        SCTP_STAT_INCR(sctps_timocookie);
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        /*
         * We consider T3 and Cookie timer pretty much the same with
         * respect to where from in chunk_output.
         */
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3);
        break;
    case SCTP_TIMER_TYPE_NEWCOOKIE:
        {
            struct timeval tv;
            int i, secret;

            SCTP_STAT_INCR(sctps_timosecret);
            SCTP_GETTIME_TIMEVAL(&tv);
            SCTP_INP_WLOCK(inp);
            inp->sctp_ep.time_of_secret_change = tv.tv_sec;
            inp->sctp_ep.last_secret_number =
                inp->sctp_ep.current_secret_number;
            inp->sctp_ep.current_secret_number++;
            if (inp->sctp_ep.current_secret_number >=
                SCTP_HOW_MANY_SECRETS) {
                inp->sctp_ep.current_secret_number = 0;
            }
            secret = (int)inp->sctp_ep.current_secret_number;
            for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
                inp->sctp_ep.secret_key[secret][i] =
                    sctp_select_initial_TSN(&inp->sctp_ep);
            }
            SCTP_INP_WUNLOCK(inp);
            sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
        }
        did_output = 0;
        break;
    case SCTP_TIMER_TYPE_PATHMTURAISE:
        SCTP_STAT_INCR(sctps_timopathmtu);
        sctp_pathmtu_timer(inp, stcb, net);
        did_output = 0;
        break;
    case SCTP_TIMER_TYPE_SHUTDOWNACK:
        if (sctp_shutdownack_timer(inp, stcb, net)) {
            /* no need to unlock on tcb, it's gone */
            goto out_decr;
        }
        SCTP_STAT_INCR(sctps_timoshutdownack);
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR);
        break;
    case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
        SCTP_STAT_INCR(sctps_timoshutdownguard);
        sctp_abort_an_association(inp, stcb,
            SCTP_SHUTDOWN_GUARD_EXPIRES, NULL);
        /* no need to unlock on tcb, it's gone */
        goto out_decr;
        break;

    case SCTP_TIMER_TYPE_STRRESET:
        if (sctp_strreset_timer(inp, stcb, net)) {
            /* no need to unlock on tcb, it's gone */
            goto out_decr;
        }
        SCTP_STAT_INCR(sctps_timostrmrst);
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR);
        break;
    case SCTP_TIMER_TYPE_EARLYFR:
        /* Need to do FR of things for net */
        SCTP_STAT_INCR(sctps_timoearlyfr);
        sctp_early_fr_timer(inp, stcb, net);
        break;
    case SCTP_TIMER_TYPE_ASCONF:
        if (sctp_asconf_timer(inp, stcb, net)) {
            /* no need to unlock on tcb, it's gone */
            goto out_decr;
        }
        SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR);
        break;

    case SCTP_TIMER_TYPE_AUTOCLOSE:
        SCTP_STAT_INCR(sctps_timoautoclose);
        sctp_autoclose_timer(inp, stcb, net);
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR);
        did_output = 0;
        break;
    case SCTP_TIMER_TYPE_ASOCKILL:
        SCTP_STAT_INCR(sctps_timoassockill);
        /* Can we free it yet? */
        SCTP_INP_DECR_REF(inp);
        sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
        sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
        /*
         * free asoc, always unlocks (or destroys) so prevent
         * duplicate unlock or unlock of a free mtx :-0
         */
        stcb = NULL;
        goto out_no_decr;
        break;
    case SCTP_TIMER_TYPE_INPKILL:
        SCTP_STAT_INCR(sctps_timoinpkill);
        /*
         * special case, take away our increment since WE are the
         * killer
         */
        SCTP_INP_DECR_REF(inp);
        sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
        sctp_inpcb_free(inp, 1, 0);
        goto out_no_decr;
        break;
    default:
#ifdef SCTP_DEBUG
        if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
            printf("sctp_timeout_handler:unknown timer %d\n",
                tmr->type);
        }
#endif /* SCTP_DEBUG */
        break;
    }
#ifdef SCTP_AUDITING_ENABLED
    sctp_audit_log(0xF1, (uint8_t) tmr->type);
    if (inp)
        sctp_auditing(5, inp, stcb, net);
#endif
    if ((did_output) && stcb) {
        /*
         * Now we need to clean up the control chunk chain if an
         * ECNE is on it. It must be marked as UNSENT again so next
         * call will continue to send it until such time that we get
         * a CWR, to remove it. It is, however, less likely that we
         * will find an ECN echo on the chain though.
         */
        sctp_fix_ecn_echo(&stcb->asoc);
    }
get_out:
    if (stcb) {
        SCTP_TCB_UNLOCK(stcb);
    }
out_decr:
    if (inp) {
        SCTP_INP_DECR_REF(inp);
    }
out_no_decr:

#ifdef SCTP_DEBUG
    if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
        printf("Timer now complete (type %d)\n", tmr->type);
    }
#endif /* SCTP_DEBUG */
    if (inp) {
    }
}
int
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    int to_ticks;
    struct sctp_timer *tmr;

    if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
        return (EFAULT);

    to_ticks = 0;

    tmr = NULL;
    if (stcb) {
        SCTP_TCB_LOCK_ASSERT(stcb);
    }
    switch (t_type) {
    case SCTP_TIMER_TYPE_ADDR_WQ:
        /* Only 1 tick away :-) */
        tmr = &sctppcbinfo.addr_wq_timer;
        to_ticks = 1;
        break;
    case SCTP_TIMER_TYPE_ITERATOR:
        {
            struct sctp_iterator *it;

            it = (struct sctp_iterator *)inp;
            tmr = &it->tmr;
            to_ticks = SCTP_ITERATOR_TICKS;
        }
        break;
    case SCTP_TIMER_TYPE_SEND:
        /* Here we use the RTO timer */
        {
            int rto_val;

            if ((stcb == NULL) || (net == NULL)) {
                return (EFAULT);
            }
            tmr = &net->rxt_timer;
            if (net->RTO == 0) {
                rto_val = stcb->asoc.initial_rto;
            } else {
                rto_val = net->RTO;
            }
            to_ticks = MSEC_TO_TICKS(rto_val);
        }
        break;
    case SCTP_TIMER_TYPE_INIT:
        /*
         * Here we use the INIT timer default, usually about 1
         * minute.
         */
        if ((stcb == NULL) || (net == NULL)) {
            return (EFAULT);
        }
        tmr = &net->rxt_timer;
        if (net->RTO == 0) {
            to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
        } else {
            to_ticks = MSEC_TO_TICKS(net->RTO);
        }
        break;
    case SCTP_TIMER_TYPE_RECV:
        /*
         * Here we use the Delayed-Ack timer value from the inp,
         * usually about 200ms.
         */
        if (stcb == NULL) {
            return (EFAULT);
        }
        tmr = &stcb->asoc.dack_timer;
        to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
        break;
    case SCTP_TIMER_TYPE_SHUTDOWN:
        /* Here we use the RTO of the destination. */
        if ((stcb == NULL) || (net == NULL)) {
            return (EFAULT);
        }
        if (net->RTO == 0) {
            to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
        } else {
            to_ticks = MSEC_TO_TICKS(net->RTO);
        }
        tmr = &net->rxt_timer;
        break;
    case SCTP_TIMER_TYPE_HEARTBEAT:
        /*
         * The net is used here so that we can add in the RTO, even
         * though we use a different timer. We also add the HB timer
         * PLUS a random jitter.
         */
        if (stcb == NULL) {
            return (EFAULT);
        } {
            uint32_t rndval;
            uint8_t this_random;
            int cnt_of_unconf = 0;
            struct sctp_nets *lnet;

            TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
                if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
                    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
                    cnt_of_unconf++;
                }
            }
            if (cnt_of_unconf) {
                lnet = NULL;
                sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
            }
            if (stcb->asoc.hb_random_idx > 3) {
                rndval = sctp_select_initial_TSN(&inp->sctp_ep);
                memcpy(stcb->asoc.hb_random_values, &rndval,
                    sizeof(stcb->asoc.hb_random_values));
                this_random = stcb->asoc.hb_random_values[0];
                stcb->asoc.hb_random_idx = 0;
                stcb->asoc.hb_ect_randombit = 0;
            } else {
                this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
                stcb->asoc.hb_random_idx++;
                stcb->asoc.hb_ect_randombit = 0;
            }
            /*
             * this_random will be 0 - 255 ms; RTO is in ms.
             */
            if ((stcb->asoc.hb_is_disabled) &&
                (cnt_of_unconf == 0)) {
                return (0);
            }
            if (net) {
                struct sctp_nets *lnet;
                int delay;

                delay = stcb->asoc.heart_beat_delay;
                TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
                    if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
                        ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
                        (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
                        delay = 0;
                    }
                }
                if (net->RTO == 0) {
                    /* Never been checked */
                    to_ticks = this_random + stcb->asoc.initial_rto + delay;
                } else {
                    /* set rto_val to the ms */
                    to_ticks = delay + net->RTO + this_random;
                }
            } else {
                if (cnt_of_unconf) {
                    to_ticks = this_random + stcb->asoc.initial_rto;
                } else {
                    to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
                }
            }
            /*
             * Now we must convert the to_ticks that are now in
             * ms to ticks.
             */
            to_ticks = MSEC_TO_TICKS(to_ticks);
            tmr = &stcb->asoc.hb_timer;
        }
        break;
    case SCTP_TIMER_TYPE_COOKIE:
        /*
         * Here we can use the RTO timer from the network since one
         * RTT was complete. If a retransmission happened then we will
         * be using the RTO initial value.
         */
        if ((stcb == NULL) || (net == NULL)) {
            return (EFAULT);
        }
        if (net->RTO == 0) {
            to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
        } else {
            to_ticks = MSEC_TO_TICKS(net->RTO);
        }
        tmr = &net->rxt_timer;
        break;
    case SCTP_TIMER_TYPE_NEWCOOKIE:
        /*
         * Nothing needed but the endpoint here; usually about 60
         * minutes.
         */
        tmr = &inp->sctp_ep.signature_change;
        to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
        break;
    case SCTP_TIMER_TYPE_ASOCKILL:
        if (stcb == NULL) {
            return (EFAULT);
        }
        tmr = &stcb->asoc.strreset_timer;
        to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
        break;
    case SCTP_TIMER_TYPE_INPKILL:
        /*
         * The inp is set up to die. We re-use the signature_change
         * timer since that has stopped and we are in the GONE
         * state.
         */
        tmr = &inp->sctp_ep.signature_change;
        to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
        break;
    case SCTP_TIMER_TYPE_PATHMTURAISE:
        /*
         * Here we use the value found in the EP for PMTU, usually
         * about 10 minutes.
         */
        if (stcb == NULL) {
            return (EFAULT);
        }
        if (net == NULL) {
            return (EFAULT);
        }
        to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
        tmr = &net->pmtu_timer;
        break;
    case SCTP_TIMER_TYPE_SHUTDOWNACK:
        /* Here we use the RTO of the destination */
        if ((stcb == NULL) || (net == NULL)) {
            return (EFAULT);
        }
        if (net->RTO == 0) {
            to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
        } else {
            to_ticks = MSEC_TO_TICKS(net->RTO);
        }
        tmr = &net->rxt_timer;
        break;
    case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
        /*
         * Here we use the endpoint's shutdown guard timer, usually
         * about 3 minutes.
         */
        if (stcb == NULL) {
            return (EFAULT);
        }
        to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
        tmr = &stcb->asoc.shut_guard_timer;
        break;
    case SCTP_TIMER_TYPE_STRRESET:
        /*
         * Here the timer comes from the inp but its value is from
         * the RTO.
         */
        if ((stcb == NULL) || (net == NULL)) {
            return (EFAULT);
        }
        if (net->RTO == 0) {
            to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
        } else {
            to_ticks = MSEC_TO_TICKS(net->RTO);
        }
        tmr = &stcb->asoc.strreset_timer;
        break;

    case SCTP_TIMER_TYPE_EARLYFR:
        {
            unsigned int msec;

            if ((stcb == NULL) || (net == NULL)) {
                return (EFAULT);
            }
            if (net->flight_size > net->cwnd) {
                /* no need to start */
                return (0);
            }
            SCTP_STAT_INCR(sctps_earlyfrstart);
            if (net->lastsa == 0) {
                /* Hmm no rtt estimate yet? */
                msec = stcb->asoc.initial_rto >> 2;
            } else {
                msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
            }
            if (msec < sctp_early_fr_msec) {
                msec = sctp_early_fr_msec;
                if (msec < SCTP_MINFR_MSEC_FLOOR) {
                    msec = SCTP_MINFR_MSEC_FLOOR;
                }
            }
            to_ticks = MSEC_TO_TICKS(msec);
            tmr = &net->fr_timer;
        }
        break;
    case SCTP_TIMER_TYPE_ASCONF:
        /*
         * Here the timer comes from the inp but its value is from
         * the RTO.
         */
        if ((stcb == NULL) || (net == NULL)) {
            return (EFAULT);
        }
        if (net->RTO == 0) {
            to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
        } else {
            to_ticks = MSEC_TO_TICKS(net->RTO);
        }
        tmr = &stcb->asoc.asconf_timer;
        break;
    case SCTP_TIMER_TYPE_AUTOCLOSE:
        if (stcb == NULL) {
            return (EFAULT);
        }
        if (stcb->asoc.sctp_autoclose_ticks == 0) {
            /*
             * Really an error since stcb is NOT set to
             * autoclose
             */
            return (0);
        }
        to_ticks = stcb->asoc.sctp_autoclose_ticks;
        tmr = &stcb->asoc.autoclose_timer;
        break;
    default:
#ifdef SCTP_DEBUG
        if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
            printf("sctp_timer_start:Unknown timer type %d\n",
                t_type);
        }
#endif /* SCTP_DEBUG */
        return (EFAULT);
        break;
    }
    if ((to_ticks <= 0) || (tmr == NULL)) {
#ifdef SCTP_DEBUG
        if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
            printf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n",
                t_type, to_ticks, tmr);
        }
#endif /* SCTP_DEBUG */
        return (EFAULT);
    }
    if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
        /*
         * We do NOT allow you to have it already running. If it is,
         * we leave the current one up unchanged.
         */
        return (EALREADY);
    }
    /* At this point we can proceed */
    if (t_type == SCTP_TIMER_TYPE_SEND) {
        stcb->asoc.num_send_timers_up++;
    }
    tmr->stopped_from = 0;
    tmr->type = t_type;
    tmr->ep = (void *)inp;
    tmr->tcb = (void *)stcb;
    tmr->net = (void *)net;
    tmr->self = (void *)tmr;
    tmr->ticks = ticks;
    SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
    return (0);
}
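/*
 * sctp_timer_stop() is the inverse of sctp_timer_start(): it resolves
 * which timer structure the given type refers to before stopping it; the
 * from argument encodes the calling location (e.g. SCTP_FROM_SCTPUTIL +
 * SCTP_LOC_1) for debugging.
 */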
1818 */ 1819 if ((stcb == NULL) || (net == NULL)) { 1820 return (EFAULT); 1821 } 1822 if (net->RTO == 0) { 1823 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1824 } else { 1825 to_ticks = MSEC_TO_TICKS(net->RTO); 1826 } 1827 tmr = &stcb->asoc.asconf_timer; 1828 break; 1829 case SCTP_TIMER_TYPE_AUTOCLOSE: 1830 if (stcb == NULL) { 1831 return (EFAULT); 1832 } 1833 if (stcb->asoc.sctp_autoclose_ticks == 0) { 1834 /* 1835 * Really an error since stcb is NOT set to 1836 * autoclose 1837 */ 1838 return (0); 1839 } 1840 to_ticks = stcb->asoc.sctp_autoclose_ticks; 1841 tmr = &stcb->asoc.autoclose_timer; 1842 break; 1843 default: 1844 #ifdef SCTP_DEBUG 1845 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1846 printf("sctp_timer_start:Unknown timer type %d\n", 1847 t_type); 1848 } 1849 #endif /* SCTP_DEBUG */ 1850 return (EFAULT); 1851 break; 1852 }; 1853 if ((to_ticks <= 0) || (tmr == NULL)) { 1854 #ifdef SCTP_DEBUG 1855 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1856 printf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n", 1857 t_type, to_ticks, tmr); 1858 } 1859 #endif /* SCTP_DEBUG */ 1860 return (EFAULT); 1861 } 1862 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 1863 /* 1864 * we do NOT allow you to have it already running. if it is 1865 * we leave the current one up unchanged 1866 */ 1867 return (EALREADY); 1868 } 1869 /* At this point we can proceed */ 1870 if (t_type == SCTP_TIMER_TYPE_SEND) { 1871 stcb->asoc.num_send_timers_up++; 1872 } 1873 tmr->stopped_from = 0; 1874 tmr->type = t_type; 1875 tmr->ep = (void *)inp; 1876 tmr->tcb = (void *)stcb; 1877 tmr->net = (void *)net; 1878 tmr->self = (void *)tmr; 1879 tmr->ticks = ticks; 1880 SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr); 1881 return (0); 1882 } 1883 1884 int 1885 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1886 struct sctp_nets *net, uint32_t from) 1887 { 1888 struct sctp_timer *tmr; 1889 1890 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && 1891 (inp == NULL)) 1892 return (EFAULT); 1893 1894 tmr = NULL; 1895 if (stcb) { 1896 SCTP_TCB_LOCK_ASSERT(stcb); 1897 } 1898 switch (t_type) { 1899 case SCTP_TIMER_TYPE_ADDR_WQ: 1900 tmr = &sctppcbinfo.addr_wq_timer; 1901 break; 1902 case SCTP_TIMER_TYPE_EARLYFR: 1903 if ((stcb == NULL) || (net == NULL)) { 1904 return (EFAULT); 1905 } 1906 tmr = &net->fr_timer; 1907 SCTP_STAT_INCR(sctps_earlyfrstop); 1908 break; 1909 case SCTP_TIMER_TYPE_ITERATOR: 1910 { 1911 struct sctp_iterator *it; 1912 1913 it = (struct sctp_iterator *)inp; 1914 tmr = &it->tmr; 1915 } 1916 break; 1917 case SCTP_TIMER_TYPE_SEND: 1918 if ((stcb == NULL) || (net == NULL)) { 1919 return (EFAULT); 1920 } 1921 tmr = &net->rxt_timer; 1922 break; 1923 case SCTP_TIMER_TYPE_INIT: 1924 if ((stcb == NULL) || (net == NULL)) { 1925 return (EFAULT); 1926 } 1927 tmr = &net->rxt_timer; 1928 break; 1929 case SCTP_TIMER_TYPE_RECV: 1930 if (stcb == NULL) { 1931 return (EFAULT); 1932 } 1933 tmr = &stcb->asoc.dack_timer; 1934 break; 1935 case SCTP_TIMER_TYPE_SHUTDOWN: 1936 if ((stcb == NULL) || (net == NULL)) { 1937 return (EFAULT); 1938 } 1939 tmr = &net->rxt_timer; 1940 break; 1941 case SCTP_TIMER_TYPE_HEARTBEAT: 1942 if (stcb == NULL) { 1943 return (EFAULT); 1944 } 1945 tmr = &stcb->asoc.hb_timer; 1946 break; 1947 case SCTP_TIMER_TYPE_COOKIE: 1948 if ((stcb == NULL) || (net == NULL)) { 1949 return (EFAULT); 1950 } 1951 tmr = &net->rxt_timer; 1952 break; 1953 case SCTP_TIMER_TYPE_NEWCOOKIE: 1954 /* nothing needed but the endpoint here */ 1955 tmr = &inp->sctp_ep.signature_change; 1956 
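	/*
	 * The ((tmr->type != t_type) && tmr->type) check near the end of
	 * this function is what keeps a stop request for one user of this
	 * shared timer from cancelling the other.
	 */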
/* 1957 * We re-use the newcookie timer for the INP kill timer. We 1958 * must assure that we do not kill it by accident. 1959 */ 1960 break; 1961 case SCTP_TIMER_TYPE_ASOCKILL: 1962 /* 1963 * Stop the asoc kill timer. 1964 */ 1965 if (stcb == NULL) { 1966 return (EFAULT); 1967 } 1968 tmr = &stcb->asoc.strreset_timer; 1969 break; 1970 1971 case SCTP_TIMER_TYPE_INPKILL: 1972 /* 1973 * The inp is setup to die. We re-use the signature_chage 1974 * timer since that has stopped and we are in the GONE 1975 * state. 1976 */ 1977 tmr = &inp->sctp_ep.signature_change; 1978 break; 1979 case SCTP_TIMER_TYPE_PATHMTURAISE: 1980 if ((stcb == NULL) || (net == NULL)) { 1981 return (EFAULT); 1982 } 1983 tmr = &net->pmtu_timer; 1984 break; 1985 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1986 if ((stcb == NULL) || (net == NULL)) { 1987 return (EFAULT); 1988 } 1989 tmr = &net->rxt_timer; 1990 break; 1991 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1992 if (stcb == NULL) { 1993 return (EFAULT); 1994 } 1995 tmr = &stcb->asoc.shut_guard_timer; 1996 break; 1997 case SCTP_TIMER_TYPE_STRRESET: 1998 if (stcb == NULL) { 1999 return (EFAULT); 2000 } 2001 tmr = &stcb->asoc.strreset_timer; 2002 break; 2003 case SCTP_TIMER_TYPE_ASCONF: 2004 if (stcb == NULL) { 2005 return (EFAULT); 2006 } 2007 tmr = &stcb->asoc.asconf_timer; 2008 break; 2009 case SCTP_TIMER_TYPE_AUTOCLOSE: 2010 if (stcb == NULL) { 2011 return (EFAULT); 2012 } 2013 tmr = &stcb->asoc.autoclose_timer; 2014 break; 2015 default: 2016 #ifdef SCTP_DEBUG 2017 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 2018 printf("sctp_timer_stop:Unknown timer type %d\n", 2019 t_type); 2020 } 2021 #endif /* SCTP_DEBUG */ 2022 break; 2023 }; 2024 if (tmr == NULL) { 2025 return (EFAULT); 2026 } 2027 if ((tmr->type != t_type) && tmr->type) { 2028 /* 2029 * Ok we have a timer that is under joint use. Cookie timer 2030 * per chance with the SEND timer. We therefore are NOT 2031 * running the timer that the caller wants stopped. So just 2032 * return. 2033 */ 2034 return (0); 2035 } 2036 if (t_type == SCTP_TIMER_TYPE_SEND) { 2037 stcb->asoc.num_send_timers_up--; 2038 if (stcb->asoc.num_send_timers_up < 0) { 2039 stcb->asoc.num_send_timers_up = 0; 2040 } 2041 } 2042 tmr->self = NULL; 2043 tmr->stopped_from = from; 2044 SCTP_OS_TIMER_STOP(&tmr->timer); 2045 return (0); 2046 } 2047 2048 #ifdef SCTP_USE_ADLER32 2049 static uint32_t 2050 update_adler32(uint32_t adler, uint8_t * buf, int32_t len) 2051 { 2052 uint32_t s1 = adler & 0xffff; 2053 uint32_t s2 = (adler >> 16) & 0xffff; 2054 int n; 2055 2056 for (n = 0; n < len; n++, buf++) { 2057 /* s1 = (s1 + buf[n]) % BASE */ 2058 /* first we add */ 2059 s1 = (s1 + *buf); 2060 /* 2061 * now if we need to, we do a mod by subtracting. It seems a 2062 * bit faster since I really will only ever do one subtract 2063 * at the MOST, since buf[n] is a max of 255. 2064 */ 2065 if (s1 >= SCTP_ADLER32_BASE) { 2066 s1 -= SCTP_ADLER32_BASE; 2067 } 2068 /* s2 = (s2 + s1) % BASE */ 2069 /* first we add */ 2070 s2 = (s2 + s1); 2071 /* 2072 * again, it is more efficent (it seems) to subtract since 2073 * the most s2 will ever be is (BASE-1 + BASE-1) in the 2074 * worse case. This would then be (2 * BASE) - 2, which will 2075 * still only do one subtract. On Intel this is much better 2076 * to do this way and avoid the divide. Have not -pg'd on 2077 * sparc. 
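 * Roughly: s1 enters each pass below BASE and *buf is at most 255, so
 * after the add s1 < BASE + 255 < 2 * BASE; s2 enters below BASE and
 * the reduced s1 is below BASE, so s2 is at most 2 * (BASE - 1).  A
 * single conditional subtract therefore brings both sums back under
 * BASE.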
2078 */ 2079 if (s2 >= SCTP_ADLER32_BASE) { 2080 s2 -= SCTP_ADLER32_BASE; 2081 } 2082 } 2083 /* Return the adler32 of the bytes buf[0..len-1] */ 2084 return ((s2 << 16) + s1); 2085 } 2086 2087 #endif 2088 2089 2090 uint32_t 2091 sctp_calculate_len(struct mbuf *m) 2092 { 2093 uint32_t tlen = 0; 2094 struct mbuf *at; 2095 2096 at = m; 2097 while (at) { 2098 tlen += SCTP_BUF_LEN(at); 2099 at = SCTP_BUF_NEXT(at); 2100 } 2101 return (tlen); 2102 } 2103 2104 #if defined(SCTP_WITH_NO_CSUM) 2105 2106 uint32_t 2107 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset) 2108 { 2109 /* 2110 * given a mbuf chain with a packetheader offset by 'offset' 2111 * pointing at a sctphdr (with csum set to 0) go through the chain 2112 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is 2113 * currently Adler32 but will change to CRC32x soon. Also has a side 2114 * bonus calculate the total length of the mbuf chain. Note: if 2115 * offset is greater than the total mbuf length, checksum=1, 2116 * pktlen=0 is returned (ie. no real error code) 2117 */ 2118 if (pktlen == NULL) 2119 return (0); 2120 *pktlen = sctp_calculate_len(m); 2121 return (0); 2122 } 2123 2124 #elif defined(SCTP_USE_INCHKSUM) 2125 2126 #include <machine/in_cksum.h> 2127 2128 uint32_t 2129 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset) 2130 { 2131 /* 2132 * given a mbuf chain with a packetheader offset by 'offset' 2133 * pointing at a sctphdr (with csum set to 0) go through the chain 2134 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is 2135 * currently Adler32 but will change to CRC32x soon. Also has a side 2136 * bonus calculate the total length of the mbuf chain. Note: if 2137 * offset is greater than the total mbuf length, checksum=1, 2138 * pktlen=0 is returned (ie. no real error code) 2139 */ 2140 int32_t tlen = 0; 2141 struct mbuf *at; 2142 uint32_t the_sum, retsum; 2143 2144 at = m; 2145 while (at) { 2146 tlen += SCTP_BUF_LEN(at); 2147 at = SCTP_BUF_NEXT(at); 2148 } 2149 the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset)); 2150 if (pktlen != NULL) 2151 *pktlen = (tlen - offset); 2152 retsum = htons(the_sum); 2153 return (the_sum); 2154 } 2155 2156 #else 2157 2158 uint32_t 2159 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset) 2160 { 2161 /* 2162 * given a mbuf chain with a packetheader offset by 'offset' 2163 * pointing at a sctphdr (with csum set to 0) go through the chain 2164 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is 2165 * currently Adler32 but will change to CRC32x soon. Also has a side 2166 * bonus calculate the total length of the mbuf chain. Note: if 2167 * offset is greater than the total mbuf length, checksum=1, 2168 * pktlen=0 is returned (ie. 
no real error code) 2169 */ 2170 int32_t tlen = 0; 2171 2172 #ifdef SCTP_USE_ADLER32 2173 uint32_t base = 1L; 2174 2175 #else 2176 uint32_t base = 0xffffffff; 2177 2178 #endif /* SCTP_USE_ADLER32 */ 2179 struct mbuf *at; 2180 2181 at = m; 2182 /* find the correct mbuf and offset into mbuf */ 2183 while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) { 2184 offset -= SCTP_BUF_LEN(at); /* update remaining offset 2185 * left */ 2186 at = SCTP_BUF_NEXT(at); 2187 } 2188 while (at != NULL) { 2189 if ((SCTP_BUF_LEN(at) - offset) > 0) { 2190 #ifdef SCTP_USE_ADLER32 2191 base = update_adler32(base, 2192 (unsigned char *)(SCTP_BUF_AT(at, offset)), 2193 (unsigned int)(SCTP_BUF_LEN(at) - offset)); 2194 #else 2195 if ((SCTP_BUF_LEN(at) - offset) < 4) { 2196 /* Use old method if less than 4 bytes */ 2197 base = old_update_crc32(base, 2198 (unsigned char *)(SCTP_BUF_AT(at, offset)), 2199 (unsigned int)(SCTP_BUF_LEN(at) - offset)); 2200 } else { 2201 base = update_crc32(base, 2202 (unsigned char *)(SCTP_BUF_AT(at, offset)), 2203 (unsigned int)(SCTP_BUF_LEN(at) - offset)); 2204 } 2205 #endif /* SCTP_USE_ADLER32 */ 2206 tlen += SCTP_BUF_LEN(at) - offset; 2207 /* we only offset once into the first mbuf */ 2208 } 2209 if (offset) { 2210 if (offset < SCTP_BUF_LEN(at)) 2211 offset = 0; 2212 else 2213 offset -= SCTP_BUF_LEN(at); 2214 } 2215 at = SCTP_BUF_NEXT(at); 2216 } 2217 if (pktlen != NULL) { 2218 *pktlen = tlen; 2219 } 2220 #ifdef SCTP_USE_ADLER32 2221 /* Adler32 */ 2222 base = htonl(base); 2223 #else 2224 /* CRC-32c */ 2225 base = sctp_csum_finalize(base); 2226 #endif 2227 return (base); 2228 } 2229 2230 2231 #endif 2232 2233 void 2234 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2235 struct sctp_association *asoc, uint32_t mtu) 2236 { 2237 /* 2238 * Reset the P-MTU size on this association, this involves changing 2239 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2240 * allow the DF flag to be cleared. 2241 */ 2242 struct sctp_tmit_chunk *chk; 2243 unsigned int eff_mtu, ovh; 2244 2245 asoc->smallest_mtu = mtu; 2246 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2247 ovh = SCTP_MIN_OVERHEAD; 2248 } else { 2249 ovh = SCTP_MIN_V4_OVERHEAD; 2250 } 2251 eff_mtu = mtu - ovh; 2252 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2253 2254 if (chk->send_size > eff_mtu) { 2255 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2256 } 2257 } 2258 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2259 if (chk->send_size > eff_mtu) { 2260 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2261 } 2262 } 2263 } 2264 2265 2266 /* 2267 * given an association and starting time of the current RTT period return 2268 * RTO in number of usecs net should point to the current network 2269 */ 2270 uint32_t 2271 sctp_calculate_rto(struct sctp_tcb *stcb, 2272 struct sctp_association *asoc, 2273 struct sctp_nets *net, 2274 struct timeval *old) 2275 { 2276 /* 2277 * given an association and the starting time of the current RTT 2278 * period (in value1/value2) return RTO in number of usecs. 2279 */ 2280 int calc_time = 0; 2281 int o_calctime; 2282 unsigned int new_rto = 0; 2283 int first_measure = 0; 2284 struct timeval now; 2285 2286 /************************/ 2287 /* 1. 
calculate new RTT */ 2288 /************************/ 2289 /* get the current time */ 2290 SCTP_GETTIME_TIMEVAL(&now); 2291 /* compute the RTT value */ 2292 if ((u_long)now.tv_sec > (u_long)old->tv_sec) { 2293 calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000; 2294 if ((u_long)now.tv_usec > (u_long)old->tv_usec) { 2295 calc_time += (((u_long)now.tv_usec - 2296 (u_long)old->tv_usec) / 1000); 2297 } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) { 2298 /* Borrow 1,000ms from current calculation */ 2299 calc_time -= 1000; 2300 /* Add in the slop over */ 2301 calc_time += ((int)now.tv_usec / 1000); 2302 /* Add in the pre-second ms's */ 2303 calc_time += (((int)1000000 - (int)old->tv_usec) / 1000); 2304 } 2305 } else if ((u_long)now.tv_sec == (u_long)old->tv_sec) { 2306 if ((u_long)now.tv_usec > (u_long)old->tv_usec) { 2307 calc_time = ((u_long)now.tv_usec - 2308 (u_long)old->tv_usec) / 1000; 2309 } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) { 2310 /* impossible .. garbage in nothing out */ 2311 return (((net->lastsa >> 2) + net->lastsv) >> 1); 2312 } else if ((u_long)now.tv_usec == (u_long)old->tv_usec) { 2313 /* 2314 * We have to have 1 usec :-D this must be the 2315 * loopback. 2316 */ 2317 calc_time = 1; 2318 } else { 2319 /* impossible .. garbage in nothing out */ 2320 return (((net->lastsa >> 2) + net->lastsv) >> 1); 2321 } 2322 } else { 2323 /* Clock wrapped? */ 2324 return (((net->lastsa >> 2) + net->lastsv) >> 1); 2325 } 2326 /***************************/ 2327 /* 2. update RTTVAR & SRTT */ 2328 /***************************/ 2329 o_calctime = calc_time; 2330 /* this is Van Jacobson's integer version */ 2331 if (net->RTO) { 2332 calc_time -= (net->lastsa >> 3); 2333 if ((int)net->prev_rtt > o_calctime) { 2334 net->rtt_variance = net->prev_rtt - o_calctime; 2335 /* decreasing */ 2336 net->rto_variance_dir = 0; 2337 } else { 2338 /* increasing */ 2339 net->rtt_variance = o_calctime - net->prev_rtt; 2340 net->rto_variance_dir = 1; 2341 } 2342 #ifdef SCTP_RTTVAR_LOGGING 2343 rto_logging(net, SCTP_LOG_RTTVAR); 2344 #endif 2345 net->prev_rtt = o_calctime; 2346 net->lastsa += calc_time; 2347 if (calc_time < 0) { 2348 calc_time = -calc_time; 2349 } 2350 calc_time -= (net->lastsv >> 2); 2351 net->lastsv += calc_time; 2352 if (net->lastsv == 0) { 2353 net->lastsv = SCTP_CLOCK_GRANULARITY; 2354 } 2355 } else { 2356 /* First RTO measurment */ 2357 net->lastsa = calc_time; 2358 net->lastsv = calc_time >> 1; 2359 first_measure = 1; 2360 net->rto_variance_dir = 1; 2361 net->prev_rtt = o_calctime; 2362 net->rtt_variance = 0; 2363 #ifdef SCTP_RTTVAR_LOGGING 2364 rto_logging(net, SCTP_LOG_INITIAL_RTT); 2365 #endif 2366 } 2367 new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1; 2368 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 2369 (stcb->asoc.sat_network_lockout == 0)) { 2370 stcb->asoc.sat_network = 1; 2371 } else if ((!first_measure) && stcb->asoc.sat_network) { 2372 stcb->asoc.sat_network = 0; 2373 stcb->asoc.sat_network_lockout = 1; 2374 } 2375 /* bound it, per C6/C7 in Section 5.3.1 */ 2376 if (new_rto < stcb->asoc.minrto) { 2377 new_rto = stcb->asoc.minrto; 2378 } 2379 if (new_rto > stcb->asoc.maxrto) { 2380 new_rto = stcb->asoc.maxrto; 2381 } 2382 /* we are now returning the RTT Smoothed */ 2383 return ((uint32_t) new_rto); 2384 } 2385 2386 /* 2387 * return a pointer to a contiguous piece of data from the given mbuf chain 2388 * starting at 'off' for 'len' bytes. If the desired piece spans more than 2389 * one mbuf, a copy is made at 'ptr'. 
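 * ('ptr' here is the caller supplied in_ptr argument below.)  The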
caller must ensure that the buffer size 2390 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 2391 */ 2392 __inline caddr_t 2393 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr) 2394 { 2395 uint32_t count; 2396 uint8_t *ptr; 2397 2398 ptr = in_ptr; 2399 if ((off < 0) || (len <= 0)) 2400 return (NULL); 2401 2402 /* find the desired start location */ 2403 while ((m != NULL) && (off > 0)) { 2404 if (off < SCTP_BUF_LEN(m)) 2405 break; 2406 off -= SCTP_BUF_LEN(m); 2407 m = SCTP_BUF_NEXT(m); 2408 } 2409 if (m == NULL) 2410 return (NULL); 2411 2412 /* is the current mbuf large enough (eg. contiguous)? */ 2413 if ((SCTP_BUF_LEN(m) - off) >= len) { 2414 return (mtod(m, caddr_t)+off); 2415 } else { 2416 /* else, it spans more than one mbuf, so save a temp copy... */ 2417 while ((m != NULL) && (len > 0)) { 2418 count = min(SCTP_BUF_LEN(m) - off, len); 2419 bcopy(mtod(m, caddr_t)+off, ptr, count); 2420 len -= count; 2421 ptr += count; 2422 off = 0; 2423 m = SCTP_BUF_NEXT(m); 2424 } 2425 if ((m == NULL) && (len > 0)) 2426 return (NULL); 2427 else 2428 return ((caddr_t)in_ptr); 2429 } 2430 } 2431 2432 2433 2434 struct sctp_paramhdr * 2435 sctp_get_next_param(struct mbuf *m, 2436 int offset, 2437 struct sctp_paramhdr *pull, 2438 int pull_limit) 2439 { 2440 /* This just provides a typed signature to Peter's Pull routine */ 2441 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 2442 (uint8_t *) pull)); 2443 } 2444 2445 2446 int 2447 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 2448 { 2449 /* 2450 * add padlen bytes of 0 filled padding to the end of the mbuf. If 2451 * padlen is > 3 this routine will fail. 2452 */ 2453 uint8_t *dp; 2454 int i; 2455 2456 if (padlen > 3) { 2457 return (ENOBUFS); 2458 } 2459 if (M_TRAILINGSPACE(m)) { 2460 /* 2461 * The easy way. We hope the majority of the time we hit 2462 * here :) 2463 */ 2464 dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 2465 SCTP_BUF_LEN(m) += padlen; 2466 } else { 2467 /* Hard way we must grow the mbuf */ 2468 struct mbuf *tmp; 2469 2470 tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA); 2471 if (tmp == NULL) { 2472 /* Out of space GAK! we are in big trouble. */ 2473 return (ENOSPC); 2474 } 2475 /* setup and insert in middle */ 2476 SCTP_BUF_NEXT(tmp) = SCTP_BUF_NEXT(m); 2477 SCTP_BUF_LEN(tmp) = padlen; 2478 SCTP_BUF_NEXT(m) = tmp; 2479 dp = mtod(tmp, uint8_t *); 2480 } 2481 /* zero out the pad */ 2482 for (i = 0; i < padlen; i++) { 2483 *dp = 0; 2484 dp++; 2485 } 2486 return (0); 2487 } 2488 2489 int 2490 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 2491 { 2492 /* find the last mbuf in chain and pad it */ 2493 struct mbuf *m_at; 2494 2495 m_at = m; 2496 if (last_mbuf) { 2497 return (sctp_add_pad_tombuf(last_mbuf, padval)); 2498 } else { 2499 while (m_at) { 2500 if (SCTP_BUF_NEXT(m_at) == NULL) { 2501 return (sctp_add_pad_tombuf(m_at, padval)); 2502 } 2503 m_at = SCTP_BUF_NEXT(m_at); 2504 } 2505 } 2506 return (EFAULT); 2507 } 2508 2509 int sctp_asoc_change_wake = 0; 2510 2511 static void 2512 sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb, 2513 uint32_t error, void *data) 2514 { 2515 struct mbuf *m_notify; 2516 struct sctp_assoc_change *sac; 2517 struct sctp_queued_to_read *control; 2518 2519 /* 2520 * First if we are are going down dump everything we can to the 2521 * socket rcv queue. 
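 * If the socket has already been marked gone there is no reader left,
 * so the checks immediately below simply return without queueing
 * anything.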
2522 */ 2523 2524 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 2525 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 2526 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) 2527 ) { 2528 /* If the socket is gone we are out of here */ 2529 return; 2530 } 2531 /* 2532 * For TCP model AND UDP connected sockets we will send an error up 2533 * when an ABORT comes in. 2534 */ 2535 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2536 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2537 (event == SCTP_COMM_LOST)) { 2538 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) 2539 stcb->sctp_socket->so_error = ECONNREFUSED; 2540 else 2541 stcb->sctp_socket->so_error = ECONNRESET; 2542 /* Wake ANY sleepers */ 2543 sorwakeup(stcb->sctp_socket); 2544 sowwakeup(stcb->sctp_socket); 2545 sctp_asoc_change_wake++; 2546 } 2547 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 2548 /* event not enabled */ 2549 return; 2550 } 2551 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA); 2552 if (m_notify == NULL) 2553 /* no space left */ 2554 return; 2555 SCTP_BUF_LEN(m_notify) = 0; 2556 2557 sac = mtod(m_notify, struct sctp_assoc_change *); 2558 sac->sac_type = SCTP_ASSOC_CHANGE; 2559 sac->sac_flags = 0; 2560 sac->sac_length = sizeof(struct sctp_assoc_change); 2561 sac->sac_state = event; 2562 sac->sac_error = error; 2563 /* XXX verify these stream counts */ 2564 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 2565 sac->sac_inbound_streams = stcb->asoc.streamincnt; 2566 sac->sac_assoc_id = sctp_get_associd(stcb); 2567 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change); 2568 SCTP_BUF_NEXT(m_notify) = NULL; 2569 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2570 0, 0, 0, 0, 0, 0, 2571 m_notify); 2572 if (control == NULL) { 2573 /* no memory */ 2574 sctp_m_freem(m_notify); 2575 return; 2576 } 2577 control->length = SCTP_BUF_LEN(m_notify); 2578 /* not that we need this */ 2579 control->tail_mbuf = m_notify; 2580 control->spec_flags = M_NOTIFICATION; 2581 sctp_add_to_readq(stcb->sctp_ep, stcb, 2582 control, 2583 &stcb->sctp_socket->so_rcv, 1); 2584 if (event == SCTP_COMM_LOST) { 2585 /* Wake up any sleeper */ 2586 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 2587 } 2588 } 2589 2590 static void 2591 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 2592 struct sockaddr *sa, uint32_t error) 2593 { 2594 struct mbuf *m_notify; 2595 struct sctp_paddr_change *spc; 2596 struct sctp_queued_to_read *control; 2597 2598 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) 2599 /* event not enabled */ 2600 return; 2601 2602 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA); 2603 if (m_notify == NULL) 2604 return; 2605 SCTP_BUF_LEN(m_notify) = 0; 2606 spc = mtod(m_notify, struct sctp_paddr_change *); 2607 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 2608 spc->spc_flags = 0; 2609 spc->spc_length = sizeof(struct sctp_paddr_change); 2610 if (sa->sa_family == AF_INET) { 2611 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 2612 } else { 2613 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 2614 } 2615 spc->spc_state = state; 2616 spc->spc_error = error; 2617 spc->spc_assoc_id = sctp_get_associd(stcb); 2618 2619 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 2620 SCTP_BUF_NEXT(m_notify) = NULL; 2621 2622 /* append to socket */ 2623 control = sctp_build_readq_entry(stcb, 
stcb->asoc.primary_destination, 2624 0, 0, 0, 0, 0, 0, 2625 m_notify); 2626 if (control == NULL) { 2627 /* no memory */ 2628 sctp_m_freem(m_notify); 2629 return; 2630 } 2631 control->length = SCTP_BUF_LEN(m_notify); 2632 control->spec_flags = M_NOTIFICATION; 2633 /* not that we need this */ 2634 control->tail_mbuf = m_notify; 2635 sctp_add_to_readq(stcb->sctp_ep, stcb, 2636 control, 2637 &stcb->sctp_socket->so_rcv, 1); 2638 } 2639 2640 2641 static void 2642 sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error, 2643 struct sctp_tmit_chunk *chk) 2644 { 2645 struct mbuf *m_notify; 2646 struct sctp_send_failed *ssf; 2647 struct sctp_queued_to_read *control; 2648 int length; 2649 2650 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 2651 /* event not enabled */ 2652 return; 2653 2654 length = sizeof(struct sctp_send_failed) + chk->send_size; 2655 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA); 2656 if (m_notify == NULL) 2657 /* no space left */ 2658 return; 2659 SCTP_BUF_LEN(m_notify) = 0; 2660 ssf = mtod(m_notify, struct sctp_send_failed *); 2661 ssf->ssf_type = SCTP_SEND_FAILED; 2662 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT) 2663 ssf->ssf_flags = SCTP_DATA_UNSENT; 2664 else 2665 ssf->ssf_flags = SCTP_DATA_SENT; 2666 ssf->ssf_length = length; 2667 ssf->ssf_error = error; 2668 /* not exactly what the user sent in, but should be close :) */ 2669 ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number; 2670 ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq; 2671 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 2672 ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype; 2673 ssf->ssf_info.sinfo_context = chk->rec.data.context; 2674 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2675 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2676 SCTP_BUF_NEXT(m_notify) = chk->data; 2677 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed); 2678 2679 /* Steal off the mbuf */ 2680 chk->data = NULL; 2681 /* 2682 * For this case, we check the actual socket buffer, since the assoc 2683 * is going away we don't want to overfill the socket buffer for a 2684 * non-reader 2685 */ 2686 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 2687 sctp_m_freem(m_notify); 2688 return; 2689 } 2690 /* append to socket */ 2691 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2692 0, 0, 0, 0, 0, 0, 2693 m_notify); 2694 if (control == NULL) { 2695 /* no memory */ 2696 sctp_m_freem(m_notify); 2697 return; 2698 } 2699 control->spec_flags = M_NOTIFICATION; 2700 sctp_add_to_readq(stcb->sctp_ep, stcb, 2701 control, 2702 &stcb->sctp_socket->so_rcv, 1); 2703 } 2704 2705 2706 static void 2707 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 2708 struct sctp_stream_queue_pending *sp) 2709 { 2710 struct mbuf *m_notify; 2711 struct sctp_send_failed *ssf; 2712 struct sctp_queued_to_read *control; 2713 int length; 2714 2715 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 2716 /* event not enabled */ 2717 return; 2718 2719 length = sizeof(struct sctp_send_failed) + sp->length; 2720 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA); 2721 if (m_notify == NULL) 2722 /* no space left */ 2723 return; 2724 SCTP_BUF_LEN(m_notify) = 0; 2725 ssf = mtod(m_notify, struct sctp_send_failed *); 2726 ssf->ssf_type = SCTP_SEND_FAILED; 2727 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT) 2728 ssf->ssf_flags = SCTP_DATA_UNSENT; 2729 else 
2730 ssf->ssf_flags = SCTP_DATA_SENT; 2731 ssf->ssf_length = length; 2732 ssf->ssf_error = error; 2733 /* not exactly what the user sent in, but should be close :) */ 2734 ssf->ssf_info.sinfo_stream = sp->stream; 2735 ssf->ssf_info.sinfo_ssn = sp->strseq; 2736 ssf->ssf_info.sinfo_flags = sp->sinfo_flags; 2737 ssf->ssf_info.sinfo_ppid = sp->ppid; 2738 ssf->ssf_info.sinfo_context = sp->context; 2739 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2740 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2741 SCTP_BUF_NEXT(m_notify) = sp->data; 2742 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed); 2743 2744 /* Steal off the mbuf */ 2745 sp->data = NULL; 2746 /* 2747 * For this case, we check the actual socket buffer, since the assoc 2748 * is going away we don't want to overfill the socket buffer for a 2749 * non-reader 2750 */ 2751 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 2752 sctp_m_freem(m_notify); 2753 return; 2754 } 2755 /* append to socket */ 2756 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2757 0, 0, 0, 0, 0, 0, 2758 m_notify); 2759 if (control == NULL) { 2760 /* no memory */ 2761 sctp_m_freem(m_notify); 2762 return; 2763 } 2764 control->spec_flags = M_NOTIFICATION; 2765 sctp_add_to_readq(stcb->sctp_ep, stcb, 2766 control, 2767 &stcb->sctp_socket->so_rcv, 1); 2768 } 2769 2770 2771 2772 static void 2773 sctp_notify_adaptation_layer(struct sctp_tcb *stcb, 2774 uint32_t error) 2775 { 2776 struct mbuf *m_notify; 2777 struct sctp_adaptation_event *sai; 2778 struct sctp_queued_to_read *control; 2779 2780 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) 2781 /* event not enabled */ 2782 return; 2783 2784 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA); 2785 if (m_notify == NULL) 2786 /* no space left */ 2787 return; 2788 SCTP_BUF_LEN(m_notify) = 0; 2789 sai = mtod(m_notify, struct sctp_adaptation_event *); 2790 sai->sai_type = SCTP_ADAPTATION_INDICATION; 2791 sai->sai_flags = 0; 2792 sai->sai_length = sizeof(struct sctp_adaptation_event); 2793 sai->sai_adaptation_ind = error; 2794 sai->sai_assoc_id = sctp_get_associd(stcb); 2795 2796 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 2797 SCTP_BUF_NEXT(m_notify) = NULL; 2798 2799 /* append to socket */ 2800 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2801 0, 0, 0, 0, 0, 0, 2802 m_notify); 2803 if (control == NULL) { 2804 /* no memory */ 2805 sctp_m_freem(m_notify); 2806 return; 2807 } 2808 control->length = SCTP_BUF_LEN(m_notify); 2809 control->spec_flags = M_NOTIFICATION; 2810 /* not that we need this */ 2811 control->tail_mbuf = m_notify; 2812 sctp_add_to_readq(stcb->sctp_ep, stcb, 2813 control, 2814 &stcb->sctp_socket->so_rcv, 1); 2815 } 2816 2817 /* This always must be called with the read-queue LOCKED in the INP */ 2818 void 2819 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, 2820 uint32_t error, int nolock) 2821 { 2822 struct mbuf *m_notify; 2823 struct sctp_pdapi_event *pdapi; 2824 struct sctp_queued_to_read *control; 2825 struct sockbuf *sb; 2826 2827 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) 2828 /* event not enabled */ 2829 return; 2830 2831 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA); 2832 if (m_notify == NULL) 2833 /* no space left */ 2834 return; 2835 SCTP_BUF_LEN(m_notify) = 0; 2836 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 2837 pdapi->pdapi_type = 
SCTP_PARTIAL_DELIVERY_EVENT; 2838 pdapi->pdapi_flags = 0; 2839 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 2840 pdapi->pdapi_indication = error; 2841 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 2842 2843 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 2844 SCTP_BUF_NEXT(m_notify) = NULL; 2845 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2846 0, 0, 0, 0, 0, 0, 2847 m_notify); 2848 if (control == NULL) { 2849 /* no memory */ 2850 sctp_m_freem(m_notify); 2851 return; 2852 } 2853 control->spec_flags = M_NOTIFICATION; 2854 control->length = SCTP_BUF_LEN(m_notify); 2855 /* not that we need this */ 2856 control->tail_mbuf = m_notify; 2857 control->held_length = 0; 2858 control->length = 0; 2859 if (nolock == 0) { 2860 SCTP_INP_READ_LOCK(stcb->sctp_ep); 2861 } 2862 sb = &stcb->sctp_socket->so_rcv; 2863 #ifdef SCTP_SB_LOGGING 2864 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 2865 #endif 2866 sctp_sballoc(stcb, sb, m_notify); 2867 #ifdef SCTP_SB_LOGGING 2868 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 2869 #endif 2870 atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify)); 2871 control->end_added = 1; 2872 if (stcb->asoc.control_pdapi) 2873 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 2874 else { 2875 /* we really should not see this case */ 2876 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 2877 } 2878 if (nolock == 0) { 2879 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 2880 } 2881 if (stcb->sctp_ep && stcb->sctp_socket) { 2882 /* This should always be the case */ 2883 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 2884 } 2885 } 2886 2887 static void 2888 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 2889 { 2890 struct mbuf *m_notify; 2891 struct sctp_shutdown_event *sse; 2892 struct sctp_queued_to_read *control; 2893 2894 /* 2895 * For TCP model AND UDP connected sockets we will send an error up 2896 * when an SHUTDOWN completes 2897 */ 2898 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2899 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 2900 /* mark socket closed for read/write and wakeup! 
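 * (only the send side is marked here, via socantsendmore(); the
 * receive side is left open so the SCTP_SHUTDOWN_EVENT notification
 * queued below can still be delivered)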
*/ 2901 socantsendmore(stcb->sctp_socket); 2902 } 2903 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) 2904 /* event not enabled */ 2905 return; 2906 2907 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA); 2908 if (m_notify == NULL) 2909 /* no space left */ 2910 return; 2911 sse = mtod(m_notify, struct sctp_shutdown_event *); 2912 sse->sse_type = SCTP_SHUTDOWN_EVENT; 2913 sse->sse_flags = 0; 2914 sse->sse_length = sizeof(struct sctp_shutdown_event); 2915 sse->sse_assoc_id = sctp_get_associd(stcb); 2916 2917 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 2918 SCTP_BUF_NEXT(m_notify) = NULL; 2919 2920 /* append to socket */ 2921 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2922 0, 0, 0, 0, 0, 0, 2923 m_notify); 2924 if (control == NULL) { 2925 /* no memory */ 2926 sctp_m_freem(m_notify); 2927 return; 2928 } 2929 control->spec_flags = M_NOTIFICATION; 2930 control->length = SCTP_BUF_LEN(m_notify); 2931 /* not that we need this */ 2932 control->tail_mbuf = m_notify; 2933 sctp_add_to_readq(stcb->sctp_ep, stcb, 2934 control, 2935 &stcb->sctp_socket->so_rcv, 1); 2936 } 2937 2938 static void 2939 sctp_notify_stream_reset(struct sctp_tcb *stcb, 2940 int number_entries, uint16_t * list, int flag) 2941 { 2942 struct mbuf *m_notify; 2943 struct sctp_queued_to_read *control; 2944 struct sctp_stream_reset_event *strreset; 2945 int len; 2946 2947 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) 2948 /* event not enabled */ 2949 return; 2950 2951 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 2952 if (m_notify == NULL) 2953 /* no space left */ 2954 return; 2955 SCTP_BUF_LEN(m_notify) = 0; 2956 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 2957 if (len > M_TRAILINGSPACE(m_notify)) { 2958 /* never enough room */ 2959 sctp_m_freem(m_notify); 2960 return; 2961 } 2962 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 2963 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 2964 if (number_entries == 0) { 2965 strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS; 2966 } else { 2967 strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST; 2968 } 2969 strreset->strreset_length = len; 2970 strreset->strreset_assoc_id = sctp_get_associd(stcb); 2971 if (number_entries) { 2972 int i; 2973 2974 for (i = 0; i < number_entries; i++) { 2975 strreset->strreset_list[i] = ntohs(list[i]); 2976 } 2977 } 2978 SCTP_BUF_LEN(m_notify) = len; 2979 SCTP_BUF_NEXT(m_notify) = NULL; 2980 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 2981 /* no space */ 2982 sctp_m_freem(m_notify); 2983 return; 2984 } 2985 /* append to socket */ 2986 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2987 0, 0, 0, 0, 0, 0, 2988 m_notify); 2989 if (control == NULL) { 2990 /* no memory */ 2991 sctp_m_freem(m_notify); 2992 return; 2993 } 2994 control->spec_flags = M_NOTIFICATION; 2995 control->length = SCTP_BUF_LEN(m_notify); 2996 /* not that we need this */ 2997 control->tail_mbuf = m_notify; 2998 sctp_add_to_readq(stcb->sctp_ep, stcb, 2999 control, 3000 &stcb->sctp_socket->so_rcv, 1); 3001 } 3002 3003 3004 void 3005 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 3006 uint32_t error, void *data) 3007 { 3008 if (stcb == NULL) { 3009 /* unlikely but */ 3010 return; 3011 } 3012 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3013 
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3014 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) 3015 ) { 3016 /* No notifications up when we are in a no socket state */ 3017 return; 3018 } 3019 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3020 /* Can't send up to a closed socket any notifications */ 3021 return; 3022 } 3023 if (stcb && (stcb->asoc.assoc_up_sent == 0) && (notification != SCTP_NOTIFY_ASSOC_UP)) { 3024 if ((notification != SCTP_NOTIFY_ASSOC_DOWN) && 3025 (notification != SCTP_NOTIFY_ASSOC_ABORTED) && 3026 (notification != SCTP_NOTIFY_SPECIAL_SP_FAIL) && 3027 (notification != SCTP_NOTIFY_DG_FAIL) && 3028 (notification != SCTP_NOTIFY_PEER_SHUTDOWN)) { 3029 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, 0, NULL); 3030 stcb->asoc.assoc_up_sent = 1; 3031 } 3032 } 3033 switch (notification) { 3034 case SCTP_NOTIFY_ASSOC_UP: 3035 if (stcb->asoc.assoc_up_sent == 0) { 3036 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL); 3037 stcb->asoc.assoc_up_sent = 1; 3038 } 3039 break; 3040 case SCTP_NOTIFY_ASSOC_DOWN: 3041 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL); 3042 break; 3043 case SCTP_NOTIFY_INTERFACE_DOWN: 3044 { 3045 struct sctp_nets *net; 3046 3047 net = (struct sctp_nets *)data; 3048 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 3049 (struct sockaddr *)&net->ro._l_addr, error); 3050 break; 3051 } 3052 case SCTP_NOTIFY_INTERFACE_UP: 3053 { 3054 struct sctp_nets *net; 3055 3056 net = (struct sctp_nets *)data; 3057 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 3058 (struct sockaddr *)&net->ro._l_addr, error); 3059 break; 3060 } 3061 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 3062 { 3063 struct sctp_nets *net; 3064 3065 net = (struct sctp_nets *)data; 3066 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 3067 (struct sockaddr *)&net->ro._l_addr, error); 3068 break; 3069 } 3070 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 3071 sctp_notify_send_failed2(stcb, error, 3072 (struct sctp_stream_queue_pending *)data); 3073 break; 3074 case SCTP_NOTIFY_DG_FAIL: 3075 sctp_notify_send_failed(stcb, error, 3076 (struct sctp_tmit_chunk *)data); 3077 break; 3078 case SCTP_NOTIFY_ADAPTATION_INDICATION: 3079 /* Here the error is the adaptation indication */ 3080 sctp_notify_adaptation_layer(stcb, error); 3081 break; 3082 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 3083 sctp_notify_partial_delivery_indication(stcb, error, 0); 3084 break; 3085 case SCTP_NOTIFY_STRDATA_ERR: 3086 break; 3087 case SCTP_NOTIFY_ASSOC_ABORTED: 3088 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL); 3089 break; 3090 case SCTP_NOTIFY_PEER_OPENED_STREAM: 3091 break; 3092 case SCTP_NOTIFY_STREAM_OPENED_OK: 3093 break; 3094 case SCTP_NOTIFY_ASSOC_RESTART: 3095 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data); 3096 break; 3097 case SCTP_NOTIFY_HB_RESP: 3098 break; 3099 case SCTP_NOTIFY_STR_RESET_SEND: 3100 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR); 3101 break; 3102 case SCTP_NOTIFY_STR_RESET_RECV: 3103 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR); 3104 break; 3105 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 3106 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_INBOUND_STR)); 3107 break; 3108 3109 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 3110 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_INBOUND_STR)); 3111 break; 3112 3113 case SCTP_NOTIFY_ASCONF_ADD_IP: 3114 
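		/*
		 * The ASCONF add/delete/set-primary cases below all reuse
		 * the peer address change notification; 'data' is the
		 * sockaddr that was added, removed or made primary.
		 */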
sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 3115 error); 3116 break; 3117 case SCTP_NOTIFY_ASCONF_DELETE_IP: 3118 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 3119 error); 3120 break; 3121 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 3122 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 3123 error); 3124 break; 3125 case SCTP_NOTIFY_ASCONF_SUCCESS: 3126 break; 3127 case SCTP_NOTIFY_ASCONF_FAILED: 3128 break; 3129 case SCTP_NOTIFY_PEER_SHUTDOWN: 3130 sctp_notify_shutdown_event(stcb); 3131 break; 3132 case SCTP_NOTIFY_AUTH_NEW_KEY: 3133 sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error, 3134 (uint16_t) (uintptr_t) data); 3135 break; 3136 #if 0 3137 case SCTP_NOTIFY_AUTH_KEY_CONFLICT: 3138 sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT, 3139 error, (uint16_t) (uintptr_t) data); 3140 break; 3141 #endif /* not yet? remove? */ 3142 3143 3144 default: 3145 #ifdef SCTP_DEBUG 3146 if (sctp_debug_on & SCTP_DEBUG_UTIL1) { 3147 printf("NOTIFY: unknown notification %xh (%u)\n", 3148 notification, notification); 3149 } 3150 #endif /* SCTP_DEBUG */ 3151 break; 3152 } /* end switch */ 3153 } 3154 3155 void 3156 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock) 3157 { 3158 struct sctp_association *asoc; 3159 struct sctp_stream_out *outs; 3160 struct sctp_tmit_chunk *chk; 3161 struct sctp_stream_queue_pending *sp; 3162 int i; 3163 3164 asoc = &stcb->asoc; 3165 3166 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3167 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3168 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3169 return; 3170 } 3171 /* now through all the gunk freeing chunks */ 3172 if (holds_lock == 0) 3173 SCTP_TCB_SEND_LOCK(stcb); 3174 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3175 /* For each stream */ 3176 outs = &stcb->asoc.strmout[i]; 3177 /* clean up any sends there */ 3178 stcb->asoc.locked_on_sending = NULL; 3179 sp = TAILQ_FIRST(&outs->outqueue); 3180 while (sp) { 3181 stcb->asoc.stream_queue_cnt--; 3182 TAILQ_REMOVE(&outs->outqueue, sp, next); 3183 sctp_free_spbufspace(stcb, asoc, sp); 3184 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 3185 SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp); 3186 if (sp->data) { 3187 sctp_m_freem(sp->data); 3188 sp->data = NULL; 3189 } 3190 if (sp->net) 3191 sctp_free_remote_addr(sp->net); 3192 sp->net = NULL; 3193 /* Free the chunk */ 3194 sctp_free_a_strmoq(stcb, sp); 3195 sp = TAILQ_FIRST(&outs->outqueue); 3196 } 3197 } 3198 3199 /* pending send queue SHOULD be empty */ 3200 if (!TAILQ_EMPTY(&asoc->send_queue)) { 3201 chk = TAILQ_FIRST(&asoc->send_queue); 3202 while (chk) { 3203 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 3204 asoc->send_queue_cnt--; 3205 if (chk->data) { 3206 /* 3207 * trim off the sctp chunk header(it should 3208 * be there) 3209 */ 3210 if (chk->send_size >= sizeof(struct sctp_data_chunk)) { 3211 m_adj(chk->data, sizeof(struct sctp_data_chunk)); 3212 sctp_mbuf_crush(chk->data); 3213 } 3214 } 3215 sctp_free_bufspace(stcb, asoc, chk, 1); 3216 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk); 3217 if (chk->data) { 3218 sctp_m_freem(chk->data); 3219 chk->data = NULL; 3220 } 3221 if (chk->whoTo) 3222 sctp_free_remote_addr(chk->whoTo); 3223 chk->whoTo = NULL; 3224 sctp_free_a_chunk(stcb, chk); 3225 chk = TAILQ_FIRST(&asoc->send_queue); 3226 } 3227 } 3228 /* sent queue SHOULD be empty */ 3229 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3230 chk = TAILQ_FIRST(&asoc->sent_queue); 3231 while (chk) { 3232 
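			/*
			 * Same clean-up as the send queue above, except
			 * that these chunks were already transmitted, so
			 * the ULP is told SCTP_NOTIFY_DATAGRAM_SENT instead
			 * of SCTP_NOTIFY_DATAGRAM_UNSENT.
			 */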
TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 3233 asoc->sent_queue_cnt--; 3234 if (chk->data) { 3235 /* 3236 * trim off the sctp chunk header(it should 3237 * be there) 3238 */ 3239 if (chk->send_size >= sizeof(struct sctp_data_chunk)) { 3240 m_adj(chk->data, sizeof(struct sctp_data_chunk)); 3241 sctp_mbuf_crush(chk->data); 3242 } 3243 } 3244 sctp_free_bufspace(stcb, asoc, chk, 1); 3245 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 3246 SCTP_NOTIFY_DATAGRAM_SENT, chk); 3247 if (chk->data) { 3248 sctp_m_freem(chk->data); 3249 chk->data = NULL; 3250 } 3251 if (chk->whoTo) 3252 sctp_free_remote_addr(chk->whoTo); 3253 chk->whoTo = NULL; 3254 sctp_free_a_chunk(stcb, chk); 3255 chk = TAILQ_FIRST(&asoc->sent_queue); 3256 } 3257 } 3258 if (holds_lock == 0) 3259 SCTP_TCB_SEND_UNLOCK(stcb); 3260 } 3261 3262 void 3263 sctp_abort_notification(struct sctp_tcb *stcb, int error) 3264 { 3265 3266 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3267 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3268 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3269 return; 3270 } 3271 /* Tell them we lost the asoc */ 3272 sctp_report_all_outbound(stcb, 1); 3273 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 3274 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 3275 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 3276 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 3277 } 3278 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL); 3279 } 3280 3281 void 3282 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3283 struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err) 3284 { 3285 uint32_t vtag; 3286 3287 vtag = 0; 3288 if (stcb != NULL) { 3289 /* We have a TCB to abort, send notification too */ 3290 vtag = stcb->asoc.peer_vtag; 3291 sctp_abort_notification(stcb, 0); 3292 } 3293 sctp_send_abort(m, iphlen, sh, vtag, op_err); 3294 if (stcb != NULL) { 3295 /* Ok, now lets free it */ 3296 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 3297 } else { 3298 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3299 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3300 sctp_inpcb_free(inp, 1, 0); 3301 } 3302 } 3303 } 3304 } 3305 3306 void 3307 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3308 int error, struct mbuf *op_err) 3309 { 3310 uint32_t vtag; 3311 3312 if (stcb == NULL) { 3313 /* Got to have a TCB */ 3314 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3315 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3316 sctp_inpcb_free(inp, 1, 0); 3317 } 3318 } 3319 return; 3320 } 3321 vtag = stcb->asoc.peer_vtag; 3322 /* notify the ulp */ 3323 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) 3324 sctp_abort_notification(stcb, error); 3325 /* notify the peer */ 3326 sctp_send_abort_tcb(stcb, op_err); 3327 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 3328 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 3329 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 3330 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 3331 } 3332 /* now free the asoc */ 3333 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 3334 } 3335 3336 void 3337 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh, 3338 struct sctp_inpcb *inp, struct mbuf *op_err) 3339 { 3340 struct sctp_chunkhdr *ch, chunk_buf; 3341 unsigned int chk_length; 3342 3343 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 3344 /* Generate a TO address 
for future reference */ 3345 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 3346 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3347 sctp_inpcb_free(inp, 1, 0); 3348 } 3349 } 3350 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3351 sizeof(*ch), (uint8_t *) & chunk_buf); 3352 while (ch != NULL) { 3353 chk_length = ntohs(ch->chunk_length); 3354 if (chk_length < sizeof(*ch)) { 3355 /* break to abort land */ 3356 break; 3357 } 3358 switch (ch->chunk_type) { 3359 case SCTP_PACKET_DROPPED: 3360 /* we don't respond to pkt-dropped */ 3361 return; 3362 case SCTP_ABORT_ASSOCIATION: 3363 /* we don't respond with an ABORT to an ABORT */ 3364 return; 3365 case SCTP_SHUTDOWN_COMPLETE: 3366 /* 3367 * we ignore it since we are not waiting for it and 3368 * peer is gone 3369 */ 3370 return; 3371 case SCTP_SHUTDOWN_ACK: 3372 sctp_send_shutdown_complete2(m, iphlen, sh); 3373 return; 3374 default: 3375 break; 3376 } 3377 offset += SCTP_SIZE32(chk_length); 3378 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3379 sizeof(*ch), (uint8_t *) & chunk_buf); 3380 } 3381 sctp_send_abort(m, iphlen, sh, 0, op_err); 3382 } 3383 3384 /* 3385 * check the inbound datagram to make sure there is not an abort inside it, 3386 * if there is return 1, else return 0. 3387 */ 3388 int 3389 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill) 3390 { 3391 struct sctp_chunkhdr *ch; 3392 struct sctp_init_chunk *init_chk, chunk_buf; 3393 int offset; 3394 unsigned int chk_length; 3395 3396 offset = iphlen + sizeof(struct sctphdr); 3397 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 3398 (uint8_t *) & chunk_buf); 3399 while (ch != NULL) { 3400 chk_length = ntohs(ch->chunk_length); 3401 if (chk_length < sizeof(*ch)) { 3402 /* packet is probably corrupt */ 3403 break; 3404 } 3405 /* we seem to be ok, is it an abort? */ 3406 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 3407 /* yep, tell them */ 3408 return (1); 3409 } 3410 if (ch->chunk_type == SCTP_INITIATION) { 3411 /* need to update the Vtag */ 3412 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 3413 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf); 3414 if (init_chk != NULL) { 3415 *vtagfill = ntohl(init_chk->init.initiate_tag); 3416 } 3417 } 3418 /* Nope, move to the next chunk */ 3419 offset += SCTP_SIZE32(chk_length); 3420 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3421 sizeof(*ch), (uint8_t *) & chunk_buf); 3422 } 3423 return (0); 3424 } 3425 3426 /* 3427 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 3428 * set (i.e. 
it's 0) so, create this function to compare link local scopes 3429 */ 3430 uint32_t 3431 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 3432 { 3433 struct sockaddr_in6 a, b; 3434 3435 /* save copies */ 3436 a = *addr1; 3437 b = *addr2; 3438 3439 if (a.sin6_scope_id == 0) 3440 if (sa6_recoverscope(&a)) { 3441 /* can't get scope, so can't match */ 3442 return (0); 3443 } 3444 if (b.sin6_scope_id == 0) 3445 if (sa6_recoverscope(&b)) { 3446 /* can't get scope, so can't match */ 3447 return (0); 3448 } 3449 if (a.sin6_scope_id != b.sin6_scope_id) 3450 return (0); 3451 3452 return (1); 3453 } 3454 3455 /* 3456 * returns a sockaddr_in6 with embedded scope recovered and removed 3457 */ 3458 struct sockaddr_in6 * 3459 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 3460 { 3461 3462 /* check and strip embedded scope junk */ 3463 if (addr->sin6_family == AF_INET6) { 3464 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 3465 if (addr->sin6_scope_id == 0) { 3466 *store = *addr; 3467 if (!sa6_recoverscope(store)) { 3468 /* use the recovered scope */ 3469 addr = store; 3470 } 3471 /* else, return the original "to" addr */ 3472 } 3473 } 3474 } 3475 return (addr); 3476 } 3477 3478 /* 3479 * are the two addresses the same? currently a "scopeless" check returns: 1 3480 * if same, 0 if not 3481 */ 3482 __inline int 3483 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 3484 { 3485 3486 /* must be valid */ 3487 if (sa1 == NULL || sa2 == NULL) 3488 return (0); 3489 3490 /* must be the same family */ 3491 if (sa1->sa_family != sa2->sa_family) 3492 return (0); 3493 3494 if (sa1->sa_family == AF_INET6) { 3495 /* IPv6 addresses */ 3496 struct sockaddr_in6 *sin6_1, *sin6_2; 3497 3498 sin6_1 = (struct sockaddr_in6 *)sa1; 3499 sin6_2 = (struct sockaddr_in6 *)sa2; 3500 return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr, 3501 &sin6_2->sin6_addr)); 3502 } else if (sa1->sa_family == AF_INET) { 3503 /* IPv4 addresses */ 3504 struct sockaddr_in *sin_1, *sin_2; 3505 3506 sin_1 = (struct sockaddr_in *)sa1; 3507 sin_2 = (struct sockaddr_in *)sa2; 3508 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 3509 } else { 3510 /* we don't do these... 
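 * address families; anything other than AF_INET or AF_INET6 simply
 * compares as not equal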
*/ 3511 return (0); 3512 } 3513 } 3514 3515 void 3516 sctp_print_address(struct sockaddr *sa) 3517 { 3518 3519 if (sa->sa_family == AF_INET6) { 3520 struct sockaddr_in6 *sin6; 3521 char ip6buf[INET6_ADDRSTRLEN]; 3522 3523 sin6 = (struct sockaddr_in6 *)sa; 3524 printf("IPv6 address: %s:%d scope:%u\n", 3525 ip6_sprintf(ip6buf, &sin6->sin6_addr), 3526 ntohs(sin6->sin6_port), 3527 sin6->sin6_scope_id); 3528 } else if (sa->sa_family == AF_INET) { 3529 struct sockaddr_in *sin; 3530 unsigned char *p; 3531 3532 sin = (struct sockaddr_in *)sa; 3533 p = (unsigned char *)&sin->sin_addr; 3534 printf("IPv4 address: %u.%u.%u.%u:%d\n", 3535 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 3536 } else { 3537 printf("?\n"); 3538 } 3539 } 3540 3541 void 3542 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh) 3543 { 3544 if (iph->ip_v == IPVERSION) { 3545 struct sockaddr_in lsa, fsa; 3546 3547 bzero(&lsa, sizeof(lsa)); 3548 lsa.sin_len = sizeof(lsa); 3549 lsa.sin_family = AF_INET; 3550 lsa.sin_addr = iph->ip_src; 3551 lsa.sin_port = sh->src_port; 3552 bzero(&fsa, sizeof(fsa)); 3553 fsa.sin_len = sizeof(fsa); 3554 fsa.sin_family = AF_INET; 3555 fsa.sin_addr = iph->ip_dst; 3556 fsa.sin_port = sh->dest_port; 3557 printf("src: "); 3558 sctp_print_address((struct sockaddr *)&lsa); 3559 printf("dest: "); 3560 sctp_print_address((struct sockaddr *)&fsa); 3561 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 3562 struct ip6_hdr *ip6; 3563 struct sockaddr_in6 lsa6, fsa6; 3564 3565 ip6 = (struct ip6_hdr *)iph; 3566 bzero(&lsa6, sizeof(lsa6)); 3567 lsa6.sin6_len = sizeof(lsa6); 3568 lsa6.sin6_family = AF_INET6; 3569 lsa6.sin6_addr = ip6->ip6_src; 3570 lsa6.sin6_port = sh->src_port; 3571 bzero(&fsa6, sizeof(fsa6)); 3572 fsa6.sin6_len = sizeof(fsa6); 3573 fsa6.sin6_family = AF_INET6; 3574 fsa6.sin6_addr = ip6->ip6_dst; 3575 fsa6.sin6_port = sh->dest_port; 3576 printf("src: "); 3577 sctp_print_address((struct sockaddr *)&lsa6); 3578 printf("dest: "); 3579 sctp_print_address((struct sockaddr *)&fsa6); 3580 } 3581 } 3582 3583 void 3584 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 3585 struct sctp_inpcb *new_inp, 3586 struct sctp_tcb *stcb) 3587 { 3588 /* 3589 * go through our old INP and pull off any control structures that 3590 * belong to stcb and move then to the new inp. 3591 */ 3592 struct socket *old_so, *new_so; 3593 struct sctp_queued_to_read *control, *nctl; 3594 struct sctp_readhead tmp_queue; 3595 struct mbuf *m; 3596 int error; 3597 3598 old_so = old_inp->sctp_socket; 3599 new_so = new_inp->sctp_socket; 3600 TAILQ_INIT(&tmp_queue); 3601 3602 SOCKBUF_LOCK(&(old_so->so_rcv)); 3603 3604 error = sblock(&old_so->so_rcv, 0); 3605 3606 SOCKBUF_UNLOCK(&(old_so->so_rcv)); 3607 if (error) { 3608 /* 3609 * Gak, can't get sblock, we have a problem. data will be 3610 * left stranded.. and we don't dare look at it since the 3611 * other thread may be reading something. Oh well, its a 3612 * screwed up app that does a peeloff OR a accept while 3613 * reading from the main socket... actually its only the 3614 * peeloff() case, since I think read will fail on a 3615 * listening socket.. 
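 * In that case we just return with no error indication and the
 * control data stays queued on the old socket.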
3616 */ 3617 return; 3618 } 3619 /* lock the socket buffers */ 3620 SCTP_INP_READ_LOCK(old_inp); 3621 control = TAILQ_FIRST(&old_inp->read_queue); 3622 /* Pull off all for out target stcb */ 3623 while (control) { 3624 nctl = TAILQ_NEXT(control, next); 3625 if (control->stcb == stcb) { 3626 /* remove it we want it */ 3627 TAILQ_REMOVE(&old_inp->read_queue, control, next); 3628 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 3629 m = control->data; 3630 while (m) { 3631 #ifdef SCTP_SB_LOGGING 3632 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 3633 #endif 3634 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 3635 #ifdef SCTP_SB_LOGGING 3636 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3637 #endif 3638 m = SCTP_BUF_NEXT(m); 3639 } 3640 } 3641 control = nctl; 3642 } 3643 SCTP_INP_READ_UNLOCK(old_inp); 3644 3645 /* Remove the sb-lock on the old socket */ 3646 SOCKBUF_LOCK(&(old_so->so_rcv)); 3647 3648 sbunlock(&old_so->so_rcv); 3649 SOCKBUF_UNLOCK(&(old_so->so_rcv)); 3650 3651 /* Now we move them over to the new socket buffer */ 3652 control = TAILQ_FIRST(&tmp_queue); 3653 SCTP_INP_READ_LOCK(new_inp); 3654 while (control) { 3655 nctl = TAILQ_NEXT(control, next); 3656 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 3657 m = control->data; 3658 while (m) { 3659 #ifdef SCTP_SB_LOGGING 3660 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 3661 #endif 3662 sctp_sballoc(stcb, &new_so->so_rcv, m); 3663 #ifdef SCTP_SB_LOGGING 3664 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3665 #endif 3666 m = SCTP_BUF_NEXT(m); 3667 } 3668 control = nctl; 3669 } 3670 SCTP_INP_READ_UNLOCK(new_inp); 3671 } 3672 3673 3674 void 3675 sctp_add_to_readq(struct sctp_inpcb *inp, 3676 struct sctp_tcb *stcb, 3677 struct sctp_queued_to_read *control, 3678 struct sockbuf *sb, 3679 int end) 3680 { 3681 /* 3682 * Here we must place the control on the end of the socket read 3683 * queue AND increment sb_cc so that select will work properly on 3684 * read. 3685 */ 3686 struct mbuf *m, *prev = NULL; 3687 3688 if (inp == NULL) { 3689 /* Gak, TSNH!! */ 3690 #ifdef INVARIANTS 3691 panic("Gak, inp NULL on add_to_readq"); 3692 #endif 3693 return; 3694 } 3695 SCTP_INP_READ_LOCK(inp); 3696 atomic_add_int(&inp->total_recvs, 1); 3697 atomic_add_int(&stcb->total_recvs, 1); 3698 m = control->data; 3699 control->held_length = 0; 3700 control->length = 0; 3701 while (m) { 3702 if (SCTP_BUF_LEN(m) == 0) { 3703 /* Skip mbufs with NO length */ 3704 if (prev == NULL) { 3705 /* First one */ 3706 control->data = sctp_m_free(m); 3707 m = control->data; 3708 } else { 3709 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 3710 m = SCTP_BUF_NEXT(prev); 3711 } 3712 if (m == NULL) { 3713 control->tail_mbuf = prev;; 3714 } 3715 continue; 3716 } 3717 prev = m; 3718 #ifdef SCTP_SB_LOGGING 3719 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 3720 #endif 3721 sctp_sballoc(stcb, sb, m); 3722 #ifdef SCTP_SB_LOGGING 3723 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3724 #endif 3725 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 3726 m = SCTP_BUF_NEXT(m); 3727 } 3728 if (prev != NULL) { 3729 control->tail_mbuf = prev; 3730 } else { 3731 /* Everything got collapsed out?? 
		 */
		SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		sctp_sorwakeup(inp, inp->sctp_socket);
	}
}


int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway, OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh, this one is already complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
#ifdef SCTP_SB_LOGGING
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
#endif
			sctp_sballoc(stcb, sb, mm);
#ifdef SCTP_SB_LOGGING
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
#endif
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh, nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (end) {
		/* message is complete */
		if (control == stcb->asoc.control_pdapi) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	atomic_add_int(&control->length, len);
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
3852 */ 3853 control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack; 3854 if (inp) { 3855 SCTP_INP_READ_UNLOCK(inp); 3856 } 3857 if (inp && inp->sctp_socket) { 3858 sctp_sorwakeup(inp, inp->sctp_socket); 3859 } 3860 return (0); 3861 } 3862 3863 3864 3865 /*************HOLD THIS COMMENT FOR PATCH FILE OF 3866 *************ALTERNATE ROUTING CODE 3867 */ 3868 3869 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 3870 *************ALTERNATE ROUTING CODE 3871 */ 3872 3873 struct mbuf * 3874 sctp_generate_invmanparam(int err) 3875 { 3876 /* Return a MBUF with a invalid mandatory parameter */ 3877 struct mbuf *m; 3878 3879 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA); 3880 if (m) { 3881 struct sctp_paramhdr *ph; 3882 3883 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr); 3884 ph = mtod(m, struct sctp_paramhdr *); 3885 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 3886 ph->param_type = htons(err); 3887 } 3888 return (m); 3889 } 3890 3891 #ifdef SCTP_MBCNT_LOGGING 3892 void 3893 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 3894 struct sctp_tmit_chunk *tp1, int chk_cnt) 3895 { 3896 if (tp1->data == NULL) { 3897 return; 3898 } 3899 asoc->chunks_on_out_queue -= chk_cnt; 3900 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 3901 asoc->total_output_queue_size, 3902 tp1->book_size, 3903 0, 3904 tp1->mbcnt); 3905 if (asoc->total_output_queue_size >= tp1->book_size) { 3906 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 3907 } else { 3908 asoc->total_output_queue_size = 0; 3909 } 3910 3911 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 3912 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 3913 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 3914 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 3915 } else { 3916 stcb->sctp_socket->so_snd.sb_cc = 0; 3917 3918 } 3919 } 3920 } 3921 3922 #endif 3923 3924 int 3925 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 3926 int reason, struct sctpchunk_listhead *queue) 3927 { 3928 int ret_sz = 0; 3929 int notdone; 3930 uint8_t foundeom = 0; 3931 3932 do { 3933 ret_sz += tp1->book_size; 3934 tp1->sent = SCTP_FORWARD_TSN_SKIP; 3935 if (tp1->data) { 3936 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 3937 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1); 3938 sctp_m_freem(tp1->data); 3939 tp1->data = NULL; 3940 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 3941 } 3942 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 3943 stcb->asoc.sent_queue_cnt_removeable--; 3944 } 3945 if (queue == &stcb->asoc.send_queue) { 3946 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 3947 /* on to the sent queue */ 3948 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 3949 sctp_next); 3950 stcb->asoc.sent_queue_cnt++; 3951 } 3952 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 3953 SCTP_DATA_NOT_FRAG) { 3954 /* not frag'ed we ae done */ 3955 notdone = 0; 3956 foundeom = 1; 3957 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 3958 /* end of frag, we are done */ 3959 notdone = 0; 3960 foundeom = 1; 3961 } else { 3962 /* 3963 * Its a begin or middle piece, we must mark all of 3964 * it 3965 */ 3966 notdone = 1; 3967 tp1 = TAILQ_NEXT(tp1, sctp_next); 3968 } 3969 } while (tp1 && notdone); 3970 if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) { 3971 /* 3972 * The multi-part message was scattered across the send and 3973 * sent queue. 
3974 */ 3975 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3976 /* 3977 * recurse throught the send_queue too, starting at the 3978 * beginning. 3979 */ 3980 if (tp1) { 3981 ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason, 3982 &stcb->asoc.send_queue); 3983 } else { 3984 printf("hmm, nothing on the send queue and no EOM?\n"); 3985 } 3986 } 3987 return (ret_sz); 3988 } 3989 3990 /* 3991 * checks to see if the given address, sa, is one that is currently known by 3992 * the kernel note: can't distinguish the same address on multiple interfaces 3993 * and doesn't handle multiple addresses with different zone/scope id's note: 3994 * ifa_ifwithaddr() compares the entire sockaddr struct 3995 */ 3996 struct ifaddr * 3997 sctp_find_ifa_by_addr(struct sockaddr *sa) 3998 { 3999 struct ifnet *ifn; 4000 struct ifaddr *ifa; 4001 4002 /* go through all our known interfaces */ 4003 TAILQ_FOREACH(ifn, &ifnet, if_list) { 4004 /* go through each interface addresses */ 4005 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) { 4006 /* correct family? */ 4007 if (ifa->ifa_addr->sa_family != sa->sa_family) 4008 continue; 4009 4010 #ifdef INET6 4011 if (ifa->ifa_addr->sa_family == AF_INET6) { 4012 /* IPv6 address */ 4013 struct sockaddr_in6 *sin1, *sin2, sin6_tmp; 4014 4015 sin1 = (struct sockaddr_in6 *)ifa->ifa_addr; 4016 if (IN6_IS_SCOPE_LINKLOCAL(&sin1->sin6_addr)) { 4017 /* create a copy and clear scope */ 4018 memcpy(&sin6_tmp, sin1, 4019 sizeof(struct sockaddr_in6)); 4020 sin1 = &sin6_tmp; 4021 in6_clearscope(&sin1->sin6_addr); 4022 } 4023 sin2 = (struct sockaddr_in6 *)sa; 4024 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr, 4025 sizeof(struct in6_addr)) == 0) { 4026 /* found it */ 4027 return (ifa); 4028 } 4029 } else 4030 #endif 4031 if (ifa->ifa_addr->sa_family == AF_INET) { 4032 /* IPv4 address */ 4033 struct sockaddr_in *sin1, *sin2; 4034 4035 sin1 = (struct sockaddr_in *)ifa->ifa_addr; 4036 sin2 = (struct sockaddr_in *)sa; 4037 if (sin1->sin_addr.s_addr == 4038 sin2->sin_addr.s_addr) { 4039 /* found it */ 4040 return (ifa); 4041 } 4042 } 4043 /* else, not AF_INET or AF_INET6, so skip */ 4044 } /* end foreach ifa */ 4045 } /* end foreach ifn */ 4046 /* not found! */ 4047 return (NULL); 4048 } 4049 4050 static void 4051 sctp_user_rcvd(struct sctp_tcb *stcb, int *freed_so_far, int hold_rlock, 4052 uint32_t rwnd_req) 4053 { 4054 /* User pulled some data, do we need a rwnd update? 
*/ 4055 int r_unlocked = 0; 4056 uint32_t dif, rwnd; 4057 struct socket *so = NULL; 4058 4059 if (stcb == NULL) 4060 return; 4061 4062 atomic_add_int(&stcb->asoc.refcnt, 1); 4063 4064 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4065 /* Pre-check If we are freeing no update */ 4066 goto no_lock; 4067 } 4068 SCTP_INP_INCR_REF(stcb->sctp_ep); 4069 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4070 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 4071 goto out; 4072 } 4073 so = stcb->sctp_socket; 4074 if (so == NULL) { 4075 goto out; 4076 } 4077 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 4078 /* Have you have freed enough to look */ 4079 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4080 sctp_misc_ints(SCTP_ENTER_USER_RECV, 4081 (stcb->asoc.my_rwnd - stcb->asoc.my_last_reported_rwnd), 4082 *freed_so_far, 4083 stcb->freed_by_sorcv_sincelast, 4084 rwnd_req); 4085 #endif 4086 *freed_so_far = 0; 4087 /* Yep, its worth a look and the lock overhead */ 4088 4089 /* Figure out what the rwnd would be */ 4090 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 4091 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 4092 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 4093 } else { 4094 dif = 0; 4095 } 4096 if (dif >= rwnd_req) { 4097 if (hold_rlock) { 4098 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 4099 r_unlocked = 1; 4100 } 4101 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4102 /* 4103 * One last check before we allow the guy possibly 4104 * to get in. There is a race, where the guy has not 4105 * reached the gate. In that case 4106 */ 4107 goto out; 4108 } 4109 SCTP_TCB_LOCK(stcb); 4110 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4111 /* No reports here */ 4112 SCTP_TCB_UNLOCK(stcb); 4113 goto out; 4114 } 4115 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4116 sctp_misc_ints(SCTP_USER_RECV_SACKS, 4117 stcb->asoc.my_rwnd, 4118 stcb->asoc.my_last_reported_rwnd, 4119 stcb->freed_by_sorcv_sincelast, 4120 dif); 4121 #endif 4122 SCTP_STAT_INCR(sctps_wu_sacks_sent); 4123 sctp_send_sack(stcb); 4124 sctp_chunk_output(stcb->sctp_ep, stcb, 4125 SCTP_OUTPUT_FROM_USR_RCVD); 4126 /* make sure no timer is running */ 4127 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 4128 SCTP_TCB_UNLOCK(stcb); 4129 } else { 4130 /* Update how much we have pending */ 4131 stcb->freed_by_sorcv_sincelast = dif; 4132 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4133 sctp_misc_ints(SCTP_USER_RECV_SACKS, 4134 stcb->asoc.my_rwnd, 4135 stcb->asoc.my_last_reported_rwnd, 4136 stcb->freed_by_sorcv_sincelast, 4137 0); 4138 #endif 4139 } 4140 out: 4141 if (so && r_unlocked && hold_rlock) { 4142 SCTP_INP_READ_LOCK(stcb->sctp_ep); 4143 } 4144 SCTP_INP_DECR_REF(stcb->sctp_ep); 4145 no_lock: 4146 atomic_add_int(&stcb->asoc.refcnt, -1); 4147 return; 4148 } 4149 4150 int 4151 sctp_sorecvmsg(struct socket *so, 4152 struct uio *uio, 4153 struct mbuf **mp, 4154 struct sockaddr *from, 4155 int fromlen, 4156 int *msg_flags, 4157 struct sctp_sndrcvinfo *sinfo, 4158 int filling_sinfo) 4159 { 4160 /* 4161 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 4162 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 4163 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
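	 * (as coded below: MSG_OOB is rejected with EOPNOTSUPP, MSG_PEEK
	 * cannot be combined with an mp mbuf return, and MSG_DONTWAIT,
	 * MSG_NBIO or a socket in SS_NBIO state turns blocking off).
	 *
	 * A hedged sketch of the usual userland entry point, for
	 * orientation only (sctp_recvmsg(3) is a library wrapper, not
	 * defined in this file):
	 *
	 *	struct sockaddr_storage from;
	 *	socklen_t fromlen = sizeof(from);
	 *	struct sctp_sndrcvinfo sinfo;
	 *	int flags = 0;
	 *	char buf[2048];
	 *	ssize_t n = sctp_recvmsg(sd, buf, sizeof(buf),
	 *	    (struct sockaddr *)&from, &fromlen, &sinfo, &flags);
	 *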
4164 * On the way out we may send out any combination of: 4165 * MSG_NOTIFICATION MSG_EOR 4166 * 4167 */ 4168 struct sctp_inpcb *inp = NULL; 4169 int my_len = 0; 4170 int cp_len = 0, error = 0; 4171 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 4172 struct mbuf *m = NULL, *embuf = NULL; 4173 struct sctp_tcb *stcb = NULL; 4174 int wakeup_read_socket = 0; 4175 int freecnt_applied = 0; 4176 int out_flags = 0, in_flags = 0; 4177 int block_allowed = 1; 4178 int freed_so_far = 0; 4179 int copied_so_far = 0; 4180 int in_eeor_mode = 0; 4181 int no_rcv_needed = 0; 4182 uint32_t rwnd_req = 0; 4183 int hold_sblock = 0; 4184 int hold_rlock = 0; 4185 int alen = 0, slen = 0; 4186 int held_length = 0; 4187 4188 if (msg_flags) { 4189 in_flags = *msg_flags; 4190 } else { 4191 in_flags = 0; 4192 } 4193 slen = uio->uio_resid; 4194 /* Pull in and set up our int flags */ 4195 if (in_flags & MSG_OOB) { 4196 /* Out of band's NOT supported */ 4197 return (EOPNOTSUPP); 4198 } 4199 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 4200 return (EINVAL); 4201 } 4202 if ((in_flags & (MSG_DONTWAIT 4203 | MSG_NBIO 4204 )) || 4205 (so->so_state & SS_NBIO)) { 4206 block_allowed = 0; 4207 } 4208 /* setup the endpoint */ 4209 inp = (struct sctp_inpcb *)so->so_pcb; 4210 if (inp == NULL) { 4211 return (EFAULT); 4212 } 4213 rwnd_req = (so->so_rcv.sb_hiwat >> SCTP_RWND_HIWAT_SHIFT); 4214 /* Must be at least a MTU's worth */ 4215 if (rwnd_req < SCTP_MIN_RWND) 4216 rwnd_req = SCTP_MIN_RWND; 4217 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 4218 #ifdef SCTP_RECV_RWND_LOGGING 4219 sctp_misc_ints(SCTP_SORECV_ENTER, 4220 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid); 4221 #endif 4222 SOCKBUF_LOCK(&so->so_rcv); 4223 hold_sblock = 1; 4224 #ifdef SCTP_RECV_RWND_LOGGING 4225 sctp_misc_ints(SCTP_SORECV_ENTERPL, 4226 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid); 4227 #endif 4228 4229 4230 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0)); 4231 if (error) { 4232 goto release_unlocked; 4233 } 4234 restart: 4235 if (hold_sblock == 0) { 4236 SOCKBUF_LOCK(&so->so_rcv); 4237 hold_sblock = 1; 4238 } 4239 sbunlock(&so->so_rcv); 4240 4241 restart_nosblocks: 4242 if (hold_sblock == 0) { 4243 SOCKBUF_LOCK(&so->so_rcv); 4244 hold_sblock = 1; 4245 } 4246 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4247 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 4248 goto out; 4249 } 4250 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 4251 if (so->so_error) { 4252 error = so->so_error; 4253 if ((in_flags & MSG_PEEK) == 0) 4254 so->so_error = 0; 4255 } else { 4256 error = ENOTCONN; 4257 } 4258 goto out; 4259 } 4260 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) { 4261 /* we need to wait for data */ 4262 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4263 sctp_misc_ints(SCTP_SORECV_BLOCKSA, 4264 0, 0, so->so_rcv.sb_cc, uio->uio_resid); 4265 #endif 4266 if ((so->so_rcv.sb_cc == 0) && 4267 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 4268 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 4269 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 4270 /* 4271 * For active open side clear flags for 4272 * re-use passive open is blocked by 4273 * connect. 
4274 */ 4275 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 4276 /* 4277 * You were aborted, passive side 4278 * always hits here 4279 */ 4280 error = ECONNRESET; 4281 /* 4282 * You get this once if you are 4283 * active open side 4284 */ 4285 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4286 /* 4287 * Remove flag if on the 4288 * active open side 4289 */ 4290 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED; 4291 } 4292 } 4293 so->so_state &= ~(SS_ISCONNECTING | 4294 SS_ISDISCONNECTING | 4295 SS_ISCONFIRMING | 4296 SS_ISCONNECTED); 4297 if (error == 0) { 4298 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 4299 error = ENOTCONN; 4300 } else { 4301 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED; 4302 } 4303 } 4304 goto out; 4305 } 4306 } 4307 error = sbwait(&so->so_rcv); 4308 if (error) { 4309 goto out; 4310 } 4311 held_length = 0; 4312 goto restart_nosblocks; 4313 } else if (so->so_rcv.sb_cc == 0) { 4314 if (so->so_error) { 4315 error = so->so_error; 4316 if ((in_flags & MSG_PEEK) == 0) 4317 so->so_error = 0; 4318 } else { 4319 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 4320 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4321 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 4322 /* 4323 * For active open side clear flags 4324 * for re-use passive open is 4325 * blocked by connect. 4326 */ 4327 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 4328 /* 4329 * You were aborted, passive 4330 * side always hits here 4331 */ 4332 error = ECONNRESET; 4333 /* 4334 * You get this once if you 4335 * are active open side 4336 */ 4337 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4338 /* 4339 * Remove flag if on 4340 * the active open 4341 * side 4342 */ 4343 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED; 4344 } 4345 } 4346 so->so_state &= ~(SS_ISCONNECTING | 4347 SS_ISDISCONNECTING | 4348 SS_ISCONFIRMING | 4349 SS_ISCONNECTED); 4350 if (error == 0) { 4351 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 4352 error = ENOTCONN; 4353 } else { 4354 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED; 4355 } 4356 } 4357 goto out; 4358 } 4359 } 4360 error = EWOULDBLOCK; 4361 } 4362 goto out; 4363 } 4364 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0)); 4365 /* we possibly have data we can read */ 4366 control = TAILQ_FIRST(&inp->read_queue); 4367 if (control == NULL) { 4368 /* 4369 * This could be happening since the appender did the 4370 * increment but as not yet did the tailq insert onto the 4371 * read_queue 4372 */ 4373 if (hold_rlock == 0) { 4374 SCTP_INP_READ_LOCK(inp); 4375 hold_rlock = 1; 4376 } 4377 control = TAILQ_FIRST(&inp->read_queue); 4378 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 4379 #ifdef INVARIANTS 4380 panic("Huh, its non zero and nothing on control?"); 4381 #endif 4382 so->so_rcv.sb_cc = 0; 4383 } 4384 SCTP_INP_READ_UNLOCK(inp); 4385 hold_rlock = 0; 4386 goto restart; 4387 } 4388 if ((control->length == 0) && 4389 (control->do_not_ref_stcb)) { 4390 /* 4391 * Clean up code for freeing assoc that left behind a 4392 * pdapi.. maybe a peer in EEOR that just closed after 4393 * sending and never indicated a EOR. 4394 */ 4395 if (hold_rlock == 0) { 4396 hold_rlock = 1; 4397 SCTP_INP_READ_LOCK(inp); 4398 } 4399 control->held_length = 0; 4400 if (control->data) { 4401 /* Hmm there is data here .. 
fix it up below. */
			struct mbuf *m;
			int cnt = 0;

			m = control->data;
			while (m) {
				cnt += SCTP_BUF_LEN(m);
				if (SCTP_BUF_NEXT(m) == NULL) {
					control->tail_mbuf = m;
					control->end_added = 1;
				}
				m = SCTP_BUF_NEXT(m);
			}
			control->length = cnt;
		} else {
			/* remove it */
			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if (control->length == 0) {
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
		    (filling_sinfo)) {
			/* find a more suitable one than this */
			ctl = TAILQ_NEXT(control, next);
			while (ctl) {
				if ((ctl->stcb != control->stcb) && (ctl->length)) {
					/* found one */
					control = ctl;
					goto found_one;
				}
				ctl = TAILQ_NEXT(ctl, next);
			}
		}
		/*
		 * if we reach here, no suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into our held count, and it's time to sleep again.
		 */
		held_length = so->so_rcv.sb_cc;
		control->held_length = so->so_rcv.sb_cc;
		goto restart;
	}
	/* Clear the held length since there is something to read */
	control->held_length = 0;
	if (hold_rlock) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
found_one:
	/*
	 * If we reach here, control has some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	stcb = control->stcb;
	if (stcb) {
		if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
		    (control->do_not_ref_stcb == 0)) {
			if (freecnt_applied == 0)
				stcb = NULL;
		} else if (control->do_not_ref_stcb == 0) {
			/* you can't free it on me, please */
			/*
			 * The lock on the socket buffer protects us so the
			 * free code will stop. But since we used the
			 * socketbuf lock and the sender uses the tcb_lock
			 * to increment, we need to use the atomic add to
			 * the refcnt
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			freecnt_applied = 1;
			/*
			 * Setup to remember how much we have not yet told
			 * the peer our rwnd has opened up. Note we grab the
			 * value from the tcb from last time. Note too that
			 * sack sending clears this when a sack is sent..
			 * which is fine. Once we hit the rwnd_req, we then
			 * will go to the sctp_user_rcvd() that will not
			 * lock until it knows it MUST send a WUP-SACK.
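			 * rwnd_req itself is sized further up as a fixed
			 * fraction of the receive buffer high-water mark
			 * (sb_hiwat >> SCTP_RWND_HIWAT_SHIFT), clamped to at
			 * least SCTP_MIN_RWND, so small reads never trigger
			 * a window-update SACK on their own.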
4491 * 4492 */ 4493 freed_so_far = stcb->freed_by_sorcv_sincelast; 4494 stcb->freed_by_sorcv_sincelast = 0; 4495 } 4496 } 4497 /* First lets get off the sinfo and sockaddr info */ 4498 if ((sinfo) && filling_sinfo) { 4499 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo)); 4500 nxt = TAILQ_NEXT(control, next); 4501 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) { 4502 struct sctp_extrcvinfo *s_extra; 4503 4504 s_extra = (struct sctp_extrcvinfo *)sinfo; 4505 if (nxt) { 4506 s_extra->next_flags = SCTP_NEXT_MSG_AVAIL; 4507 if (nxt->sinfo_flags & SCTP_UNORDERED) { 4508 s_extra->next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 4509 } 4510 s_extra->next_asocid = nxt->sinfo_assoc_id; 4511 s_extra->next_length = nxt->length; 4512 s_extra->next_ppid = nxt->sinfo_ppid; 4513 s_extra->next_stream = nxt->sinfo_stream; 4514 if (nxt->tail_mbuf != NULL) { 4515 if (nxt->end_added) { 4516 s_extra->next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 4517 } 4518 } 4519 } else { 4520 /* 4521 * we explicitly 0 this, since the memcpy 4522 * got some other things beyond the older 4523 * sinfo_ that is on the control's structure 4524 * :-D 4525 */ 4526 s_extra->next_flags = SCTP_NO_NEXT_MSG; 4527 s_extra->next_asocid = 0; 4528 s_extra->next_length = 0; 4529 s_extra->next_ppid = 0; 4530 s_extra->next_stream = 0; 4531 } 4532 } 4533 /* 4534 * update off the real current cum-ack, if we have an stcb. 4535 */ 4536 if (stcb) 4537 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 4538 /* 4539 * mask off the high bits, we keep the actual chunk bits in 4540 * there. 4541 */ 4542 sinfo->sinfo_flags &= 0x00ff; 4543 } 4544 if (fromlen && from) { 4545 struct sockaddr *to; 4546 4547 #ifdef AF_INET 4548 cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin.sin_len); 4549 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 4550 ((struct sockaddr_in *)from)->sin_port = control->port_from; 4551 #else 4552 /* No AF_INET use AF_INET6 */ 4553 cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin6.sin6_len); 4554 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 4555 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from; 4556 #endif 4557 4558 to = from; 4559 #if defined(AF_INET) && defined(AF_INET6) 4560 if ((inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) && 4561 (to->sa_family == AF_INET) && 4562 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) { 4563 struct sockaddr_in *sin; 4564 struct sockaddr_in6 sin6; 4565 4566 sin = (struct sockaddr_in *)to; 4567 bzero(&sin6, sizeof(sin6)); 4568 sin6.sin6_family = AF_INET6; 4569 sin6.sin6_len = sizeof(struct sockaddr_in6); 4570 sin6.sin6_addr.s6_addr16[2] = 0xffff; 4571 bcopy(&sin->sin_addr, 4572 &sin6.sin6_addr.s6_addr16[3], 4573 sizeof(sin6.sin6_addr.s6_addr16[3])); 4574 sin6.sin6_port = sin->sin_port; 4575 memcpy(from, (caddr_t)&sin6, sizeof(sin6)); 4576 } 4577 #endif 4578 #if defined(AF_INET6) 4579 { 4580 struct sockaddr_in6 lsa6, *to6; 4581 4582 to6 = (struct sockaddr_in6 *)to; 4583 sctp_recover_scope_mac(to6, (&lsa6)); 4584 4585 } 4586 #endif 4587 } 4588 /* now copy out what data we can */ 4589 if (mp == NULL) { 4590 /* copy out each mbuf in the chain up to length */ 4591 get_more_data: 4592 m = control->data; 4593 while (m) { 4594 /* Move out all we can */ 4595 cp_len = (int)uio->uio_resid; 4596 my_len = (int)SCTP_BUF_LEN(m); 4597 if (cp_len > my_len) { 4598 /* not enough in this buf */ 4599 cp_len = my_len; 4600 } 4601 if (hold_rlock) { 4602 SCTP_INP_READ_UNLOCK(inp); 4603 hold_rlock = 0; 4604 } 4605 if (cp_len > 0) 4606 error = uiomove(mtod(m, char *), cp_len, uio); 
4607 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4608 sctp_misc_ints(SCTP_SORCV_DOESCPY, 4609 so->so_rcv.sb_cc, 4610 cp_len, 4611 0, 4612 0); 4613 #endif 4614 /* re-read */ 4615 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4616 goto release; 4617 } 4618 if (stcb && 4619 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4620 no_rcv_needed = 1; 4621 } 4622 if (error) { 4623 /* error we are out of here */ 4624 goto release; 4625 } 4626 if ((SCTP_BUF_NEXT(m) == NULL) && 4627 (cp_len >= SCTP_BUF_LEN(m)) && 4628 ((control->end_added == 0) || 4629 (control->end_added && (TAILQ_NEXT(control, next) == NULL))) 4630 ) { 4631 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4632 sctp_misc_ints(SCTP_SORCV_DOESLCK, 4633 so->so_rcv.sb_cc, 4634 cp_len, 4635 SCTP_BUF_LEN(m), 4636 control->length); 4637 #endif 4638 SCTP_INP_READ_LOCK(inp); 4639 hold_rlock = 1; 4640 } 4641 if (cp_len == SCTP_BUF_LEN(m)) { 4642 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4643 sctp_misc_ints(SCTP_SORCV_DOESADJ, 4644 so->so_rcv.sb_cc, 4645 control->length, 4646 cp_len, 4647 0); 4648 #endif 4649 if ((SCTP_BUF_NEXT(m) == NULL) && 4650 (control->end_added)) { 4651 out_flags |= MSG_EOR; 4652 } 4653 if (control->spec_flags & M_NOTIFICATION) { 4654 out_flags |= MSG_NOTIFICATION; 4655 } 4656 /* we ate up the mbuf */ 4657 if (in_flags & MSG_PEEK) { 4658 /* just looking */ 4659 m = SCTP_BUF_NEXT(m); 4660 copied_so_far += cp_len; 4661 } else { 4662 /* dispose of the mbuf */ 4663 #ifdef SCTP_SB_LOGGING 4664 sctp_sblog(&so->so_rcv, 4665 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4666 #endif 4667 sctp_sbfree(control, stcb, &so->so_rcv, m); 4668 #ifdef SCTP_SB_LOGGING 4669 sctp_sblog(&so->so_rcv, 4670 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4671 #endif 4672 embuf = m; 4673 copied_so_far += cp_len; 4674 freed_so_far += cp_len; 4675 alen = atomic_fetchadd_int(&control->length, -(cp_len)); 4676 if (alen < cp_len) { 4677 panic("Control length goes negative?"); 4678 } 4679 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4680 sctp_misc_ints(SCTP_SORCV_PASSBF, 4681 so->so_rcv.sb_cc, 4682 control->length, 4683 0, 4684 0); 4685 #endif 4686 control->data = sctp_m_free(m); 4687 m = control->data; 4688 /* 4689 * been through it all, must hold sb 4690 * lock ok to null tail 4691 */ 4692 if (control->data == NULL) { 4693 #ifdef INVARIANTS 4694 if ((control->end_added == 0) || 4695 (TAILQ_NEXT(control, next) == NULL)) { 4696 /* 4697 * If the end is not 4698 * added, OR the 4699 * next is NOT null 4700 * we MUST have the 4701 * lock. 4702 */ 4703 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 4704 panic("Hmm we don't own the lock?"); 4705 } 4706 } 4707 #endif 4708 control->tail_mbuf = NULL; 4709 #ifdef INVARIANTS 4710 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 4711 panic("end_added, nothing left and no MSG_EOR"); 4712 } 4713 #endif 4714 } 4715 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4716 sctp_misc_ints(SCTP_SORCV_ADJD, 4717 so->so_rcv.sb_cc, 4718 control->length, 4719 0, 4720 0); 4721 #endif 4722 } 4723 } else { 4724 /* Do we need to trim the mbuf? */ 4725 if (control->spec_flags & M_NOTIFICATION) { 4726 out_flags |= MSG_NOTIFICATION; 4727 } 4728 if ((in_flags & MSG_PEEK) == 0) { 4729 SCTP_BUF_RESV_UF(m, cp_len); 4730 SCTP_BUF_LEN(m) -= cp_len; 4731 #ifdef SCTP_SB_LOGGING 4732 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, cp_len); 4733 #endif 4734 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 4735 if (stcb) { 4736 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 4737 } 4738 copied_so_far += cp_len; 4739 embuf = m; 4740 freed_so_far += cp_len; 4741 #ifdef SCTP_SB_LOGGING 4742 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 4743 SCTP_LOG_SBRESULT, 0); 4744 #endif 4745 alen = atomic_fetchadd_int(&control->length, -(cp_len)); 4746 if (alen < cp_len) { 4747 panic("Control length goes negative2?"); 4748 } 4749 } else { 4750 copied_so_far += cp_len; 4751 } 4752 } 4753 if ((out_flags & MSG_EOR) || 4754 (uio->uio_resid == 0) 4755 ) { 4756 break; 4757 } 4758 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 4759 (control->do_not_ref_stcb == 0) && 4760 (freed_so_far >= rwnd_req)) { 4761 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 4762 } 4763 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4764 sctp_misc_ints(SCTP_SORCV_BOTWHILE, 4765 so->so_rcv.sb_cc, 4766 control->length, 4767 0, 4768 0); 4769 #endif 4770 4771 } /* end while(m) */ 4772 /* 4773 * At this point we have looked at it all and we either have 4774 * a MSG_EOR/or read all the user wants... <OR> 4775 * control->length == 0. 4776 */ 4777 if ((out_flags & MSG_EOR) && 4778 ((in_flags & MSG_PEEK) == 0)) { 4779 /* we are done with this control */ 4780 if (control->length == 0) { 4781 if (control->data) { 4782 #ifdef INVARIANTS 4783 panic("control->data not null at read eor?"); 4784 #else 4785 printf("Strange, data left in the control buffer .. invarients would panic?\n"); 4786 sctp_m_freem(control->data); 4787 control->data = NULL; 4788 #endif 4789 } 4790 done_with_control: 4791 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4792 sctp_misc_ints(SCTP_SORCV_FREECTL, 4793 so->so_rcv.sb_cc, 4794 0, 4795 0, 4796 0); 4797 #endif 4798 if (TAILQ_NEXT(control, next) == NULL) { 4799 /* 4800 * If we don't have a next we need a 4801 * lock, if there is a next interupt 4802 * is filling ahead of us and we 4803 * don't need a lock to remove this 4804 * guy (which is the head of the 4805 * queue). 4806 */ 4807 if (hold_rlock == 0) { 4808 SCTP_INP_READ_LOCK(inp); 4809 hold_rlock = 1; 4810 } 4811 } 4812 TAILQ_REMOVE(&inp->read_queue, control, next); 4813 /* Add back any hiddend data */ 4814 if (control->held_length) { 4815 held_length = 0; 4816 control->held_length = 0; 4817 wakeup_read_socket = 1; 4818 } 4819 no_rcv_needed = control->do_not_ref_stcb; 4820 sctp_free_remote_addr(control->whoFrom); 4821 control->data = NULL; 4822 sctp_free_a_readq(stcb, control); 4823 control = NULL; 4824 if ((freed_so_far >= rwnd_req) && (no_rcv_needed == 0)) 4825 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 4826 4827 } else { 4828 /* 4829 * The user did not read all of this 4830 * message, turn off the returned MSG_EOR 4831 * since we are leaving more behind on the 4832 * control to read. 4833 */ 4834 #ifdef INVARIANTS 4835 if (control->end_added && (control->data == NULL) && 4836 (control->tail_mbuf == NULL)) { 4837 panic("Gak, control->length is corrupt?"); 4838 } 4839 #endif 4840 no_rcv_needed = control->do_not_ref_stcb; 4841 out_flags &= ~MSG_EOR; 4842 } 4843 } 4844 if (out_flags & MSG_EOR) { 4845 goto release; 4846 } 4847 if ((uio->uio_resid == 0) || 4848 ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1))) 4849 ) { 4850 goto release; 4851 } 4852 /* 4853 * If I hit here the receiver wants more and this message is 4854 * NOT done (pd-api). So two questions. Can we block? if not 4855 * we are done. 
Did the user NOT set MSG_WAITALL? 4856 */ 4857 if (block_allowed == 0) { 4858 goto release; 4859 } 4860 /* 4861 * We need to wait for more data a few things: - We don't 4862 * sbunlock() so we don't get someone else reading. - We 4863 * must be sure to account for the case where what is added 4864 * is NOT to our control when we wakeup. 4865 */ 4866 4867 /* 4868 * Do we need to tell the transport a rwnd update might be 4869 * needed before we go to sleep? 4870 */ 4871 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 4872 ((freed_so_far >= rwnd_req) && 4873 (control->do_not_ref_stcb == 0) && 4874 (no_rcv_needed == 0))) { 4875 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 4876 } 4877 wait_some_more: 4878 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 4879 goto release; 4880 } 4881 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 4882 goto release; 4883 4884 if (hold_rlock == 1) { 4885 SCTP_INP_READ_UNLOCK(inp); 4886 hold_rlock = 0; 4887 } 4888 if (hold_sblock == 0) { 4889 SOCKBUF_LOCK(&so->so_rcv); 4890 hold_sblock = 1; 4891 } 4892 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4893 if (stcb) 4894 sctp_misc_ints(SCTP_SORECV_BLOCKSB, 4895 freed_so_far, 4896 stcb->asoc.my_rwnd, 4897 so->so_rcv.sb_cc, 4898 uio->uio_resid); 4899 else 4900 sctp_misc_ints(SCTP_SORECV_BLOCKSB, 4901 freed_so_far, 4902 0, 4903 so->so_rcv.sb_cc, 4904 uio->uio_resid); 4905 #endif 4906 if (so->so_rcv.sb_cc <= control->held_length) { 4907 error = sbwait(&so->so_rcv); 4908 if (error) { 4909 goto release; 4910 } 4911 control->held_length = 0; 4912 } 4913 if (hold_sblock) { 4914 SOCKBUF_UNLOCK(&so->so_rcv); 4915 hold_sblock = 0; 4916 } 4917 if (control->length == 0) { 4918 /* still nothing here */ 4919 if (control->end_added == 1) { 4920 /* he aborted, or is done i.e.did a shutdown */ 4921 out_flags |= MSG_EOR; 4922 if (control->pdapi_aborted) 4923 out_flags |= MSG_TRUNC; 4924 goto done_with_control; 4925 } 4926 if (so->so_rcv.sb_cc > held_length) { 4927 control->held_length = so->so_rcv.sb_cc; 4928 held_length = 0; 4929 } 4930 goto wait_some_more; 4931 } else if (control->data == NULL) { 4932 /* 4933 * we must re-sync since data is probably being 4934 * added 4935 */ 4936 SCTP_INP_READ_LOCK(inp); 4937 if ((control->length > 0) && (control->data == NULL)) { 4938 /* 4939 * big trouble.. we have the lock and its 4940 * corrupt? 4941 */ 4942 panic("Impossible data==NULL length !=0"); 4943 } 4944 SCTP_INP_READ_UNLOCK(inp); 4945 /* We will fall around to get more data */ 4946 } 4947 goto get_more_data; 4948 } else { 4949 /* copy out the mbuf chain */ 4950 get_more_data2: 4951 /* 4952 * Do we have a uio, I doubt it if so we grab the size from 4953 * it, if not you get it all 4954 */ 4955 if (uio) 4956 cp_len = uio->uio_resid; 4957 else 4958 cp_len = control->length; 4959 4960 if ((uint32_t) cp_len >= control->length) { 4961 /* easy way */ 4962 if ((control->end_added == 0) || 4963 (TAILQ_NEXT(control, next) == NULL)) { 4964 /* Need to get rlock */ 4965 if (hold_rlock == 0) { 4966 SCTP_INP_READ_LOCK(inp); 4967 hold_rlock = 1; 4968 } 4969 } 4970 if (control->end_added) { 4971 out_flags |= MSG_EOR; 4972 } 4973 if (control->spec_flags & M_NOTIFICATION) { 4974 out_flags |= MSG_NOTIFICATION; 4975 } 4976 if (uio) 4977 uio->uio_resid -= control->length; 4978 *mp = control->data; 4979 m = control->data; 4980 while (m) { 4981 #ifdef SCTP_SB_LOGGING 4982 sctp_sblog(&so->so_rcv, 4983 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4984 #endif 4985 sctp_sbfree(control, stcb, &so->so_rcv, m); 4986 freed_so_far += SCTP_BUF_LEN(m); 4987 #ifdef SCTP_SB_LOGGING 4988 sctp_sblog(&so->so_rcv, 4989 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4990 #endif 4991 m = SCTP_BUF_NEXT(m); 4992 } 4993 control->data = control->tail_mbuf = NULL; 4994 control->length = 0; 4995 if (out_flags & MSG_EOR) { 4996 /* Done with this control */ 4997 goto done_with_control; 4998 } 4999 /* still more to do with this conntrol */ 5000 /* do we really support msg_waitall here? */ 5001 if ((block_allowed == 0) || 5002 ((in_flags & MSG_WAITALL) == 0)) { 5003 goto release; 5004 } 5005 wait_some_more2: 5006 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 5007 goto release; 5008 if (hold_rlock == 1) { 5009 SCTP_INP_READ_UNLOCK(inp); 5010 hold_rlock = 0; 5011 } 5012 if (hold_sblock == 0) { 5013 SOCKBUF_LOCK(&so->so_rcv); 5014 hold_sblock = 1; 5015 } 5016 if (so->so_rcv.sb_cc <= control->held_length) { 5017 error = sbwait(&so->so_rcv); 5018 if (error) { 5019 goto release; 5020 } 5021 } 5022 if (hold_sblock) { 5023 SOCKBUF_UNLOCK(&so->so_rcv); 5024 hold_sblock = 0; 5025 } 5026 if (control->length == 0) { 5027 /* still nothing here */ 5028 if (control->end_added == 1) { 5029 /* 5030 * he aborted, or is done i.e. 5031 * shutdown 5032 */ 5033 out_flags |= MSG_EOR; 5034 if (control->pdapi_aborted) 5035 out_flags |= MSG_TRUNC; 5036 goto done_with_control; 5037 } 5038 if (so->so_rcv.sb_cc > held_length) { 5039 control->held_length = so->so_rcv.sb_cc; 5040 /* 5041 * We don't use held_length while 5042 * getting a message 5043 */ 5044 held_length = 0; 5045 } 5046 goto wait_some_more2; 5047 } 5048 goto get_more_data2; 5049 } else { 5050 /* hard way mbuf by mbuf */ 5051 m = control->data; 5052 if (control->end_added == 0) { 5053 /* need the rlock */ 5054 if (hold_rlock == 0) { 5055 SCTP_INP_READ_LOCK(inp); 5056 hold_rlock = 1; 5057 } 5058 } 5059 if (control->spec_flags & M_NOTIFICATION) { 5060 out_flags |= MSG_NOTIFICATION; 5061 } 5062 while ((m) && (cp_len > 0)) { 5063 if (cp_len >= SCTP_BUF_LEN(m)) { 5064 *mp = m; 5065 atomic_subtract_int(&control->length, SCTP_BUF_LEN(m)); 5066 if (uio) 5067 uio->uio_resid -= SCTP_BUF_LEN(m); 5068 cp_len -= SCTP_BUF_LEN(m); 5069 control->data = SCTP_BUF_NEXT(m); 5070 SCTP_BUF_NEXT(m) = NULL; 5071 #ifdef SCTP_SB_LOGGING 5072 sctp_sblog(&so->so_rcv, 5073 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5074 #endif 5075 sctp_sbfree(control, stcb, &so->so_rcv, m); 5076 freed_so_far += SCTP_BUF_LEN(m); 5077 #ifdef SCTP_SB_LOGGING 5078 sctp_sblog(&so->so_rcv, 5079 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5080 #endif 5081 mp = &SCTP_BUF_NEXT(m); 5082 m = control->data; 5083 } else { 5084 /* 5085 * got all he wants and its part of 5086 * this mbuf only. 
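					 * We copy cp_len bytes out with
					 * SCTP_M_COPYM() and trim this mbuf
					 * in place (SCTP_BUF_RESV_UF plus a
					 * smaller SCTP_BUF_LEN) rather than
					 * unlinking it, so the remainder
					 * stays on the read queue.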
5087 */ 5088 if (uio) 5089 uio->uio_resid -= SCTP_BUF_LEN(m); 5090 cp_len -= SCTP_BUF_LEN(m); 5091 if (hold_rlock) { 5092 SCTP_INP_READ_UNLOCK(inp); 5093 hold_rlock = 0; 5094 } 5095 if (hold_sblock) { 5096 SOCKBUF_UNLOCK(&so->so_rcv); 5097 hold_sblock = 0; 5098 } 5099 *mp = SCTP_M_COPYM(m, 0, cp_len, 5100 M_TRYWAIT 5101 ); 5102 #ifdef SCTP_LOCK_LOGGING 5103 sctp_log_lock(inp, stcb, SCTP_LOG_LOCK_SOCKBUF_R); 5104 #endif 5105 if (hold_sblock == 0) { 5106 SOCKBUF_LOCK(&so->so_rcv); 5107 hold_sblock = 1; 5108 } 5109 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5110 goto release; 5111 5112 if (stcb && 5113 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5114 no_rcv_needed = 1; 5115 } 5116 SCTP_BUF_RESV_UF(m, cp_len); 5117 SCTP_BUF_LEN(m) -= cp_len; 5118 #ifdef SCTP_SB_LOGGING 5119 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5120 #endif 5121 freed_so_far += cp_len; 5122 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5123 if (stcb) { 5124 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5125 if ((freed_so_far >= rwnd_req) && 5126 (control->do_not_ref_stcb == 0) && 5127 (no_rcv_needed == 0)) 5128 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5129 } 5130 #ifdef SCTP_SB_LOGGING 5131 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5132 SCTP_LOG_SBRESULT, 0); 5133 #endif 5134 goto release; 5135 } 5136 } 5137 } 5138 } 5139 release: 5140 if (hold_rlock == 1) { 5141 SCTP_INP_READ_UNLOCK(inp); 5142 hold_rlock = 0; 5143 } 5144 if (hold_sblock == 0) { 5145 SOCKBUF_LOCK(&so->so_rcv); 5146 hold_sblock = 1; 5147 } 5148 sbunlock(&so->so_rcv); 5149 5150 release_unlocked: 5151 if (hold_sblock) { 5152 SOCKBUF_UNLOCK(&so->so_rcv); 5153 hold_sblock = 0; 5154 } 5155 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 5156 if ((freed_so_far >= rwnd_req) && 5157 (control && (control->do_not_ref_stcb == 0)) && 5158 (no_rcv_needed == 0)) 5159 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5160 } 5161 if (msg_flags) 5162 *msg_flags |= out_flags; 5163 out: 5164 if (hold_rlock == 1) { 5165 SCTP_INP_READ_UNLOCK(inp); 5166 hold_rlock = 0; 5167 } 5168 if (hold_sblock) { 5169 SOCKBUF_UNLOCK(&so->so_rcv); 5170 hold_sblock = 0; 5171 } 5172 if (freecnt_applied) { 5173 /* 5174 * The lock on the socket buffer protects us so the free 5175 * code will stop. But since we used the socketbuf lock and 5176 * the sender uses the tcb_lock to increment, we need to use 5177 * the atomic add to the refcnt. 5178 */ 5179 if (stcb == NULL) { 5180 panic("stcb for refcnt has gone NULL?"); 5181 } 5182 atomic_add_int(&stcb->asoc.refcnt, -1); 5183 freecnt_applied = 0; 5184 /* Save the value back for next time */ 5185 stcb->freed_by_sorcv_sincelast = freed_so_far; 5186 } 5187 #ifdef SCTP_RECV_RWND_LOGGING 5188 if (stcb) { 5189 sctp_misc_ints(SCTP_SORECV_DONE, 5190 freed_so_far, 5191 ((uio) ? (slen - uio->uio_resid) : slen), 5192 stcb->asoc.my_rwnd, 5193 so->so_rcv.sb_cc); 5194 } else { 5195 sctp_misc_ints(SCTP_SORECV_DONE, 5196 freed_so_far, 5197 ((uio) ? 
(slen - uio->uio_resid) : slen), 5198 0, 5199 so->so_rcv.sb_cc); 5200 } 5201 #endif 5202 if (wakeup_read_socket) { 5203 sctp_sorwakeup(inp, so); 5204 } 5205 return (error); 5206 } 5207 5208 5209 #ifdef SCTP_MBUF_LOGGING 5210 struct mbuf * 5211 sctp_m_free(struct mbuf *m) 5212 { 5213 if (SCTP_BUF_IS_EXTENDED(m)) { 5214 sctp_log_mb(m, SCTP_MBUF_IFREE); 5215 } 5216 return (m_free(m)); 5217 } 5218 5219 void 5220 sctp_m_freem(struct mbuf *mb) 5221 { 5222 while (mb != NULL) 5223 mb = sctp_m_free(mb); 5224 } 5225 5226 #endif 5227 5228 5229 int 5230 sctp_soreceive(so, psa, uio, mp0, controlp, flagsp) 5231 struct socket *so; 5232 struct sockaddr **psa; 5233 struct uio *uio; 5234 struct mbuf **mp0; 5235 struct mbuf **controlp; 5236 int *flagsp; 5237 { 5238 int error, fromlen; 5239 uint8_t sockbuf[256]; 5240 struct sockaddr *from; 5241 struct sctp_extrcvinfo sinfo; 5242 int filling_sinfo = 1; 5243 struct sctp_inpcb *inp; 5244 5245 inp = (struct sctp_inpcb *)so->so_pcb; 5246 /* pickup the assoc we are reading from */ 5247 if (inp == NULL) { 5248 return (EINVAL); 5249 } 5250 if ((sctp_is_feature_off(inp, 5251 SCTP_PCB_FLAGS_RECVDATAIOEVNT)) || 5252 (controlp == NULL)) { 5253 /* user does not want the sndrcv ctl */ 5254 filling_sinfo = 0; 5255 } 5256 if (psa) { 5257 from = (struct sockaddr *)sockbuf; 5258 fromlen = sizeof(sockbuf); 5259 from->sa_len = 0; 5260 } else { 5261 from = NULL; 5262 fromlen = 0; 5263 } 5264 5265 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp, 5266 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 5267 if ((controlp) && (filling_sinfo)) { 5268 /* copy back the sinfo in a CMSG format */ 5269 if (filling_sinfo) 5270 *controlp = sctp_build_ctl_nchunk(inp, 5271 (struct sctp_sndrcvinfo *)&sinfo); 5272 else 5273 *controlp = NULL; 5274 } 5275 if (psa) { 5276 /* copy back the address info */ 5277 if (from && from->sa_len) { 5278 *psa = sodupsockaddr(from, M_NOWAIT); 5279 } else { 5280 *psa = NULL; 5281 } 5282 } 5283 return (error); 5284 } 5285
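
/*
 * Usage note (a hedged sketch, not code from this file): sctp_soreceive()
 * is meant to be wired in as the protocol's soreceive hook (via the
 * pr_usrreqs glue in sctp_usrreq.c / sctp6_usrreq.c), so an ordinary
 * userland read on an SCTP socket lands here, e.g.:
 *
 *	char buf[4096];
 *	struct sockaddr_storage from;
 *	socklen_t fromlen = sizeof(from);
 *	ssize_t n = recvfrom(sd, buf, sizeof(buf), 0,
 *	    (struct sockaddr *)&from, &fromlen);
 *
 * When SCTP_PCB_FLAGS_RECVDATAIOEVNT is enabled and the caller passes a
 * control-message buffer (typically via the sctp_recvmsg(3) wrapper), the
 * per-message sctp_sndrcvinfo is returned as ancillary data built by
 * sctp_build_ctl_nchunk() above.
 */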