/*-
 * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>

#define NUMBER_OF_MTU_SIZES 18


#ifdef SCTP_STAT_LOGGING
int global_sctp_cwnd_log_at = 0;
int global_sctp_cwnd_log_rolled = 0;
struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE];

static uint32_t
sctp_get_time_of_event(void)
{
	struct timeval now;
	uint32_t timeval;

	SCTP_GETPTIME_TIMEVAL(&now);
	timeval = (now.tv_sec % 0x00000fff);
	timeval <<= 20;
	timeval |= now.tv_usec & 0xfffff;
	return (timeval);
}


void
sctp_clr_stat_log(void)
{
	global_sctp_cwnd_log_at = 0;
	global_sctp_cwnd_log_rolled = 0;
}


void
sctp_sblog(struct sockbuf *sb,
    struct sctp_tcb *stcb, int from, int incr)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SB;
	sctp_clog[sctp_cwnd_log_at].x.sb.stcb = stcb;
	sctp_clog[sctp_cwnd_log_at].x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = 0;
	sctp_clog[sctp_cwnd_log_at].x.sb.incr = incr;
}
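/*
 * Each logging routine in this section grabs the next slot in the circular
 * sctp_clog[] array via SCTP_STATLOG_GETREF() and stamps it with
 * sctp_get_time_of_event(), which packs the seconds (modulo 0xfff) into the
 * upper 12 bits and the microseconds into the low 20 bits of one 32-bit
 * word.  A minimal decode sketch, for illustration only:
 *
 *	uint32_t ev = sctp_clog[i].time_event;
 *	uint32_t sec = ev >> 20;	(seconds, modulo 0xfff)
 *	uint32_t usec = ev & 0xfffff;	(microseconds)
 */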

void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = 0;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CLOSE;
	sctp_clog[sctp_cwnd_log_at].x.close.inp = (void *)inp;
	sctp_clog[sctp_cwnd_log_at].x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog[sctp_cwnd_log_at].x.close.stcb = (void *)stcb;
		sctp_clog[sctp_cwnd_log_at].x.close.state = (uint16_t) stcb->asoc.state;
	} else {
		sctp_clog[sctp_cwnd_log_at].x.close.stcb = 0;
		sctp_clog[sctp_cwnd_log_at].x.close.state = 0;
	}
	sctp_clog[sctp_cwnd_log_at].x.close.loc = loc;
}


void
rto_logging(struct sctp_nets *net, int from)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RTT;
	sctp_clog[sctp_cwnd_log_at].x.rto.net = (void *)net;
	sctp_clog[sctp_cwnd_log_at].x.rto.rtt = net->prev_rtt;
	sctp_clog[sctp_cwnd_log_at].x.rto.rttvar = net->rtt_variance;
	sctp_clog[sctp_cwnd_log_at].x.rto.direction = net->rto_variance_dir;
}

void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM;
	sctp_clog[sctp_cwnd_log_at].x.strlog.stcb = stcb;
	sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn;
	sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq;
	sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
	sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
	sctp_clog[sctp_cwnd_log_at].x.strlog.strm = stream;
}

void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) action;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_NAGLE;
	sctp_clog[sctp_cwnd_log_at].x.nagle.stcb = (void *)stcb;
	sctp_clog[sctp_cwnd_log_at].x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog[sctp_cwnd_log_at].x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_flight = stcb->asoc.total_flight_count;
}


void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SACK;
	sctp_clog[sctp_cwnd_log_at].x.sack.cumack = cumack;
	sctp_clog[sctp_cwnd_log_at].x.sack.oldcumack = old_cumack;
	sctp_clog[sctp_cwnd_log_at].x.sack.tsn = tsn;
	sctp_clog[sctp_cwnd_log_at].x.sack.numGaps = gaps;
	sctp_clog[sctp_cwnd_log_at].x.sack.numDups = dups;
}

void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAP;
	sctp_clog[sctp_cwnd_log_at].x.map.base = map;
	sctp_clog[sctp_cwnd_log_at].x.map.cum = cum;
	sctp_clog[sctp_cwnd_log_at].x.map.high = high;
}

void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
    int from)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_FR;
	sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn;
	sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn;
}


void
sctp_log_mb(struct mbuf *m, int from)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBUF;
	sctp_clog[sctp_cwnd_log_at].x.mb.mp = m;
	sctp_clog[sctp_cwnd_log_at].x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
	sctp_clog[sctp_cwnd_log_at].x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
	sctp_clog[sctp_cwnd_log_at].x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog[sctp_cwnd_log_at].x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog[sctp_cwnd_log_at].x.mb.ext = 0;
		sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = 0;
	}
}


void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
    int from)
{
	int sctp_cwnd_log_at;

	if (control == NULL) {
		printf("Gak log of NULL?\n");
		return;
	}
	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM;
	sctp_clog[sctp_cwnd_log_at].x.strlog.stcb = control->stcb;
	sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = control->sinfo_ssn;
	sctp_clog[sctp_cwnd_log_at].x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = poschk->sinfo_ssn;
	} else {
		sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
		sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
	}
}

void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CWND;
	sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255;
	else
		sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255;
	else
		sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
		sctp_clog[sctp_cwnd_log_at].x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog[sctp_cwnd_log_at].x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment;
}

void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_LOCK_EVENT;
	if (inp) {
		sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)NULL;
	}
	sctp_clog[sctp_cwnd_log_at].x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog[sctp_cwnd_log_at].x.lock.info_lock = mtx_owned(&sctppcbinfo.ipi_ep_mtx);
	if (inp->sctp_socket) {
		sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
}

void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAXBURST;
	sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error;
	sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
	sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255;
	else
		sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255;
	else
		sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
}

void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND;
	sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
	sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size;
	sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
	sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0;
}

void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND;
	sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
	sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size;
	sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
	sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval;
}

void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBCNT;
	sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq;
	sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change = book;
	sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt;
}

void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_MISC_EVENT;
	sctp_clog[sctp_cwnd_log_at].x.misc.log1 = a;
	sctp_clog[sctp_cwnd_log_at].x.misc.log2 = b;
	sctp_clog[sctp_cwnd_log_at].x.misc.log3 = c;
	sctp_clog[sctp_cwnd_log_at].x.misc.log4 = d;
}

void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_WAKE;
	sctp_clog[sctp_cwnd_log_at].x.wake.stcb = (void *)stcb;
	sctp_clog[sctp_cwnd_log_at].x.wake.wake_cnt = wake_cnt;
	sctp_clog[sctp_cwnd_log_at].x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog[sctp_cwnd_log_at].x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog[sctp_cwnd_log_at].x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
	else
		sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = 0xff;

	sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = 0xff;
	}
}

void
sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
{
	int sctp_cwnd_log_at;

	SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
	sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
	sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
	sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_BLOCK;
	sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog[sctp_cwnd_log_at].x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
	sctp_clog[sctp_cwnd_log_at].x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
	sctp_clog[sctp_cwnd_log_at].x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
	sctp_clog[sctp_cwnd_log_at].x.blk.sndlen = sendlen;
}

int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	int sctp_cwnd_log_at;
	struct sctp_cwnd_log_req *req;
	size_t size_limit;
	int num, i, at, cnt_out = 0;

	if (*optsize < sizeof(struct sctp_cwnd_log_req)) {
		return (EINVAL);
	}
	size_limit = (*optsize - sizeof(struct sctp_cwnd_log_req));
	if (size_limit < sizeof(struct sctp_cwnd_log)) {
		return (EINVAL);
	}
	sctp_cwnd_log_at = global_sctp_cwnd_log_at;
	req = (struct sctp_cwnd_log_req *)optval;
	num = size_limit / sizeof(struct sctp_cwnd_log);
	if (global_sctp_cwnd_log_rolled) {
		req->num_in_log = SCTP_STAT_LOG_SIZE;
	} else {
		req->num_in_log = sctp_cwnd_log_at;
		/*
		 * if the log has not rolled, we don't let you have old
		 * data.
		 */
		if (req->end_at > sctp_cwnd_log_at) {
			req->end_at = sctp_cwnd_log_at;
		}
	}
	if ((num < SCTP_STAT_LOG_SIZE) &&
	    ((global_sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) {
		/* we can't return all of it */
		if (((req->start_at == 0) && (req->end_at == 0)) ||
		    (req->start_at >= SCTP_STAT_LOG_SIZE) ||
		    (req->end_at >= SCTP_STAT_LOG_SIZE)) {
			/* No user request, or the user request is whacked. */
			req->num_ret = num;
			req->end_at = sctp_cwnd_log_at - 1;
			if ((sctp_cwnd_log_at - num) < 0) {
				int cc;

				cc = num - sctp_cwnd_log_at;
				req->start_at = SCTP_STAT_LOG_SIZE - cc;
			} else {
				req->start_at = sctp_cwnd_log_at - num;
			}
		} else {
			/* a user request */
			int cc;

			if (req->start_at > req->end_at) {
				cc = (SCTP_STAT_LOG_SIZE - req->start_at) +
				    (req->end_at + 1);
			} else {

				cc = (req->end_at - req->start_at) + 1;
			}
			if (cc < num) {
				num = cc;
			}
			req->num_ret = num;
		}
	} else {
		/* We can return all of it */
		req->start_at = 0;
		req->end_at = sctp_cwnd_log_at - 1;
		req->num_ret = sctp_cwnd_log_at;
	}
#ifdef INVARIANTS
	if (req->num_ret > num) {
		panic("Bad statlog get?");
	}
#endif
	for (i = 0, at = req->start_at; i < req->num_ret; i++) {
		req->log[i] = sctp_clog[at];
		cnt_out++;
		at++;
		if (at >= SCTP_STAT_LOG_SIZE)
			at = 0;
	}
	*optsize = (cnt_out * sizeof(struct sctp_cwnd_log)) + sizeof(struct sctp_cwnd_log_req);
	return (0);
}

#endif

#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			printf("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			printf("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			printf("\n");
			cnt = 0;
		}
		printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
		    (uint32_t) sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			printf("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			printf("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			printf("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			printf("\n");
			cnt = 0;
		}
		printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
		    (uint32_t) sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			printf("\n");
	}
	printf("\n");
}
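/*
 * The audit trail is a ring of SCTP_AUDIT_SIZE two-byte records: byte 0 is
 * an event code (0xAA for an audit pass, 0xAF for a detected inconsistency,
 * 0xF0/0xF1 around timer events, and so on) and byte 1 is a small detail
 * value such as the caller id or a retransmit count.
 * sctp_print_audit_report() walks the ring from sctp_audit_indx around to
 * the start, breaking the output on the 0xe001/0xf0xx/0xc001 markers and
 * every 14 records; the exact meaning of each marker is a convention of the
 * callers, not something enforced here.
 */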
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		printf("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		printf("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		printf("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		printf("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				printf("net:%x flight was %d corrected to %d\n",
				    (uint32_t) lnet, lnet->flight_size, tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * a list of sizes based on typical MTUs, used only if the next hop size is
 * not returned.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
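/*
 * find_next_best_mtu() below returns the largest entry in this table that
 * is still smaller than the datagram size that failed, falling back to the
 * first entry for very small sizes.  With example inputs, for illustration
 * only:
 *
 *	find_next_best_mtu(1400) returns 1006
 *	find_next_best_mtu(9000) returns 8166
 *	find_next_best_mtu(50)   returns 68
 *
 * NUMBER_OF_MTU_SIZES (defined above as 18) must match the number of
 * entries in this table.
 */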
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
	SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
		SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
	}
}

int
find_next_best_mtu(int totsz)
{
	int i, perfer;

	/*
	 * if we are in here we must find the next best fit based on the
	 * size of the dg that failed to be sent.
	 */
	perfer = 0;
	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
		if (totsz < sctp_mtu_sizes[i]) {
			perfer = i - 1;
			if (perfer < 0)
				perfer = 0;
			break;
		}
	}
	return (sctp_mtu_sizes[perfer]);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use MD5/SHA-1 to hash with our good random numbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbledygook in the random store, which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
	m->store_at = 0;
	sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *m)
{
	/*
	 * A true implementation should use a random selection process to
	 * get the initial stream sequence number, using RFC 1750 as a good
	 * guideline.
	 */
	uint32_t x, *xp;
	uint8_t *p;

	if (m->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = m->initial_sequence_debug;
		m->initial_sequence_debug++;
		return (ret);
	}
	if ((m->store_at + sizeof(u_long)) > SCTP_SIGNATURE_SIZE) {
		/* Refill the random store */
		sctp_fill_random_store(m);
	}
	p = &m->random_store[(int)m->store_at];
	xp = (uint32_t *) p;
	x = *xp;
	m->store_at += sizeof(uint32_t);
	return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *m)
{
	u_long x, not_done;
	struct timeval now;

	SCTP_GETTIME_TIMEVAL(&now);
	not_done = 1;
	while (not_done) {
		x = sctp_select_initial_TSN(&m->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (sctp_is_vtag_good(m, x, &now)) {
			not_done = 0;
		}
	}
	return (x);
}
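/*
 * The two selectors above share the endpoint's random store:
 * sctp_select_initial_TSN() hands out 4 bytes at a time from
 * m->random_store and calls sctp_fill_random_store() to re-key it (an HMAC
 * of the endpoint's random numbers and an incrementing counter) whenever
 * fewer than sizeof(u_long) bytes remain.  sctp_select_a_tag() simply
 * retries until it draws a nonzero value that sctp_is_vtag_good() accepts.
 * A typical caller (illustration only, see sctp_init_asoc() below) does:
 *
 *	asoc->my_vtag = sctp_select_a_tag(inp);
 *	asoc->init_seq_number = sctp_select_initial_TSN(&inp->sctp_ep);
 */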

int
sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
    int for_a_init, uint32_t override_tag, uint32_t vrf_id)
{
	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my
	 * peer.  Not sure what to do with these right now; we will need to
	 * come up with a way to set them.  We may need to pass them through
	 * from the caller in the sctp_aloc_assoc() function.
	 */
	int i;

	/* init all variables to a known value. */
	asoc->state = SCTP_STATE_INUSE;
	asoc->max_burst = m->sctp_ep.max_burst;
	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = m->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = (uint8_t) sctp_cmt_on_off;
#ifdef INET
	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
#else
	asoc->default_tos = 0;
#endif

#ifdef INET6
	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
#else
	asoc->default_flowlabel = 0;
#endif
	if (override_tag) {
		struct timeval now;

		SCTP_GETTIME_TIMEVAL(&now);
		if (sctp_is_vtag_good(m, override_tag, &now)) {
			asoc->my_vtag = override_tag;
		} else {
			return (ENOMEM);
		}

	} else {
		asoc->my_vtag = sctp_select_a_tag(m);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(m);
	asoc->peer_vtag_nonce = sctp_select_a_tag(m);
	asoc->vrf_id = vrf_id;

	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
		asoc->hb_is_disabled = 1;
	else
		asoc->hb_is_disabled = 0;

	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	asoc->assoc_id = asoc->my_vtag;
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&m->sctp_ep);
	/* we are optimistic here */
	asoc->peer_supports_pktdrop = 1;

	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_data_came_from = NULL;

	/* This will need to be adjusted */
	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = m->sctp_ep.initial_rto;

	asoc->max_init_times = m->sctp_ep.max_init_times;
	asoc->max_send_times = m->sctp_ep.max_send_times;
	asoc->def_net_failure = m->sctp_ep.def_net_failure;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	/* ECN Nonce initialization */
	asoc->context = m->sctp_context;
	asoc->def_send = m->def_send;
	asoc->ecn_nonce_allowed = 0;
	asoc->receiver_nonce_sum = 1;
	asoc->nonce_sum_expect_base = 1;
	asoc->nonce_sum_check = 1;
	asoc->nonce_resync_tsn = 0;
	asoc->nonce_wait_for_ecne = 0;
	asoc->nonce_wait_tsn = 0;
	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		struct in6pcb *inp6;

		/* Its a V6 socket */
		inp6 = (struct in6pcb *)m;
		asoc->ipv6_addr_legal = 1;
		/* Now look at the binding flag to see if V4 will be legal */
		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
			asoc->ipv4_addr_legal = 1;
		} else {
			/* V4 addresses are NOT legal on the association */
			asoc->ipv4_addr_legal = 0;
		}
	} else {
		/* Its a V4 socket, no - V6 */
		asoc->ipv4_addr_legal = 1;
		asoc->ipv6_addr_legal = 0;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);

	asoc->smallest_mtu = m->sctp_frag_point;
	asoc->minrto = m->sctp_ep.sctp_minrto;
	asoc->maxrto = m->sctp_ep.sctp_maxrto;

	asoc->locked_on_sending = NULL;
	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	asoc->last_asconf_ack_sent = NULL;
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->streamoutcnt = asoc->pre_open_streams =
	    m->sctp_ep.pre_open_stream_count;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    "StreamsOut");
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		asoc->strmout[i].next_sequence_sent = 0x0;
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->strmout[i].stream_no = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->strmout[i].next_spoke.tqe_next = 0;
		asoc->strmout[i].next_spoke.tqe_prev = 0;
	}
	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    "MappingArray");
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->free_strmoq);
	TAILQ_INIT(&asoc->out_wheel);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->reasmqueue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	SCTP_GETTIME_TIMEVAL(&asoc->discontinuity_time);

	return (0);
}

int
sctp_expand_mapping_array(struct sctp_association *asoc)
{
	/* mapping array needs to grow */
	uint8_t *new_array;
	uint16_t new_size;

	new_size = asoc->mapping_array_size + SCTP_MAPPING_ARRAY_INCR;
	SCTP_MALLOC(new_array, uint8_t *, new_size, "MappingArray");
	if (new_array == NULL) {
		/* can't get more, forget it */
		printf("No memory for expansion of SCTP mapping array %d\n",
		    new_size);
		return (-1);
	}
	memset(new_array, 0, new_size);
	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array);
	asoc->mapping_array = new_array;
	asoc->mapping_array_size = new_size;
	return (0);
}
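/*
 * The mapping array starts out at SCTP_INITIAL_MAPPING_ARRAY bytes (see
 * sctp_init_asoc() above) and grows in SCTP_MAPPING_ARRAY_INCR-byte steps:
 * a larger zeroed buffer is allocated, the old contents are copied in, and
 * the old buffer is freed, so existing state is preserved across the
 * resize.  Elsewhere in the stack the array is treated as a bitmap of
 * received TSNs relative to the association's base TSN, which is why it
 * grows in whole bytes rather than per entry.
 */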

#if defined(SCTP_USE_THREAD_BASED_ITERATOR)
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	if (it->inp)
		SCTP_INP_DECR_REF(it->inp);

	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/* mark the current iterator on the endpoint */
	it->inp->inp_starting_point_for_iterator = it;
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	if ((it->stcb) &&
	    (it->stcb->asoc.stcb_starting_point_for_iterator == it)) {
		it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* mark the current iterator on the assoc */
		it->stcb->asoc.stcb_starting_point_for_iterator = it;
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	SCTP_INP_WLOCK(it->inp);
	it->inp->inp_starting_point_for_iterator = NULL;
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it = NULL;

	/* This function is called with the WQ lock in place */

	sctppcbinfo.iterator_running = 1;
again:
	it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
	while (it) {
		/* now lets work on this one */
		TAILQ_REMOVE(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		sctp_iterator_work(it);
		SCTP_IPI_ITERATOR_WQ_LOCK();
		it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
	}
	if (TAILQ_FIRST(&sctppcbinfo.iteratorhead)) {
		goto again;
	}
	sctppcbinfo.iterator_running = 0;
	return;
}

#endif
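/*
 * With the thread-based iterator, callers queue work items on
 * sctppcbinfo.iteratorhead and sctp_iterator_worker() drains that list,
 * dropping the work-queue lock around each call to sctp_iterator_work() so
 * new requests can be queued while one runs.  sctp_iterator_work() itself
 * walks every endpoint matching the requested pcb flags/features and every
 * association matching asoc_state, pausing after SCTP_ITERATOR_MAX_AT_ONCE
 * associations to let other threads take the locks.
 * sctp_handle_addr_wq() below is one such user: it bundles the pending
 * address-list changes into an sctp_asconf_iterator and hands them to
 * sctp_initiate_iterator().
 */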

static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), "SCTP_ASCONF_ITERATOR");
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;
	SCTP_IPI_ITERATOR_WQ_LOCK();
	wi = LIST_FIRST(&sctppcbinfo.addr_wq);
	while (wi != NULL) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
		wi = LIST_FIRST(&sctppcbinfo.addr_wq);
	}
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
	if (asc->cnt == 0) {
		SCTP_FREE(asc);
	} else {
		sctp_initiate_iterator(sctp_iterator_ep,
		    sctp_iterator_stcb,
		    NULL,	/* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE, (void *)asc, 0,
		    sctp_iterator_end, NULL, 0);
	}

}

void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	int did_output;
	struct sctp_iterator *it = NULL;


	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * printf("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * printf("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
		it = (struct sctp_iterator *)inp;
		inp = NULL;
	}
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		if (stcb->asoc.state == 0) {
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	tmr->stopped_from = 0xa005;
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
		printf("Timer type %d goes off\n", tmr->type);
	}
#endif				/* SCTP_DEBUG */
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
	}
	/* record in stopped_from which timeout occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		SCTP_STAT_INCR(sctps_timoiterator);
		sctp_iterator_timer(it);
		break;
	case SCTP_TIMER_TYPE_SEND:
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there are chunks on the sent queue
			 * but no timers running, something is wrong... so
			 * we start a timer on the first chunk on the send
			 * queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *net;
			int cnt_of_unconf = 0;

			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
				if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (net->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, net, cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, net);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
			    stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			struct timeval tv;
			int i, secret;

			SCTP_STAT_INCR(sctps_timosecret);
			SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL);
		/* no need to unlock on tcb its gone */
		goto out_decr;
		break;

	case SCTP_TIMER_TYPE_STRRESET:
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
		/*
		 * free asoc, always unlocks (or destroys) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, 1, 0);
		goto out_no_decr;
		break;
	default:
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			printf("sctp_timeout_handler:unknown timer %d\n",
			    tmr->type);
		}
#endif				/* SCTP_DEBUG */
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find an ECN echo on the chain.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:

#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
		printf("Timer now complete (type %d)\n", tmr->type);
	}
#endif				/* SCTP_DEBUG */
	if (inp) {
	}
}

int
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int to_ticks;
	struct sctp_timer *tmr;

	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return (EFAULT);

	to_ticks = 0;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &sctppcbinfo.addr_wq_timer;
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			struct sctp_iterator *it;

			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
			to_ticks = SCTP_ITERATOR_TICKS;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return (EFAULT);
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default, usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * usually about 200ms.
		 */
1785 */ 1786 if (stcb == NULL) { 1787 return (EFAULT); 1788 } 1789 tmr = &stcb->asoc.dack_timer; 1790 to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack); 1791 break; 1792 case SCTP_TIMER_TYPE_SHUTDOWN: 1793 /* Here we use the RTO of the destination. */ 1794 if ((stcb == NULL) || (net == NULL)) { 1795 return (EFAULT); 1796 } 1797 if (net->RTO == 0) { 1798 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1799 } else { 1800 to_ticks = MSEC_TO_TICKS(net->RTO); 1801 } 1802 tmr = &net->rxt_timer; 1803 break; 1804 case SCTP_TIMER_TYPE_HEARTBEAT: 1805 /* 1806 * the net is used here so that we can add in the RTO. Even 1807 * though we use a different timer. We also add the HB timer 1808 * PLUS a random jitter. 1809 */ 1810 if (stcb == NULL) { 1811 return (EFAULT); 1812 } { 1813 uint32_t rndval; 1814 uint8_t this_random; 1815 int cnt_of_unconf = 0; 1816 struct sctp_nets *lnet; 1817 1818 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 1819 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) && 1820 (lnet->dest_state & SCTP_ADDR_REACHABLE)) { 1821 cnt_of_unconf++; 1822 } 1823 } 1824 if (cnt_of_unconf) { 1825 lnet = NULL; 1826 sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf); 1827 } 1828 if (stcb->asoc.hb_random_idx > 3) { 1829 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 1830 memcpy(stcb->asoc.hb_random_values, &rndval, 1831 sizeof(stcb->asoc.hb_random_values)); 1832 stcb->asoc.hb_random_idx = 0; 1833 } 1834 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 1835 stcb->asoc.hb_random_idx++; 1836 stcb->asoc.hb_ect_randombit = 0; 1837 /* 1838 * this_random will be 0 - 256 ms RTO is in ms. 1839 */ 1840 if ((stcb->asoc.hb_is_disabled) && 1841 (cnt_of_unconf == 0)) { 1842 return (0); 1843 } 1844 if (net) { 1845 struct sctp_nets *lnet; 1846 int delay; 1847 1848 delay = stcb->asoc.heart_beat_delay; 1849 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 1850 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) && 1851 ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) && 1852 (lnet->dest_state & SCTP_ADDR_REACHABLE)) { 1853 delay = 0; 1854 } 1855 } 1856 if (net->RTO == 0) { 1857 /* Never been checked */ 1858 to_ticks = this_random + stcb->asoc.initial_rto + delay; 1859 } else { 1860 /* set rto_val to the ms */ 1861 to_ticks = delay + net->RTO + this_random; 1862 } 1863 } else { 1864 if (cnt_of_unconf) { 1865 to_ticks = this_random + stcb->asoc.initial_rto; 1866 } else { 1867 to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto; 1868 } 1869 } 1870 /* 1871 * Now we must convert the to_ticks that are now in 1872 * ms to ticks. 1873 */ 1874 to_ticks = MSEC_TO_TICKS(to_ticks); 1875 tmr = &stcb->asoc.hb_timer; 1876 } 1877 break; 1878 case SCTP_TIMER_TYPE_COOKIE: 1879 /* 1880 * Here we can use the RTO timer from the network since one 1881 * RTT was compelete. If a retran happened then we will be 1882 * using the RTO initial value. 1883 */ 1884 if ((stcb == NULL) || (net == NULL)) { 1885 return (EFAULT); 1886 } 1887 if (net->RTO == 0) { 1888 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1889 } else { 1890 to_ticks = MSEC_TO_TICKS(net->RTO); 1891 } 1892 tmr = &net->rxt_timer; 1893 break; 1894 case SCTP_TIMER_TYPE_NEWCOOKIE: 1895 /* 1896 * nothing needed but the endpoint here ususually about 60 1897 * minutes. 
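 * (Added cross-reference: when this timer fires, the
 * SCTP_TIMER_TYPE_NEWCOOKIE case in sctp_timeout_handler() above rotates
 * current_secret_number, refills that secret's key words using
 * sctp_select_initial_TSN(), and then re-arms this same timer.)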
1898 */ 1899 tmr = &inp->sctp_ep.signature_change; 1900 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 1901 break; 1902 case SCTP_TIMER_TYPE_ASOCKILL: 1903 if (stcb == NULL) { 1904 return (EFAULT); 1905 } 1906 tmr = &stcb->asoc.strreset_timer; 1907 to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT); 1908 break; 1909 case SCTP_TIMER_TYPE_INPKILL: 1910 /* 1911 * The inp is setup to die. We re-use the signature_chage 1912 * timer since that has stopped and we are in the GONE 1913 * state. 1914 */ 1915 tmr = &inp->sctp_ep.signature_change; 1916 to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT); 1917 break; 1918 case SCTP_TIMER_TYPE_PATHMTURAISE: 1919 /* 1920 * Here we use the value found in the EP for PMTU ususually 1921 * about 10 minutes. 1922 */ 1923 if (stcb == NULL) { 1924 return (EFAULT); 1925 } 1926 if (net == NULL) { 1927 return (EFAULT); 1928 } 1929 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 1930 tmr = &net->pmtu_timer; 1931 break; 1932 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1933 /* Here we use the RTO of the destination */ 1934 if ((stcb == NULL) || (net == NULL)) { 1935 return (EFAULT); 1936 } 1937 if (net->RTO == 0) { 1938 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1939 } else { 1940 to_ticks = MSEC_TO_TICKS(net->RTO); 1941 } 1942 tmr = &net->rxt_timer; 1943 break; 1944 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1945 /* 1946 * Here we use the endpoints shutdown guard timer usually 1947 * about 3 minutes. 1948 */ 1949 if (stcb == NULL) { 1950 return (EFAULT); 1951 } 1952 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 1953 tmr = &stcb->asoc.shut_guard_timer; 1954 break; 1955 case SCTP_TIMER_TYPE_STRRESET: 1956 /* 1957 * Here the timer comes from the inp but its value is from 1958 * the RTO. 1959 */ 1960 if ((stcb == NULL) || (net == NULL)) { 1961 return (EFAULT); 1962 } 1963 if (net->RTO == 0) { 1964 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1965 } else { 1966 to_ticks = MSEC_TO_TICKS(net->RTO); 1967 } 1968 tmr = &stcb->asoc.strreset_timer; 1969 break; 1970 1971 case SCTP_TIMER_TYPE_EARLYFR: 1972 { 1973 unsigned int msec; 1974 1975 if ((stcb == NULL) || (net == NULL)) { 1976 return (EFAULT); 1977 } 1978 if (net->flight_size > net->cwnd) { 1979 /* no need to start */ 1980 return (0); 1981 } 1982 SCTP_STAT_INCR(sctps_earlyfrstart); 1983 if (net->lastsa == 0) { 1984 /* Hmm no rtt estimate yet? */ 1985 msec = stcb->asoc.initial_rto >> 2; 1986 } else { 1987 msec = ((net->lastsa >> 2) + net->lastsv) >> 1; 1988 } 1989 if (msec < sctp_early_fr_msec) { 1990 msec = sctp_early_fr_msec; 1991 if (msec < SCTP_MINFR_MSEC_FLOOR) { 1992 msec = SCTP_MINFR_MSEC_FLOOR; 1993 } 1994 } 1995 to_ticks = MSEC_TO_TICKS(msec); 1996 tmr = &net->fr_timer; 1997 } 1998 break; 1999 case SCTP_TIMER_TYPE_ASCONF: 2000 /* 2001 * Here the timer comes from the inp but its value is from 2002 * the RTO. 
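 * (Note added for clarity: the interval is indeed derived from the
 * destination's RTO, but the timer structure itself lives in the
 * association, stcb->asoc.asconf_timer, as set just below.)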
2003 */ 2004 if ((stcb == NULL) || (net == NULL)) { 2005 return (EFAULT); 2006 } 2007 if (net->RTO == 0) { 2008 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2009 } else { 2010 to_ticks = MSEC_TO_TICKS(net->RTO); 2011 } 2012 tmr = &stcb->asoc.asconf_timer; 2013 break; 2014 case SCTP_TIMER_TYPE_AUTOCLOSE: 2015 if (stcb == NULL) { 2016 return (EFAULT); 2017 } 2018 if (stcb->asoc.sctp_autoclose_ticks == 0) { 2019 /* 2020 * Really an error since stcb is NOT set to 2021 * autoclose 2022 */ 2023 return (0); 2024 } 2025 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2026 tmr = &stcb->asoc.autoclose_timer; 2027 break; 2028 default: 2029 #ifdef SCTP_DEBUG 2030 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 2031 printf("sctp_timer_start:Unknown timer type %d\n", 2032 t_type); 2033 } 2034 #endif /* SCTP_DEBUG */ 2035 return (EFAULT); 2036 break; 2037 }; 2038 if ((to_ticks <= 0) || (tmr == NULL)) { 2039 #ifdef SCTP_DEBUG 2040 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 2041 printf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n", 2042 t_type, to_ticks, tmr); 2043 } 2044 #endif /* SCTP_DEBUG */ 2045 return (EFAULT); 2046 } 2047 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2048 /* 2049 * we do NOT allow you to have it already running. if it is 2050 * we leave the current one up unchanged 2051 */ 2052 return (EALREADY); 2053 } 2054 /* At this point we can proceed */ 2055 if (t_type == SCTP_TIMER_TYPE_SEND) { 2056 stcb->asoc.num_send_timers_up++; 2057 } 2058 tmr->stopped_from = 0; 2059 tmr->type = t_type; 2060 tmr->ep = (void *)inp; 2061 tmr->tcb = (void *)stcb; 2062 tmr->net = (void *)net; 2063 tmr->self = (void *)tmr; 2064 tmr->ticks = ticks; 2065 SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr); 2066 return (0); 2067 } 2068 2069 int 2070 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2071 struct sctp_nets *net, uint32_t from) 2072 { 2073 struct sctp_timer *tmr; 2074 2075 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && 2076 (inp == NULL)) 2077 return (EFAULT); 2078 2079 tmr = NULL; 2080 if (stcb) { 2081 SCTP_TCB_LOCK_ASSERT(stcb); 2082 } 2083 switch (t_type) { 2084 case SCTP_TIMER_TYPE_ADDR_WQ: 2085 tmr = &sctppcbinfo.addr_wq_timer; 2086 break; 2087 case SCTP_TIMER_TYPE_EARLYFR: 2088 if ((stcb == NULL) || (net == NULL)) { 2089 return (EFAULT); 2090 } 2091 tmr = &net->fr_timer; 2092 SCTP_STAT_INCR(sctps_earlyfrstop); 2093 break; 2094 case SCTP_TIMER_TYPE_ITERATOR: 2095 { 2096 struct sctp_iterator *it; 2097 2098 it = (struct sctp_iterator *)inp; 2099 tmr = &it->tmr; 2100 } 2101 break; 2102 case SCTP_TIMER_TYPE_SEND: 2103 if ((stcb == NULL) || (net == NULL)) { 2104 return (EFAULT); 2105 } 2106 tmr = &net->rxt_timer; 2107 break; 2108 case SCTP_TIMER_TYPE_INIT: 2109 if ((stcb == NULL) || (net == NULL)) { 2110 return (EFAULT); 2111 } 2112 tmr = &net->rxt_timer; 2113 break; 2114 case SCTP_TIMER_TYPE_RECV: 2115 if (stcb == NULL) { 2116 return (EFAULT); 2117 } 2118 tmr = &stcb->asoc.dack_timer; 2119 break; 2120 case SCTP_TIMER_TYPE_SHUTDOWN: 2121 if ((stcb == NULL) || (net == NULL)) { 2122 return (EFAULT); 2123 } 2124 tmr = &net->rxt_timer; 2125 break; 2126 case SCTP_TIMER_TYPE_HEARTBEAT: 2127 if (stcb == NULL) { 2128 return (EFAULT); 2129 } 2130 tmr = &stcb->asoc.hb_timer; 2131 break; 2132 case SCTP_TIMER_TYPE_COOKIE: 2133 if ((stcb == NULL) || (net == NULL)) { 2134 return (EFAULT); 2135 } 2136 tmr = &net->rxt_timer; 2137 break; 2138 case SCTP_TIMER_TYPE_NEWCOOKIE: 2139 /* nothing needed but the endpoint here */ 2140 tmr = &inp->sctp_ep.signature_change; 2141 
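/*
 * Illustrative note (added commentary, not original): because this timer
 * structure is shared with SCTP_TIMER_TYPE_INPKILL, the generic guard
 * further down in this function,
 *
 *	if ((tmr->type != t_type) && tmr->type)
 *		return (0);
 *
 * is what prevents a NEWCOOKIE stop request from cancelling a pending
 * INP-kill timer, and vice versa.
 */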
/* 2142 * We re-use the newcookie timer for the INP kill timer. We 2143 * must assure that we do not kill it by accident. 2144 */ 2145 break; 2146 case SCTP_TIMER_TYPE_ASOCKILL: 2147 /* 2148 * Stop the asoc kill timer. 2149 */ 2150 if (stcb == NULL) { 2151 return (EFAULT); 2152 } 2153 tmr = &stcb->asoc.strreset_timer; 2154 break; 2155 2156 case SCTP_TIMER_TYPE_INPKILL: 2157 /* 2158 * The inp is setup to die. We re-use the signature_chage 2159 * timer since that has stopped and we are in the GONE 2160 * state. 2161 */ 2162 tmr = &inp->sctp_ep.signature_change; 2163 break; 2164 case SCTP_TIMER_TYPE_PATHMTURAISE: 2165 if ((stcb == NULL) || (net == NULL)) { 2166 return (EFAULT); 2167 } 2168 tmr = &net->pmtu_timer; 2169 break; 2170 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2171 if ((stcb == NULL) || (net == NULL)) { 2172 return (EFAULT); 2173 } 2174 tmr = &net->rxt_timer; 2175 break; 2176 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2177 if (stcb == NULL) { 2178 return (EFAULT); 2179 } 2180 tmr = &stcb->asoc.shut_guard_timer; 2181 break; 2182 case SCTP_TIMER_TYPE_STRRESET: 2183 if (stcb == NULL) { 2184 return (EFAULT); 2185 } 2186 tmr = &stcb->asoc.strreset_timer; 2187 break; 2188 case SCTP_TIMER_TYPE_ASCONF: 2189 if (stcb == NULL) { 2190 return (EFAULT); 2191 } 2192 tmr = &stcb->asoc.asconf_timer; 2193 break; 2194 case SCTP_TIMER_TYPE_AUTOCLOSE: 2195 if (stcb == NULL) { 2196 return (EFAULT); 2197 } 2198 tmr = &stcb->asoc.autoclose_timer; 2199 break; 2200 default: 2201 #ifdef SCTP_DEBUG 2202 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 2203 printf("sctp_timer_stop:Unknown timer type %d\n", 2204 t_type); 2205 } 2206 #endif /* SCTP_DEBUG */ 2207 break; 2208 }; 2209 if (tmr == NULL) { 2210 return (EFAULT); 2211 } 2212 if ((tmr->type != t_type) && tmr->type) { 2213 /* 2214 * Ok we have a timer that is under joint use. Cookie timer 2215 * per chance with the SEND timer. We therefore are NOT 2216 * running the timer that the caller wants stopped. So just 2217 * return. 2218 */ 2219 return (0); 2220 } 2221 if (t_type == SCTP_TIMER_TYPE_SEND) { 2222 stcb->asoc.num_send_timers_up--; 2223 if (stcb->asoc.num_send_timers_up < 0) { 2224 stcb->asoc.num_send_timers_up = 0; 2225 } 2226 } 2227 tmr->self = NULL; 2228 tmr->stopped_from = from; 2229 SCTP_OS_TIMER_STOP(&tmr->timer); 2230 return (0); 2231 } 2232 2233 #ifdef SCTP_USE_ADLER32 2234 static uint32_t 2235 update_adler32(uint32_t adler, uint8_t * buf, int32_t len) 2236 { 2237 uint32_t s1 = adler & 0xffff; 2238 uint32_t s2 = (adler >> 16) & 0xffff; 2239 int n; 2240 2241 for (n = 0; n < len; n++, buf++) { 2242 /* s1 = (s1 + buf[n]) % BASE */ 2243 /* first we add */ 2244 s1 = (s1 + *buf); 2245 /* 2246 * now if we need to, we do a mod by subtracting. It seems a 2247 * bit faster since I really will only ever do one subtract 2248 * at the MOST, since buf[n] is a max of 255. 2249 */ 2250 if (s1 >= SCTP_ADLER32_BASE) { 2251 s1 -= SCTP_ADLER32_BASE; 2252 } 2253 /* s2 = (s2 + s1) % BASE */ 2254 /* first we add */ 2255 s2 = (s2 + s1); 2256 /* 2257 * again, it is more efficent (it seems) to subtract since 2258 * the most s2 will ever be is (BASE-1 + BASE-1) in the 2259 * worse case. This would then be (2 * BASE) - 2, which will 2260 * still only do one subtract. On Intel this is much better 2261 * to do this way and avoid the divide. Have not -pg'd on 2262 * sparc. 
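 * Worked example (added commentary, assuming SCTP_ADLER32_BASE is the
 * usual Adler-32 modulus 65521): if s1 were 65520 and the incoming byte
 * were 10, then s1 + 10 = 65530 >= 65521 and one subtraction leaves 9.
 * Since a byte contributes at most 255, a single conditional subtract
 * always suffices for s1, and (BASE-1) + (BASE-1) < 2*BASE keeps s2 to
 * one subtract as described above.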
2263 */ 2264 if (s2 >= SCTP_ADLER32_BASE) { 2265 s2 -= SCTP_ADLER32_BASE; 2266 } 2267 } 2268 /* Return the adler32 of the bytes buf[0..len-1] */ 2269 return ((s2 << 16) + s1); 2270 } 2271 2272 #endif 2273 2274 2275 uint32_t 2276 sctp_calculate_len(struct mbuf *m) 2277 { 2278 uint32_t tlen = 0; 2279 struct mbuf *at; 2280 2281 at = m; 2282 while (at) { 2283 tlen += SCTP_BUF_LEN(at); 2284 at = SCTP_BUF_NEXT(at); 2285 } 2286 return (tlen); 2287 } 2288 2289 #if defined(SCTP_WITH_NO_CSUM) 2290 2291 uint32_t 2292 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset) 2293 { 2294 /* 2295 * given a mbuf chain with a packetheader offset by 'offset' 2296 * pointing at a sctphdr (with csum set to 0) go through the chain 2297 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is 2298 * currently Adler32 but will change to CRC32x soon. Also has a side 2299 * bonus calculate the total length of the mbuf chain. Note: if 2300 * offset is greater than the total mbuf length, checksum=1, 2301 * pktlen=0 is returned (ie. no real error code) 2302 */ 2303 if (pktlen == NULL) 2304 return (0); 2305 *pktlen = sctp_calculate_len(m); 2306 return (0); 2307 } 2308 2309 #elif defined(SCTP_USE_INCHKSUM) 2310 2311 #include <machine/in_cksum.h> 2312 2313 uint32_t 2314 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset) 2315 { 2316 /* 2317 * given a mbuf chain with a packetheader offset by 'offset' 2318 * pointing at a sctphdr (with csum set to 0) go through the chain 2319 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is 2320 * currently Adler32 but will change to CRC32x soon. Also has a side 2321 * bonus calculate the total length of the mbuf chain. Note: if 2322 * offset is greater than the total mbuf length, checksum=1, 2323 * pktlen=0 is returned (ie. no real error code) 2324 */ 2325 int32_t tlen = 0; 2326 struct mbuf *at; 2327 uint32_t the_sum, retsum; 2328 2329 at = m; 2330 while (at) { 2331 tlen += SCTP_BUF_LEN(at); 2332 at = SCTP_BUF_NEXT(at); 2333 } 2334 the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset)); 2335 if (pktlen != NULL) 2336 *pktlen = (tlen - offset); 2337 retsum = htons(the_sum); 2338 return (the_sum); 2339 } 2340 2341 #else 2342 2343 uint32_t 2344 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset) 2345 { 2346 /* 2347 * given a mbuf chain with a packetheader offset by 'offset' 2348 * pointing at a sctphdr (with csum set to 0) go through the chain 2349 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is 2350 * currently Adler32 but will change to CRC32x soon. Also has a side 2351 * bonus calculate the total length of the mbuf chain. Note: if 2352 * offset is greater than the total mbuf length, checksum=1, 2353 * pktlen=0 is returned (ie. 
no real error code) 2354 */ 2355 int32_t tlen = 0; 2356 2357 #ifdef SCTP_USE_ADLER32 2358 uint32_t base = 1L; 2359 2360 #else 2361 uint32_t base = 0xffffffff; 2362 2363 #endif /* SCTP_USE_ADLER32 */ 2364 struct mbuf *at; 2365 2366 at = m; 2367 /* find the correct mbuf and offset into mbuf */ 2368 while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) { 2369 offset -= SCTP_BUF_LEN(at); /* update remaining offset 2370 * left */ 2371 at = SCTP_BUF_NEXT(at); 2372 } 2373 while (at != NULL) { 2374 if ((SCTP_BUF_LEN(at) - offset) > 0) { 2375 #ifdef SCTP_USE_ADLER32 2376 base = update_adler32(base, 2377 (unsigned char *)(SCTP_BUF_AT(at, offset)), 2378 (unsigned int)(SCTP_BUF_LEN(at) - offset)); 2379 #else 2380 if ((SCTP_BUF_LEN(at) - offset) < 4) { 2381 /* Use old method if less than 4 bytes */ 2382 base = old_update_crc32(base, 2383 (unsigned char *)(SCTP_BUF_AT(at, offset)), 2384 (unsigned int)(SCTP_BUF_LEN(at) - offset)); 2385 } else { 2386 base = update_crc32(base, 2387 (unsigned char *)(SCTP_BUF_AT(at, offset)), 2388 (unsigned int)(SCTP_BUF_LEN(at) - offset)); 2389 } 2390 #endif /* SCTP_USE_ADLER32 */ 2391 tlen += SCTP_BUF_LEN(at) - offset; 2392 /* we only offset once into the first mbuf */ 2393 } 2394 if (offset) { 2395 if (offset < SCTP_BUF_LEN(at)) 2396 offset = 0; 2397 else 2398 offset -= SCTP_BUF_LEN(at); 2399 } 2400 at = SCTP_BUF_NEXT(at); 2401 } 2402 if (pktlen != NULL) { 2403 *pktlen = tlen; 2404 } 2405 #ifdef SCTP_USE_ADLER32 2406 /* Adler32 */ 2407 base = htonl(base); 2408 #else 2409 /* CRC-32c */ 2410 base = sctp_csum_finalize(base); 2411 #endif 2412 return (base); 2413 } 2414 2415 2416 #endif 2417 2418 void 2419 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2420 struct sctp_association *asoc, uint32_t mtu) 2421 { 2422 /* 2423 * Reset the P-MTU size on this association, this involves changing 2424 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2425 * allow the DF flag to be cleared. 2426 */ 2427 struct sctp_tmit_chunk *chk; 2428 unsigned int eff_mtu, ovh; 2429 2430 asoc->smallest_mtu = mtu; 2431 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2432 ovh = SCTP_MIN_OVERHEAD; 2433 } else { 2434 ovh = SCTP_MIN_V4_OVERHEAD; 2435 } 2436 eff_mtu = mtu - ovh; 2437 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2438 2439 if (chk->send_size > eff_mtu) { 2440 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2441 } 2442 } 2443 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2444 if (chk->send_size > eff_mtu) { 2445 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2446 } 2447 } 2448 } 2449 2450 2451 /* 2452 * given an association and starting time of the current RTT period return 2453 * RTO in number of msecs net should point to the current network 2454 */ 2455 uint32_t 2456 sctp_calculate_rto(struct sctp_tcb *stcb, 2457 struct sctp_association *asoc, 2458 struct sctp_nets *net, 2459 struct timeval *old) 2460 { 2461 /* 2462 * given an association and the starting time of the current RTT 2463 * period (in value1/value2) return RTO in number of msecs. 2464 */ 2465 int calc_time = 0; 2466 int o_calctime; 2467 unsigned int new_rto = 0; 2468 int first_measure = 0; 2469 struct timeval now; 2470 2471 /************************/ 2472 /* 1. 
calculate new RTT */ 2473 /************************/ 2474 /* get the current time */ 2475 SCTP_GETTIME_TIMEVAL(&now); 2476 /* compute the RTT value */ 2477 if ((u_long)now.tv_sec > (u_long)old->tv_sec) { 2478 calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000; 2479 if ((u_long)now.tv_usec > (u_long)old->tv_usec) { 2480 calc_time += (((u_long)now.tv_usec - 2481 (u_long)old->tv_usec) / 1000); 2482 } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) { 2483 /* Borrow 1,000ms from current calculation */ 2484 calc_time -= 1000; 2485 /* Add in the slop over */ 2486 calc_time += ((int)now.tv_usec / 1000); 2487 /* Add in the pre-second ms's */ 2488 calc_time += (((int)1000000 - (int)old->tv_usec) / 1000); 2489 } 2490 } else if ((u_long)now.tv_sec == (u_long)old->tv_sec) { 2491 if ((u_long)now.tv_usec > (u_long)old->tv_usec) { 2492 calc_time = ((u_long)now.tv_usec - 2493 (u_long)old->tv_usec) / 1000; 2494 } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) { 2495 /* impossible .. garbage in nothing out */ 2496 return (((net->lastsa >> 2) + net->lastsv) >> 1); 2497 } else if ((u_long)now.tv_usec == (u_long)old->tv_usec) { 2498 /* 2499 * We have to have 1 usec :-D this must be the 2500 * loopback. 2501 */ 2502 calc_time = 1; 2503 } else { 2504 /* impossible .. garbage in nothing out */ 2505 return (((net->lastsa >> 2) + net->lastsv) >> 1); 2506 } 2507 } else { 2508 /* Clock wrapped? */ 2509 return (((net->lastsa >> 2) + net->lastsv) >> 1); 2510 } 2511 /***************************/ 2512 /* 2. update RTTVAR & SRTT */ 2513 /***************************/ 2514 o_calctime = calc_time; 2515 /* this is Van Jacobson's integer version */ 2516 if (net->RTO) { 2517 calc_time -= (net->lastsa >> 3); 2518 if ((int)net->prev_rtt > o_calctime) { 2519 net->rtt_variance = net->prev_rtt - o_calctime; 2520 /* decreasing */ 2521 net->rto_variance_dir = 0; 2522 } else { 2523 /* increasing */ 2524 net->rtt_variance = o_calctime - net->prev_rtt; 2525 net->rto_variance_dir = 1; 2526 } 2527 #ifdef SCTP_RTTVAR_LOGGING 2528 rto_logging(net, SCTP_LOG_RTTVAR); 2529 #endif 2530 net->prev_rtt = o_calctime; 2531 net->lastsa += calc_time; 2532 if (calc_time < 0) { 2533 calc_time = -calc_time; 2534 } 2535 calc_time -= (net->lastsv >> 2); 2536 net->lastsv += calc_time; 2537 if (net->lastsv == 0) { 2538 net->lastsv = SCTP_CLOCK_GRANULARITY; 2539 } 2540 } else { 2541 /* First RTO measurment */ 2542 net->lastsa = calc_time; 2543 net->lastsv = calc_time >> 1; 2544 first_measure = 1; 2545 net->rto_variance_dir = 1; 2546 net->prev_rtt = o_calctime; 2547 net->rtt_variance = 0; 2548 #ifdef SCTP_RTTVAR_LOGGING 2549 rto_logging(net, SCTP_LOG_INITIAL_RTT); 2550 #endif 2551 } 2552 new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1; 2553 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 2554 (stcb->asoc.sat_network_lockout == 0)) { 2555 stcb->asoc.sat_network = 1; 2556 } else if ((!first_measure) && stcb->asoc.sat_network) { 2557 stcb->asoc.sat_network = 0; 2558 stcb->asoc.sat_network_lockout = 1; 2559 } 2560 /* bound it, per C6/C7 in Section 5.3.1 */ 2561 if (new_rto < stcb->asoc.minrto) { 2562 new_rto = stcb->asoc.minrto; 2563 } 2564 if (new_rto > stcb->asoc.maxrto) { 2565 new_rto = stcb->asoc.maxrto; 2566 } 2567 /* we are now returning the RTT Smoothed */ 2568 return ((uint32_t) new_rto); 2569 } 2570 2571 /* 2572 * return a pointer to a contiguous piece of data from the given mbuf chain 2573 * starting at 'off' for 'len' bytes. If the desired piece spans more than 2574 * one mbuf, a copy is made at 'ptr'. 
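 * A typical call (illustrative only, mirroring sctp_get_next_param() and
 * the chunk walking in sctp_handle_ootb() below) keeps a small stack
 * buffer to cover the spanning case:
 *
 *	struct sctp_chunkhdr chunk_buf, *ch;
 *
 *	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
 *	    sizeof(*ch), (uint8_t *)&chunk_buf);
 *	if (ch == NULL)
 *		return;		(chain is shorter than requested)
 *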
caller must ensure that the buffer size 2575 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 2576 */ 2577 __inline caddr_t 2578 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr) 2579 { 2580 uint32_t count; 2581 uint8_t *ptr; 2582 2583 ptr = in_ptr; 2584 if ((off < 0) || (len <= 0)) 2585 return (NULL); 2586 2587 /* find the desired start location */ 2588 while ((m != NULL) && (off > 0)) { 2589 if (off < SCTP_BUF_LEN(m)) 2590 break; 2591 off -= SCTP_BUF_LEN(m); 2592 m = SCTP_BUF_NEXT(m); 2593 } 2594 if (m == NULL) 2595 return (NULL); 2596 2597 /* is the current mbuf large enough (eg. contiguous)? */ 2598 if ((SCTP_BUF_LEN(m) - off) >= len) { 2599 return (mtod(m, caddr_t)+off); 2600 } else { 2601 /* else, it spans more than one mbuf, so save a temp copy... */ 2602 while ((m != NULL) && (len > 0)) { 2603 count = min(SCTP_BUF_LEN(m) - off, len); 2604 bcopy(mtod(m, caddr_t)+off, ptr, count); 2605 len -= count; 2606 ptr += count; 2607 off = 0; 2608 m = SCTP_BUF_NEXT(m); 2609 } 2610 if ((m == NULL) && (len > 0)) 2611 return (NULL); 2612 else 2613 return ((caddr_t)in_ptr); 2614 } 2615 } 2616 2617 2618 2619 struct sctp_paramhdr * 2620 sctp_get_next_param(struct mbuf *m, 2621 int offset, 2622 struct sctp_paramhdr *pull, 2623 int pull_limit) 2624 { 2625 /* This just provides a typed signature to Peter's Pull routine */ 2626 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 2627 (uint8_t *) pull)); 2628 } 2629 2630 2631 int 2632 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 2633 { 2634 /* 2635 * add padlen bytes of 0 filled padding to the end of the mbuf. If 2636 * padlen is > 3 this routine will fail. 2637 */ 2638 uint8_t *dp; 2639 int i; 2640 2641 if (padlen > 3) { 2642 return (ENOBUFS); 2643 } 2644 if (M_TRAILINGSPACE(m)) { 2645 /* 2646 * The easy way. We hope the majority of the time we hit 2647 * here :) 2648 */ 2649 dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 2650 SCTP_BUF_LEN(m) += padlen; 2651 } else { 2652 /* Hard way we must grow the mbuf */ 2653 struct mbuf *tmp; 2654 2655 tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA); 2656 if (tmp == NULL) { 2657 /* Out of space GAK! we are in big trouble. */ 2658 return (ENOSPC); 2659 } 2660 /* setup and insert in middle */ 2661 SCTP_BUF_NEXT(tmp) = SCTP_BUF_NEXT(m); 2662 SCTP_BUF_LEN(tmp) = padlen; 2663 SCTP_BUF_NEXT(m) = tmp; 2664 dp = mtod(tmp, uint8_t *); 2665 } 2666 /* zero out the pad */ 2667 for (i = 0; i < padlen; i++) { 2668 *dp = 0; 2669 dp++; 2670 } 2671 return (0); 2672 } 2673 2674 int 2675 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 2676 { 2677 /* find the last mbuf in chain and pad it */ 2678 struct mbuf *m_at; 2679 2680 m_at = m; 2681 if (last_mbuf) { 2682 return (sctp_add_pad_tombuf(last_mbuf, padval)); 2683 } else { 2684 while (m_at) { 2685 if (SCTP_BUF_NEXT(m_at) == NULL) { 2686 return (sctp_add_pad_tombuf(m_at, padval)); 2687 } 2688 m_at = SCTP_BUF_NEXT(m_at); 2689 } 2690 } 2691 return (EFAULT); 2692 } 2693 2694 int sctp_asoc_change_wake = 0; 2695 2696 static void 2697 sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb, 2698 uint32_t error, void *data) 2699 { 2700 struct mbuf *m_notify; 2701 struct sctp_assoc_change *sac; 2702 struct sctp_queued_to_read *control; 2703 2704 /* 2705 * First if we are are going down dump everything we can to the 2706 * socket rcv queue. 
2707 */ 2708 2709 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 2710 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 2711 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) 2712 ) { 2713 /* If the socket is gone we are out of here */ 2714 return; 2715 } 2716 /* 2717 * For TCP model AND UDP connected sockets we will send an error up 2718 * when an ABORT comes in. 2719 */ 2720 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2721 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2722 (event == SCTP_COMM_LOST)) { 2723 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) 2724 stcb->sctp_socket->so_error = ECONNREFUSED; 2725 else 2726 stcb->sctp_socket->so_error = ECONNRESET; 2727 /* Wake ANY sleepers */ 2728 sorwakeup(stcb->sctp_socket); 2729 sowwakeup(stcb->sctp_socket); 2730 sctp_asoc_change_wake++; 2731 } 2732 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 2733 /* event not enabled */ 2734 return; 2735 } 2736 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA); 2737 if (m_notify == NULL) 2738 /* no space left */ 2739 return; 2740 SCTP_BUF_LEN(m_notify) = 0; 2741 2742 sac = mtod(m_notify, struct sctp_assoc_change *); 2743 sac->sac_type = SCTP_ASSOC_CHANGE; 2744 sac->sac_flags = 0; 2745 sac->sac_length = sizeof(struct sctp_assoc_change); 2746 sac->sac_state = event; 2747 sac->sac_error = error; 2748 /* XXX verify these stream counts */ 2749 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 2750 sac->sac_inbound_streams = stcb->asoc.streamincnt; 2751 sac->sac_assoc_id = sctp_get_associd(stcb); 2752 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change); 2753 SCTP_BUF_NEXT(m_notify) = NULL; 2754 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2755 0, 0, 0, 0, 0, 0, 2756 m_notify); 2757 if (control == NULL) { 2758 /* no memory */ 2759 sctp_m_freem(m_notify); 2760 return; 2761 } 2762 control->length = SCTP_BUF_LEN(m_notify); 2763 /* not that we need this */ 2764 control->tail_mbuf = m_notify; 2765 control->spec_flags = M_NOTIFICATION; 2766 sctp_add_to_readq(stcb->sctp_ep, stcb, 2767 control, 2768 &stcb->sctp_socket->so_rcv, 1); 2769 if (event == SCTP_COMM_LOST) { 2770 /* Wake up any sleeper */ 2771 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 2772 } 2773 } 2774 2775 static void 2776 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 2777 struct sockaddr *sa, uint32_t error) 2778 { 2779 struct mbuf *m_notify; 2780 struct sctp_paddr_change *spc; 2781 struct sctp_queued_to_read *control; 2782 2783 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) 2784 /* event not enabled */ 2785 return; 2786 2787 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA); 2788 if (m_notify == NULL) 2789 return; 2790 SCTP_BUF_LEN(m_notify) = 0; 2791 spc = mtod(m_notify, struct sctp_paddr_change *); 2792 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 2793 spc->spc_flags = 0; 2794 spc->spc_length = sizeof(struct sctp_paddr_change); 2795 if (sa->sa_family == AF_INET) { 2796 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 2797 } else { 2798 struct sockaddr_in6 *sin6; 2799 2800 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 2801 2802 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 2803 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 2804 if (sin6->sin6_scope_id == 0) { 2805 /* recover scope_id for user */ 2806 (void)sa6_recoverscope(sin6); 2807 } else { 2808 /* clear embedded scope_id 
for user */ 2809 in6_clearscope(&sin6->sin6_addr); 2810 } 2811 } 2812 } 2813 spc->spc_state = state; 2814 spc->spc_error = error; 2815 spc->spc_assoc_id = sctp_get_associd(stcb); 2816 2817 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 2818 SCTP_BUF_NEXT(m_notify) = NULL; 2819 2820 /* append to socket */ 2821 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2822 0, 0, 0, 0, 0, 0, 2823 m_notify); 2824 if (control == NULL) { 2825 /* no memory */ 2826 sctp_m_freem(m_notify); 2827 return; 2828 } 2829 control->length = SCTP_BUF_LEN(m_notify); 2830 control->spec_flags = M_NOTIFICATION; 2831 /* not that we need this */ 2832 control->tail_mbuf = m_notify; 2833 sctp_add_to_readq(stcb->sctp_ep, stcb, 2834 control, 2835 &stcb->sctp_socket->so_rcv, 1); 2836 } 2837 2838 2839 static void 2840 sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error, 2841 struct sctp_tmit_chunk *chk) 2842 { 2843 struct mbuf *m_notify; 2844 struct sctp_send_failed *ssf; 2845 struct sctp_queued_to_read *control; 2846 int length; 2847 2848 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 2849 /* event not enabled */ 2850 return; 2851 2852 length = sizeof(struct sctp_send_failed) + chk->send_size; 2853 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA); 2854 if (m_notify == NULL) 2855 /* no space left */ 2856 return; 2857 SCTP_BUF_LEN(m_notify) = 0; 2858 ssf = mtod(m_notify, struct sctp_send_failed *); 2859 ssf->ssf_type = SCTP_SEND_FAILED; 2860 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT) 2861 ssf->ssf_flags = SCTP_DATA_UNSENT; 2862 else 2863 ssf->ssf_flags = SCTP_DATA_SENT; 2864 ssf->ssf_length = length; 2865 ssf->ssf_error = error; 2866 /* not exactly what the user sent in, but should be close :) */ 2867 ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number; 2868 ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq; 2869 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 2870 ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype; 2871 ssf->ssf_info.sinfo_context = chk->rec.data.context; 2872 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2873 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2874 SCTP_BUF_NEXT(m_notify) = chk->data; 2875 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed); 2876 2877 /* Steal off the mbuf */ 2878 chk->data = NULL; 2879 /* 2880 * For this case, we check the actual socket buffer, since the assoc 2881 * is going away we don't want to overfill the socket buffer for a 2882 * non-reader 2883 */ 2884 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 2885 sctp_m_freem(m_notify); 2886 return; 2887 } 2888 /* append to socket */ 2889 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2890 0, 0, 0, 0, 0, 0, 2891 m_notify); 2892 if (control == NULL) { 2893 /* no memory */ 2894 sctp_m_freem(m_notify); 2895 return; 2896 } 2897 control->spec_flags = M_NOTIFICATION; 2898 sctp_add_to_readq(stcb->sctp_ep, stcb, 2899 control, 2900 &stcb->sctp_socket->so_rcv, 1); 2901 } 2902 2903 2904 static void 2905 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 2906 struct sctp_stream_queue_pending *sp) 2907 { 2908 struct mbuf *m_notify; 2909 struct sctp_send_failed *ssf; 2910 struct sctp_queued_to_read *control; 2911 int length; 2912 2913 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 2914 /* event not enabled */ 2915 return; 2916 2917 length = sizeof(struct sctp_send_failed) + sp->length; 2918 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA); 2919 if (m_notify == NULL) 2920 /* no space left */ 2921 return; 2922 SCTP_BUF_LEN(m_notify) = 0; 2923 ssf = mtod(m_notify, struct sctp_send_failed *); 2924 ssf->ssf_type = SCTP_SEND_FAILED; 2925 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT) 2926 ssf->ssf_flags = SCTP_DATA_UNSENT; 2927 else 2928 ssf->ssf_flags = SCTP_DATA_SENT; 2929 ssf->ssf_length = length; 2930 ssf->ssf_error = error; 2931 /* not exactly what the user sent in, but should be close :) */ 2932 ssf->ssf_info.sinfo_stream = sp->stream; 2933 ssf->ssf_info.sinfo_ssn = sp->strseq; 2934 ssf->ssf_info.sinfo_flags = sp->sinfo_flags; 2935 ssf->ssf_info.sinfo_ppid = sp->ppid; 2936 ssf->ssf_info.sinfo_context = sp->context; 2937 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2938 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2939 SCTP_BUF_NEXT(m_notify) = sp->data; 2940 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed); 2941 2942 /* Steal off the mbuf */ 2943 sp->data = NULL; 2944 /* 2945 * For this case, we check the actual socket buffer, since the assoc 2946 * is going away we don't want to overfill the socket buffer for a 2947 * non-reader 2948 */ 2949 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 2950 sctp_m_freem(m_notify); 2951 return; 2952 } 2953 /* append to socket */ 2954 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2955 0, 0, 0, 0, 0, 0, 2956 m_notify); 2957 if (control == NULL) { 2958 /* no memory */ 2959 sctp_m_freem(m_notify); 2960 return; 2961 } 2962 control->spec_flags = M_NOTIFICATION; 2963 sctp_add_to_readq(stcb->sctp_ep, stcb, 2964 control, 2965 &stcb->sctp_socket->so_rcv, 1); 2966 } 2967 2968 2969 2970 static void 2971 sctp_notify_adaptation_layer(struct sctp_tcb *stcb, 2972 uint32_t error) 2973 { 2974 struct mbuf *m_notify; 2975 struct sctp_adaptation_event *sai; 2976 struct sctp_queued_to_read *control; 2977 2978 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) 2979 /* event not enabled */ 2980 return; 2981 2982 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA); 2983 if (m_notify == NULL) 2984 /* no space left */ 2985 return; 2986 SCTP_BUF_LEN(m_notify) = 0; 2987 sai = mtod(m_notify, struct sctp_adaptation_event *); 2988 sai->sai_type = SCTP_ADAPTATION_INDICATION; 2989 sai->sai_flags = 0; 2990 sai->sai_length = sizeof(struct sctp_adaptation_event); 2991 sai->sai_adaptation_ind = error; 2992 sai->sai_assoc_id = sctp_get_associd(stcb); 2993 2994 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 2995 SCTP_BUF_NEXT(m_notify) = NULL; 2996 2997 /* append to socket */ 2998 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2999 0, 0, 0, 0, 0, 0, 3000 m_notify); 3001 if (control == NULL) { 3002 /* no memory */ 3003 sctp_m_freem(m_notify); 3004 return; 3005 } 3006 control->length = SCTP_BUF_LEN(m_notify); 3007 control->spec_flags = M_NOTIFICATION; 3008 /* not that we need this */ 3009 control->tail_mbuf = m_notify; 3010 sctp_add_to_readq(stcb->sctp_ep, stcb, 3011 control, 3012 &stcb->sctp_socket->so_rcv, 1); 3013 } 3014 3015 /* This always must be called with the read-queue LOCKED in the INP */ 3016 void 3017 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, 3018 uint32_t error, int nolock) 3019 { 3020 struct mbuf *m_notify; 3021 struct sctp_pdapi_event *pdapi; 3022 struct sctp_queued_to_read *control; 3023 struct sockbuf *sb; 3024 3025 if 
(sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) 3026 /* event not enabled */ 3027 return; 3028 3029 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA); 3030 if (m_notify == NULL) 3031 /* no space left */ 3032 return; 3033 SCTP_BUF_LEN(m_notify) = 0; 3034 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3035 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3036 pdapi->pdapi_flags = 0; 3037 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3038 pdapi->pdapi_indication = error; 3039 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3040 3041 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3042 SCTP_BUF_NEXT(m_notify) = NULL; 3043 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3044 0, 0, 0, 0, 0, 0, 3045 m_notify); 3046 if (control == NULL) { 3047 /* no memory */ 3048 sctp_m_freem(m_notify); 3049 return; 3050 } 3051 control->spec_flags = M_NOTIFICATION; 3052 control->length = SCTP_BUF_LEN(m_notify); 3053 /* not that we need this */ 3054 control->tail_mbuf = m_notify; 3055 control->held_length = 0; 3056 control->length = 0; 3057 if (nolock == 0) { 3058 SCTP_INP_READ_LOCK(stcb->sctp_ep); 3059 } 3060 sb = &stcb->sctp_socket->so_rcv; 3061 #ifdef SCTP_SB_LOGGING 3062 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3063 #endif 3064 sctp_sballoc(stcb, sb, m_notify); 3065 #ifdef SCTP_SB_LOGGING 3066 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3067 #endif 3068 atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify)); 3069 control->end_added = 1; 3070 if (stcb->asoc.control_pdapi) 3071 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3072 else { 3073 /* we really should not see this case */ 3074 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3075 } 3076 if (nolock == 0) { 3077 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 3078 } 3079 if (stcb->sctp_ep && stcb->sctp_socket) { 3080 /* This should always be the case */ 3081 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3082 } 3083 } 3084 3085 static void 3086 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3087 { 3088 struct mbuf *m_notify; 3089 struct sctp_shutdown_event *sse; 3090 struct sctp_queued_to_read *control; 3091 3092 /* 3093 * For TCP model AND UDP connected sockets we will send an error up 3094 * when an SHUTDOWN completes 3095 */ 3096 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3097 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3098 /* mark socket closed for read/write and wakeup! 
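 (added note: in this path only the send side is actually marked, via
 socantsendmore() below; the receive side stays readable so any queued
 data and the SHUTDOWN_EVENT notification can still be delivered)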
*/ 3099 socantsendmore(stcb->sctp_socket); 3100 } 3101 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) 3102 /* event not enabled */ 3103 return; 3104 3105 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA); 3106 if (m_notify == NULL) 3107 /* no space left */ 3108 return; 3109 sse = mtod(m_notify, struct sctp_shutdown_event *); 3110 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3111 sse->sse_flags = 0; 3112 sse->sse_length = sizeof(struct sctp_shutdown_event); 3113 sse->sse_assoc_id = sctp_get_associd(stcb); 3114 3115 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3116 SCTP_BUF_NEXT(m_notify) = NULL; 3117 3118 /* append to socket */ 3119 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3120 0, 0, 0, 0, 0, 0, 3121 m_notify); 3122 if (control == NULL) { 3123 /* no memory */ 3124 sctp_m_freem(m_notify); 3125 return; 3126 } 3127 control->spec_flags = M_NOTIFICATION; 3128 control->length = SCTP_BUF_LEN(m_notify); 3129 /* not that we need this */ 3130 control->tail_mbuf = m_notify; 3131 sctp_add_to_readq(stcb->sctp_ep, stcb, 3132 control, 3133 &stcb->sctp_socket->so_rcv, 1); 3134 } 3135 3136 static void 3137 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3138 int number_entries, uint16_t * list, int flag) 3139 { 3140 struct mbuf *m_notify; 3141 struct sctp_queued_to_read *control; 3142 struct sctp_stream_reset_event *strreset; 3143 int len; 3144 3145 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) 3146 /* event not enabled */ 3147 return; 3148 3149 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 3150 if (m_notify == NULL) 3151 /* no space left */ 3152 return; 3153 SCTP_BUF_LEN(m_notify) = 0; 3154 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3155 if (len > M_TRAILINGSPACE(m_notify)) { 3156 /* never enough room */ 3157 sctp_m_freem(m_notify); 3158 return; 3159 } 3160 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3161 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3162 if (number_entries == 0) { 3163 strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS; 3164 } else { 3165 strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST; 3166 } 3167 strreset->strreset_length = len; 3168 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3169 if (number_entries) { 3170 int i; 3171 3172 for (i = 0; i < number_entries; i++) { 3173 strreset->strreset_list[i] = ntohs(list[i]); 3174 } 3175 } 3176 SCTP_BUF_LEN(m_notify) = len; 3177 SCTP_BUF_NEXT(m_notify) = NULL; 3178 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3179 /* no space */ 3180 sctp_m_freem(m_notify); 3181 return; 3182 } 3183 /* append to socket */ 3184 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3185 0, 0, 0, 0, 0, 0, 3186 m_notify); 3187 if (control == NULL) { 3188 /* no memory */ 3189 sctp_m_freem(m_notify); 3190 return; 3191 } 3192 control->spec_flags = M_NOTIFICATION; 3193 control->length = SCTP_BUF_LEN(m_notify); 3194 /* not that we need this */ 3195 control->tail_mbuf = m_notify; 3196 sctp_add_to_readq(stcb->sctp_ep, stcb, 3197 control, 3198 &stcb->sctp_socket->so_rcv, 1); 3199 } 3200 3201 3202 void 3203 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 3204 uint32_t error, void *data) 3205 { 3206 if (stcb == NULL) { 3207 /* unlikely but */ 3208 return; 3209 } 3210 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3211 
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3212 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) 3213 ) { 3214 /* No notifications up when we are in a no socket state */ 3215 return; 3216 } 3217 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3218 /* Can't send up to a closed socket any notifications */ 3219 return; 3220 } 3221 if (stcb && (stcb->asoc.assoc_up_sent == 0) && (notification != SCTP_NOTIFY_ASSOC_UP)) { 3222 if ((notification != SCTP_NOTIFY_ASSOC_DOWN) && 3223 (notification != SCTP_NOTIFY_ASSOC_ABORTED) && 3224 (notification != SCTP_NOTIFY_SPECIAL_SP_FAIL) && 3225 (notification != SCTP_NOTIFY_DG_FAIL) && 3226 (notification != SCTP_NOTIFY_PEER_SHUTDOWN)) { 3227 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, 0, NULL); 3228 stcb->asoc.assoc_up_sent = 1; 3229 } 3230 } 3231 switch (notification) { 3232 case SCTP_NOTIFY_ASSOC_UP: 3233 if (stcb->asoc.assoc_up_sent == 0) { 3234 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL); 3235 stcb->asoc.assoc_up_sent = 1; 3236 } 3237 break; 3238 case SCTP_NOTIFY_ASSOC_DOWN: 3239 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL); 3240 break; 3241 case SCTP_NOTIFY_INTERFACE_DOWN: 3242 { 3243 struct sctp_nets *net; 3244 3245 net = (struct sctp_nets *)data; 3246 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 3247 (struct sockaddr *)&net->ro._l_addr, error); 3248 break; 3249 } 3250 case SCTP_NOTIFY_INTERFACE_UP: 3251 { 3252 struct sctp_nets *net; 3253 3254 net = (struct sctp_nets *)data; 3255 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 3256 (struct sockaddr *)&net->ro._l_addr, error); 3257 break; 3258 } 3259 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 3260 { 3261 struct sctp_nets *net; 3262 3263 net = (struct sctp_nets *)data; 3264 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 3265 (struct sockaddr *)&net->ro._l_addr, error); 3266 break; 3267 } 3268 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 3269 sctp_notify_send_failed2(stcb, error, 3270 (struct sctp_stream_queue_pending *)data); 3271 break; 3272 case SCTP_NOTIFY_DG_FAIL: 3273 sctp_notify_send_failed(stcb, error, 3274 (struct sctp_tmit_chunk *)data); 3275 break; 3276 case SCTP_NOTIFY_ADAPTATION_INDICATION: 3277 /* Here the error is the adaptation indication */ 3278 sctp_notify_adaptation_layer(stcb, error); 3279 break; 3280 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 3281 sctp_notify_partial_delivery_indication(stcb, error, 0); 3282 break; 3283 case SCTP_NOTIFY_STRDATA_ERR: 3284 break; 3285 case SCTP_NOTIFY_ASSOC_ABORTED: 3286 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL); 3287 break; 3288 case SCTP_NOTIFY_PEER_OPENED_STREAM: 3289 break; 3290 case SCTP_NOTIFY_STREAM_OPENED_OK: 3291 break; 3292 case SCTP_NOTIFY_ASSOC_RESTART: 3293 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data); 3294 break; 3295 case SCTP_NOTIFY_HB_RESP: 3296 break; 3297 case SCTP_NOTIFY_STR_RESET_SEND: 3298 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR); 3299 break; 3300 case SCTP_NOTIFY_STR_RESET_RECV: 3301 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR); 3302 break; 3303 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 3304 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_INBOUND_STR)); 3305 break; 3306 3307 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 3308 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_INBOUND_STR)); 3309 break; 3310 3311 case SCTP_NOTIFY_ASCONF_ADD_IP: 3312 
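		/*
		 * Added comment: the three ASCONF outcomes below (add,
		 * delete, set primary) are all surfaced to the application
		 * as peer address change events with the matching
		 * spc_state value.
		 */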
sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 3313 error); 3314 break; 3315 case SCTP_NOTIFY_ASCONF_DELETE_IP: 3316 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 3317 error); 3318 break; 3319 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 3320 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 3321 error); 3322 break; 3323 case SCTP_NOTIFY_ASCONF_SUCCESS: 3324 break; 3325 case SCTP_NOTIFY_ASCONF_FAILED: 3326 break; 3327 case SCTP_NOTIFY_PEER_SHUTDOWN: 3328 sctp_notify_shutdown_event(stcb); 3329 break; 3330 case SCTP_NOTIFY_AUTH_NEW_KEY: 3331 sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error, 3332 (uint16_t) (uintptr_t) data); 3333 break; 3334 #if 0 3335 case SCTP_NOTIFY_AUTH_KEY_CONFLICT: 3336 sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT, 3337 error, (uint16_t) (uintptr_t) data); 3338 break; 3339 #endif /* not yet? remove? */ 3340 3341 3342 default: 3343 #ifdef SCTP_DEBUG 3344 if (sctp_debug_on & SCTP_DEBUG_UTIL1) { 3345 printf("NOTIFY: unknown notification %xh (%u)\n", 3346 notification, notification); 3347 } 3348 #endif /* SCTP_DEBUG */ 3349 break; 3350 } /* end switch */ 3351 } 3352 3353 void 3354 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock) 3355 { 3356 struct sctp_association *asoc; 3357 struct sctp_stream_out *outs; 3358 struct sctp_tmit_chunk *chk; 3359 struct sctp_stream_queue_pending *sp; 3360 int i; 3361 3362 asoc = &stcb->asoc; 3363 3364 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3365 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3366 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3367 return; 3368 } 3369 /* now through all the gunk freeing chunks */ 3370 if (holds_lock == 0) 3371 SCTP_TCB_SEND_LOCK(stcb); 3372 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3373 /* For each stream */ 3374 outs = &stcb->asoc.strmout[i]; 3375 /* clean up any sends there */ 3376 stcb->asoc.locked_on_sending = NULL; 3377 sp = TAILQ_FIRST(&outs->outqueue); 3378 while (sp) { 3379 stcb->asoc.stream_queue_cnt--; 3380 TAILQ_REMOVE(&outs->outqueue, sp, next); 3381 sctp_free_spbufspace(stcb, asoc, sp); 3382 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 3383 SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp); 3384 if (sp->data) { 3385 sctp_m_freem(sp->data); 3386 sp->data = NULL; 3387 } 3388 if (sp->net) 3389 sctp_free_remote_addr(sp->net); 3390 sp->net = NULL; 3391 /* Free the chunk */ 3392 sctp_free_a_strmoq(stcb, sp); 3393 sp = TAILQ_FIRST(&outs->outqueue); 3394 } 3395 } 3396 3397 /* pending send queue SHOULD be empty */ 3398 if (!TAILQ_EMPTY(&asoc->send_queue)) { 3399 chk = TAILQ_FIRST(&asoc->send_queue); 3400 while (chk) { 3401 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 3402 asoc->send_queue_cnt--; 3403 if (chk->data) { 3404 /* 3405 * trim off the sctp chunk header(it should 3406 * be there) 3407 */ 3408 if (chk->send_size >= sizeof(struct sctp_data_chunk)) { 3409 m_adj(chk->data, sizeof(struct sctp_data_chunk)); 3410 sctp_mbuf_crush(chk->data); 3411 } 3412 } 3413 sctp_free_bufspace(stcb, asoc, chk, 1); 3414 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk); 3415 if (chk->data) { 3416 sctp_m_freem(chk->data); 3417 chk->data = NULL; 3418 } 3419 if (chk->whoTo) 3420 sctp_free_remote_addr(chk->whoTo); 3421 chk->whoTo = NULL; 3422 sctp_free_a_chunk(stcb, chk); 3423 chk = TAILQ_FIRST(&asoc->send_queue); 3424 } 3425 } 3426 /* sent queue SHOULD be empty */ 3427 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3428 chk = TAILQ_FIRST(&asoc->sent_queue); 3429 while (chk) { 3430 
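			/*
			 * Added comment: same treatment as the send queue
			 * above, but for chunks that were transmitted and
			 * never fully acknowledged: strip the DATA chunk
			 * header, notify the ULP that the datagram was sent
			 * but failed, then release the chunk and its
			 * destination reference.
			 */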
TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 3431 asoc->sent_queue_cnt--; 3432 if (chk->data) { 3433 /* 3434 * trim off the sctp chunk header(it should 3435 * be there) 3436 */ 3437 if (chk->send_size >= sizeof(struct sctp_data_chunk)) { 3438 m_adj(chk->data, sizeof(struct sctp_data_chunk)); 3439 sctp_mbuf_crush(chk->data); 3440 } 3441 } 3442 sctp_free_bufspace(stcb, asoc, chk, 1); 3443 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 3444 SCTP_NOTIFY_DATAGRAM_SENT, chk); 3445 if (chk->data) { 3446 sctp_m_freem(chk->data); 3447 chk->data = NULL; 3448 } 3449 if (chk->whoTo) 3450 sctp_free_remote_addr(chk->whoTo); 3451 chk->whoTo = NULL; 3452 sctp_free_a_chunk(stcb, chk); 3453 chk = TAILQ_FIRST(&asoc->sent_queue); 3454 } 3455 } 3456 if (holds_lock == 0) 3457 SCTP_TCB_SEND_UNLOCK(stcb); 3458 } 3459 3460 void 3461 sctp_abort_notification(struct sctp_tcb *stcb, int error) 3462 { 3463 3464 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3465 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3466 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3467 return; 3468 } 3469 /* Tell them we lost the asoc */ 3470 sctp_report_all_outbound(stcb, 1); 3471 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 3472 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 3473 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 3474 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 3475 } 3476 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL); 3477 } 3478 3479 void 3480 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3481 struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err) 3482 { 3483 uint32_t vtag; 3484 3485 vtag = 0; 3486 if (stcb != NULL) { 3487 /* We have a TCB to abort, send notification too */ 3488 vtag = stcb->asoc.peer_vtag; 3489 sctp_abort_notification(stcb, 0); 3490 } 3491 sctp_send_abort(m, iphlen, sh, vtag, op_err); 3492 if (stcb != NULL) { 3493 /* Ok, now lets free it */ 3494 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 3495 } else { 3496 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3497 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3498 sctp_inpcb_free(inp, 1, 0); 3499 } 3500 } 3501 } 3502 } 3503 3504 void 3505 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3506 int error, struct mbuf *op_err) 3507 { 3508 uint32_t vtag; 3509 3510 if (stcb == NULL) { 3511 /* Got to have a TCB */ 3512 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3513 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3514 sctp_inpcb_free(inp, 1, 0); 3515 } 3516 } 3517 return; 3518 } 3519 vtag = stcb->asoc.peer_vtag; 3520 /* notify the ulp */ 3521 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) 3522 sctp_abort_notification(stcb, error); 3523 /* notify the peer */ 3524 sctp_send_abort_tcb(stcb, op_err); 3525 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 3526 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 3527 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 3528 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 3529 } 3530 /* now free the asoc */ 3531 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 3532 } 3533 3534 void 3535 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh, 3536 struct sctp_inpcb *inp, struct mbuf *op_err) 3537 { 3538 struct sctp_chunkhdr *ch, chunk_buf; 3539 unsigned int chk_length; 3540 3541 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 3542 /* Generate a TO address 
for future reference */ 3543 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 3544 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3545 sctp_inpcb_free(inp, 1, 0); 3546 } 3547 } 3548 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3549 sizeof(*ch), (uint8_t *) & chunk_buf); 3550 while (ch != NULL) { 3551 chk_length = ntohs(ch->chunk_length); 3552 if (chk_length < sizeof(*ch)) { 3553 /* break to abort land */ 3554 break; 3555 } 3556 switch (ch->chunk_type) { 3557 case SCTP_PACKET_DROPPED: 3558 /* we don't respond to pkt-dropped */ 3559 return; 3560 case SCTP_ABORT_ASSOCIATION: 3561 /* we don't respond with an ABORT to an ABORT */ 3562 return; 3563 case SCTP_SHUTDOWN_COMPLETE: 3564 /* 3565 * we ignore it since we are not waiting for it and 3566 * peer is gone 3567 */ 3568 return; 3569 case SCTP_SHUTDOWN_ACK: 3570 sctp_send_shutdown_complete2(m, iphlen, sh); 3571 return; 3572 default: 3573 break; 3574 } 3575 offset += SCTP_SIZE32(chk_length); 3576 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3577 sizeof(*ch), (uint8_t *) & chunk_buf); 3578 } 3579 sctp_send_abort(m, iphlen, sh, 0, op_err); 3580 } 3581 3582 /* 3583 * check the inbound datagram to make sure there is not an abort inside it, 3584 * if there is return 1, else return 0. 3585 */ 3586 int 3587 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill) 3588 { 3589 struct sctp_chunkhdr *ch; 3590 struct sctp_init_chunk *init_chk, chunk_buf; 3591 int offset; 3592 unsigned int chk_length; 3593 3594 offset = iphlen + sizeof(struct sctphdr); 3595 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 3596 (uint8_t *) & chunk_buf); 3597 while (ch != NULL) { 3598 chk_length = ntohs(ch->chunk_length); 3599 if (chk_length < sizeof(*ch)) { 3600 /* packet is probably corrupt */ 3601 break; 3602 } 3603 /* we seem to be ok, is it an abort? */ 3604 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 3605 /* yep, tell them */ 3606 return (1); 3607 } 3608 if (ch->chunk_type == SCTP_INITIATION) { 3609 /* need to update the Vtag */ 3610 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 3611 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf); 3612 if (init_chk != NULL) { 3613 *vtagfill = ntohl(init_chk->init.initiate_tag); 3614 } 3615 } 3616 /* Nope, move to the next chunk */ 3617 offset += SCTP_SIZE32(chk_length); 3618 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3619 sizeof(*ch), (uint8_t *) & chunk_buf); 3620 } 3621 return (0); 3622 } 3623 3624 /* 3625 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 3626 * set (i.e. 
it's 0) so, create this function to compare link local scopes 3627 */ 3628 uint32_t 3629 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 3630 { 3631 struct sockaddr_in6 a, b; 3632 3633 /* save copies */ 3634 a = *addr1; 3635 b = *addr2; 3636 3637 if (a.sin6_scope_id == 0) 3638 if (sa6_recoverscope(&a)) { 3639 /* can't get scope, so can't match */ 3640 return (0); 3641 } 3642 if (b.sin6_scope_id == 0) 3643 if (sa6_recoverscope(&b)) { 3644 /* can't get scope, so can't match */ 3645 return (0); 3646 } 3647 if (a.sin6_scope_id != b.sin6_scope_id) 3648 return (0); 3649 3650 return (1); 3651 } 3652 3653 /* 3654 * returns a sockaddr_in6 with embedded scope recovered and removed 3655 */ 3656 struct sockaddr_in6 * 3657 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 3658 { 3659 /* check and strip embedded scope junk */ 3660 if (addr->sin6_family == AF_INET6) { 3661 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 3662 if (addr->sin6_scope_id == 0) { 3663 *store = *addr; 3664 if (!sa6_recoverscope(store)) { 3665 /* use the recovered scope */ 3666 addr = store; 3667 } 3668 } else { 3669 /* else, return the original "to" addr */ 3670 in6_clearscope(&addr->sin6_addr); 3671 } 3672 } 3673 } 3674 return (addr); 3675 } 3676 3677 /* 3678 * are the two addresses the same? currently a "scopeless" check returns: 1 3679 * if same, 0 if not 3680 */ 3681 __inline int 3682 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 3683 { 3684 3685 /* must be valid */ 3686 if (sa1 == NULL || sa2 == NULL) 3687 return (0); 3688 3689 /* must be the same family */ 3690 if (sa1->sa_family != sa2->sa_family) 3691 return (0); 3692 3693 if (sa1->sa_family == AF_INET6) { 3694 /* IPv6 addresses */ 3695 struct sockaddr_in6 *sin6_1, *sin6_2; 3696 3697 sin6_1 = (struct sockaddr_in6 *)sa1; 3698 sin6_2 = (struct sockaddr_in6 *)sa2; 3699 return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr, 3700 &sin6_2->sin6_addr)); 3701 } else if (sa1->sa_family == AF_INET) { 3702 /* IPv4 addresses */ 3703 struct sockaddr_in *sin_1, *sin_2; 3704 3705 sin_1 = (struct sockaddr_in *)sa1; 3706 sin_2 = (struct sockaddr_in *)sa2; 3707 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 3708 } else { 3709 /* we don't do these... 
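		 * (added note: only AF_INET and AF_INET6 are compared;
		 * any other address family is reported as not equal)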
*/ 3710 return (0); 3711 } 3712 } 3713 3714 void 3715 sctp_print_address(struct sockaddr *sa) 3716 { 3717 3718 if (sa->sa_family == AF_INET6) { 3719 struct sockaddr_in6 *sin6; 3720 char ip6buf[INET6_ADDRSTRLEN]; 3721 3722 sin6 = (struct sockaddr_in6 *)sa; 3723 printf("IPv6 address: %s:%d scope:%u\n", 3724 ip6_sprintf(ip6buf, &sin6->sin6_addr), 3725 ntohs(sin6->sin6_port), 3726 sin6->sin6_scope_id); 3727 } else if (sa->sa_family == AF_INET) { 3728 struct sockaddr_in *sin; 3729 unsigned char *p; 3730 3731 sin = (struct sockaddr_in *)sa; 3732 p = (unsigned char *)&sin->sin_addr; 3733 printf("IPv4 address: %u.%u.%u.%u:%d\n", 3734 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 3735 } else { 3736 printf("?\n"); 3737 } 3738 } 3739 3740 void 3741 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh) 3742 { 3743 if (iph->ip_v == IPVERSION) { 3744 struct sockaddr_in lsa, fsa; 3745 3746 bzero(&lsa, sizeof(lsa)); 3747 lsa.sin_len = sizeof(lsa); 3748 lsa.sin_family = AF_INET; 3749 lsa.sin_addr = iph->ip_src; 3750 lsa.sin_port = sh->src_port; 3751 bzero(&fsa, sizeof(fsa)); 3752 fsa.sin_len = sizeof(fsa); 3753 fsa.sin_family = AF_INET; 3754 fsa.sin_addr = iph->ip_dst; 3755 fsa.sin_port = sh->dest_port; 3756 printf("src: "); 3757 sctp_print_address((struct sockaddr *)&lsa); 3758 printf("dest: "); 3759 sctp_print_address((struct sockaddr *)&fsa); 3760 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 3761 struct ip6_hdr *ip6; 3762 struct sockaddr_in6 lsa6, fsa6; 3763 3764 ip6 = (struct ip6_hdr *)iph; 3765 bzero(&lsa6, sizeof(lsa6)); 3766 lsa6.sin6_len = sizeof(lsa6); 3767 lsa6.sin6_family = AF_INET6; 3768 lsa6.sin6_addr = ip6->ip6_src; 3769 lsa6.sin6_port = sh->src_port; 3770 bzero(&fsa6, sizeof(fsa6)); 3771 fsa6.sin6_len = sizeof(fsa6); 3772 fsa6.sin6_family = AF_INET6; 3773 fsa6.sin6_addr = ip6->ip6_dst; 3774 fsa6.sin6_port = sh->dest_port; 3775 printf("src: "); 3776 sctp_print_address((struct sockaddr *)&lsa6); 3777 printf("dest: "); 3778 sctp_print_address((struct sockaddr *)&fsa6); 3779 } 3780 } 3781 3782 void 3783 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 3784 struct sctp_inpcb *new_inp, 3785 struct sctp_tcb *stcb) 3786 { 3787 /* 3788 * go through our old INP and pull off any control structures that 3789 * belong to stcb and move then to the new inp. 3790 */ 3791 struct socket *old_so, *new_so; 3792 struct sctp_queued_to_read *control, *nctl; 3793 struct sctp_readhead tmp_queue; 3794 struct mbuf *m; 3795 int error; 3796 3797 old_so = old_inp->sctp_socket; 3798 new_so = new_inp->sctp_socket; 3799 TAILQ_INIT(&tmp_queue); 3800 3801 SOCKBUF_LOCK(&(old_so->so_rcv)); 3802 3803 error = sblock(&old_so->so_rcv, 0); 3804 3805 SOCKBUF_UNLOCK(&(old_so->so_rcv)); 3806 if (error) { 3807 /* 3808 * Gak, can't get sblock, we have a problem. data will be 3809 * left stranded.. and we don't dare look at it since the 3810 * other thread may be reading something. Oh well, its a 3811 * screwed up app that does a peeloff OR a accept while 3812 * reading from the main socket... actually its only the 3813 * peeloff() case, since I think read will fail on a 3814 * listening socket.. 
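	 * (note: at this point nothing has been moved yet, so the control
	 * structures belonging to this stcb just stay queued on the old
	 * socket and the peeled-off socket starts out empty.)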
3815 */ 3816 return; 3817 } 3818 /* lock the socket buffers */ 3819 SCTP_INP_READ_LOCK(old_inp); 3820 control = TAILQ_FIRST(&old_inp->read_queue); 3821 /* Pull off all for out target stcb */ 3822 while (control) { 3823 nctl = TAILQ_NEXT(control, next); 3824 if (control->stcb == stcb) { 3825 /* remove it we want it */ 3826 TAILQ_REMOVE(&old_inp->read_queue, control, next); 3827 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 3828 m = control->data; 3829 while (m) { 3830 #ifdef SCTP_SB_LOGGING 3831 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 3832 #endif 3833 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 3834 #ifdef SCTP_SB_LOGGING 3835 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3836 #endif 3837 m = SCTP_BUF_NEXT(m); 3838 } 3839 } 3840 control = nctl; 3841 } 3842 SCTP_INP_READ_UNLOCK(old_inp); 3843 3844 /* Remove the sb-lock on the old socket */ 3845 SOCKBUF_LOCK(&(old_so->so_rcv)); 3846 3847 sbunlock(&old_so->so_rcv); 3848 SOCKBUF_UNLOCK(&(old_so->so_rcv)); 3849 3850 /* Now we move them over to the new socket buffer */ 3851 control = TAILQ_FIRST(&tmp_queue); 3852 SCTP_INP_READ_LOCK(new_inp); 3853 while (control) { 3854 nctl = TAILQ_NEXT(control, next); 3855 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 3856 m = control->data; 3857 while (m) { 3858 #ifdef SCTP_SB_LOGGING 3859 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 3860 #endif 3861 sctp_sballoc(stcb, &new_so->so_rcv, m); 3862 #ifdef SCTP_SB_LOGGING 3863 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3864 #endif 3865 m = SCTP_BUF_NEXT(m); 3866 } 3867 control = nctl; 3868 } 3869 SCTP_INP_READ_UNLOCK(new_inp); 3870 } 3871 3872 3873 void 3874 sctp_add_to_readq(struct sctp_inpcb *inp, 3875 struct sctp_tcb *stcb, 3876 struct sctp_queued_to_read *control, 3877 struct sockbuf *sb, 3878 int end) 3879 { 3880 /* 3881 * Here we must place the control on the end of the socket read 3882 * queue AND increment sb_cc so that select will work properly on 3883 * read. 3884 */ 3885 struct mbuf *m, *prev = NULL; 3886 3887 if (inp == NULL) { 3888 /* Gak, TSNH!! */ 3889 #ifdef INVARIANTS 3890 panic("Gak, inp NULL on add_to_readq"); 3891 #endif 3892 return; 3893 } 3894 SCTP_INP_READ_LOCK(inp); 3895 if (!(control->spec_flags & M_NOTIFICATION)) { 3896 atomic_add_int(&inp->total_recvs, 1); 3897 if (!control->do_not_ref_stcb) { 3898 atomic_add_int(&stcb->total_recvs, 1); 3899 } 3900 } 3901 m = control->data; 3902 control->held_length = 0; 3903 control->length = 0; 3904 while (m) { 3905 if (SCTP_BUF_LEN(m) == 0) { 3906 /* Skip mbufs with NO length */ 3907 if (prev == NULL) { 3908 /* First one */ 3909 control->data = sctp_m_free(m); 3910 m = control->data; 3911 } else { 3912 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 3913 m = SCTP_BUF_NEXT(prev); 3914 } 3915 if (m == NULL) { 3916 control->tail_mbuf = prev;; 3917 } 3918 continue; 3919 } 3920 prev = m; 3921 #ifdef SCTP_SB_LOGGING 3922 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 3923 #endif 3924 sctp_sballoc(stcb, sb, m); 3925 #ifdef SCTP_SB_LOGGING 3926 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3927 #endif 3928 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 3929 m = SCTP_BUF_NEXT(m); 3930 } 3931 if (prev != NULL) { 3932 control->tail_mbuf = prev; 3933 } else { 3934 /* Everything got collapsed out?? 
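	 * (note: we land here when every mbuf in the chain had zero length
	 * and was freed, so there is nothing to queue.  XXX this early
	 * return is reached with the inp read lock, taken near the top of
	 * this function, still held; there is no matching
	 * SCTP_INP_READ_UNLOCK() on this path, which looks unintentional.)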
*/ 3935 return; 3936 } 3937 if (end) { 3938 control->end_added = 1; 3939 } 3940 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 3941 SCTP_INP_READ_UNLOCK(inp); 3942 if (inp && inp->sctp_socket) { 3943 sctp_sorwakeup(inp, inp->sctp_socket); 3944 } 3945 } 3946 3947 3948 int 3949 sctp_append_to_readq(struct sctp_inpcb *inp, 3950 struct sctp_tcb *stcb, 3951 struct sctp_queued_to_read *control, 3952 struct mbuf *m, 3953 int end, 3954 int ctls_cumack, 3955 struct sockbuf *sb) 3956 { 3957 /* 3958 * A partial delivery API event is underway. OR we are appending on 3959 * the reassembly queue. 3960 * 3961 * If PDAPI this means we need to add m to the end of the data. 3962 * Increase the length in the control AND increment the sb_cc. 3963 * Otherwise sb is NULL and all we need to do is put it at the end 3964 * of the mbuf chain. 3965 */ 3966 int len = 0; 3967 struct mbuf *mm, *tail = NULL, *prev = NULL; 3968 3969 if (inp) { 3970 SCTP_INP_READ_LOCK(inp); 3971 } 3972 if (control == NULL) { 3973 get_out: 3974 if (inp) { 3975 SCTP_INP_READ_UNLOCK(inp); 3976 } 3977 return (-1); 3978 } 3979 if (control->end_added) { 3980 /* huh this one is complete? */ 3981 goto get_out; 3982 } 3983 mm = m; 3984 if (mm == NULL) { 3985 goto get_out; 3986 } 3987 while (mm) { 3988 if (SCTP_BUF_LEN(mm) == 0) { 3989 /* Skip mbufs with NO lenght */ 3990 if (prev == NULL) { 3991 /* First one */ 3992 m = sctp_m_free(mm); 3993 mm = m; 3994 } else { 3995 SCTP_BUF_NEXT(prev) = sctp_m_free(mm); 3996 mm = SCTP_BUF_NEXT(prev); 3997 } 3998 continue; 3999 } 4000 prev = mm; 4001 len += SCTP_BUF_LEN(mm); 4002 if (sb) { 4003 #ifdef SCTP_SB_LOGGING 4004 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm)); 4005 #endif 4006 sctp_sballoc(stcb, sb, mm); 4007 #ifdef SCTP_SB_LOGGING 4008 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4009 #endif 4010 } 4011 mm = SCTP_BUF_NEXT(mm); 4012 } 4013 if (prev) { 4014 tail = prev; 4015 } else { 4016 /* Really there should always be a prev */ 4017 if (m == NULL) { 4018 /* Huh nothing left? */ 4019 #ifdef INVARIANTS 4020 panic("Nothing left to add?"); 4021 #else 4022 goto get_out; 4023 #endif 4024 } 4025 tail = m; 4026 } 4027 if (end) { 4028 /* message is complete */ 4029 if (control == stcb->asoc.control_pdapi) { 4030 stcb->asoc.control_pdapi = NULL; 4031 } 4032 control->held_length = 0; 4033 control->end_added = 1; 4034 } 4035 atomic_add_int(&control->length, len); 4036 if (control->tail_mbuf) { 4037 /* append */ 4038 SCTP_BUF_NEXT(control->tail_mbuf) = m; 4039 control->tail_mbuf = tail; 4040 } else { 4041 /* nothing there */ 4042 #ifdef INVARIANTS 4043 if (control->data != NULL) { 4044 panic("This should NOT happen"); 4045 } 4046 #endif 4047 control->data = m; 4048 control->tail_mbuf = tail; 4049 } 4050 /* 4051 * When we are appending in partial delivery, the cum-ack is used 4052 * for the actual pd-api highest tsn on this mbuf. The true cum-ack 4053 * is populated in the outbound sinfo structure from the true cumack 4054 * if the association exists... 
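	 * (so the assignment below tags this control with the highest TSN
	 * appended to it so far; a reader in the middle of a partial
	 * delivery sees that value, while the association-wide cumulative
	 * ack is filled into the outbound sinfo later, in sctp_sorecvmsg(),
	 * if the association still exists.)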
4055 */ 4056 control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack; 4057 if (inp) { 4058 SCTP_INP_READ_UNLOCK(inp); 4059 } 4060 if (inp && inp->sctp_socket) { 4061 sctp_sorwakeup(inp, inp->sctp_socket); 4062 } 4063 return (0); 4064 } 4065 4066 4067 4068 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4069 *************ALTERNATE ROUTING CODE 4070 */ 4071 4072 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4073 *************ALTERNATE ROUTING CODE 4074 */ 4075 4076 struct mbuf * 4077 sctp_generate_invmanparam(int err) 4078 { 4079 /* Return a MBUF with a invalid mandatory parameter */ 4080 struct mbuf *m; 4081 4082 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA); 4083 if (m) { 4084 struct sctp_paramhdr *ph; 4085 4086 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr); 4087 ph = mtod(m, struct sctp_paramhdr *); 4088 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 4089 ph->param_type = htons(err); 4090 } 4091 return (m); 4092 } 4093 4094 #ifdef SCTP_MBCNT_LOGGING 4095 void 4096 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4097 struct sctp_tmit_chunk *tp1, int chk_cnt) 4098 { 4099 if (tp1->data == NULL) { 4100 return; 4101 } 4102 asoc->chunks_on_out_queue -= chk_cnt; 4103 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4104 asoc->total_output_queue_size, 4105 tp1->book_size, 4106 0, 4107 tp1->mbcnt); 4108 if (asoc->total_output_queue_size >= tp1->book_size) { 4109 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4110 } else { 4111 asoc->total_output_queue_size = 0; 4112 } 4113 4114 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4115 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4116 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 4117 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 4118 } else { 4119 stcb->sctp_socket->so_snd.sb_cc = 0; 4120 4121 } 4122 } 4123 } 4124 4125 #endif 4126 4127 int 4128 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 4129 int reason, struct sctpchunk_listhead *queue) 4130 { 4131 int ret_sz = 0; 4132 int notdone; 4133 uint8_t foundeom = 0; 4134 4135 do { 4136 ret_sz += tp1->book_size; 4137 tp1->sent = SCTP_FORWARD_TSN_SKIP; 4138 if (tp1->data) { 4139 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 4140 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1); 4141 sctp_m_freem(tp1->data); 4142 tp1->data = NULL; 4143 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 4144 } 4145 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4146 stcb->asoc.sent_queue_cnt_removeable--; 4147 } 4148 if (queue == &stcb->asoc.send_queue) { 4149 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 4150 /* on to the sent queue */ 4151 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 4152 sctp_next); 4153 stcb->asoc.sent_queue_cnt++; 4154 } 4155 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 4156 SCTP_DATA_NOT_FRAG) { 4157 /* not frag'ed we ae done */ 4158 notdone = 0; 4159 foundeom = 1; 4160 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 4161 /* end of frag, we are done */ 4162 notdone = 0; 4163 foundeom = 1; 4164 } else { 4165 /* 4166 * Its a begin or middle piece, we must mark all of 4167 * it 4168 */ 4169 notdone = 1; 4170 tp1 = TAILQ_NEXT(tp1, sctp_next); 4171 } 4172 } while (tp1 && notdone); 4173 if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) { 4174 /* 4175 * The multi-part message was scattered across the send and 4176 * sent queue. 
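	 * (we only get here when the walk above ran off the end of the sent
	 * queue without seeing the last fragment, so the remaining pieces
	 * must still be sitting on the send queue; the recursive call below
	 * marks those as well and folds their size into ret_sz.)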
4177 */ 4178 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 4179 /* 4180 * recurse throught the send_queue too, starting at the 4181 * beginning. 4182 */ 4183 if (tp1) { 4184 ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason, 4185 &stcb->asoc.send_queue); 4186 } else { 4187 printf("hmm, nothing on the send queue and no EOM?\n"); 4188 } 4189 } 4190 return (ret_sz); 4191 } 4192 4193 /* 4194 * checks to see if the given address, sa, is one that is currently known by 4195 * the kernel note: can't distinguish the same address on multiple interfaces 4196 * and doesn't handle multiple addresses with different zone/scope id's note: 4197 * ifa_ifwithaddr() compares the entire sockaddr struct 4198 */ 4199 struct sctp_ifa * 4200 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, int holds_lock) 4201 { 4202 struct sctp_laddr *laddr; 4203 4204 if (holds_lock == 0) 4205 SCTP_INP_RLOCK(inp); 4206 4207 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4208 if (laddr->ifa == NULL) 4209 continue; 4210 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 4211 continue; 4212 if (addr->sa_family == AF_INET) { 4213 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 4214 laddr->ifa->address.sin.sin_addr.s_addr) { 4215 /* found him. */ 4216 if (holds_lock == 0) 4217 SCTP_INP_RUNLOCK(inp); 4218 return (laddr->ifa); 4219 break; 4220 } 4221 } else if (addr->sa_family == AF_INET6) { 4222 if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr, 4223 &laddr->ifa->address.sin6.sin6_addr)) { 4224 /* found him. */ 4225 if (holds_lock == 0) 4226 SCTP_INP_RUNLOCK(inp); 4227 return (laddr->ifa); 4228 break; 4229 } 4230 } 4231 } 4232 if (holds_lock == 0) 4233 SCTP_INP_RUNLOCK(inp); 4234 return (NULL); 4235 } 4236 4237 struct sctp_ifa * 4238 sctp_find_ifa_in_ifn(struct sctp_ifn *sctp_ifnp, struct sockaddr *addr, 4239 int holds_lock) 4240 { 4241 struct sctp_ifa *sctp_ifap; 4242 4243 if (holds_lock == 0) 4244 SCTP_IPI_ADDR_LOCK(); 4245 4246 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) { 4247 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 4248 continue; 4249 if (addr->sa_family == AF_INET) { 4250 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 4251 sctp_ifap->address.sin.sin_addr.s_addr) { 4252 /* found him. */ 4253 if (holds_lock == 0) 4254 SCTP_IPI_ADDR_UNLOCK(); 4255 return (sctp_ifap); 4256 break; 4257 } 4258 } else if (addr->sa_family == AF_INET6) { 4259 if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr, 4260 &sctp_ifap->address.sin6.sin6_addr)) { 4261 /* found him. 
*/ 4262 if (holds_lock == 0) 4263 SCTP_IPI_ADDR_UNLOCK(); 4264 return (sctp_ifap); 4265 break; 4266 } 4267 } 4268 } 4269 if (holds_lock == 0) 4270 SCTP_IPI_ADDR_UNLOCK(); 4271 return (NULL); 4272 } 4273 4274 uint32_t 4275 sctp_get_ifa_hash_val(struct sockaddr *addr) 4276 { 4277 4278 if (addr->sa_family == AF_INET) { 4279 struct sockaddr_in *sin; 4280 4281 sin = (struct sockaddr_in *)addr; 4282 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 4283 } else if (addr->sa_family == AF_INET6) { 4284 struct sockaddr_in6 *sin6; 4285 uint32_t hash_of_addr; 4286 4287 sin6 = (struct sockaddr_in6 *)addr; 4288 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 4289 sin6->sin6_addr.s6_addr32[1] + 4290 sin6->sin6_addr.s6_addr32[2] + 4291 sin6->sin6_addr.s6_addr32[3]); 4292 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 4293 return (hash_of_addr); 4294 } 4295 return (0); 4296 } 4297 4298 struct sctp_ifa * 4299 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 4300 { 4301 struct sctp_ifa *sctp_ifap; 4302 struct sctp_vrf *vrf; 4303 struct sctp_ifalist *hash_head; 4304 uint32_t hash_of_addr; 4305 4306 vrf = sctp_find_vrf(vrf_id); 4307 if (vrf == NULL) 4308 return (NULL); 4309 4310 hash_of_addr = sctp_get_ifa_hash_val(addr); 4311 if (holds_lock == 0) 4312 SCTP_IPI_ADDR_LOCK(); 4313 4314 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_hashmark)]; 4315 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 4316 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 4317 continue; 4318 if (addr->sa_family == AF_INET) { 4319 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 4320 sctp_ifap->address.sin.sin_addr.s_addr) { 4321 /* found him. */ 4322 if (holds_lock == 0) 4323 SCTP_IPI_ADDR_UNLOCK(); 4324 return (sctp_ifap); 4325 break; 4326 } 4327 } else if (addr->sa_family == AF_INET6) { 4328 if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr, 4329 &sctp_ifap->address.sin6.sin6_addr)) { 4330 /* found him. */ 4331 if (holds_lock == 0) 4332 SCTP_IPI_ADDR_UNLOCK(); 4333 return (sctp_ifap); 4334 break; 4335 } 4336 } 4337 } 4338 if (holds_lock == 0) 4339 SCTP_IPI_ADDR_UNLOCK(); 4340 return (NULL); 4341 } 4342 4343 static void 4344 sctp_user_rcvd(struct sctp_tcb *stcb, int *freed_so_far, int hold_rlock, 4345 uint32_t rwnd_req) 4346 { 4347 /* User pulled some data, do we need a rwnd update? 
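	 * Roughly: recompute what our advertised rwnd would now be, take the
	 * growth since the last value reported to the peer, and if that
	 * growth is at least rwnd_req send a window-update SACK right away
	 * (plus a pass of chunk output, and stop any pending RECV timer);
	 * otherwise just remember the pending amount in
	 * freed_by_sorcv_sincelast so a later call can try again.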
*/ 4348 int r_unlocked = 0; 4349 uint32_t dif, rwnd; 4350 struct socket *so = NULL; 4351 4352 if (stcb == NULL) 4353 return; 4354 4355 atomic_add_int(&stcb->asoc.refcnt, 1); 4356 4357 if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | 4358 SCTP_STATE_SHUTDOWN_RECEIVED | 4359 SCTP_STATE_SHUTDOWN_ACK_SENT) 4360 ) { 4361 /* Pre-check If we are freeing no update */ 4362 goto no_lock; 4363 } 4364 SCTP_INP_INCR_REF(stcb->sctp_ep); 4365 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4366 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 4367 goto out; 4368 } 4369 so = stcb->sctp_socket; 4370 if (so == NULL) { 4371 goto out; 4372 } 4373 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 4374 /* Have you have freed enough to look */ 4375 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4376 sctp_misc_ints(SCTP_ENTER_USER_RECV, 4377 (stcb->asoc.my_rwnd - stcb->asoc.my_last_reported_rwnd), 4378 *freed_so_far, 4379 stcb->freed_by_sorcv_sincelast, 4380 rwnd_req); 4381 #endif 4382 *freed_so_far = 0; 4383 /* Yep, its worth a look and the lock overhead */ 4384 4385 /* Figure out what the rwnd would be */ 4386 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 4387 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 4388 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 4389 } else { 4390 dif = 0; 4391 } 4392 if (dif >= rwnd_req) { 4393 if (hold_rlock) { 4394 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 4395 r_unlocked = 1; 4396 } 4397 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4398 /* 4399 * One last check before we allow the guy possibly 4400 * to get in. There is a race, where the guy has not 4401 * reached the gate. In that case 4402 */ 4403 goto out; 4404 } 4405 SCTP_TCB_LOCK(stcb); 4406 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4407 /* No reports here */ 4408 SCTP_TCB_UNLOCK(stcb); 4409 goto out; 4410 } 4411 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4412 sctp_misc_ints(SCTP_USER_RECV_SACKS, 4413 stcb->asoc.my_rwnd, 4414 stcb->asoc.my_last_reported_rwnd, 4415 stcb->freed_by_sorcv_sincelast, 4416 dif); 4417 #endif 4418 SCTP_STAT_INCR(sctps_wu_sacks_sent); 4419 sctp_send_sack(stcb); 4420 sctp_chunk_output(stcb->sctp_ep, stcb, 4421 SCTP_OUTPUT_FROM_USR_RCVD); 4422 /* make sure no timer is running */ 4423 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 4424 SCTP_TCB_UNLOCK(stcb); 4425 } else { 4426 /* Update how much we have pending */ 4427 stcb->freed_by_sorcv_sincelast = dif; 4428 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4429 sctp_misc_ints(SCTP_USER_RECV_SACKS, 4430 stcb->asoc.my_rwnd, 4431 stcb->asoc.my_last_reported_rwnd, 4432 stcb->freed_by_sorcv_sincelast, 4433 0); 4434 #endif 4435 } 4436 out: 4437 if (so && r_unlocked && hold_rlock) { 4438 SCTP_INP_READ_LOCK(stcb->sctp_ep); 4439 } 4440 SCTP_INP_DECR_REF(stcb->sctp_ep); 4441 no_lock: 4442 atomic_add_int(&stcb->asoc.refcnt, -1); 4443 return; 4444 } 4445 4446 int 4447 sctp_sorecvmsg(struct socket *so, 4448 struct uio *uio, 4449 struct mbuf **mp, 4450 struct sockaddr *from, 4451 int fromlen, 4452 int *msg_flags, 4453 struct sctp_sndrcvinfo *sinfo, 4454 int filling_sinfo) 4455 { 4456 /* 4457 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 4458 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 4459 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
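	 * (note: as written below, MSG_WAITALL is only actually tested in
	 * the mbuf-chain return path, i.e. when the caller passes
	 * mp != NULL.)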
4460 * On the way out we may send out any combination of: 4461 * MSG_NOTIFICATION MSG_EOR 4462 * 4463 */ 4464 struct sctp_inpcb *inp = NULL; 4465 int my_len = 0; 4466 int cp_len = 0, error = 0; 4467 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 4468 struct mbuf *m = NULL, *embuf = NULL; 4469 struct sctp_tcb *stcb = NULL; 4470 int wakeup_read_socket = 0; 4471 int freecnt_applied = 0; 4472 int out_flags = 0, in_flags = 0; 4473 int block_allowed = 1; 4474 int freed_so_far = 0; 4475 int copied_so_far = 0; 4476 int in_eeor_mode = 0; 4477 int no_rcv_needed = 0; 4478 uint32_t rwnd_req = 0; 4479 int hold_sblock = 0; 4480 int hold_rlock = 0; 4481 int alen = 0; 4482 int slen = 0; 4483 int held_length = 0; 4484 4485 if (msg_flags) { 4486 in_flags = *msg_flags; 4487 } else { 4488 in_flags = 0; 4489 } 4490 slen = uio->uio_resid; 4491 /* Pull in and set up our int flags */ 4492 if (in_flags & MSG_OOB) { 4493 /* Out of band's NOT supported */ 4494 return (EOPNOTSUPP); 4495 } 4496 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 4497 return (EINVAL); 4498 } 4499 if ((in_flags & (MSG_DONTWAIT 4500 | MSG_NBIO 4501 )) || 4502 SCTP_SO_IS_NBIO(so)) { 4503 block_allowed = 0; 4504 } 4505 /* setup the endpoint */ 4506 inp = (struct sctp_inpcb *)so->so_pcb; 4507 if (inp == NULL) { 4508 return (EFAULT); 4509 } 4510 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 4511 /* Must be at least a MTU's worth */ 4512 if (rwnd_req < SCTP_MIN_RWND) 4513 rwnd_req = SCTP_MIN_RWND; 4514 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 4515 #ifdef SCTP_RECV_RWND_LOGGING 4516 sctp_misc_ints(SCTP_SORECV_ENTER, 4517 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid); 4518 #endif 4519 SOCKBUF_LOCK(&so->so_rcv); 4520 hold_sblock = 1; 4521 #ifdef SCTP_RECV_RWND_LOGGING 4522 sctp_misc_ints(SCTP_SORECV_ENTERPL, 4523 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid); 4524 #endif 4525 4526 4527 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0)); 4528 if (error) { 4529 goto release_unlocked; 4530 } 4531 restart: 4532 if (hold_sblock == 0) { 4533 SOCKBUF_LOCK(&so->so_rcv); 4534 hold_sblock = 1; 4535 } 4536 sbunlock(&so->so_rcv); 4537 4538 restart_nosblocks: 4539 if (hold_sblock == 0) { 4540 SOCKBUF_LOCK(&so->so_rcv); 4541 hold_sblock = 1; 4542 } 4543 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4544 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 4545 goto out; 4546 } 4547 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 4548 if (so->so_error) { 4549 error = so->so_error; 4550 if ((in_flags & MSG_PEEK) == 0) 4551 so->so_error = 0; 4552 } else { 4553 error = ENOTCONN; 4554 } 4555 goto out; 4556 } 4557 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) { 4558 /* we need to wait for data */ 4559 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4560 sctp_misc_ints(SCTP_SORECV_BLOCKSA, 4561 0, 0, so->so_rcv.sb_cc, uio->uio_resid); 4562 #endif 4563 if ((so->so_rcv.sb_cc == 0) && 4564 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 4565 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 4566 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 4567 /* 4568 * For active open side clear flags for 4569 * re-use passive open is blocked by 4570 * connect. 
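	 * (concretely: if the association was aborted we return ECONNRESET
	 * and, on the active-open side, clear SCTP_PCB_FLAGS_WAS_ABORTED so
	 * the error is reported only once; otherwise an endpoint that was
	 * previously connected clears SCTP_PCB_FLAGS_WAS_CONNECTED and gets
	 * a single EOF-style return, while one that never connected gets
	 * ENOTCONN.  The SS_IS* state bits are cleared in both cases so the
	 * socket can be reused for a new active open.)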
4571 */ 4572 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 4573 /* 4574 * You were aborted, passive side 4575 * always hits here 4576 */ 4577 error = ECONNRESET; 4578 /* 4579 * You get this once if you are 4580 * active open side 4581 */ 4582 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4583 /* 4584 * Remove flag if on the 4585 * active open side 4586 */ 4587 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED; 4588 } 4589 } 4590 so->so_state &= ~(SS_ISCONNECTING | 4591 SS_ISDISCONNECTING | 4592 SS_ISCONFIRMING | 4593 SS_ISCONNECTED); 4594 if (error == 0) { 4595 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 4596 error = ENOTCONN; 4597 } else { 4598 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED; 4599 } 4600 } 4601 goto out; 4602 } 4603 } 4604 error = sbwait(&so->so_rcv); 4605 if (error) { 4606 goto out; 4607 } 4608 held_length = 0; 4609 goto restart_nosblocks; 4610 } else if (so->so_rcv.sb_cc == 0) { 4611 if (so->so_error) { 4612 error = so->so_error; 4613 if ((in_flags & MSG_PEEK) == 0) 4614 so->so_error = 0; 4615 } else { 4616 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 4617 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4618 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 4619 /* 4620 * For active open side clear flags 4621 * for re-use passive open is 4622 * blocked by connect. 4623 */ 4624 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 4625 /* 4626 * You were aborted, passive 4627 * side always hits here 4628 */ 4629 error = ECONNRESET; 4630 /* 4631 * You get this once if you 4632 * are active open side 4633 */ 4634 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4635 /* 4636 * Remove flag if on 4637 * the active open 4638 * side 4639 */ 4640 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED; 4641 } 4642 } 4643 so->so_state &= ~(SS_ISCONNECTING | 4644 SS_ISDISCONNECTING | 4645 SS_ISCONFIRMING | 4646 SS_ISCONNECTED); 4647 if (error == 0) { 4648 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 4649 error = ENOTCONN; 4650 } else { 4651 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED; 4652 } 4653 } 4654 goto out; 4655 } 4656 } 4657 error = EWOULDBLOCK; 4658 } 4659 goto out; 4660 } 4661 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0)); 4662 /* we possibly have data we can read */ 4663 control = TAILQ_FIRST(&inp->read_queue); 4664 if (control == NULL) { 4665 /* 4666 * This could be happening since the appender did the 4667 * increment but as not yet did the tailq insert onto the 4668 * read_queue 4669 */ 4670 if (hold_rlock == 0) { 4671 SCTP_INP_READ_LOCK(inp); 4672 hold_rlock = 1; 4673 } 4674 control = TAILQ_FIRST(&inp->read_queue); 4675 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 4676 #ifdef INVARIANTS 4677 panic("Huh, its non zero and nothing on control?"); 4678 #endif 4679 so->so_rcv.sb_cc = 0; 4680 } 4681 SCTP_INP_READ_UNLOCK(inp); 4682 hold_rlock = 0; 4683 goto restart; 4684 } 4685 if ((control->length == 0) && 4686 (control->do_not_ref_stcb)) { 4687 /* 4688 * Clean up code for freeing assoc that left behind a 4689 * pdapi.. maybe a peer in EEOR that just closed after 4690 * sending and never indicated a EOR. 4691 */ 4692 if (hold_rlock == 0) { 4693 hold_rlock = 1; 4694 SCTP_INP_READ_LOCK(inp); 4695 } 4696 control->held_length = 0; 4697 if (control->data) { 4698 /* Hmm there is data here .. 
fix */ 4699 struct mbuf *m; 4700 int cnt = 0; 4701 4702 m = control->data; 4703 while (m) { 4704 cnt += SCTP_BUF_LEN(m); 4705 if (SCTP_BUF_NEXT(m) == NULL) { 4706 control->tail_mbuf = m; 4707 control->end_added = 1; 4708 } 4709 m = SCTP_BUF_NEXT(m); 4710 } 4711 control->length = cnt; 4712 } else { 4713 /* remove it */ 4714 TAILQ_REMOVE(&inp->read_queue, control, next); 4715 /* Add back any hiddend data */ 4716 sctp_free_remote_addr(control->whoFrom); 4717 sctp_free_a_readq(stcb, control); 4718 } 4719 if (hold_rlock) { 4720 hold_rlock = 0; 4721 SCTP_INP_READ_UNLOCK(inp); 4722 } 4723 goto restart; 4724 } 4725 if (control->length == 0) { 4726 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 4727 (filling_sinfo)) { 4728 /* find a more suitable one then this */ 4729 ctl = TAILQ_NEXT(control, next); 4730 while (ctl) { 4731 if ((ctl->stcb != control->stcb) && (ctl->length)) { 4732 /* found one */ 4733 control = ctl; 4734 goto found_one; 4735 } 4736 ctl = TAILQ_NEXT(ctl, next); 4737 } 4738 } 4739 /* 4740 * if we reach here, not suitable replacement is available 4741 * <or> fragment interleave is NOT on. So stuff the sb_cc 4742 * into the our held count, and its time to sleep again. 4743 */ 4744 held_length = so->so_rcv.sb_cc; 4745 control->held_length = so->so_rcv.sb_cc; 4746 goto restart; 4747 } 4748 /* Clear the held length since there is something to read */ 4749 control->held_length = 0; 4750 if (hold_rlock) { 4751 SCTP_INP_READ_UNLOCK(inp); 4752 hold_rlock = 0; 4753 } 4754 found_one: 4755 /* 4756 * If we reach here, control has a some data for us to read off. 4757 * Note that stcb COULD be NULL. 4758 */ 4759 if (hold_sblock) { 4760 SOCKBUF_UNLOCK(&so->so_rcv); 4761 hold_sblock = 0; 4762 } 4763 stcb = control->stcb; 4764 if (stcb) { 4765 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 4766 (control->do_not_ref_stcb == 0)) { 4767 if (freecnt_applied == 0) 4768 stcb = NULL; 4769 } else if (control->do_not_ref_stcb == 0) { 4770 /* you can't free it on me please */ 4771 /* 4772 * The lock on the socket buffer protects us so the 4773 * free code will stop. But since we used the 4774 * socketbuf lock and the sender uses the tcb_lock 4775 * to increment, we need to use the atomic add to 4776 * the refcnt 4777 */ 4778 atomic_add_int(&stcb->asoc.refcnt, 1); 4779 freecnt_applied = 1; 4780 /* 4781 * Setup to remember how much we have not yet told 4782 * the peer our rwnd has opened up. Note we grab the 4783 * value from the tcb from last time. Note too that 4784 * sack sending clears this when a sack is sent.. 4785 * which is fine. Once we hit the rwnd_req, we then 4786 * will go to the sctp_user_rcvd() that will not 4787 * lock until it KNOWs it MUST send a WUP-SACK. 
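	 * (the matching refcnt drop is done under freecnt_applied in the
	 * "out:" section at the bottom of this function, where any freed
	 * bytes we never reported are also written back into
	 * stcb->freed_by_sorcv_sincelast so the next read resumes the
	 * count.)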
4788 * 4789 */ 4790 freed_so_far = stcb->freed_by_sorcv_sincelast; 4791 stcb->freed_by_sorcv_sincelast = 0; 4792 } 4793 } 4794 /* First lets get off the sinfo and sockaddr info */ 4795 if ((sinfo) && filling_sinfo) { 4796 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo)); 4797 nxt = TAILQ_NEXT(control, next); 4798 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) { 4799 struct sctp_extrcvinfo *s_extra; 4800 4801 s_extra = (struct sctp_extrcvinfo *)sinfo; 4802 if (nxt) { 4803 s_extra->next_flags = SCTP_NEXT_MSG_AVAIL; 4804 if (nxt->sinfo_flags & SCTP_UNORDERED) { 4805 s_extra->next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 4806 } 4807 if (nxt->spec_flags & M_NOTIFICATION) { 4808 s_extra->next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 4809 } 4810 s_extra->next_asocid = nxt->sinfo_assoc_id; 4811 s_extra->next_length = nxt->length; 4812 s_extra->next_ppid = nxt->sinfo_ppid; 4813 s_extra->next_stream = nxt->sinfo_stream; 4814 if (nxt->tail_mbuf != NULL) { 4815 if (nxt->end_added) { 4816 s_extra->next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 4817 } 4818 } 4819 } else { 4820 /* 4821 * we explicitly 0 this, since the memcpy 4822 * got some other things beyond the older 4823 * sinfo_ that is on the control's structure 4824 * :-D 4825 */ 4826 s_extra->next_flags = SCTP_NO_NEXT_MSG; 4827 s_extra->next_asocid = 0; 4828 s_extra->next_length = 0; 4829 s_extra->next_ppid = 0; 4830 s_extra->next_stream = 0; 4831 } 4832 } 4833 /* 4834 * update off the real current cum-ack, if we have an stcb. 4835 */ 4836 if (stcb) 4837 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 4838 /* 4839 * mask off the high bits, we keep the actual chunk bits in 4840 * there. 4841 */ 4842 sinfo->sinfo_flags &= 0x00ff; 4843 } 4844 if (fromlen && from) { 4845 struct sockaddr *to; 4846 4847 #ifdef INET 4848 cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin.sin_len); 4849 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 4850 ((struct sockaddr_in *)from)->sin_port = control->port_from; 4851 #else 4852 /* No AF_INET use AF_INET6 */ 4853 cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin6.sin6_len); 4854 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 4855 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from; 4856 #endif 4857 4858 to = from; 4859 #if defined(INET) && defined(INET6) 4860 if ((inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) && 4861 (to->sa_family == AF_INET) && 4862 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) { 4863 struct sockaddr_in *sin; 4864 struct sockaddr_in6 sin6; 4865 4866 sin = (struct sockaddr_in *)to; 4867 bzero(&sin6, sizeof(sin6)); 4868 sin6.sin6_family = AF_INET6; 4869 sin6.sin6_len = sizeof(struct sockaddr_in6); 4870 sin6.sin6_addr.s6_addr16[2] = 0xffff; 4871 bcopy(&sin->sin_addr, 4872 &sin6.sin6_addr.s6_addr16[3], 4873 sizeof(sin6.sin6_addr.s6_addr16[3])); 4874 sin6.sin6_port = sin->sin_port; 4875 memcpy(from, (caddr_t)&sin6, sizeof(sin6)); 4876 } 4877 #endif 4878 #if defined(INET6) 4879 { 4880 struct sockaddr_in6 lsa6, *to6; 4881 4882 to6 = (struct sockaddr_in6 *)to; 4883 sctp_recover_scope_mac(to6, (&lsa6)); 4884 } 4885 #endif 4886 } 4887 /* now copy out what data we can */ 4888 if (mp == NULL) { 4889 /* copy out each mbuf in the chain up to length */ 4890 get_more_data: 4891 m = control->data; 4892 while (m) { 4893 /* Move out all we can */ 4894 cp_len = (int)uio->uio_resid; 4895 my_len = (int)SCTP_BUF_LEN(m); 4896 if (cp_len > my_len) { 4897 /* not enough in this buf */ 4898 cp_len = my_len; 4899 } 4900 if (hold_rlock) { 4901 SCTP_INP_READ_UNLOCK(inp); 4902 
hold_rlock = 0; 4903 } 4904 if (cp_len > 0) 4905 error = uiomove(mtod(m, char *), cp_len, uio); 4906 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4907 sctp_misc_ints(SCTP_SORCV_DOESCPY, 4908 so->so_rcv.sb_cc, 4909 cp_len, 4910 0, 4911 0); 4912 #endif 4913 /* re-read */ 4914 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4915 goto release; 4916 } 4917 if (stcb && 4918 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4919 no_rcv_needed = 1; 4920 } 4921 if (error) { 4922 /* error we are out of here */ 4923 goto release; 4924 } 4925 if ((SCTP_BUF_NEXT(m) == NULL) && 4926 (cp_len >= SCTP_BUF_LEN(m)) && 4927 ((control->end_added == 0) || 4928 (control->end_added && (TAILQ_NEXT(control, next) == NULL))) 4929 ) { 4930 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4931 sctp_misc_ints(SCTP_SORCV_DOESLCK, 4932 so->so_rcv.sb_cc, 4933 cp_len, 4934 SCTP_BUF_LEN(m), 4935 control->length); 4936 #endif 4937 SCTP_INP_READ_LOCK(inp); 4938 hold_rlock = 1; 4939 } 4940 if (cp_len == SCTP_BUF_LEN(m)) { 4941 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4942 sctp_misc_ints(SCTP_SORCV_DOESADJ, 4943 so->so_rcv.sb_cc, 4944 control->length, 4945 cp_len, 4946 0); 4947 #endif 4948 if ((SCTP_BUF_NEXT(m) == NULL) && 4949 (control->end_added)) { 4950 out_flags |= MSG_EOR; 4951 } 4952 if (control->spec_flags & M_NOTIFICATION) { 4953 out_flags |= MSG_NOTIFICATION; 4954 } 4955 /* we ate up the mbuf */ 4956 if (in_flags & MSG_PEEK) { 4957 /* just looking */ 4958 m = SCTP_BUF_NEXT(m); 4959 copied_so_far += cp_len; 4960 } else { 4961 /* dispose of the mbuf */ 4962 #ifdef SCTP_SB_LOGGING 4963 sctp_sblog(&so->so_rcv, 4964 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4965 #endif 4966 sctp_sbfree(control, stcb, &so->so_rcv, m); 4967 #ifdef SCTP_SB_LOGGING 4968 sctp_sblog(&so->so_rcv, 4969 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4970 #endif 4971 embuf = m; 4972 copied_so_far += cp_len; 4973 freed_so_far += cp_len; 4974 alen = atomic_fetchadd_int(&control->length, -(cp_len)); 4975 if (alen < cp_len) { 4976 panic("Control length goes negative?"); 4977 } 4978 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4979 sctp_misc_ints(SCTP_SORCV_PASSBF, 4980 so->so_rcv.sb_cc, 4981 control->length, 4982 0, 4983 0); 4984 #endif 4985 control->data = sctp_m_free(m); 4986 m = control->data; 4987 /* 4988 * been through it all, must hold sb 4989 * lock ok to null tail 4990 */ 4991 if (control->data == NULL) { 4992 #ifdef INVARIANTS 4993 if ((control->end_added == 0) || 4994 (TAILQ_NEXT(control, next) == NULL)) { 4995 /* 4996 * If the end is not 4997 * added, OR the 4998 * next is NOT null 4999 * we MUST have the 5000 * lock. 5001 */ 5002 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 5003 panic("Hmm we don't own the lock?"); 5004 } 5005 } 5006 #endif 5007 control->tail_mbuf = NULL; 5008 #ifdef INVARIANTS 5009 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 5010 panic("end_added, nothing left and no MSG_EOR"); 5011 } 5012 #endif 5013 } 5014 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 5015 sctp_misc_ints(SCTP_SORCV_ADJD, 5016 so->so_rcv.sb_cc, 5017 control->length, 5018 0, 5019 0); 5020 #endif 5021 } 5022 } else { 5023 /* Do we need to trim the mbuf? */ 5024 if (control->spec_flags & M_NOTIFICATION) { 5025 out_flags |= MSG_NOTIFICATION; 5026 } 5027 if ((in_flags & MSG_PEEK) == 0) { 5028 SCTP_BUF_RESV_UF(m, cp_len); 5029 SCTP_BUF_LEN(m) -= cp_len; 5030 #ifdef SCTP_SB_LOGGING 5031 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5032 #endif 5033 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5034 if (stcb) { 5035 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5036 } 5037 copied_so_far += cp_len; 5038 embuf = m; 5039 freed_so_far += cp_len; 5040 #ifdef SCTP_SB_LOGGING 5041 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5042 SCTP_LOG_SBRESULT, 0); 5043 #endif 5044 alen = atomic_fetchadd_int(&control->length, -(cp_len)); 5045 if (alen < cp_len) { 5046 panic("Control length goes negative2?"); 5047 } 5048 } else { 5049 copied_so_far += cp_len; 5050 } 5051 } 5052 if ((out_flags & MSG_EOR) || 5053 (uio->uio_resid == 0) 5054 ) { 5055 break; 5056 } 5057 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5058 (control->do_not_ref_stcb == 0) && 5059 (freed_so_far >= rwnd_req)) { 5060 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5061 } 5062 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 5063 sctp_misc_ints(SCTP_SORCV_BOTWHILE, 5064 so->so_rcv.sb_cc, 5065 control->length, 5066 0, 5067 0); 5068 #endif 5069 5070 } /* end while(m) */ 5071 /* 5072 * At this point we have looked at it all and we either have 5073 * a MSG_EOR/or read all the user wants... <OR> 5074 * control->length == 0. 5075 */ 5076 if ((out_flags & MSG_EOR) && 5077 ((in_flags & MSG_PEEK) == 0)) { 5078 /* we are done with this control */ 5079 if (control->length == 0) { 5080 if (control->data) { 5081 #ifdef INVARIANTS 5082 panic("control->data not null at read eor?"); 5083 #else 5084 printf("Strange, data left in the control buffer .. invarients would panic?\n"); 5085 sctp_m_freem(control->data); 5086 control->data = NULL; 5087 #endif 5088 } 5089 done_with_control: 5090 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 5091 sctp_misc_ints(SCTP_SORCV_FREECTL, 5092 so->so_rcv.sb_cc, 5093 0, 5094 0, 5095 0); 5096 #endif 5097 if (TAILQ_NEXT(control, next) == NULL) { 5098 /* 5099 * If we don't have a next we need a 5100 * lock, if there is a next interupt 5101 * is filling ahead of us and we 5102 * don't need a lock to remove this 5103 * guy (which is the head of the 5104 * queue). 5105 */ 5106 if (hold_rlock == 0) { 5107 SCTP_INP_READ_LOCK(inp); 5108 hold_rlock = 1; 5109 } 5110 } 5111 TAILQ_REMOVE(&inp->read_queue, control, next); 5112 /* Add back any hiddend data */ 5113 if (control->held_length) { 5114 held_length = 0; 5115 control->held_length = 0; 5116 wakeup_read_socket = 1; 5117 } 5118 no_rcv_needed = control->do_not_ref_stcb; 5119 sctp_free_remote_addr(control->whoFrom); 5120 control->data = NULL; 5121 sctp_free_a_readq(stcb, control); 5122 control = NULL; 5123 if ((freed_so_far >= rwnd_req) && (no_rcv_needed == 0)) 5124 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5125 5126 } else { 5127 /* 5128 * The user did not read all of this 5129 * message, turn off the returned MSG_EOR 5130 * since we are leaving more behind on the 5131 * control to read. 5132 */ 5133 #ifdef INVARIANTS 5134 if (control->end_added && (control->data == NULL) && 5135 (control->tail_mbuf == NULL)) { 5136 panic("Gak, control->length is corrupt?"); 5137 } 5138 #endif 5139 no_rcv_needed = control->do_not_ref_stcb; 5140 out_flags &= ~MSG_EOR; 5141 } 5142 } 5143 if (out_flags & MSG_EOR) { 5144 goto release; 5145 } 5146 if ((uio->uio_resid == 0) || 5147 ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1))) 5148 ) { 5149 goto release; 5150 } 5151 /* 5152 * If I hit here the receiver wants more and this message is 5153 * NOT done (pd-api). So two questions. Can we block? if not 5154 * we are done. 
Did the user NOT set MSG_WAITALL? 5155 */ 5156 if (block_allowed == 0) { 5157 goto release; 5158 } 5159 /* 5160 * We need to wait for more data a few things: - We don't 5161 * sbunlock() so we don't get someone else reading. - We 5162 * must be sure to account for the case where what is added 5163 * is NOT to our control when we wakeup. 5164 */ 5165 5166 /* 5167 * Do we need to tell the transport a rwnd update might be 5168 * needed before we go to sleep? 5169 */ 5170 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5171 ((freed_so_far >= rwnd_req) && 5172 (control->do_not_ref_stcb == 0) && 5173 (no_rcv_needed == 0))) { 5174 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5175 } 5176 wait_some_more: 5177 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5178 goto release; 5179 } 5180 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5181 goto release; 5182 5183 if (hold_rlock == 1) { 5184 SCTP_INP_READ_UNLOCK(inp); 5185 hold_rlock = 0; 5186 } 5187 if (hold_sblock == 0) { 5188 SOCKBUF_LOCK(&so->so_rcv); 5189 hold_sblock = 1; 5190 } 5191 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 5192 if (stcb) 5193 sctp_misc_ints(SCTP_SORECV_BLOCKSB, 5194 freed_so_far, 5195 stcb->asoc.my_rwnd, 5196 so->so_rcv.sb_cc, 5197 uio->uio_resid); 5198 else 5199 sctp_misc_ints(SCTP_SORECV_BLOCKSB, 5200 freed_so_far, 5201 0, 5202 so->so_rcv.sb_cc, 5203 uio->uio_resid); 5204 #endif 5205 if (so->so_rcv.sb_cc <= control->held_length) { 5206 error = sbwait(&so->so_rcv); 5207 if (error) { 5208 goto release; 5209 } 5210 control->held_length = 0; 5211 } 5212 if (hold_sblock) { 5213 SOCKBUF_UNLOCK(&so->so_rcv); 5214 hold_sblock = 0; 5215 } 5216 if (control->length == 0) { 5217 /* still nothing here */ 5218 if (control->end_added == 1) { 5219 /* he aborted, or is done i.e.did a shutdown */ 5220 out_flags |= MSG_EOR; 5221 if (control->pdapi_aborted) 5222 out_flags |= MSG_TRUNC; 5223 goto done_with_control; 5224 } 5225 if (so->so_rcv.sb_cc > held_length) { 5226 control->held_length = so->so_rcv.sb_cc; 5227 held_length = 0; 5228 } 5229 goto wait_some_more; 5230 } else if (control->data == NULL) { 5231 /* 5232 * we must re-sync since data is probably being 5233 * added 5234 */ 5235 SCTP_INP_READ_LOCK(inp); 5236 if ((control->length > 0) && (control->data == NULL)) { 5237 /* 5238 * big trouble.. we have the lock and its 5239 * corrupt? 5240 */ 5241 panic("Impossible data==NULL length !=0"); 5242 } 5243 SCTP_INP_READ_UNLOCK(inp); 5244 /* We will fall around to get more data */ 5245 } 5246 goto get_more_data; 5247 } else { 5248 /* copy out the mbuf chain */ 5249 get_more_data2: 5250 /* 5251 * Do we have a uio, I doubt it if so we grab the size from 5252 * it, if not you get it all 5253 */ 5254 if (uio) 5255 cp_len = uio->uio_resid; 5256 else 5257 cp_len = control->length; 5258 5259 if ((uint32_t) cp_len >= control->length) { 5260 /* easy way */ 5261 if ((control->end_added == 0) || 5262 (TAILQ_NEXT(control, next) == NULL)) { 5263 /* Need to get rlock */ 5264 if (hold_rlock == 0) { 5265 SCTP_INP_READ_LOCK(inp); 5266 hold_rlock = 1; 5267 } 5268 } 5269 if (control->end_added) { 5270 out_flags |= MSG_EOR; 5271 } 5272 if (control->spec_flags & M_NOTIFICATION) { 5273 out_flags |= MSG_NOTIFICATION; 5274 } 5275 if (uio) 5276 uio->uio_resid -= control->length; 5277 *mp = control->data; 5278 m = control->data; 5279 while (m) { 5280 #ifdef SCTP_SB_LOGGING 5281 sctp_sblog(&so->so_rcv, 5282 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5283 #endif 5284 sctp_sbfree(control, stcb, &so->so_rcv, m); 5285 freed_so_far += SCTP_BUF_LEN(m); 5286 #ifdef SCTP_SB_LOGGING 5287 sctp_sblog(&so->so_rcv, 5288 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5289 #endif 5290 m = SCTP_BUF_NEXT(m); 5291 } 5292 control->data = control->tail_mbuf = NULL; 5293 control->length = 0; 5294 if (out_flags & MSG_EOR) { 5295 /* Done with this control */ 5296 goto done_with_control; 5297 } 5298 /* still more to do with this conntrol */ 5299 /* do we really support msg_waitall here? */ 5300 if ((block_allowed == 0) || 5301 ((in_flags & MSG_WAITALL) == 0)) { 5302 goto release; 5303 } 5304 wait_some_more2: 5305 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 5306 goto release; 5307 if (hold_rlock == 1) { 5308 SCTP_INP_READ_UNLOCK(inp); 5309 hold_rlock = 0; 5310 } 5311 if (hold_sblock == 0) { 5312 SOCKBUF_LOCK(&so->so_rcv); 5313 hold_sblock = 1; 5314 } 5315 if (so->so_rcv.sb_cc <= control->held_length) { 5316 error = sbwait(&so->so_rcv); 5317 if (error) { 5318 goto release; 5319 } 5320 } 5321 if (hold_sblock) { 5322 SOCKBUF_UNLOCK(&so->so_rcv); 5323 hold_sblock = 0; 5324 } 5325 if (control->length == 0) { 5326 /* still nothing here */ 5327 if (control->end_added == 1) { 5328 /* 5329 * he aborted, or is done i.e. 5330 * shutdown 5331 */ 5332 out_flags |= MSG_EOR; 5333 if (control->pdapi_aborted) 5334 out_flags |= MSG_TRUNC; 5335 goto done_with_control; 5336 } 5337 if (so->so_rcv.sb_cc > held_length) { 5338 control->held_length = so->so_rcv.sb_cc; 5339 /* 5340 * We don't use held_length while 5341 * getting a message 5342 */ 5343 held_length = 0; 5344 } 5345 goto wait_some_more2; 5346 } 5347 goto get_more_data2; 5348 } else { 5349 /* hard way mbuf by mbuf */ 5350 m = control->data; 5351 if (control->end_added == 0) { 5352 /* need the rlock */ 5353 if (hold_rlock == 0) { 5354 SCTP_INP_READ_LOCK(inp); 5355 hold_rlock = 1; 5356 } 5357 } 5358 if (control->spec_flags & M_NOTIFICATION) { 5359 out_flags |= MSG_NOTIFICATION; 5360 } 5361 while ((m) && (cp_len > 0)) { 5362 if (cp_len >= SCTP_BUF_LEN(m)) { 5363 *mp = m; 5364 atomic_subtract_int(&control->length, SCTP_BUF_LEN(m)); 5365 if (uio) 5366 uio->uio_resid -= SCTP_BUF_LEN(m); 5367 cp_len -= SCTP_BUF_LEN(m); 5368 control->data = SCTP_BUF_NEXT(m); 5369 SCTP_BUF_NEXT(m) = NULL; 5370 #ifdef SCTP_SB_LOGGING 5371 sctp_sblog(&so->so_rcv, 5372 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5373 #endif 5374 sctp_sbfree(control, stcb, &so->so_rcv, m); 5375 freed_so_far += SCTP_BUF_LEN(m); 5376 #ifdef SCTP_SB_LOGGING 5377 sctp_sblog(&so->so_rcv, 5378 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5379 #endif 5380 mp = &SCTP_BUF_NEXT(m); 5381 m = control->data; 5382 } else { 5383 /* 5384 * got all he wants and its part of 5385 * this mbuf only. 
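	 * (here we hand back a copy of just the first cp_len bytes via
	 * SCTP_M_COPYM and then trim the original mbuf in place, so the
	 * unread remainder of the message stays on the read queue for the
	 * next call.)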
5386 */ 5387 if (uio) 5388 uio->uio_resid -= SCTP_BUF_LEN(m); 5389 cp_len -= SCTP_BUF_LEN(m); 5390 if (hold_rlock) { 5391 SCTP_INP_READ_UNLOCK(inp); 5392 hold_rlock = 0; 5393 } 5394 if (hold_sblock) { 5395 SOCKBUF_UNLOCK(&so->so_rcv); 5396 hold_sblock = 0; 5397 } 5398 *mp = SCTP_M_COPYM(m, 0, cp_len, 5399 M_TRYWAIT 5400 ); 5401 #ifdef SCTP_LOCK_LOGGING 5402 sctp_log_lock(inp, stcb, SCTP_LOG_LOCK_SOCKBUF_R); 5403 #endif 5404 if (hold_sblock == 0) { 5405 SOCKBUF_LOCK(&so->so_rcv); 5406 hold_sblock = 1; 5407 } 5408 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5409 goto release; 5410 5411 if (stcb && 5412 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5413 no_rcv_needed = 1; 5414 } 5415 SCTP_BUF_RESV_UF(m, cp_len); 5416 SCTP_BUF_LEN(m) -= cp_len; 5417 #ifdef SCTP_SB_LOGGING 5418 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5419 #endif 5420 freed_so_far += cp_len; 5421 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5422 if (stcb) { 5423 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5424 if ((freed_so_far >= rwnd_req) && 5425 (control->do_not_ref_stcb == 0) && 5426 (no_rcv_needed == 0)) 5427 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5428 } 5429 #ifdef SCTP_SB_LOGGING 5430 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5431 SCTP_LOG_SBRESULT, 0); 5432 #endif 5433 goto release; 5434 } 5435 } 5436 } 5437 } 5438 release: 5439 if (hold_rlock == 1) { 5440 SCTP_INP_READ_UNLOCK(inp); 5441 hold_rlock = 0; 5442 } 5443 if (hold_sblock == 0) { 5444 SOCKBUF_LOCK(&so->so_rcv); 5445 hold_sblock = 1; 5446 } 5447 sbunlock(&so->so_rcv); 5448 5449 release_unlocked: 5450 if (hold_sblock) { 5451 SOCKBUF_UNLOCK(&so->so_rcv); 5452 hold_sblock = 0; 5453 } 5454 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 5455 if ((freed_so_far >= rwnd_req) && 5456 (control && (control->do_not_ref_stcb == 0)) && 5457 (no_rcv_needed == 0)) 5458 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5459 } 5460 if (msg_flags) 5461 *msg_flags |= out_flags; 5462 out: 5463 if (hold_rlock == 1) { 5464 SCTP_INP_READ_UNLOCK(inp); 5465 hold_rlock = 0; 5466 } 5467 if (hold_sblock) { 5468 SOCKBUF_UNLOCK(&so->so_rcv); 5469 hold_sblock = 0; 5470 } 5471 if (freecnt_applied) { 5472 /* 5473 * The lock on the socket buffer protects us so the free 5474 * code will stop. But since we used the socketbuf lock and 5475 * the sender uses the tcb_lock to increment, we need to use 5476 * the atomic add to the refcnt. 5477 */ 5478 if (stcb == NULL) { 5479 panic("stcb for refcnt has gone NULL?"); 5480 } 5481 atomic_add_int(&stcb->asoc.refcnt, -1); 5482 freecnt_applied = 0; 5483 /* Save the value back for next time */ 5484 stcb->freed_by_sorcv_sincelast = freed_so_far; 5485 } 5486 #ifdef SCTP_RECV_RWND_LOGGING 5487 if (stcb) { 5488 sctp_misc_ints(SCTP_SORECV_DONE, 5489 freed_so_far, 5490 ((uio) ? (slen - uio->uio_resid) : slen), 5491 stcb->asoc.my_rwnd, 5492 so->so_rcv.sb_cc); 5493 } else { 5494 sctp_misc_ints(SCTP_SORECV_DONE, 5495 freed_so_far, 5496 ((uio) ? 
(slen - uio->uio_resid) : slen), 5497 0, 5498 so->so_rcv.sb_cc); 5499 } 5500 #endif 5501 if (wakeup_read_socket) { 5502 sctp_sorwakeup(inp, so); 5503 } 5504 return (error); 5505 } 5506 5507 5508 #ifdef SCTP_MBUF_LOGGING 5509 struct mbuf * 5510 sctp_m_free(struct mbuf *m) 5511 { 5512 if (SCTP_BUF_IS_EXTENDED(m)) { 5513 sctp_log_mb(m, SCTP_MBUF_IFREE); 5514 } 5515 return (m_free(m)); 5516 } 5517 5518 void 5519 sctp_m_freem(struct mbuf *mb) 5520 { 5521 while (mb != NULL) 5522 mb = sctp_m_free(mb); 5523 } 5524 5525 #endif 5526 5527 int 5528 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 5529 { 5530 /* 5531 * Given a local address. For all associations that holds the 5532 * address, request a peer-set-primary. 5533 */ 5534 struct sctp_ifa *ifa; 5535 struct sctp_laddr *wi; 5536 5537 ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0); 5538 if (ifa == NULL) { 5539 return (EADDRNOTAVAIL); 5540 } 5541 /* 5542 * Now that we have the ifa we must awaken the iterator with this 5543 * message. 5544 */ 5545 wi = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr); 5546 if (wi == NULL) { 5547 return (ENOMEM); 5548 } 5549 /* Now incr the count and int wi structure */ 5550 SCTP_INCR_LADDR_COUNT(); 5551 bzero(wi, sizeof(*wi)); 5552 wi->ifa = ifa; 5553 wi->action = SCTP_SET_PRIM_ADDR; 5554 atomic_add_int(&ifa->refcount, 1); 5555 5556 /* Now add it to the work queue */ 5557 SCTP_IPI_ITERATOR_WQ_LOCK(); 5558 /* 5559 * Should this really be a tailq? As it is we will process the 5560 * newest first :-0 5561 */ 5562 LIST_INSERT_HEAD(&sctppcbinfo.addr_wq, wi, sctp_nxt_addr); 5563 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 5564 (struct sctp_inpcb *)NULL, 5565 (struct sctp_tcb *)NULL, 5566 (struct sctp_nets *)NULL); 5567 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 5568 return (0); 5569 } 5570 5571 5572 5573 5574 int 5575 sctp_soreceive(so, psa, uio, mp0, controlp, flagsp) 5576 struct socket *so; 5577 struct sockaddr **psa; 5578 struct uio *uio; 5579 struct mbuf **mp0; 5580 struct mbuf **controlp; 5581 int *flagsp; 5582 { 5583 int error, fromlen; 5584 uint8_t sockbuf[256]; 5585 struct sockaddr *from; 5586 struct sctp_extrcvinfo sinfo; 5587 int filling_sinfo = 1; 5588 struct sctp_inpcb *inp; 5589 5590 inp = (struct sctp_inpcb *)so->so_pcb; 5591 /* pickup the assoc we are reading from */ 5592 if (inp == NULL) { 5593 return (EINVAL); 5594 } 5595 if ((sctp_is_feature_off(inp, 5596 SCTP_PCB_FLAGS_RECVDATAIOEVNT)) || 5597 (controlp == NULL)) { 5598 /* user does not want the sndrcv ctl */ 5599 filling_sinfo = 0; 5600 } 5601 if (psa) { 5602 from = (struct sockaddr *)sockbuf; 5603 fromlen = sizeof(sockbuf); 5604 from->sa_len = 0; 5605 } else { 5606 from = NULL; 5607 fromlen = 0; 5608 } 5609 5610 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp, 5611 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 5612 if ((controlp) && (filling_sinfo)) { 5613 /* copy back the sinfo in a CMSG format */ 5614 if (filling_sinfo) 5615 *controlp = sctp_build_ctl_nchunk(inp, 5616 (struct sctp_sndrcvinfo *)&sinfo); 5617 else 5618 *controlp = NULL; 5619 } 5620 if (psa) { 5621 /* copy back the address info */ 5622 if (from && from->sa_len) { 5623 *psa = sodupsockaddr(from, M_NOWAIT); 5624 } else { 5625 *psa = NULL; 5626 } 5627 } 5628 return (error); 5629 } 5630
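/*
 * Illustrative only, not part of this file: a rough userland sketch of how
 * the receive path above is typically consumed through the sockets-API
 * sctp_recvmsg() wrapper.  The descriptor "sd", the buffer size, and the
 * handle_record() routine are hypothetical placeholders; MSG_EOR and
 * MSG_NOTIFICATION in msg_flags correspond to the out_flags set by
 * sctp_sorecvmsg() above.
 *
 *	struct sctp_sndrcvinfo sinfo;
 *	struct sockaddr_storage from;
 *	socklen_t fromlen = sizeof(from);
 *	int msg_flags = 0;
 *	char buf[4096];
 *	ssize_t n;
 *
 *	n = sctp_recvmsg(sd, buf, sizeof(buf),
 *	    (struct sockaddr *)&from, &fromlen, &sinfo, &msg_flags);
 *	if (n > 0 && (msg_flags & MSG_NOTIFICATION) == 0 &&
 *	    (msg_flags & MSG_EOR)) {
 *		handle_record(buf, n, sinfo.sinfo_stream);
 *	}
 */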