1 /*- 2 * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <netinet/sctp_pcb.h> 38 #include <netinet/sctputil.h> 39 #include <netinet/sctp_var.h> 40 #include <netinet/sctp_sysctl.h> 41 #ifdef INET6 42 #include <netinet6/sctp6_var.h> 43 #endif 44 #include <netinet/sctp_header.h> 45 #include <netinet/sctp_output.h> 46 #include <netinet/sctp_uio.h> 47 #include <netinet/sctp_timer.h> 48 #include <netinet/sctp_crc32.h> 49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */ 50 #include <netinet/sctp_auth.h> 51 #include <netinet/sctp_asconf.h> 52 53 #define NUMBER_OF_MTU_SIZES 18 54 55 56 #ifdef SCTP_STAT_LOGGING 57 int global_sctp_cwnd_log_at = 0; 58 int global_sctp_cwnd_log_rolled = 0; 59 struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE]; 60 61 static uint32_t 62 sctp_get_time_of_event(void) 63 { 64 struct timeval now; 65 uint32_t timeval; 66 67 SCTP_GETPTIME_TIMEVAL(&now); 68 timeval = (now.tv_sec % 0x00000fff); 69 timeval <<= 20; 70 timeval |= now.tv_usec & 0xfffff; 71 return (timeval); 72 } 73 74 75 void 76 sctp_clr_stat_log(void) 77 { 78 global_sctp_cwnd_log_at = 0; 79 global_sctp_cwnd_log_rolled = 0; 80 } 81 82 83 void 84 sctp_sblog(struct sockbuf *sb, 85 struct sctp_tcb *stcb, int from, int incr) 86 { 87 int sctp_cwnd_log_at; 88 89 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 90 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 91 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 92 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SB; 93 sctp_clog[sctp_cwnd_log_at].x.sb.stcb = stcb; 94 sctp_clog[sctp_cwnd_log_at].x.sb.so_sbcc = sb->sb_cc; 95 if (stcb) 96 sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = stcb->asoc.sb_cc; 97 else 98 sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = 0; 99 sctp_clog[sctp_cwnd_log_at].x.sb.incr = incr; 100 } 101 102 void 
103 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc) 104 { 105 int sctp_cwnd_log_at; 106 107 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 108 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 109 sctp_clog[sctp_cwnd_log_at].from = 0; 110 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CLOSE; 111 sctp_clog[sctp_cwnd_log_at].x.close.inp = (void *)inp; 112 sctp_clog[sctp_cwnd_log_at].x.close.sctp_flags = inp->sctp_flags; 113 if (stcb) { 114 sctp_clog[sctp_cwnd_log_at].x.close.stcb = (void *)stcb; 115 sctp_clog[sctp_cwnd_log_at].x.close.state = (uint16_t) stcb->asoc.state; 116 } else { 117 sctp_clog[sctp_cwnd_log_at].x.close.stcb = 0; 118 sctp_clog[sctp_cwnd_log_at].x.close.state = 0; 119 } 120 sctp_clog[sctp_cwnd_log_at].x.close.loc = loc; 121 } 122 123 124 void 125 rto_logging(struct sctp_nets *net, int from) 126 { 127 int sctp_cwnd_log_at; 128 129 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 130 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 131 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 132 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RTT; 133 sctp_clog[sctp_cwnd_log_at].x.rto.net = (void *)net; 134 sctp_clog[sctp_cwnd_log_at].x.rto.rtt = net->prev_rtt; 135 } 136 137 void 138 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from) 139 { 140 int sctp_cwnd_log_at; 141 142 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 143 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 144 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 145 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM; 146 sctp_clog[sctp_cwnd_log_at].x.strlog.stcb = stcb; 147 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn; 148 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq; 149 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0; 150 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0; 151 sctp_clog[sctp_cwnd_log_at].x.strlog.strm = stream; 152 } 153 154 void 155 sctp_log_nagle_event(struct sctp_tcb *stcb, int action) 156 { 157 int sctp_cwnd_log_at; 158 159 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 160 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 161 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) action; 162 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_NAGLE; 163 sctp_clog[sctp_cwnd_log_at].x.nagle.stcb = (void *)stcb; 164 sctp_clog[sctp_cwnd_log_at].x.nagle.total_flight = stcb->asoc.total_flight; 165 sctp_clog[sctp_cwnd_log_at].x.nagle.total_in_queue = stcb->asoc.total_output_queue_size; 166 sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue; 167 sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_flight = stcb->asoc.total_flight_count; 168 } 169 170 171 void 172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from) 173 { 174 int sctp_cwnd_log_at; 175 176 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 177 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 178 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 179 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SACK; 180 sctp_clog[sctp_cwnd_log_at].x.sack.cumack = cumack; 181 sctp_clog[sctp_cwnd_log_at].x.sack.oldcumack = old_cumack; 182 sctp_clog[sctp_cwnd_log_at].x.sack.tsn = tsn; 183 sctp_clog[sctp_cwnd_log_at].x.sack.numGaps = gaps; 184 sctp_clog[sctp_cwnd_log_at].x.sack.numDups = dups; 185 } 186 187 void 188 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int 
from) 189 { 190 int sctp_cwnd_log_at; 191 192 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 193 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 194 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 195 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAP; 196 sctp_clog[sctp_cwnd_log_at].x.map.base = map; 197 sctp_clog[sctp_cwnd_log_at].x.map.cum = cum; 198 sctp_clog[sctp_cwnd_log_at].x.map.high = high; 199 } 200 201 void 202 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, 203 int from) 204 { 205 int sctp_cwnd_log_at; 206 207 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 208 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 209 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 210 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_FR; 211 sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn; 212 sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn; 213 sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn; 214 } 215 216 217 void 218 sctp_log_mb(struct mbuf *m, int from) 219 { 220 int sctp_cwnd_log_at; 221 222 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 223 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 224 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 225 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBUF; 226 sctp_clog[sctp_cwnd_log_at].x.mb.mp = m; 227 sctp_clog[sctp_cwnd_log_at].x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m)); 228 sctp_clog[sctp_cwnd_log_at].x.mb.size = (uint16_t) (SCTP_BUF_LEN(m)); 229 sctp_clog[sctp_cwnd_log_at].x.mb.data = SCTP_BUF_AT(m, 0); 230 if (SCTP_BUF_IS_EXTENDED(m)) { 231 sctp_clog[sctp_cwnd_log_at].x.mb.ext = SCTP_BUF_EXTEND_BASE(m); 232 sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m)); 233 } else { 234 sctp_clog[sctp_cwnd_log_at].x.mb.ext = 0; 235 sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = 0; 236 } 237 } 238 239 240 void 241 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, 242 int from) 243 { 244 int sctp_cwnd_log_at; 245 246 if (control == NULL) { 247 printf("Gak log of NULL?\n"); 248 return; 249 } 250 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 251 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 252 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 253 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM; 254 sctp_clog[sctp_cwnd_log_at].x.strlog.stcb = control->stcb; 255 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = control->sinfo_tsn; 256 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = control->sinfo_ssn; 257 sctp_clog[sctp_cwnd_log_at].x.strlog.strm = control->sinfo_stream; 258 if (poschk != NULL) { 259 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = poschk->sinfo_tsn; 260 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = poschk->sinfo_ssn; 261 } else { 262 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0; 263 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0; 264 } 265 } 266 267 void 268 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from) 269 { 270 int sctp_cwnd_log_at; 271 272 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 273 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 274 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 275 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CWND; 276 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net; 277 if (stcb->asoc.send_queue_cnt > 255) 278 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255; 279 else 280 
sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 281 if (stcb->asoc.stream_queue_cnt > 255) 282 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255; 283 else 284 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 285 286 if (net) { 287 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd; 288 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size; 289 sctp_clog[sctp_cwnd_log_at].x.cwnd.pseudo_cumack = net->pseudo_cumack; 290 sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack; 291 sctp_clog[sctp_cwnd_log_at].x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack; 292 } 293 if (SCTP_CWNDLOG_PRESEND == from) { 294 sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd; 295 } 296 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment; 297 } 298 299 void 300 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from) 301 { 302 int sctp_cwnd_log_at; 303 304 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 305 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 306 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 307 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_LOCK_EVENT; 308 if (inp) { 309 sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)inp->sctp_socket; 310 311 } else { 312 sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)NULL; 313 } 314 sctp_clog[sctp_cwnd_log_at].x.lock.inp = (void *)inp; 315 if (stcb) { 316 sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx); 317 } else { 318 sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = SCTP_LOCK_UNKNOWN; 319 } 320 if (inp) { 321 sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = mtx_owned(&inp->inp_mtx); 322 sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = mtx_owned(&inp->inp_create_mtx); 323 } else { 324 sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = SCTP_LOCK_UNKNOWN; 325 sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = SCTP_LOCK_UNKNOWN; 326 } 327 sctp_clog[sctp_cwnd_log_at].x.lock.info_lock = mtx_owned(&sctppcbinfo.ipi_ep_mtx); 328 if (inp->sctp_socket) { 329 sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 330 sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 331 sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx)); 332 } else { 333 sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = SCTP_LOCK_UNKNOWN; 334 sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN; 335 sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN; 336 } 337 } 338 339 void 340 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from) 341 { 342 int sctp_cwnd_log_at; 343 344 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 345 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 346 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 347 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAXBURST; 348 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net; 349 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error; 350 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size; 351 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst; 352 if (stcb->asoc.send_queue_cnt > 255) 353 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255; 354 else 355 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 356 if (stcb->asoc.stream_queue_cnt > 255) 357 
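		/* clamp to 255 so the count fits the log record's small counter field */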
sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255; 358 else 359 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 360 } 361 362 void 363 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead) 364 { 365 int sctp_cwnd_log_at; 366 367 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 368 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 369 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 370 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND; 371 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd; 372 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size; 373 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead; 374 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0; 375 } 376 377 void 378 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval) 379 { 380 int sctp_cwnd_log_at; 381 382 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 383 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 384 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 385 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND; 386 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd; 387 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size; 388 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead; 389 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval; 390 } 391 392 void 393 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt) 394 { 395 int sctp_cwnd_log_at; 396 397 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 398 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 399 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 400 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBCNT; 401 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq; 402 sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change = book; 403 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q; 404 sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt; 405 } 406 407 void 408 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d) 409 { 410 int sctp_cwnd_log_at; 411 412 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 413 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 414 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 415 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_MISC_EVENT; 416 sctp_clog[sctp_cwnd_log_at].x.misc.log1 = a; 417 sctp_clog[sctp_cwnd_log_at].x.misc.log2 = b; 418 sctp_clog[sctp_cwnd_log_at].x.misc.log3 = c; 419 sctp_clog[sctp_cwnd_log_at].x.misc.log4 = d; 420 } 421 422 void 423 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from) 424 { 425 int sctp_cwnd_log_at; 426 427 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 428 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 429 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 430 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_WAKE; 431 sctp_clog[sctp_cwnd_log_at].x.wake.stcb = (void *)stcb; 432 sctp_clog[sctp_cwnd_log_at].x.wake.wake_cnt = wake_cnt; 433 sctp_clog[sctp_cwnd_log_at].x.wake.flight = stcb->asoc.total_flight_count; 434 sctp_clog[sctp_cwnd_log_at].x.wake.send_q = stcb->asoc.send_queue_cnt; 435 sctp_clog[sctp_cwnd_log_at].x.wake.sent_q = stcb->asoc.sent_queue_cnt; 436 437 if (stcb->asoc.stream_queue_cnt < 0xff) 438 sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = (uint8_t) 
stcb->asoc.stream_queue_cnt; 439 else 440 sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = 0xff; 441 442 if (stcb->asoc.chunks_on_out_queue < 0xff) 443 sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue; 444 else 445 sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = 0xff; 446 447 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags = 0; 448 /* set in the defered mode stuff */ 449 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) 450 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 1; 451 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) 452 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 2; 453 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) 454 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 4; 455 /* what about the sb */ 456 if (stcb->sctp_socket) { 457 struct socket *so = stcb->sctp_socket; 458 459 sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff)); 460 } else { 461 sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = 0xff; 462 } 463 } 464 465 void 466 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen) 467 { 468 int sctp_cwnd_log_at; 469 470 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 471 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 472 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 473 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_BLOCK; 474 sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size; 475 sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt); 476 sctp_clog[sctp_cwnd_log_at].x.blk.peer_rwnd = asoc->peers_rwnd; 477 sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt; 478 sctp_clog[sctp_cwnd_log_at].x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue; 479 sctp_clog[sctp_cwnd_log_at].x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024); 480 sctp_clog[sctp_cwnd_log_at].x.blk.sndlen = sendlen; 481 } 482 483 int 484 sctp_fill_stat_log(void *optval, size_t *optsize) 485 { 486 int sctp_cwnd_log_at; 487 struct sctp_cwnd_log_req *req; 488 size_t size_limit; 489 int num, i, at, cnt_out = 0; 490 491 if (*optsize < sizeof(struct sctp_cwnd_log_req)) { 492 return (EINVAL); 493 } 494 size_limit = (*optsize - sizeof(struct sctp_cwnd_log_req)); 495 if (size_limit < sizeof(struct sctp_cwnd_log)) { 496 return (EINVAL); 497 } 498 sctp_cwnd_log_at = global_sctp_cwnd_log_at; 499 req = (struct sctp_cwnd_log_req *)optval; 500 num = size_limit / sizeof(struct sctp_cwnd_log); 501 if (global_sctp_cwnd_log_rolled) { 502 req->num_in_log = SCTP_STAT_LOG_SIZE; 503 } else { 504 req->num_in_log = sctp_cwnd_log_at; 505 /* 506 * if the log has not rolled, we don't let you have old 507 * data. 508 */ 509 if (req->end_at > sctp_cwnd_log_at) { 510 req->end_at = sctp_cwnd_log_at; 511 } 512 } 513 if ((num < SCTP_STAT_LOG_SIZE) && 514 ((global_sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) { 515 /* we can't return all of it */ 516 if (((req->start_at == 0) && (req->end_at == 0)) || 517 (req->start_at >= SCTP_STAT_LOG_SIZE) || 518 (req->end_at >= SCTP_STAT_LOG_SIZE)) { 519 /* No user request or user is wacked. 
*/ 520 req->num_ret = num; 521 req->end_at = sctp_cwnd_log_at - 1; 522 if ((sctp_cwnd_log_at - num) < 0) { 523 int cc; 524 525 cc = num - sctp_cwnd_log_at; 526 req->start_at = SCTP_STAT_LOG_SIZE - cc; 527 } else { 528 req->start_at = sctp_cwnd_log_at - num; 529 } 530 } else { 531 /* a user request */ 532 int cc; 533 534 if (req->start_at > req->end_at) { 535 cc = (SCTP_STAT_LOG_SIZE - req->start_at) + 536 (req->end_at + 1); 537 } else { 538 539 cc = (req->end_at - req->start_at) + 1; 540 } 541 if (cc < num) { 542 num = cc; 543 } 544 req->num_ret = num; 545 } 546 } else { 547 /* We can return all of it */ 548 req->start_at = 0; 549 req->end_at = sctp_cwnd_log_at - 1; 550 req->num_ret = sctp_cwnd_log_at; 551 } 552 #ifdef INVARIANTS 553 if (req->num_ret > num) { 554 panic("Bad statlog get?"); 555 } 556 #endif 557 for (i = 0, at = req->start_at; i < req->num_ret; i++) { 558 req->log[i] = sctp_clog[at]; 559 cnt_out++; 560 at++; 561 if (at >= SCTP_STAT_LOG_SIZE) 562 at = 0; 563 } 564 *optsize = (cnt_out * sizeof(struct sctp_cwnd_log)) + sizeof(struct sctp_cwnd_log_req); 565 return (0); 566 } 567 568 #endif 569 570 #ifdef SCTP_AUDITING_ENABLED 571 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2]; 572 static int sctp_audit_indx = 0; 573 574 static 575 void 576 sctp_print_audit_report(void) 577 { 578 int i; 579 int cnt; 580 581 cnt = 0; 582 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) { 583 if ((sctp_audit_data[i][0] == 0xe0) && 584 (sctp_audit_data[i][1] == 0x01)) { 585 cnt = 0; 586 printf("\n"); 587 } else if (sctp_audit_data[i][0] == 0xf0) { 588 cnt = 0; 589 printf("\n"); 590 } else if ((sctp_audit_data[i][0] == 0xc0) && 591 (sctp_audit_data[i][1] == 0x01)) { 592 printf("\n"); 593 cnt = 0; 594 } 595 printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 596 (uint32_t) sctp_audit_data[i][1]); 597 cnt++; 598 if ((cnt % 14) == 0) 599 printf("\n"); 600 } 601 for (i = 0; i < sctp_audit_indx; i++) { 602 if ((sctp_audit_data[i][0] == 0xe0) && 603 (sctp_audit_data[i][1] == 0x01)) { 604 cnt = 0; 605 printf("\n"); 606 } else if (sctp_audit_data[i][0] == 0xf0) { 607 cnt = 0; 608 printf("\n"); 609 } else if ((sctp_audit_data[i][0] == 0xc0) && 610 (sctp_audit_data[i][1] == 0x01)) { 611 printf("\n"); 612 cnt = 0; 613 } 614 printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 615 (uint32_t) sctp_audit_data[i][1]); 616 cnt++; 617 if ((cnt % 14) == 0) 618 printf("\n"); 619 } 620 printf("\n"); 621 } 622 623 void 624 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 625 struct sctp_nets *net) 626 { 627 int resend_cnt, tot_out, rep, tot_book_cnt; 628 struct sctp_nets *lnet; 629 struct sctp_tmit_chunk *chk; 630 631 sctp_audit_data[sctp_audit_indx][0] = 0xAA; 632 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from; 633 sctp_audit_indx++; 634 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 635 sctp_audit_indx = 0; 636 } 637 if (inp == NULL) { 638 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 639 sctp_audit_data[sctp_audit_indx][1] = 0x01; 640 sctp_audit_indx++; 641 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 642 sctp_audit_indx = 0; 643 } 644 return; 645 } 646 if (stcb == NULL) { 647 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 648 sctp_audit_data[sctp_audit_indx][1] = 0x02; 649 sctp_audit_indx++; 650 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 651 sctp_audit_indx = 0; 652 } 653 return; 654 } 655 sctp_audit_data[sctp_audit_indx][0] = 0xA1; 656 sctp_audit_data[sctp_audit_indx][1] = 657 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 658 sctp_audit_indx++; 659 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 
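		/* the audit trail is a ring: wrap the index back to the start */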
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		printf("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		printf("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		printf("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		printf("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				printf("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size, tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif
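/*
 * Illustrative sketch (not part of the original file): how callers in this
 * file typically use the audit helpers above.  sctp_audit_log() drops a
 * two-byte marker into the ring and sctp_auditing() cross-checks the
 * association's retransmit and flight bookkeeping; sctp_timeout_handler()
 * below brackets its work with this same pattern using the 0xF0/0xF1
 * markers.  The wrapper name and the "event_code" parameter are
 * hypothetical.
 */
#ifdef SCTP_AUDITING_ENABLED
static __inline void
sctp_audit_checkpoint(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint8_t event_code)
{
	sctp_audit_log(0xF0, event_code);	/* record an entry marker */
	sctp_auditing(3, inp, stcb, net);	/* verify queue/flight counters */
}
#endif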
/*
 * a list of sizes based on typical mtu's, used only if next hop size not
 * returned.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
	SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
		SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
	}
}

int
find_next_best_mtu(int totsz)
{
	int i, prefer;

	/*
	 * if we are in here we must find the next best fit based on the
	 * size of the datagram that failed to be sent.
	 */
	prefer = 0;
	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
		if (totsz < sctp_mtu_sizes[i]) {
			prefer = i - 1;
			if (prefer < 0)
				prefer = 0;
			break;
		}
	}
	return (sctp_mtu_sizes[prefer]);
}

void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use MD5/SHA-1 to hash our good random numbers and our
	 * counter. The result becomes our good random numbers and we then
	 * set up to give these out. Note that we do no locking to protect
	 * this. This is ok, since if competing callers get in here we will
	 * just get more gobbledygook in the random store, which is what we
	 * want. There is a danger that two callers will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
	m->store_at = 0;
	sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *m)
{
	/*
	 * A true implementation should use a random selection process to
	 * get the initial stream sequence number, using RFC 1750 as a good
	 * guideline.
	 */
	uint32_t x, *xp;
	uint8_t *p;

	if (m->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = m->initial_sequence_debug;
		m->initial_sequence_debug++;
		return (ret);
	}
	if ((m->store_at + sizeof(u_long)) > SCTP_SIGNATURE_SIZE) {
		/* Refill the random store */
		sctp_fill_random_store(m);
	}
	p = &m->random_store[(int)m->store_at];
	xp = (uint32_t *) p;
	x = *xp;
	m->store_at += sizeof(uint32_t);
	return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *m)
{
	u_long x, not_done;
	struct timeval now;

	SCTP_GETTIME_TIMEVAL(&now);
	not_done = 1;
	while (not_done) {
		x = sctp_select_initial_TSN(&m->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (sctp_is_vtag_good(m, x, &now)) {
			not_done = 0;
		}
	}
	return (x);
}

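/*
 * Note on the helpers above: sctp_select_initial_TSN() hands out 4 bytes
 * at a time from the HMAC-generated random store, and refills it via
 * sctp_fill_random_store() once fewer than sizeof(u_long) bytes remain.
 * sctp_select_a_tag() draws from the same pool, skips 0, and keeps drawing
 * until sctp_is_vtag_good() accepts the candidate verification tag.
 */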
int
sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
    int for_a_init, uint32_t override_tag, uint32_t vrf_id)
{
	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero.
	 */

	/*
	 * Up front, select what scoping to apply on the addresses I tell my
	 * peer. Not sure what to do with these right now; we will need to
	 * come up with a way to set them. We may need to pass them through
	 * from the caller in the sctp_aloc_assoc() function.
	 */
	int i;

	/* init all variables to a known value. */
	asoc->state = SCTP_STATE_INUSE;
	asoc->max_burst = m->sctp_ep.max_burst;
	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = m->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = (uint8_t) sctp_cmt_on_off;
#ifdef INET
	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
#else
	asoc->default_tos = 0;
#endif

#ifdef INET6
	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
#else
	asoc->default_flowlabel = 0;
#endif
	if (override_tag) {
		struct timeval now;

		SCTP_GETTIME_TIMEVAL(&now);
		if (sctp_is_vtag_good(m, override_tag, &now)) {
			asoc->my_vtag = override_tag;
		} else {
			return (ENOMEM);
		}

	} else {
		asoc->my_vtag = sctp_select_a_tag(m);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(m);
	asoc->peer_vtag_nonce = sctp_select_a_tag(m);
	asoc->vrf_id = vrf_id;

	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
		asoc->hb_is_disabled = 1;
	else
		asoc->hb_is_disabled = 0;

	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	asoc->assoc_id = asoc->my_vtag;
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&m->sctp_ep);
	/* we are optimistic here */
	asoc->peer_supports_pktdrop = 1;

	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_data_came_from = NULL;

	/* This will need to be adjusted */
	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = m->sctp_ep.initial_rto;

	asoc->max_init_times = m->sctp_ep.max_init_times;
	asoc->max_send_times = m->sctp_ep.max_send_times;
	asoc->def_net_failure = m->sctp_ep.def_net_failure;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	/* ECN Nonce initialization */
	asoc->context = m->sctp_context;
	asoc->def_send = m->def_send;
	asoc->ecn_nonce_allowed = 0;
	asoc->receiver_nonce_sum = 1;
	asoc->nonce_sum_expect_base = 1;
	asoc->nonce_sum_check = 1;
	asoc->nonce_resync_tsn = 0;
	asoc->nonce_wait_for_ecne = 0;
	asoc->nonce_wait_tsn = 0;
	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		struct in6pcb *inp6;

		/* It's a V6 socket */
		inp6 = (struct in6pcb *)m;
		asoc->ipv6_addr_legal = 1;
		/* Now look at the binding flag to see if V4 will be legal */
		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
			asoc->ipv4_addr_legal = 1;
		} else {
			/* V4 addresses are NOT legal on the association */
			asoc->ipv4_addr_legal = 0;
		}
	} else {
		/* It's a V4 socket, no V6 */
		asoc->ipv4_addr_legal = 1;
		asoc->ipv6_addr_legal = 0;
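		/*
		 * Net effect of the branch above: a V6-bound socket may
		 * carry both address families unless IPV6_V6ONLY is set,
		 * while a plain V4 socket never allows V6 addresses on the
		 * association.
		 */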
1022 } 1023 1024 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND); 1025 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket); 1026 1027 asoc->smallest_mtu = m->sctp_frag_point; 1028 asoc->minrto = m->sctp_ep.sctp_minrto; 1029 asoc->maxrto = m->sctp_ep.sctp_maxrto; 1030 1031 asoc->locked_on_sending = NULL; 1032 asoc->stream_locked_on = 0; 1033 asoc->ecn_echo_cnt_onq = 0; 1034 asoc->stream_locked = 0; 1035 1036 asoc->send_sack = 1; 1037 1038 LIST_INIT(&asoc->sctp_restricted_addrs); 1039 1040 TAILQ_INIT(&asoc->nets); 1041 TAILQ_INIT(&asoc->pending_reply_queue); 1042 asoc->last_asconf_ack_sent = NULL; 1043 /* Setup to fill the hb random cache at first HB */ 1044 asoc->hb_random_idx = 4; 1045 1046 asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time; 1047 1048 /* 1049 * Now the stream parameters, here we allocate space for all streams 1050 * that we request by default. 1051 */ 1052 asoc->streamoutcnt = asoc->pre_open_streams = 1053 m->sctp_ep.pre_open_stream_count; 1054 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *, 1055 asoc->streamoutcnt * sizeof(struct sctp_stream_out), 1056 "StreamsOut"); 1057 if (asoc->strmout == NULL) { 1058 /* big trouble no memory */ 1059 return (ENOMEM); 1060 } 1061 for (i = 0; i < asoc->streamoutcnt; i++) { 1062 /* 1063 * inbound side must be set to 0xffff, also NOTE when we get 1064 * the INIT-ACK back (for INIT sender) we MUST reduce the 1065 * count (streamoutcnt) but first check if we sent to any of 1066 * the upper streams that were dropped (if some were). Those 1067 * that were dropped must be notified to the upper layer as 1068 * failed to send. 1069 */ 1070 asoc->strmout[i].next_sequence_sent = 0x0; 1071 TAILQ_INIT(&asoc->strmout[i].outqueue); 1072 asoc->strmout[i].stream_no = i; 1073 asoc->strmout[i].last_msg_incomplete = 0; 1074 asoc->strmout[i].next_spoke.tqe_next = 0; 1075 asoc->strmout[i].next_spoke.tqe_prev = 0; 1076 } 1077 /* Now the mapping array */ 1078 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY; 1079 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size, 1080 "MappingArray"); 1081 if (asoc->mapping_array == NULL) { 1082 SCTP_FREE(asoc->strmout); 1083 return (ENOMEM); 1084 } 1085 memset(asoc->mapping_array, 0, asoc->mapping_array_size); 1086 /* Now the init of the other outqueues */ 1087 TAILQ_INIT(&asoc->free_chunks); 1088 TAILQ_INIT(&asoc->free_strmoq); 1089 TAILQ_INIT(&asoc->out_wheel); 1090 TAILQ_INIT(&asoc->control_send_queue); 1091 TAILQ_INIT(&asoc->send_queue); 1092 TAILQ_INIT(&asoc->sent_queue); 1093 TAILQ_INIT(&asoc->reasmqueue); 1094 TAILQ_INIT(&asoc->resetHead); 1095 asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome; 1096 TAILQ_INIT(&asoc->asconf_queue); 1097 /* authentication fields */ 1098 asoc->authinfo.random = NULL; 1099 asoc->authinfo.assoc_key = NULL; 1100 asoc->authinfo.assoc_keyid = 0; 1101 asoc->authinfo.recv_key = NULL; 1102 asoc->authinfo.recv_keyid = 0; 1103 LIST_INIT(&asoc->shared_keys); 1104 asoc->marked_retrans = 0; 1105 asoc->timoinit = 0; 1106 asoc->timodata = 0; 1107 asoc->timosack = 0; 1108 asoc->timoshutdown = 0; 1109 asoc->timoheartbeat = 0; 1110 asoc->timocookie = 0; 1111 asoc->timoshutdownack = 0; 1112 SCTP_GETTIME_TIMEVAL(&asoc->start_time); 1113 SCTP_GETTIME_TIMEVAL(&asoc->discontinuity_time); 1114 1115 return (0); 1116 } 1117 1118 int 1119 sctp_expand_mapping_array(struct sctp_association *asoc) 1120 { 1121 /* mapping array needs to grow */ 1122 uint8_t *new_array; 1123 uint16_t new_size; 1124 1125 new_size = asoc->mapping_array_size + 
SCTP_MAPPING_ARRAY_INCR; 1126 SCTP_MALLOC(new_array, uint8_t *, new_size, "MappingArray"); 1127 if (new_array == NULL) { 1128 /* can't get more, forget it */ 1129 printf("No memory for expansion of SCTP mapping array %d\n", 1130 new_size); 1131 return (-1); 1132 } 1133 memset(new_array, 0, new_size); 1134 memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size); 1135 SCTP_FREE(asoc->mapping_array); 1136 asoc->mapping_array = new_array; 1137 asoc->mapping_array_size = new_size; 1138 return (0); 1139 } 1140 1141 #if defined(SCTP_USE_THREAD_BASED_ITERATOR) 1142 static void 1143 sctp_iterator_work(struct sctp_iterator *it) 1144 { 1145 int iteration_count = 0; 1146 int inp_skip = 0; 1147 1148 SCTP_ITERATOR_LOCK(); 1149 if (it->inp) 1150 SCTP_INP_DECR_REF(it->inp); 1151 1152 if (it->inp == NULL) { 1153 /* iterator is complete */ 1154 done_with_iterator: 1155 SCTP_ITERATOR_UNLOCK(); 1156 if (it->function_atend != NULL) { 1157 (*it->function_atend) (it->pointer, it->val); 1158 } 1159 SCTP_FREE(it); 1160 return; 1161 } 1162 select_a_new_ep: 1163 SCTP_INP_WLOCK(it->inp); 1164 while (((it->pcb_flags) && 1165 ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) || 1166 ((it->pcb_features) && 1167 ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) { 1168 /* endpoint flags or features don't match, so keep looking */ 1169 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1170 SCTP_INP_WUNLOCK(it->inp); 1171 goto done_with_iterator; 1172 } 1173 SCTP_INP_WUNLOCK(it->inp); 1174 it->inp = LIST_NEXT(it->inp, sctp_list); 1175 if (it->inp == NULL) { 1176 goto done_with_iterator; 1177 } 1178 SCTP_INP_WLOCK(it->inp); 1179 } 1180 1181 /* mark the current iterator on the endpoint */ 1182 it->inp->inp_starting_point_for_iterator = it; 1183 SCTP_INP_WUNLOCK(it->inp); 1184 SCTP_INP_RLOCK(it->inp); 1185 1186 /* now go through each assoc which is in the desired state */ 1187 if (it->done_current_ep == 0) { 1188 if (it->function_inp != NULL) 1189 inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val); 1190 it->done_current_ep = 1; 1191 } 1192 if (it->stcb == NULL) { 1193 /* run the per instance function */ 1194 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list); 1195 } 1196 if ((inp_skip) || it->stcb == NULL) { 1197 if (it->function_inp_end != NULL) { 1198 inp_skip = (*it->function_inp_end) (it->inp, 1199 it->pointer, 1200 it->val); 1201 } 1202 SCTP_INP_RUNLOCK(it->inp); 1203 goto no_stcb; 1204 } 1205 if ((it->stcb) && 1206 (it->stcb->asoc.stcb_starting_point_for_iterator == it)) { 1207 it->stcb->asoc.stcb_starting_point_for_iterator = NULL; 1208 } 1209 while (it->stcb) { 1210 SCTP_TCB_LOCK(it->stcb); 1211 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) { 1212 /* not in the right state... 
keep looking */ 1213 SCTP_TCB_UNLOCK(it->stcb); 1214 goto next_assoc; 1215 } 1216 /* mark the current iterator on the assoc */ 1217 it->stcb->asoc.stcb_starting_point_for_iterator = it; 1218 /* see if we have limited out the iterator loop */ 1219 iteration_count++; 1220 if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) { 1221 /* Pause to let others grab the lock */ 1222 atomic_add_int(&it->stcb->asoc.refcnt, 1); 1223 SCTP_TCB_UNLOCK(it->stcb); 1224 SCTP_INP_RUNLOCK(it->inp); 1225 SCTP_ITERATOR_UNLOCK(); 1226 SCTP_ITERATOR_LOCK(); 1227 SCTP_INP_RLOCK(it->inp); 1228 SCTP_TCB_LOCK(it->stcb); 1229 atomic_add_int(&it->stcb->asoc.refcnt, -1); 1230 iteration_count = 0; 1231 } 1232 /* run function on this one */ 1233 (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val); 1234 1235 /* 1236 * we lie here, it really needs to have its own type but 1237 * first I must verify that this won't effect things :-0 1238 */ 1239 if (it->no_chunk_output == 0) 1240 sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3); 1241 1242 SCTP_TCB_UNLOCK(it->stcb); 1243 next_assoc: 1244 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist); 1245 if (it->stcb == NULL) { 1246 /* Run last function */ 1247 if (it->function_inp_end != NULL) { 1248 inp_skip = (*it->function_inp_end) (it->inp, 1249 it->pointer, 1250 it->val); 1251 } 1252 } 1253 } 1254 SCTP_INP_RUNLOCK(it->inp); 1255 no_stcb: 1256 /* done with all assocs on this endpoint, move on to next endpoint */ 1257 it->done_current_ep = 0; 1258 SCTP_INP_WLOCK(it->inp); 1259 it->inp->inp_starting_point_for_iterator = NULL; 1260 SCTP_INP_WUNLOCK(it->inp); 1261 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1262 it->inp = NULL; 1263 } else { 1264 SCTP_INP_INFO_RLOCK(); 1265 it->inp = LIST_NEXT(it->inp, sctp_list); 1266 SCTP_INP_INFO_RUNLOCK(); 1267 } 1268 if (it->inp == NULL) { 1269 goto done_with_iterator; 1270 } 1271 goto select_a_new_ep; 1272 } 1273 1274 void 1275 sctp_iterator_worker(void) 1276 { 1277 struct sctp_iterator *it = NULL; 1278 1279 /* This function is called with the WQ lock in place */ 1280 1281 sctppcbinfo.iterator_running = 1; 1282 again: 1283 it = TAILQ_FIRST(&sctppcbinfo.iteratorhead); 1284 while (it) { 1285 /* now lets work on this one */ 1286 TAILQ_REMOVE(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr); 1287 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1288 sctp_iterator_work(it); 1289 SCTP_IPI_ITERATOR_WQ_LOCK(); 1290 it = TAILQ_FIRST(&sctppcbinfo.iteratorhead); 1291 } 1292 if (TAILQ_FIRST(&sctppcbinfo.iteratorhead)) { 1293 goto again; 1294 } 1295 sctppcbinfo.iterator_running = 0; 1296 return; 1297 } 1298 1299 #endif 1300 1301 1302 static void 1303 sctp_handle_addr_wq(void) 1304 { 1305 /* deal with the ADDR wq from the rtsock calls */ 1306 struct sctp_laddr *wi; 1307 struct sctp_asconf_iterator *asc; 1308 1309 SCTP_MALLOC(asc, struct sctp_asconf_iterator *, 1310 sizeof(struct sctp_asconf_iterator), "SCTP_ASCONF_ITERATOR"); 1311 if (asc == NULL) { 1312 /* Try later, no memory */ 1313 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 1314 (struct sctp_inpcb *)NULL, 1315 (struct sctp_tcb *)NULL, 1316 (struct sctp_nets *)NULL); 1317 return; 1318 } 1319 LIST_INIT(&asc->list_of_work); 1320 asc->cnt = 0; 1321 SCTP_IPI_ITERATOR_WQ_LOCK(); 1322 wi = LIST_FIRST(&sctppcbinfo.addr_wq); 1323 while (wi != NULL) { 1324 LIST_REMOVE(wi, sctp_nxt_addr); 1325 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr); 1326 asc->cnt++; 1327 wi = LIST_FIRST(&sctppcbinfo.addr_wq); 1328 } 1329 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1330 if (asc->cnt == 0) { 1331 SCTP_FREE(asc); 1332 } else { 1333 
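		/*
		 * Hand the collected address work items to the iterator:
		 * sctp_iterator_ep() and sctp_iterator_stcb() are invoked
		 * for each matching endpoint and association, and
		 * sctp_iterator_end() runs once the walk completes.
		 */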
sctp_initiate_iterator(sctp_iterator_ep, 1334 sctp_iterator_stcb, 1335 NULL, /* No ep end for boundall */ 1336 SCTP_PCB_FLAGS_BOUNDALL, 1337 SCTP_PCB_ANY_FEATURES, 1338 SCTP_ASOC_ANY_STATE, (void *)asc, 0, 1339 sctp_iterator_end, NULL, 0); 1340 } 1341 1342 } 1343 1344 void 1345 sctp_timeout_handler(void *t) 1346 { 1347 struct sctp_inpcb *inp; 1348 struct sctp_tcb *stcb; 1349 struct sctp_nets *net; 1350 struct sctp_timer *tmr; 1351 int did_output; 1352 struct sctp_iterator *it = NULL; 1353 1354 1355 tmr = (struct sctp_timer *)t; 1356 inp = (struct sctp_inpcb *)tmr->ep; 1357 stcb = (struct sctp_tcb *)tmr->tcb; 1358 net = (struct sctp_nets *)tmr->net; 1359 did_output = 1; 1360 1361 #ifdef SCTP_AUDITING_ENABLED 1362 sctp_audit_log(0xF0, (uint8_t) tmr->type); 1363 sctp_auditing(3, inp, stcb, net); 1364 #endif 1365 1366 /* sanity checks... */ 1367 if (tmr->self != (void *)tmr) { 1368 /* 1369 * printf("Stale SCTP timer fired (%p), ignoring...\n", 1370 * tmr); 1371 */ 1372 return; 1373 } 1374 tmr->stopped_from = 0xa001; 1375 if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) { 1376 /* 1377 * printf("SCTP timer fired with invalid type: 0x%x\n", 1378 * tmr->type); 1379 */ 1380 return; 1381 } 1382 tmr->stopped_from = 0xa002; 1383 if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) { 1384 return; 1385 } 1386 /* if this is an iterator timeout, get the struct and clear inp */ 1387 tmr->stopped_from = 0xa003; 1388 if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) { 1389 it = (struct sctp_iterator *)inp; 1390 inp = NULL; 1391 } 1392 if (inp) { 1393 SCTP_INP_INCR_REF(inp); 1394 if ((inp->sctp_socket == 0) && 1395 ((tmr->type != SCTP_TIMER_TYPE_INPKILL) && 1396 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) && 1397 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) && 1398 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) && 1399 (tmr->type != SCTP_TIMER_TYPE_ASOCKILL)) 1400 ) { 1401 SCTP_INP_DECR_REF(inp); 1402 return; 1403 } 1404 } 1405 tmr->stopped_from = 0xa004; 1406 if (stcb) { 1407 atomic_add_int(&stcb->asoc.refcnt, 1); 1408 if (stcb->asoc.state == 0) { 1409 atomic_add_int(&stcb->asoc.refcnt, -1); 1410 if (inp) { 1411 SCTP_INP_DECR_REF(inp); 1412 } 1413 return; 1414 } 1415 } 1416 tmr->stopped_from = 0xa005; 1417 #ifdef SCTP_DEBUG 1418 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1419 printf("Timer type %d goes off\n", tmr->type); 1420 } 1421 #endif /* SCTP_DEBUG */ 1422 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1423 if (inp) { 1424 SCTP_INP_DECR_REF(inp); 1425 } 1426 return; 1427 } 1428 tmr->stopped_from = 0xa006; 1429 1430 if (stcb) { 1431 SCTP_TCB_LOCK(stcb); 1432 atomic_add_int(&stcb->asoc.refcnt, -1); 1433 } 1434 /* record in stopped what t-o occured */ 1435 tmr->stopped_from = tmr->type; 1436 1437 /* mark as being serviced now */ 1438 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 1439 /* 1440 * Callout has been rescheduled. 1441 */ 1442 goto get_out; 1443 } 1444 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1445 /* 1446 * Not active, so no action. 
1447 */ 1448 goto get_out; 1449 } 1450 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer); 1451 1452 /* call the handler for the appropriate timer type */ 1453 switch (tmr->type) { 1454 case SCTP_TIMER_TYPE_ADDR_WQ: 1455 sctp_handle_addr_wq(); 1456 break; 1457 case SCTP_TIMER_TYPE_ITERATOR: 1458 SCTP_STAT_INCR(sctps_timoiterator); 1459 sctp_iterator_timer(it); 1460 break; 1461 case SCTP_TIMER_TYPE_SEND: 1462 SCTP_STAT_INCR(sctps_timodata); 1463 stcb->asoc.timodata++; 1464 stcb->asoc.num_send_timers_up--; 1465 if (stcb->asoc.num_send_timers_up < 0) { 1466 stcb->asoc.num_send_timers_up = 0; 1467 } 1468 if (sctp_t3rxt_timer(inp, stcb, net)) { 1469 /* no need to unlock on tcb its gone */ 1470 1471 goto out_decr; 1472 } 1473 #ifdef SCTP_AUDITING_ENABLED 1474 sctp_auditing(4, inp, stcb, net); 1475 #endif 1476 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3); 1477 if ((stcb->asoc.num_send_timers_up == 0) && 1478 (stcb->asoc.sent_queue_cnt > 0) 1479 ) { 1480 struct sctp_tmit_chunk *chk; 1481 1482 /* 1483 * safeguard. If there on some on the sent queue 1484 * somewhere but no timers running something is 1485 * wrong... so we start a timer on the first chunk 1486 * on the send queue on whatever net it is sent to. 1487 */ 1488 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 1489 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, 1490 chk->whoTo); 1491 } 1492 break; 1493 case SCTP_TIMER_TYPE_INIT: 1494 SCTP_STAT_INCR(sctps_timoinit); 1495 stcb->asoc.timoinit++; 1496 if (sctp_t1init_timer(inp, stcb, net)) { 1497 /* no need to unlock on tcb its gone */ 1498 goto out_decr; 1499 } 1500 /* We do output but not here */ 1501 did_output = 0; 1502 break; 1503 case SCTP_TIMER_TYPE_RECV: 1504 SCTP_STAT_INCR(sctps_timosack); 1505 stcb->asoc.timosack++; 1506 sctp_send_sack(stcb); 1507 #ifdef SCTP_AUDITING_ENABLED 1508 sctp_auditing(4, inp, stcb, net); 1509 #endif 1510 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR); 1511 break; 1512 case SCTP_TIMER_TYPE_SHUTDOWN: 1513 if (sctp_shutdown_timer(inp, stcb, net)) { 1514 /* no need to unlock on tcb its gone */ 1515 goto out_decr; 1516 } 1517 SCTP_STAT_INCR(sctps_timoshutdown); 1518 stcb->asoc.timoshutdown++; 1519 #ifdef SCTP_AUDITING_ENABLED 1520 sctp_auditing(4, inp, stcb, net); 1521 #endif 1522 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR); 1523 break; 1524 case SCTP_TIMER_TYPE_HEARTBEAT: 1525 { 1526 struct sctp_nets *net; 1527 int cnt_of_unconf = 0; 1528 1529 SCTP_STAT_INCR(sctps_timoheartbeat); 1530 stcb->asoc.timoheartbeat++; 1531 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1532 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 1533 (net->dest_state & SCTP_ADDR_REACHABLE)) { 1534 cnt_of_unconf++; 1535 } 1536 } 1537 if (cnt_of_unconf == 0) { 1538 if (sctp_heartbeat_timer(inp, stcb, net, cnt_of_unconf)) { 1539 /* no need to unlock on tcb its gone */ 1540 goto out_decr; 1541 } 1542 } 1543 #ifdef SCTP_AUDITING_ENABLED 1544 sctp_auditing(4, inp, stcb, net); 1545 #endif 1546 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, 1547 stcb, net); 1548 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR); 1549 } 1550 break; 1551 case SCTP_TIMER_TYPE_COOKIE: 1552 if (sctp_cookie_timer(inp, stcb, net)) { 1553 /* no need to unlock on tcb its gone */ 1554 goto out_decr; 1555 } 1556 SCTP_STAT_INCR(sctps_timocookie); 1557 stcb->asoc.timocookie++; 1558 #ifdef SCTP_AUDITING_ENABLED 1559 sctp_auditing(4, inp, stcb, net); 1560 #endif 1561 /* 1562 * We consider T3 and Cookie timer pretty much the same with 1563 * respect to where from in chunk_output. 
1564 */ 1565 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3); 1566 break; 1567 case SCTP_TIMER_TYPE_NEWCOOKIE: 1568 { 1569 struct timeval tv; 1570 int i, secret; 1571 1572 SCTP_STAT_INCR(sctps_timosecret); 1573 SCTP_GETTIME_TIMEVAL(&tv); 1574 SCTP_INP_WLOCK(inp); 1575 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1576 inp->sctp_ep.last_secret_number = 1577 inp->sctp_ep.current_secret_number; 1578 inp->sctp_ep.current_secret_number++; 1579 if (inp->sctp_ep.current_secret_number >= 1580 SCTP_HOW_MANY_SECRETS) { 1581 inp->sctp_ep.current_secret_number = 0; 1582 } 1583 secret = (int)inp->sctp_ep.current_secret_number; 1584 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1585 inp->sctp_ep.secret_key[secret][i] = 1586 sctp_select_initial_TSN(&inp->sctp_ep); 1587 } 1588 SCTP_INP_WUNLOCK(inp); 1589 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net); 1590 } 1591 did_output = 0; 1592 break; 1593 case SCTP_TIMER_TYPE_PATHMTURAISE: 1594 SCTP_STAT_INCR(sctps_timopathmtu); 1595 sctp_pathmtu_timer(inp, stcb, net); 1596 did_output = 0; 1597 break; 1598 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1599 if (sctp_shutdownack_timer(inp, stcb, net)) { 1600 /* no need to unlock on tcb its gone */ 1601 goto out_decr; 1602 } 1603 SCTP_STAT_INCR(sctps_timoshutdownack); 1604 stcb->asoc.timoshutdownack++; 1605 #ifdef SCTP_AUDITING_ENABLED 1606 sctp_auditing(4, inp, stcb, net); 1607 #endif 1608 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR); 1609 break; 1610 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1611 SCTP_STAT_INCR(sctps_timoshutdownguard); 1612 sctp_abort_an_association(inp, stcb, 1613 SCTP_SHUTDOWN_GUARD_EXPIRES, NULL); 1614 /* no need to unlock on tcb its gone */ 1615 goto out_decr; 1616 break; 1617 1618 case SCTP_TIMER_TYPE_STRRESET: 1619 if (sctp_strreset_timer(inp, stcb, net)) { 1620 /* no need to unlock on tcb its gone */ 1621 goto out_decr; 1622 } 1623 SCTP_STAT_INCR(sctps_timostrmrst); 1624 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR); 1625 break; 1626 case SCTP_TIMER_TYPE_EARLYFR: 1627 /* Need to do FR of things for net */ 1628 SCTP_STAT_INCR(sctps_timoearlyfr); 1629 sctp_early_fr_timer(inp, stcb, net); 1630 break; 1631 case SCTP_TIMER_TYPE_ASCONF: 1632 if (sctp_asconf_timer(inp, stcb, net)) { 1633 /* no need to unlock on tcb its gone */ 1634 goto out_decr; 1635 } 1636 SCTP_STAT_INCR(sctps_timoasconf); 1637 #ifdef SCTP_AUDITING_ENABLED 1638 sctp_auditing(4, inp, stcb, net); 1639 #endif 1640 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR); 1641 break; 1642 1643 case SCTP_TIMER_TYPE_AUTOCLOSE: 1644 SCTP_STAT_INCR(sctps_timoautoclose); 1645 sctp_autoclose_timer(inp, stcb, net); 1646 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR); 1647 did_output = 0; 1648 break; 1649 case SCTP_TIMER_TYPE_ASOCKILL: 1650 SCTP_STAT_INCR(sctps_timoassockill); 1651 /* Can we free it yet? 
*/ 1652 SCTP_INP_DECR_REF(inp); 1653 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 1654 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 1655 /* 1656 * free asoc, always unlocks (or destroy's) so prevent 1657 * duplicate unlock or unlock of a free mtx :-0 1658 */ 1659 stcb = NULL; 1660 goto out_no_decr; 1661 break; 1662 case SCTP_TIMER_TYPE_INPKILL: 1663 SCTP_STAT_INCR(sctps_timoinpkill); 1664 /* 1665 * special case, take away our increment since WE are the 1666 * killer 1667 */ 1668 SCTP_INP_DECR_REF(inp); 1669 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 1670 sctp_inpcb_free(inp, 1, 0); 1671 goto out_no_decr; 1672 break; 1673 default: 1674 #ifdef SCTP_DEBUG 1675 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1676 printf("sctp_timeout_handler:unknown timer %d\n", 1677 tmr->type); 1678 } 1679 #endif /* SCTP_DEBUG */ 1680 break; 1681 }; 1682 #ifdef SCTP_AUDITING_ENABLED 1683 sctp_audit_log(0xF1, (uint8_t) tmr->type); 1684 if (inp) 1685 sctp_auditing(5, inp, stcb, net); 1686 #endif 1687 if ((did_output) && stcb) { 1688 /* 1689 * Now we need to clean up the control chunk chain if an 1690 * ECNE is on it. It must be marked as UNSENT again so next 1691 * call will continue to send it until such time that we get 1692 * a CWR, to remove it. It is, however, less likely that we 1693 * will find a ecn echo on the chain though. 1694 */ 1695 sctp_fix_ecn_echo(&stcb->asoc); 1696 } 1697 get_out: 1698 if (stcb) { 1699 SCTP_TCB_UNLOCK(stcb); 1700 } 1701 out_decr: 1702 if (inp) { 1703 SCTP_INP_DECR_REF(inp); 1704 } 1705 out_no_decr: 1706 1707 #ifdef SCTP_DEBUG 1708 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1709 printf("Timer now complete (type %d)\n", tmr->type); 1710 } 1711 #endif /* SCTP_DEBUG */ 1712 if (inp) { 1713 } 1714 } 1715 1716 int 1717 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1718 struct sctp_nets *net) 1719 { 1720 int to_ticks; 1721 struct sctp_timer *tmr; 1722 1723 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) 1724 return (EFAULT); 1725 1726 to_ticks = 0; 1727 1728 tmr = NULL; 1729 if (stcb) { 1730 SCTP_TCB_LOCK_ASSERT(stcb); 1731 } 1732 switch (t_type) { 1733 case SCTP_TIMER_TYPE_ADDR_WQ: 1734 /* Only 1 tick away :-) */ 1735 tmr = &sctppcbinfo.addr_wq_timer; 1736 to_ticks = SCTP_ADDRESS_TICK_DELAY; 1737 break; 1738 case SCTP_TIMER_TYPE_ITERATOR: 1739 { 1740 struct sctp_iterator *it; 1741 1742 it = (struct sctp_iterator *)inp; 1743 tmr = &it->tmr; 1744 to_ticks = SCTP_ITERATOR_TICKS; 1745 } 1746 break; 1747 case SCTP_TIMER_TYPE_SEND: 1748 /* Here we use the RTO timer */ 1749 { 1750 int rto_val; 1751 1752 if ((stcb == NULL) || (net == NULL)) { 1753 return (EFAULT); 1754 } 1755 tmr = &net->rxt_timer; 1756 if (net->RTO == 0) { 1757 rto_val = stcb->asoc.initial_rto; 1758 } else { 1759 rto_val = net->RTO; 1760 } 1761 to_ticks = MSEC_TO_TICKS(rto_val); 1762 } 1763 break; 1764 case SCTP_TIMER_TYPE_INIT: 1765 /* 1766 * Here we use the INIT timer default usually about 1 1767 * minute. 1768 */ 1769 if ((stcb == NULL) || (net == NULL)) { 1770 return (EFAULT); 1771 } 1772 tmr = &net->rxt_timer; 1773 if (net->RTO == 0) { 1774 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1775 } else { 1776 to_ticks = MSEC_TO_TICKS(net->RTO); 1777 } 1778 break; 1779 case SCTP_TIMER_TYPE_RECV: 1780 /* 1781 * Here we use the Delayed-Ack timer value from the inp 1782 * ususually about 200ms. 
1783 */ 1784 if (stcb == NULL) { 1785 return (EFAULT); 1786 } 1787 tmr = &stcb->asoc.dack_timer; 1788 to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack); 1789 break; 1790 case SCTP_TIMER_TYPE_SHUTDOWN: 1791 /* Here we use the RTO of the destination. */ 1792 if ((stcb == NULL) || (net == NULL)) { 1793 return (EFAULT); 1794 } 1795 if (net->RTO == 0) { 1796 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1797 } else { 1798 to_ticks = MSEC_TO_TICKS(net->RTO); 1799 } 1800 tmr = &net->rxt_timer; 1801 break; 1802 case SCTP_TIMER_TYPE_HEARTBEAT: 1803 /* 1804 * the net is used here so that we can add in the RTO, even 1805 * though we use a different timer. We also add the HB delay 1806 * PLUS a random jitter. 1807 */ 1808 if (stcb == NULL) { 1809 return (EFAULT); 1810 } { 1811 uint32_t rndval; 1812 uint8_t this_random; 1813 int cnt_of_unconf = 0; 1814 struct sctp_nets *lnet; 1815 1816 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 1817 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) && 1818 (lnet->dest_state & SCTP_ADDR_REACHABLE)) { 1819 cnt_of_unconf++; 1820 } 1821 } 1822 if (cnt_of_unconf) { 1823 lnet = NULL; 1824 sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf); 1825 } 1826 if (stcb->asoc.hb_random_idx > 3) { 1827 rndval = sctp_select_initial_TSN(&inp->sctp_ep); 1828 memcpy(stcb->asoc.hb_random_values, &rndval, 1829 sizeof(stcb->asoc.hb_random_values)); 1830 stcb->asoc.hb_random_idx = 0; 1831 } 1832 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 1833 stcb->asoc.hb_random_idx++; 1834 stcb->asoc.hb_ect_randombit = 0; 1835 /* 1836 * this_random will be 0 - 255 ms; RTO is in ms. 1837 */ 1838 if ((stcb->asoc.hb_is_disabled) && 1839 (cnt_of_unconf == 0)) { 1840 return (0); 1841 } 1842 if (net) { 1843 struct sctp_nets *lnet; 1844 int delay; 1845 1846 delay = stcb->asoc.heart_beat_delay; 1847 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 1848 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) && 1849 ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) && 1850 (lnet->dest_state & SCTP_ADDR_REACHABLE)) { 1851 delay = 0; 1852 } 1853 } 1854 if (net->RTO == 0) { 1855 /* Never been checked */ 1856 to_ticks = this_random + stcb->asoc.initial_rto + delay; 1857 } else { 1858 /* use the measured RTO in ms */ 1859 to_ticks = delay + net->RTO + this_random; 1860 } 1861 } else { 1862 if (cnt_of_unconf) { 1863 to_ticks = this_random + stcb->asoc.initial_rto; 1864 } else { 1865 to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto; 1866 } 1867 } 1868 /* 1869 * Now we must convert the to_ticks that are now in 1870 * ms to ticks. 1871 */ 1872 to_ticks = MSEC_TO_TICKS(to_ticks); 1873 tmr = &stcb->asoc.hb_timer; 1874 } 1875 break; 1876 case SCTP_TIMER_TYPE_COOKIE: 1877 /* 1878 * Here we can use the RTO timer from the network since one 1879 * RTT has completed. If a retransmission happened then we will be 1880 * using the initial RTO value. 1881 */ 1882 if ((stcb == NULL) || (net == NULL)) { 1883 return (EFAULT); 1884 } 1885 if (net->RTO == 0) { 1886 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1887 } else { 1888 to_ticks = MSEC_TO_TICKS(net->RTO); 1889 } 1890 tmr = &net->rxt_timer; 1891 break; 1892 case SCTP_TIMER_TYPE_NEWCOOKIE: 1893 /* 1894 * nothing needed but the endpoint here, usually about 60 1895 * minutes.
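 * The signature_change timer drives the cookie-secret rotation; the timeout handler above regenerates the endpoint's secret keys and then re-arms this same timer.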
1896 */ 1897 tmr = &inp->sctp_ep.signature_change; 1898 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; 1899 break; 1900 case SCTP_TIMER_TYPE_ASOCKILL: 1901 if (stcb == NULL) { 1902 return (EFAULT); 1903 } 1904 tmr = &stcb->asoc.strreset_timer; 1905 to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT); 1906 break; 1907 case SCTP_TIMER_TYPE_INPKILL: 1908 /* 1909 * The inp is set up to die. We re-use the signature_change 1910 * timer since that has stopped and we are in the GONE 1911 * state. 1912 */ 1913 tmr = &inp->sctp_ep.signature_change; 1914 to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT); 1915 break; 1916 case SCTP_TIMER_TYPE_PATHMTURAISE: 1917 /* 1918 * Here we use the value found in the EP for PMTU, usually 1919 * about 10 minutes. 1920 */ 1921 if (stcb == NULL) { 1922 return (EFAULT); 1923 } 1924 if (net == NULL) { 1925 return (EFAULT); 1926 } 1927 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; 1928 tmr = &net->pmtu_timer; 1929 break; 1930 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1931 /* Here we use the RTO of the destination */ 1932 if ((stcb == NULL) || (net == NULL)) { 1933 return (EFAULT); 1934 } 1935 if (net->RTO == 0) { 1936 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1937 } else { 1938 to_ticks = MSEC_TO_TICKS(net->RTO); 1939 } 1940 tmr = &net->rxt_timer; 1941 break; 1942 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1943 /* 1944 * Here we use the endpoint's shutdown guard timer, usually 1945 * about 3 minutes. 1946 */ 1947 if (stcb == NULL) { 1948 return (EFAULT); 1949 } 1950 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; 1951 tmr = &stcb->asoc.shut_guard_timer; 1952 break; 1953 case SCTP_TIMER_TYPE_STRRESET: 1954 /* 1955 * Here the timer comes from the association but its value is from 1956 * the RTO. 1957 */ 1958 if ((stcb == NULL) || (net == NULL)) { 1959 return (EFAULT); 1960 } 1961 if (net->RTO == 0) { 1962 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1963 } else { 1964 to_ticks = MSEC_TO_TICKS(net->RTO); 1965 } 1966 tmr = &stcb->asoc.strreset_timer; 1967 break; 1968 1969 case SCTP_TIMER_TYPE_EARLYFR: 1970 { 1971 unsigned int msec; 1972 1973 if ((stcb == NULL) || (net == NULL)) { 1974 return (EFAULT); 1975 } 1976 if (net->flight_size > net->cwnd) { 1977 /* no need to start */ 1978 return (0); 1979 } 1980 SCTP_STAT_INCR(sctps_earlyfrstart); 1981 if (net->lastsa == 0) { 1982 /* Hmm, no RTT estimate yet? */ 1983 msec = stcb->asoc.initial_rto >> 2; 1984 } else { 1985 msec = ((net->lastsa >> 2) + net->lastsv) >> 1; 1986 } 1987 if (msec < sctp_early_fr_msec) { 1988 msec = sctp_early_fr_msec; 1989 if (msec < SCTP_MINFR_MSEC_FLOOR) { 1990 msec = SCTP_MINFR_MSEC_FLOOR; 1991 } 1992 } 1993 to_ticks = MSEC_TO_TICKS(msec); 1994 tmr = &net->fr_timer; 1995 } 1996 break; 1997 case SCTP_TIMER_TYPE_ASCONF: 1998 /* 1999 * Here the timer comes from the association but its value is from 2000 * the RTO.
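 * As with the other RTO-driven cases, a zero RTO (no measurement taken on this net yet) falls back to the association's initial_rto before the value is converted to ticks.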
2001 */ 2002 if ((stcb == NULL) || (net == NULL)) { 2003 return (EFAULT); 2004 } 2005 if (net->RTO == 0) { 2006 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2007 } else { 2008 to_ticks = MSEC_TO_TICKS(net->RTO); 2009 } 2010 tmr = &stcb->asoc.asconf_timer; 2011 break; 2012 case SCTP_TIMER_TYPE_AUTOCLOSE: 2013 if (stcb == NULL) { 2014 return (EFAULT); 2015 } 2016 if (stcb->asoc.sctp_autoclose_ticks == 0) { 2017 /* 2018 * Really an error since stcb is NOT set to 2019 * autoclose 2020 */ 2021 return (0); 2022 } 2023 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2024 tmr = &stcb->asoc.autoclose_timer; 2025 break; 2026 default: 2027 #ifdef SCTP_DEBUG 2028 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 2029 printf("sctp_timer_start:Unknown timer type %d\n", 2030 t_type); 2031 } 2032 #endif /* SCTP_DEBUG */ 2033 return (EFAULT); 2034 break; 2035 }; 2036 if ((to_ticks <= 0) || (tmr == NULL)) { 2037 #ifdef SCTP_DEBUG 2038 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 2039 printf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n", 2040 t_type, to_ticks, tmr); 2041 } 2042 #endif /* SCTP_DEBUG */ 2043 return (EFAULT); 2044 } 2045 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2046 /* 2047 * we do NOT allow you to have it already running. if it is 2048 * we leave the current one up unchanged 2049 */ 2050 return (EALREADY); 2051 } 2052 /* At this point we can proceed */ 2053 if (t_type == SCTP_TIMER_TYPE_SEND) { 2054 stcb->asoc.num_send_timers_up++; 2055 } 2056 tmr->stopped_from = 0; 2057 tmr->type = t_type; 2058 tmr->ep = (void *)inp; 2059 tmr->tcb = (void *)stcb; 2060 tmr->net = (void *)net; 2061 tmr->self = (void *)tmr; 2062 tmr->ticks = ticks; 2063 SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr); 2064 return (0); 2065 } 2066 2067 int 2068 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2069 struct sctp_nets *net, uint32_t from) 2070 { 2071 struct sctp_timer *tmr; 2072 2073 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && 2074 (inp == NULL)) 2075 return (EFAULT); 2076 2077 tmr = NULL; 2078 if (stcb) { 2079 SCTP_TCB_LOCK_ASSERT(stcb); 2080 } 2081 switch (t_type) { 2082 case SCTP_TIMER_TYPE_ADDR_WQ: 2083 tmr = &sctppcbinfo.addr_wq_timer; 2084 break; 2085 case SCTP_TIMER_TYPE_EARLYFR: 2086 if ((stcb == NULL) || (net == NULL)) { 2087 return (EFAULT); 2088 } 2089 tmr = &net->fr_timer; 2090 SCTP_STAT_INCR(sctps_earlyfrstop); 2091 break; 2092 case SCTP_TIMER_TYPE_ITERATOR: 2093 { 2094 struct sctp_iterator *it; 2095 2096 it = (struct sctp_iterator *)inp; 2097 tmr = &it->tmr; 2098 } 2099 break; 2100 case SCTP_TIMER_TYPE_SEND: 2101 if ((stcb == NULL) || (net == NULL)) { 2102 return (EFAULT); 2103 } 2104 tmr = &net->rxt_timer; 2105 break; 2106 case SCTP_TIMER_TYPE_INIT: 2107 if ((stcb == NULL) || (net == NULL)) { 2108 return (EFAULT); 2109 } 2110 tmr = &net->rxt_timer; 2111 break; 2112 case SCTP_TIMER_TYPE_RECV: 2113 if (stcb == NULL) { 2114 return (EFAULT); 2115 } 2116 tmr = &stcb->asoc.dack_timer; 2117 break; 2118 case SCTP_TIMER_TYPE_SHUTDOWN: 2119 if ((stcb == NULL) || (net == NULL)) { 2120 return (EFAULT); 2121 } 2122 tmr = &net->rxt_timer; 2123 break; 2124 case SCTP_TIMER_TYPE_HEARTBEAT: 2125 if (stcb == NULL) { 2126 return (EFAULT); 2127 } 2128 tmr = &stcb->asoc.hb_timer; 2129 break; 2130 case SCTP_TIMER_TYPE_COOKIE: 2131 if ((stcb == NULL) || (net == NULL)) { 2132 return (EFAULT); 2133 } 2134 tmr = &net->rxt_timer; 2135 break; 2136 case SCTP_TIMER_TYPE_NEWCOOKIE: 2137 /* nothing needed but the endpoint here */ 2138 tmr = &inp->sctp_ep.signature_change; 2139 
/* 2140 * We re-use the newcookie timer for the INP kill timer. We 2141 * must assure that we do not kill it by accident. 2142 */ 2143 break; 2144 case SCTP_TIMER_TYPE_ASOCKILL: 2145 /* 2146 * Stop the asoc kill timer. 2147 */ 2148 if (stcb == NULL) { 2149 return (EFAULT); 2150 } 2151 tmr = &stcb->asoc.strreset_timer; 2152 break; 2153 2154 case SCTP_TIMER_TYPE_INPKILL: 2155 /* 2156 * The inp is set up to die. We re-use the signature_change 2157 * timer since that has stopped and we are in the GONE 2158 * state. 2159 */ 2160 tmr = &inp->sctp_ep.signature_change; 2161 break; 2162 case SCTP_TIMER_TYPE_PATHMTURAISE: 2163 if ((stcb == NULL) || (net == NULL)) { 2164 return (EFAULT); 2165 } 2166 tmr = &net->pmtu_timer; 2167 break; 2168 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2169 if ((stcb == NULL) || (net == NULL)) { 2170 return (EFAULT); 2171 } 2172 tmr = &net->rxt_timer; 2173 break; 2174 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2175 if (stcb == NULL) { 2176 return (EFAULT); 2177 } 2178 tmr = &stcb->asoc.shut_guard_timer; 2179 break; 2180 case SCTP_TIMER_TYPE_STRRESET: 2181 if (stcb == NULL) { 2182 return (EFAULT); 2183 } 2184 tmr = &stcb->asoc.strreset_timer; 2185 break; 2186 case SCTP_TIMER_TYPE_ASCONF: 2187 if (stcb == NULL) { 2188 return (EFAULT); 2189 } 2190 tmr = &stcb->asoc.asconf_timer; 2191 break; 2192 case SCTP_TIMER_TYPE_AUTOCLOSE: 2193 if (stcb == NULL) { 2194 return (EFAULT); 2195 } 2196 tmr = &stcb->asoc.autoclose_timer; 2197 break; 2198 default: 2199 #ifdef SCTP_DEBUG 2200 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 2201 printf("sctp_timer_stop:Unknown timer type %d\n", 2202 t_type); 2203 } 2204 #endif /* SCTP_DEBUG */ 2205 break; 2206 }; 2207 if (tmr == NULL) { 2208 return (EFAULT); 2209 } 2210 if ((tmr->type != t_type) && tmr->type) { 2211 /* 2212 * OK, we have a timer that is under joint use, perhaps the cookie 2213 * timer sharing storage with the SEND timer. We therefore are NOT 2214 * running the timer that the caller wants stopped, so just 2215 * return. 2216 */ 2217 return (0); 2218 } 2219 if (t_type == SCTP_TIMER_TYPE_SEND) { 2220 stcb->asoc.num_send_timers_up--; 2221 if (stcb->asoc.num_send_timers_up < 0) { 2222 stcb->asoc.num_send_timers_up = 0; 2223 } 2224 } 2225 tmr->self = NULL; 2226 tmr->stopped_from = from; 2227 SCTP_OS_TIMER_STOP(&tmr->timer); 2228 return (0); 2229 } 2230 2231 #ifdef SCTP_USE_ADLER32 2232 static uint32_t 2233 update_adler32(uint32_t adler, uint8_t * buf, int32_t len) 2234 { 2235 uint32_t s1 = adler & 0xffff; 2236 uint32_t s2 = (adler >> 16) & 0xffff; 2237 int n; 2238 2239 for (n = 0; n < len; n++, buf++) { 2240 /* s1 = (s1 + buf[n]) % BASE */ 2241 /* first we add */ 2242 s1 = (s1 + *buf); 2243 /* 2244 * now if we need to, we do a mod by subtracting. It seems a 2245 * bit faster since I really will only ever do one subtract 2246 * at the MOST, since buf[n] is a max of 255. 2247 */ 2248 if (s1 >= SCTP_ADLER32_BASE) { 2249 s1 -= SCTP_ADLER32_BASE; 2250 } 2251 /* s2 = (s2 + s1) % BASE */ 2252 /* first we add */ 2253 s2 = (s2 + s1); 2254 /* 2255 * again, it is more efficient (it seems) to subtract since 2256 * the most s2 will ever be is (BASE-1 + BASE-1) in the 2257 * worst case. This would then be (2 * BASE) - 2, which will 2258 * still only need one subtract. On Intel this is much better 2259 * to do this way and avoid the divide. Have not -pg'd on 2260 * sparc.
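 * A quick worked example of the wrap-by-subtraction, assuming SCTP_ADLER32_BASE is the usual Adler-32 modulus 65521: with s2 = 65000 and s1 = 1000 the sum is 66000, and a single subtraction of 65521 leaves 479, which is exactly 66000 mod 65521.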
2261 */ 2262 if (s2 >= SCTP_ADLER32_BASE) { 2263 s2 -= SCTP_ADLER32_BASE; 2264 } 2265 } 2266 /* Return the adler32 of the bytes buf[0..len-1] */ 2267 return ((s2 << 16) + s1); 2268 } 2269 2270 #endif 2271 2272 2273 uint32_t 2274 sctp_calculate_len(struct mbuf *m) 2275 { 2276 uint32_t tlen = 0; 2277 struct mbuf *at; 2278 2279 at = m; 2280 while (at) { 2281 tlen += SCTP_BUF_LEN(at); 2282 at = SCTP_BUF_NEXT(at); 2283 } 2284 return (tlen); 2285 } 2286 2287 #if defined(SCTP_WITH_NO_CSUM) 2288 2289 uint32_t 2290 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset) 2291 { 2292 /* 2293 * given a mbuf chain with a packetheader offset by 'offset' 2294 * pointing at a sctphdr (with csum set to 0) go through the chain 2295 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is 2296 * currently Adler32 but will change to CRC32x soon. Also has a side 2297 * bonus calculate the total length of the mbuf chain. Note: if 2298 * offset is greater than the total mbuf length, checksum=1, 2299 * pktlen=0 is returned (ie. no real error code) 2300 */ 2301 if (pktlen == NULL) 2302 return (0); 2303 *pktlen = sctp_calculate_len(m); 2304 return (0); 2305 } 2306 2307 #elif defined(SCTP_USE_INCHKSUM) 2308 2309 #include <machine/in_cksum.h> 2310 2311 uint32_t 2312 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset) 2313 { 2314 /* 2315 * given a mbuf chain with a packetheader offset by 'offset' 2316 * pointing at a sctphdr (with csum set to 0) go through the chain 2317 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is 2318 * currently Adler32 but will change to CRC32x soon. Also has a side 2319 * bonus calculate the total length of the mbuf chain. Note: if 2320 * offset is greater than the total mbuf length, checksum=1, 2321 * pktlen=0 is returned (ie. no real error code) 2322 */ 2323 int32_t tlen = 0; 2324 struct mbuf *at; 2325 uint32_t the_sum, retsum; 2326 2327 at = m; 2328 while (at) { 2329 tlen += SCTP_BUF_LEN(at); 2330 at = SCTP_BUF_NEXT(at); 2331 } 2332 the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset)); 2333 if (pktlen != NULL) 2334 *pktlen = (tlen - offset); 2335 retsum = htons(the_sum); 2336 return (the_sum); 2337 } 2338 2339 #else 2340 2341 uint32_t 2342 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset) 2343 { 2344 /* 2345 * given a mbuf chain with a packetheader offset by 'offset' 2346 * pointing at a sctphdr (with csum set to 0) go through the chain 2347 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is 2348 * currently Adler32 but will change to CRC32x soon. Also has a side 2349 * bonus calculate the total length of the mbuf chain. Note: if 2350 * offset is greater than the total mbuf length, checksum=1, 2351 * pktlen=0 is returned (ie. 
no real error code) 2352 */ 2353 int32_t tlen = 0; 2354 2355 #ifdef SCTP_USE_ADLER32 2356 uint32_t base = 1L; 2357 2358 #else 2359 uint32_t base = 0xffffffff; 2360 2361 #endif /* SCTP_USE_ADLER32 */ 2362 struct mbuf *at; 2363 2364 at = m; 2365 /* find the correct mbuf and offset into mbuf */ 2366 while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) { 2367 offset -= SCTP_BUF_LEN(at); /* update remaining offset 2368 * left */ 2369 at = SCTP_BUF_NEXT(at); 2370 } 2371 while (at != NULL) { 2372 if ((SCTP_BUF_LEN(at) - offset) > 0) { 2373 #ifdef SCTP_USE_ADLER32 2374 base = update_adler32(base, 2375 (unsigned char *)(SCTP_BUF_AT(at, offset)), 2376 (unsigned int)(SCTP_BUF_LEN(at) - offset)); 2377 #else 2378 if ((SCTP_BUF_LEN(at) - offset) < 4) { 2379 /* Use old method if less than 4 bytes */ 2380 base = old_update_crc32(base, 2381 (unsigned char *)(SCTP_BUF_AT(at, offset)), 2382 (unsigned int)(SCTP_BUF_LEN(at) - offset)); 2383 } else { 2384 base = update_crc32(base, 2385 (unsigned char *)(SCTP_BUF_AT(at, offset)), 2386 (unsigned int)(SCTP_BUF_LEN(at) - offset)); 2387 } 2388 #endif /* SCTP_USE_ADLER32 */ 2389 tlen += SCTP_BUF_LEN(at) - offset; 2390 /* we only offset once into the first mbuf */ 2391 } 2392 if (offset) { 2393 if (offset < SCTP_BUF_LEN(at)) 2394 offset = 0; 2395 else 2396 offset -= SCTP_BUF_LEN(at); 2397 } 2398 at = SCTP_BUF_NEXT(at); 2399 } 2400 if (pktlen != NULL) { 2401 *pktlen = tlen; 2402 } 2403 #ifdef SCTP_USE_ADLER32 2404 /* Adler32 */ 2405 base = htonl(base); 2406 #else 2407 /* CRC-32c */ 2408 base = sctp_csum_finalize(base); 2409 #endif 2410 return (base); 2411 } 2412 2413 2414 #endif 2415 2416 void 2417 sctp_mtu_size_reset(struct sctp_inpcb *inp, 2418 struct sctp_association *asoc, uint32_t mtu) 2419 { 2420 /* 2421 * Reset the P-MTU size on this association, this involves changing 2422 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2423 * allow the DF flag to be cleared. 2424 */ 2425 struct sctp_tmit_chunk *chk; 2426 unsigned int eff_mtu, ovh; 2427 2428 asoc->smallest_mtu = mtu; 2429 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2430 ovh = SCTP_MIN_OVERHEAD; 2431 } else { 2432 ovh = SCTP_MIN_V4_OVERHEAD; 2433 } 2434 eff_mtu = mtu - ovh; 2435 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2436 2437 if (chk->send_size > eff_mtu) { 2438 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2439 } 2440 } 2441 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2442 if (chk->send_size > eff_mtu) { 2443 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2444 } 2445 } 2446 } 2447 2448 2449 /* 2450 * given an association and starting time of the current RTT period return 2451 * RTO in number of msecs net should point to the current network 2452 */ 2453 uint32_t 2454 sctp_calculate_rto(struct sctp_tcb *stcb, 2455 struct sctp_association *asoc, 2456 struct sctp_nets *net, 2457 struct timeval *old) 2458 { 2459 /* 2460 * given an association and the starting time of the current RTT 2461 * period (in value1/value2) return RTO in number of msecs. 2462 */ 2463 int calc_time = 0; 2464 int o_calctime; 2465 uint32_t new_rto = 0; 2466 int first_measure = 0; 2467 struct timeval now; 2468 2469 /************************/ 2470 /* 1. 
calculate new RTT */ 2471 /************************/ 2472 /* get the current time */ 2473 SCTP_GETTIME_TIMEVAL(&now); 2474 /* compute the RTT value */ 2475 if ((u_long)now.tv_sec > (u_long)old->tv_sec) { 2476 calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000; 2477 if ((u_long)now.tv_usec > (u_long)old->tv_usec) { 2478 calc_time += (((u_long)now.tv_usec - 2479 (u_long)old->tv_usec) / 1000); 2480 } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) { 2481 /* Borrow 1,000ms from current calculation */ 2482 calc_time -= 1000; 2483 /* Add in the slop over */ 2484 calc_time += ((int)now.tv_usec / 1000); 2485 /* Add in the pre-second ms's */ 2486 calc_time += (((int)1000000 - (int)old->tv_usec) / 1000); 2487 } 2488 } else if ((u_long)now.tv_sec == (u_long)old->tv_sec) { 2489 if ((u_long)now.tv_usec > (u_long)old->tv_usec) { 2490 calc_time = ((u_long)now.tv_usec - 2491 (u_long)old->tv_usec) / 1000; 2492 } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) { 2493 /* impossible .. garbage in nothing out */ 2494 goto calc_rto; 2495 } else if ((u_long)now.tv_usec == (u_long)old->tv_usec) { 2496 /* 2497 * We have to have 1 usec :-D this must be the 2498 * loopback. 2499 */ 2500 calc_time = 1; 2501 } else { 2502 /* impossible .. garbage in nothing out */ 2503 goto calc_rto; 2504 } 2505 } else { 2506 /* Clock wrapped? */ 2507 goto calc_rto; 2508 } 2509 /***************************/ 2510 /* 2. update RTTVAR & SRTT */ 2511 /***************************/ 2512 o_calctime = calc_time; 2513 /* this is Van Jacobson's integer version */ 2514 if (net->RTO) { 2515 calc_time -= (net->lastsa >> 3); 2516 #ifdef SCTP_RTTVAR_LOGGING 2517 rto_logging(net, SCTP_LOG_RTTVAR); 2518 #endif 2519 net->prev_rtt = o_calctime; 2520 net->lastsa += calc_time; 2521 if (calc_time < 0) { 2522 calc_time = -calc_time; 2523 } 2524 calc_time -= (net->lastsv >> 2); 2525 net->lastsv += calc_time; 2526 if (net->lastsv == 0) { 2527 net->lastsv = SCTP_CLOCK_GRANULARITY; 2528 } 2529 } else { 2530 /* First RTO measurment */ 2531 net->lastsa = calc_time; 2532 net->lastsv = calc_time >> 1; 2533 first_measure = 1; 2534 net->prev_rtt = o_calctime; 2535 #ifdef SCTP_RTTVAR_LOGGING 2536 rto_logging(net, SCTP_LOG_INITIAL_RTT); 2537 #endif 2538 } 2539 calc_rto: 2540 new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1; 2541 if ((new_rto > SCTP_SAT_NETWORK_MIN) && 2542 (stcb->asoc.sat_network_lockout == 0)) { 2543 stcb->asoc.sat_network = 1; 2544 } else if ((!first_measure) && stcb->asoc.sat_network) { 2545 stcb->asoc.sat_network = 0; 2546 stcb->asoc.sat_network_lockout = 1; 2547 } 2548 /* bound it, per C6/C7 in Section 5.3.1 */ 2549 if (new_rto < stcb->asoc.minrto) { 2550 new_rto = stcb->asoc.minrto; 2551 } 2552 if (new_rto > stcb->asoc.maxrto) { 2553 new_rto = stcb->asoc.maxrto; 2554 } 2555 /* we are now returning the RTO */ 2556 return (new_rto); 2557 } 2558 2559 /* 2560 * return a pointer to a contiguous piece of data from the given mbuf chain 2561 * starting at 'off' for 'len' bytes. If the desired piece spans more than 2562 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size 2563 * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain. 
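 * A minimal usage sketch, mirroring the sctp_get_next_param() wrapper below;
 * the caller supplies a buffer of at least 'len' bytes and always works
 * through the returned pointer, which may point into the mbuf chain or into
 * that buffer. A NULL return means fewer than 'len' bytes were available:
 *
 *	struct sctp_paramhdr buf, *ph;
 *
 *	ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
 *	    sizeof(buf), (uint8_t *)&buf);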
2564 */ 2565 __inline caddr_t 2566 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr) 2567 { 2568 uint32_t count; 2569 uint8_t *ptr; 2570 2571 ptr = in_ptr; 2572 if ((off < 0) || (len <= 0)) 2573 return (NULL); 2574 2575 /* find the desired start location */ 2576 while ((m != NULL) && (off > 0)) { 2577 if (off < SCTP_BUF_LEN(m)) 2578 break; 2579 off -= SCTP_BUF_LEN(m); 2580 m = SCTP_BUF_NEXT(m); 2581 } 2582 if (m == NULL) 2583 return (NULL); 2584 2585 /* is the current mbuf large enough (eg. contiguous)? */ 2586 if ((SCTP_BUF_LEN(m) - off) >= len) { 2587 return (mtod(m, caddr_t)+off); 2588 } else { 2589 /* else, it spans more than one mbuf, so save a temp copy... */ 2590 while ((m != NULL) && (len > 0)) { 2591 count = min(SCTP_BUF_LEN(m) - off, len); 2592 bcopy(mtod(m, caddr_t)+off, ptr, count); 2593 len -= count; 2594 ptr += count; 2595 off = 0; 2596 m = SCTP_BUF_NEXT(m); 2597 } 2598 if ((m == NULL) && (len > 0)) 2599 return (NULL); 2600 else 2601 return ((caddr_t)in_ptr); 2602 } 2603 } 2604 2605 2606 2607 struct sctp_paramhdr * 2608 sctp_get_next_param(struct mbuf *m, 2609 int offset, 2610 struct sctp_paramhdr *pull, 2611 int pull_limit) 2612 { 2613 /* This just provides a typed signature to Peter's Pull routine */ 2614 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 2615 (uint8_t *) pull)); 2616 } 2617 2618 2619 int 2620 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 2621 { 2622 /* 2623 * add padlen bytes of 0 filled padding to the end of the mbuf. If 2624 * padlen is > 3 this routine will fail. 2625 */ 2626 uint8_t *dp; 2627 int i; 2628 2629 if (padlen > 3) { 2630 return (ENOBUFS); 2631 } 2632 if (M_TRAILINGSPACE(m)) { 2633 /* 2634 * The easy way. We hope the majority of the time we hit 2635 * here :) 2636 */ 2637 dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 2638 SCTP_BUF_LEN(m) += padlen; 2639 } else { 2640 /* Hard way we must grow the mbuf */ 2641 struct mbuf *tmp; 2642 2643 tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA); 2644 if (tmp == NULL) { 2645 /* Out of space GAK! we are in big trouble. */ 2646 return (ENOSPC); 2647 } 2648 /* setup and insert in middle */ 2649 SCTP_BUF_NEXT(tmp) = SCTP_BUF_NEXT(m); 2650 SCTP_BUF_LEN(tmp) = padlen; 2651 SCTP_BUF_NEXT(m) = tmp; 2652 dp = mtod(tmp, uint8_t *); 2653 } 2654 /* zero out the pad */ 2655 for (i = 0; i < padlen; i++) { 2656 *dp = 0; 2657 dp++; 2658 } 2659 return (0); 2660 } 2661 2662 int 2663 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 2664 { 2665 /* find the last mbuf in chain and pad it */ 2666 struct mbuf *m_at; 2667 2668 m_at = m; 2669 if (last_mbuf) { 2670 return (sctp_add_pad_tombuf(last_mbuf, padval)); 2671 } else { 2672 while (m_at) { 2673 if (SCTP_BUF_NEXT(m_at) == NULL) { 2674 return (sctp_add_pad_tombuf(m_at, padval)); 2675 } 2676 m_at = SCTP_BUF_NEXT(m_at); 2677 } 2678 } 2679 return (EFAULT); 2680 } 2681 2682 int sctp_asoc_change_wake = 0; 2683 2684 static void 2685 sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb, 2686 uint32_t error, void *data) 2687 { 2688 struct mbuf *m_notify; 2689 struct sctp_assoc_change *sac; 2690 struct sctp_queued_to_read *control; 2691 2692 /* 2693 * First if we are are going down dump everything we can to the 2694 * socket rcv queue. 
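 * Then, for one-to-one (TCP model) and connected UDP-model sockets, an ABORT or shutdown-complete also sets so_error and wakes any sleepers, and finally the SCTP_ASSOC_CHANGE notification itself is queued if the application subscribed to it.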
2695 */ 2696 2697 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 2698 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 2699 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) 2700 ) { 2701 /* If the socket is gone we are out of here */ 2702 return; 2703 } 2704 /* 2705 * For TCP model AND UDP connected sockets we will send an error up 2706 * when an ABORT comes in. 2707 */ 2708 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2709 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2710 ((event == SCTP_COMM_LOST) || (event == SCTP_SHUTDOWN_COMP))) { 2711 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) 2712 stcb->sctp_socket->so_error = ECONNREFUSED; 2713 else 2714 stcb->sctp_socket->so_error = ECONNRESET; 2715 /* Wake ANY sleepers */ 2716 sorwakeup(stcb->sctp_socket); 2717 sowwakeup(stcb->sctp_socket); 2718 sctp_asoc_change_wake++; 2719 } 2720 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 2721 /* event not enabled */ 2722 return; 2723 } 2724 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA); 2725 if (m_notify == NULL) 2726 /* no space left */ 2727 return; 2728 SCTP_BUF_LEN(m_notify) = 0; 2729 2730 sac = mtod(m_notify, struct sctp_assoc_change *); 2731 sac->sac_type = SCTP_ASSOC_CHANGE; 2732 sac->sac_flags = 0; 2733 sac->sac_length = sizeof(struct sctp_assoc_change); 2734 sac->sac_state = event; 2735 sac->sac_error = error; 2736 /* XXX verify these stream counts */ 2737 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 2738 sac->sac_inbound_streams = stcb->asoc.streamincnt; 2739 sac->sac_assoc_id = sctp_get_associd(stcb); 2740 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change); 2741 SCTP_BUF_NEXT(m_notify) = NULL; 2742 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2743 0, 0, 0, 0, 0, 0, 2744 m_notify); 2745 if (control == NULL) { 2746 /* no memory */ 2747 sctp_m_freem(m_notify); 2748 return; 2749 } 2750 control->length = SCTP_BUF_LEN(m_notify); 2751 /* not that we need this */ 2752 control->tail_mbuf = m_notify; 2753 control->spec_flags = M_NOTIFICATION; 2754 sctp_add_to_readq(stcb->sctp_ep, stcb, 2755 control, 2756 &stcb->sctp_socket->so_rcv, 1); 2757 if (event == SCTP_COMM_LOST) { 2758 /* Wake up any sleeper */ 2759 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 2760 } 2761 } 2762 2763 static void 2764 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 2765 struct sockaddr *sa, uint32_t error) 2766 { 2767 struct mbuf *m_notify; 2768 struct sctp_paddr_change *spc; 2769 struct sctp_queued_to_read *control; 2770 2771 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) 2772 /* event not enabled */ 2773 return; 2774 2775 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA); 2776 if (m_notify == NULL) 2777 return; 2778 SCTP_BUF_LEN(m_notify) = 0; 2779 spc = mtod(m_notify, struct sctp_paddr_change *); 2780 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 2781 spc->spc_flags = 0; 2782 spc->spc_length = sizeof(struct sctp_paddr_change); 2783 if (sa->sa_family == AF_INET) { 2784 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 2785 } else { 2786 struct sockaddr_in6 *sin6; 2787 2788 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 2789 2790 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 2791 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 2792 if (sin6->sin6_scope_id == 0) { 2793 /* recover scope_id for user */ 2794 (void)sa6_recoverscope(sin6); 2795 } else 
{ 2796 /* clear embedded scope_id for user */ 2797 in6_clearscope(&sin6->sin6_addr); 2798 } 2799 } 2800 } 2801 spc->spc_state = state; 2802 spc->spc_error = error; 2803 spc->spc_assoc_id = sctp_get_associd(stcb); 2804 2805 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 2806 SCTP_BUF_NEXT(m_notify) = NULL; 2807 2808 /* append to socket */ 2809 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2810 0, 0, 0, 0, 0, 0, 2811 m_notify); 2812 if (control == NULL) { 2813 /* no memory */ 2814 sctp_m_freem(m_notify); 2815 return; 2816 } 2817 control->length = SCTP_BUF_LEN(m_notify); 2818 control->spec_flags = M_NOTIFICATION; 2819 /* not that we need this */ 2820 control->tail_mbuf = m_notify; 2821 sctp_add_to_readq(stcb->sctp_ep, stcb, 2822 control, 2823 &stcb->sctp_socket->so_rcv, 1); 2824 } 2825 2826 2827 static void 2828 sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error, 2829 struct sctp_tmit_chunk *chk) 2830 { 2831 struct mbuf *m_notify; 2832 struct sctp_send_failed *ssf; 2833 struct sctp_queued_to_read *control; 2834 int length; 2835 2836 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 2837 /* event not enabled */ 2838 return; 2839 2840 length = sizeof(struct sctp_send_failed) + chk->send_size; 2841 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA); 2842 if (m_notify == NULL) 2843 /* no space left */ 2844 return; 2845 SCTP_BUF_LEN(m_notify) = 0; 2846 ssf = mtod(m_notify, struct sctp_send_failed *); 2847 ssf->ssf_type = SCTP_SEND_FAILED; 2848 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT) 2849 ssf->ssf_flags = SCTP_DATA_UNSENT; 2850 else 2851 ssf->ssf_flags = SCTP_DATA_SENT; 2852 ssf->ssf_length = length; 2853 ssf->ssf_error = error; 2854 /* not exactly what the user sent in, but should be close :) */ 2855 ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number; 2856 ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq; 2857 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 2858 ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype; 2859 ssf->ssf_info.sinfo_context = chk->rec.data.context; 2860 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2861 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2862 SCTP_BUF_NEXT(m_notify) = chk->data; 2863 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed); 2864 2865 /* Steal off the mbuf */ 2866 chk->data = NULL; 2867 /* 2868 * For this case, we check the actual socket buffer, since the assoc 2869 * is going away we don't want to overfill the socket buffer for a 2870 * non-reader 2871 */ 2872 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 2873 sctp_m_freem(m_notify); 2874 return; 2875 } 2876 /* append to socket */ 2877 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2878 0, 0, 0, 0, 0, 0, 2879 m_notify); 2880 if (control == NULL) { 2881 /* no memory */ 2882 sctp_m_freem(m_notify); 2883 return; 2884 } 2885 control->spec_flags = M_NOTIFICATION; 2886 sctp_add_to_readq(stcb->sctp_ep, stcb, 2887 control, 2888 &stcb->sctp_socket->so_rcv, 1); 2889 } 2890 2891 2892 static void 2893 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 2894 struct sctp_stream_queue_pending *sp) 2895 { 2896 struct mbuf *m_notify; 2897 struct sctp_send_failed *ssf; 2898 struct sctp_queued_to_read *control; 2899 int length; 2900 2901 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 2902 /* event not enabled */ 2903 return; 2904 2905 length = sizeof(struct sctp_send_failed) + 
sp->length; 2906 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA); 2907 if (m_notify == NULL) 2908 /* no space left */ 2909 return; 2910 SCTP_BUF_LEN(m_notify) = 0; 2911 ssf = mtod(m_notify, struct sctp_send_failed *); 2912 ssf->ssf_type = SCTP_SEND_FAILED; 2913 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT) 2914 ssf->ssf_flags = SCTP_DATA_UNSENT; 2915 else 2916 ssf->ssf_flags = SCTP_DATA_SENT; 2917 ssf->ssf_length = length; 2918 ssf->ssf_error = error; 2919 /* not exactly what the user sent in, but should be close :) */ 2920 ssf->ssf_info.sinfo_stream = sp->stream; 2921 ssf->ssf_info.sinfo_ssn = sp->strseq; 2922 ssf->ssf_info.sinfo_flags = sp->sinfo_flags; 2923 ssf->ssf_info.sinfo_ppid = sp->ppid; 2924 ssf->ssf_info.sinfo_context = sp->context; 2925 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2926 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2927 SCTP_BUF_NEXT(m_notify) = sp->data; 2928 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed); 2929 2930 /* Steal off the mbuf */ 2931 sp->data = NULL; 2932 /* 2933 * For this case, we check the actual socket buffer, since the assoc 2934 * is going away we don't want to overfill the socket buffer for a 2935 * non-reader 2936 */ 2937 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 2938 sctp_m_freem(m_notify); 2939 return; 2940 } 2941 /* append to socket */ 2942 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2943 0, 0, 0, 0, 0, 0, 2944 m_notify); 2945 if (control == NULL) { 2946 /* no memory */ 2947 sctp_m_freem(m_notify); 2948 return; 2949 } 2950 control->spec_flags = M_NOTIFICATION; 2951 sctp_add_to_readq(stcb->sctp_ep, stcb, 2952 control, 2953 &stcb->sctp_socket->so_rcv, 1); 2954 } 2955 2956 2957 2958 static void 2959 sctp_notify_adaptation_layer(struct sctp_tcb *stcb, 2960 uint32_t error) 2961 { 2962 struct mbuf *m_notify; 2963 struct sctp_adaptation_event *sai; 2964 struct sctp_queued_to_read *control; 2965 2966 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) 2967 /* event not enabled */ 2968 return; 2969 2970 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA); 2971 if (m_notify == NULL) 2972 /* no space left */ 2973 return; 2974 SCTP_BUF_LEN(m_notify) = 0; 2975 sai = mtod(m_notify, struct sctp_adaptation_event *); 2976 sai->sai_type = SCTP_ADAPTATION_INDICATION; 2977 sai->sai_flags = 0; 2978 sai->sai_length = sizeof(struct sctp_adaptation_event); 2979 sai->sai_adaptation_ind = error; 2980 sai->sai_assoc_id = sctp_get_associd(stcb); 2981 2982 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 2983 SCTP_BUF_NEXT(m_notify) = NULL; 2984 2985 /* append to socket */ 2986 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2987 0, 0, 0, 0, 0, 0, 2988 m_notify); 2989 if (control == NULL) { 2990 /* no memory */ 2991 sctp_m_freem(m_notify); 2992 return; 2993 } 2994 control->length = SCTP_BUF_LEN(m_notify); 2995 control->spec_flags = M_NOTIFICATION; 2996 /* not that we need this */ 2997 control->tail_mbuf = m_notify; 2998 sctp_add_to_readq(stcb->sctp_ep, stcb, 2999 control, 3000 &stcb->sctp_socket->so_rcv, 1); 3001 } 3002 3003 /* This always must be called with the read-queue LOCKED in the INP */ 3004 void 3005 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, 3006 uint32_t error, int nolock) 3007 { 3008 struct mbuf *m_notify; 3009 struct sctp_pdapi_event *pdapi; 3010 struct sctp_queued_to_read *control; 3011 struct sockbuf *sb; 
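	/*
	 * Build an SCTP_PARTIAL_DELIVERY_EVENT and queue it directly behind
	 * the message currently being partially delivered
	 * (asoc.control_pdapi) on the endpoint's read queue; the read-queue
	 * lock is taken here unless the caller passed nolock to indicate it
	 * already holds it.
	 */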
3012 3013 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) 3014 /* event not enabled */ 3015 return; 3016 3017 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA); 3018 if (m_notify == NULL) 3019 /* no space left */ 3020 return; 3021 SCTP_BUF_LEN(m_notify) = 0; 3022 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3023 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3024 pdapi->pdapi_flags = 0; 3025 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3026 pdapi->pdapi_indication = error; 3027 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3028 3029 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3030 SCTP_BUF_NEXT(m_notify) = NULL; 3031 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3032 0, 0, 0, 0, 0, 0, 3033 m_notify); 3034 if (control == NULL) { 3035 /* no memory */ 3036 sctp_m_freem(m_notify); 3037 return; 3038 } 3039 control->spec_flags = M_NOTIFICATION; 3040 control->length = SCTP_BUF_LEN(m_notify); 3041 /* not that we need this */ 3042 control->tail_mbuf = m_notify; 3043 control->held_length = 0; 3044 control->length = 0; 3045 if (nolock == 0) { 3046 SCTP_INP_READ_LOCK(stcb->sctp_ep); 3047 } 3048 sb = &stcb->sctp_socket->so_rcv; 3049 #ifdef SCTP_SB_LOGGING 3050 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3051 #endif 3052 sctp_sballoc(stcb, sb, m_notify); 3053 #ifdef SCTP_SB_LOGGING 3054 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3055 #endif 3056 atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify)); 3057 control->end_added = 1; 3058 if (stcb->asoc.control_pdapi) 3059 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3060 else { 3061 /* we really should not see this case */ 3062 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3063 } 3064 if (nolock == 0) { 3065 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 3066 } 3067 if (stcb->sctp_ep && stcb->sctp_socket) { 3068 /* This should always be the case */ 3069 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3070 } 3071 } 3072 3073 static void 3074 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3075 { 3076 struct mbuf *m_notify; 3077 struct sctp_shutdown_event *sse; 3078 struct sctp_queued_to_read *control; 3079 3080 /* 3081 * For TCP model AND UDP connected sockets we will send an error up 3082 * when an SHUTDOWN completes 3083 */ 3084 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3085 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3086 /* mark socket closed for read/write and wakeup! 
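 * (only the send side is actually closed here, via socantsendmore())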
*/ 3087 socantsendmore(stcb->sctp_socket); 3088 } 3089 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) 3090 /* event not enabled */ 3091 return; 3092 3093 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA); 3094 if (m_notify == NULL) 3095 /* no space left */ 3096 return; 3097 sse = mtod(m_notify, struct sctp_shutdown_event *); 3098 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3099 sse->sse_flags = 0; 3100 sse->sse_length = sizeof(struct sctp_shutdown_event); 3101 sse->sse_assoc_id = sctp_get_associd(stcb); 3102 3103 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3104 SCTP_BUF_NEXT(m_notify) = NULL; 3105 3106 /* append to socket */ 3107 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3108 0, 0, 0, 0, 0, 0, 3109 m_notify); 3110 if (control == NULL) { 3111 /* no memory */ 3112 sctp_m_freem(m_notify); 3113 return; 3114 } 3115 control->spec_flags = M_NOTIFICATION; 3116 control->length = SCTP_BUF_LEN(m_notify); 3117 /* not that we need this */ 3118 control->tail_mbuf = m_notify; 3119 sctp_add_to_readq(stcb->sctp_ep, stcb, 3120 control, 3121 &stcb->sctp_socket->so_rcv, 1); 3122 } 3123 3124 static void 3125 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3126 int number_entries, uint16_t * list, int flag) 3127 { 3128 struct mbuf *m_notify; 3129 struct sctp_queued_to_read *control; 3130 struct sctp_stream_reset_event *strreset; 3131 int len; 3132 3133 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) 3134 /* event not enabled */ 3135 return; 3136 3137 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 3138 if (m_notify == NULL) 3139 /* no space left */ 3140 return; 3141 SCTP_BUF_LEN(m_notify) = 0; 3142 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3143 if (len > M_TRAILINGSPACE(m_notify)) { 3144 /* never enough room */ 3145 sctp_m_freem(m_notify); 3146 return; 3147 } 3148 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3149 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3150 if (number_entries == 0) { 3151 strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS; 3152 } else { 3153 strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST; 3154 } 3155 strreset->strreset_length = len; 3156 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3157 if (number_entries) { 3158 int i; 3159 3160 for (i = 0; i < number_entries; i++) { 3161 strreset->strreset_list[i] = ntohs(list[i]); 3162 } 3163 } 3164 SCTP_BUF_LEN(m_notify) = len; 3165 SCTP_BUF_NEXT(m_notify) = NULL; 3166 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3167 /* no space */ 3168 sctp_m_freem(m_notify); 3169 return; 3170 } 3171 /* append to socket */ 3172 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3173 0, 0, 0, 0, 0, 0, 3174 m_notify); 3175 if (control == NULL) { 3176 /* no memory */ 3177 sctp_m_freem(m_notify); 3178 return; 3179 } 3180 control->spec_flags = M_NOTIFICATION; 3181 control->length = SCTP_BUF_LEN(m_notify); 3182 /* not that we need this */ 3183 control->tail_mbuf = m_notify; 3184 sctp_add_to_readq(stcb->sctp_ep, stcb, 3185 control, 3186 &stcb->sctp_socket->so_rcv, 1); 3187 } 3188 3189 3190 void 3191 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 3192 uint32_t error, void *data) 3193 { 3194 if (stcb == NULL) { 3195 /* unlikely but */ 3196 return; 3197 } 3198 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3199 
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3200 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) 3201 ) { 3202 /* No notifications up when we are in a no socket state */ 3203 return; 3204 } 3205 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3206 /* Can't send up to a closed socket any notifications */ 3207 return; 3208 } 3209 if (stcb && (stcb->asoc.assoc_up_sent == 0) && (notification != SCTP_NOTIFY_ASSOC_UP)) { 3210 if ((notification != SCTP_NOTIFY_ASSOC_DOWN) && 3211 (notification != SCTP_NOTIFY_ASSOC_ABORTED) && 3212 (notification != SCTP_NOTIFY_SPECIAL_SP_FAIL) && 3213 (notification != SCTP_NOTIFY_DG_FAIL) && 3214 (notification != SCTP_NOTIFY_PEER_SHUTDOWN)) { 3215 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, 0, NULL); 3216 stcb->asoc.assoc_up_sent = 1; 3217 } 3218 } 3219 switch (notification) { 3220 case SCTP_NOTIFY_ASSOC_UP: 3221 if (stcb->asoc.assoc_up_sent == 0) { 3222 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL); 3223 stcb->asoc.assoc_up_sent = 1; 3224 } 3225 break; 3226 case SCTP_NOTIFY_ASSOC_DOWN: 3227 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL); 3228 break; 3229 case SCTP_NOTIFY_INTERFACE_DOWN: 3230 { 3231 struct sctp_nets *net; 3232 3233 net = (struct sctp_nets *)data; 3234 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 3235 (struct sockaddr *)&net->ro._l_addr, error); 3236 break; 3237 } 3238 case SCTP_NOTIFY_INTERFACE_UP: 3239 { 3240 struct sctp_nets *net; 3241 3242 net = (struct sctp_nets *)data; 3243 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 3244 (struct sockaddr *)&net->ro._l_addr, error); 3245 break; 3246 } 3247 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 3248 { 3249 struct sctp_nets *net; 3250 3251 net = (struct sctp_nets *)data; 3252 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 3253 (struct sockaddr *)&net->ro._l_addr, error); 3254 break; 3255 } 3256 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 3257 sctp_notify_send_failed2(stcb, error, 3258 (struct sctp_stream_queue_pending *)data); 3259 break; 3260 case SCTP_NOTIFY_DG_FAIL: 3261 sctp_notify_send_failed(stcb, error, 3262 (struct sctp_tmit_chunk *)data); 3263 break; 3264 case SCTP_NOTIFY_ADAPTATION_INDICATION: 3265 /* Here the error is the adaptation indication */ 3266 sctp_notify_adaptation_layer(stcb, error); 3267 break; 3268 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 3269 sctp_notify_partial_delivery_indication(stcb, error, 0); 3270 break; 3271 case SCTP_NOTIFY_STRDATA_ERR: 3272 break; 3273 case SCTP_NOTIFY_ASSOC_ABORTED: 3274 if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) || 3275 ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) { 3276 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL); 3277 } else { 3278 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL); 3279 } 3280 break; 3281 case SCTP_NOTIFY_PEER_OPENED_STREAM: 3282 break; 3283 case SCTP_NOTIFY_STREAM_OPENED_OK: 3284 break; 3285 case SCTP_NOTIFY_ASSOC_RESTART: 3286 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data); 3287 break; 3288 case SCTP_NOTIFY_HB_RESP: 3289 break; 3290 case SCTP_NOTIFY_STR_RESET_SEND: 3291 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR); 3292 break; 3293 case SCTP_NOTIFY_STR_RESET_RECV: 3294 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR); 3295 break; 3296 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 3297 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | 
SCTP_STRRESET_INBOUND_STR)); 3298 break; 3299 3300 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 3301 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_INBOUND_STR)); 3302 break; 3303 3304 case SCTP_NOTIFY_ASCONF_ADD_IP: 3305 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 3306 error); 3307 break; 3308 case SCTP_NOTIFY_ASCONF_DELETE_IP: 3309 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 3310 error); 3311 break; 3312 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 3313 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 3314 error); 3315 break; 3316 case SCTP_NOTIFY_ASCONF_SUCCESS: 3317 break; 3318 case SCTP_NOTIFY_ASCONF_FAILED: 3319 break; 3320 case SCTP_NOTIFY_PEER_SHUTDOWN: 3321 sctp_notify_shutdown_event(stcb); 3322 break; 3323 case SCTP_NOTIFY_AUTH_NEW_KEY: 3324 sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error, 3325 (uint16_t) (uintptr_t) data); 3326 break; 3327 #if 0 3328 case SCTP_NOTIFY_AUTH_KEY_CONFLICT: 3329 sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT, 3330 error, (uint16_t) (uintptr_t) data); 3331 break; 3332 #endif /* not yet? remove? */ 3333 3334 3335 default: 3336 #ifdef SCTP_DEBUG 3337 if (sctp_debug_on & SCTP_DEBUG_UTIL1) { 3338 printf("NOTIFY: unknown notification %xh (%u)\n", 3339 notification, notification); 3340 } 3341 #endif /* SCTP_DEBUG */ 3342 break; 3343 } /* end switch */ 3344 } 3345 3346 void 3347 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock) 3348 { 3349 struct sctp_association *asoc; 3350 struct sctp_stream_out *outs; 3351 struct sctp_tmit_chunk *chk; 3352 struct sctp_stream_queue_pending *sp; 3353 int i; 3354 3355 asoc = &stcb->asoc; 3356 3357 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3358 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3359 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3360 return; 3361 } 3362 /* now through all the gunk freeing chunks */ 3363 if (holds_lock == 0) 3364 SCTP_TCB_SEND_LOCK(stcb); 3365 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3366 /* For each stream */ 3367 outs = &stcb->asoc.strmout[i]; 3368 /* clean up any sends there */ 3369 stcb->asoc.locked_on_sending = NULL; 3370 sp = TAILQ_FIRST(&outs->outqueue); 3371 while (sp) { 3372 stcb->asoc.stream_queue_cnt--; 3373 TAILQ_REMOVE(&outs->outqueue, sp, next); 3374 sctp_free_spbufspace(stcb, asoc, sp); 3375 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 3376 SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp); 3377 if (sp->data) { 3378 sctp_m_freem(sp->data); 3379 sp->data = NULL; 3380 } 3381 if (sp->net) 3382 sctp_free_remote_addr(sp->net); 3383 sp->net = NULL; 3384 /* Free the chunk */ 3385 sctp_free_a_strmoq(stcb, sp); 3386 sp = TAILQ_FIRST(&outs->outqueue); 3387 } 3388 } 3389 3390 /* pending send queue SHOULD be empty */ 3391 if (!TAILQ_EMPTY(&asoc->send_queue)) { 3392 chk = TAILQ_FIRST(&asoc->send_queue); 3393 while (chk) { 3394 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 3395 asoc->send_queue_cnt--; 3396 if (chk->data) { 3397 /* 3398 * trim off the sctp chunk header(it should 3399 * be there) 3400 */ 3401 if (chk->send_size >= sizeof(struct sctp_data_chunk)) { 3402 m_adj(chk->data, sizeof(struct sctp_data_chunk)); 3403 sctp_mbuf_crush(chk->data); 3404 } 3405 } 3406 sctp_free_bufspace(stcb, asoc, chk, 1); 3407 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk); 3408 if (chk->data) { 3409 sctp_m_freem(chk->data); 3410 chk->data = NULL; 3411 } 3412 if (chk->whoTo) 3413 sctp_free_remote_addr(chk->whoTo); 3414 
chk->whoTo = NULL; 3415 sctp_free_a_chunk(stcb, chk); 3416 chk = TAILQ_FIRST(&asoc->send_queue); 3417 } 3418 } 3419 /* sent queue SHOULD be empty */ 3420 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3421 chk = TAILQ_FIRST(&asoc->sent_queue); 3422 while (chk) { 3423 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 3424 asoc->sent_queue_cnt--; 3425 if (chk->data) { 3426 /* 3427 * trim off the sctp chunk header(it should 3428 * be there) 3429 */ 3430 if (chk->send_size >= sizeof(struct sctp_data_chunk)) { 3431 m_adj(chk->data, sizeof(struct sctp_data_chunk)); 3432 sctp_mbuf_crush(chk->data); 3433 } 3434 } 3435 sctp_free_bufspace(stcb, asoc, chk, 1); 3436 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 3437 SCTP_NOTIFY_DATAGRAM_SENT, chk); 3438 if (chk->data) { 3439 sctp_m_freem(chk->data); 3440 chk->data = NULL; 3441 } 3442 if (chk->whoTo) 3443 sctp_free_remote_addr(chk->whoTo); 3444 chk->whoTo = NULL; 3445 sctp_free_a_chunk(stcb, chk); 3446 chk = TAILQ_FIRST(&asoc->sent_queue); 3447 } 3448 } 3449 if (holds_lock == 0) 3450 SCTP_TCB_SEND_UNLOCK(stcb); 3451 } 3452 3453 void 3454 sctp_abort_notification(struct sctp_tcb *stcb, int error) 3455 { 3456 3457 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3458 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3459 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3460 return; 3461 } 3462 /* Tell them we lost the asoc */ 3463 sctp_report_all_outbound(stcb, 1); 3464 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 3465 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 3466 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 3467 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 3468 } 3469 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL); 3470 } 3471 3472 void 3473 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3474 struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err) 3475 { 3476 uint32_t vtag; 3477 3478 vtag = 0; 3479 if (stcb != NULL) { 3480 /* We have a TCB to abort, send notification too */ 3481 vtag = stcb->asoc.peer_vtag; 3482 sctp_abort_notification(stcb, 0); 3483 } 3484 sctp_send_abort(m, iphlen, sh, vtag, op_err); 3485 if (stcb != NULL) { 3486 /* Ok, now lets free it */ 3487 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 3488 } else { 3489 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3490 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3491 sctp_inpcb_free(inp, 1, 0); 3492 } 3493 } 3494 } 3495 } 3496 3497 void 3498 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3499 int error, struct mbuf *op_err) 3500 { 3501 uint32_t vtag; 3502 3503 if (stcb == NULL) { 3504 /* Got to have a TCB */ 3505 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3506 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3507 sctp_inpcb_free(inp, 1, 0); 3508 } 3509 } 3510 return; 3511 } 3512 vtag = stcb->asoc.peer_vtag; 3513 /* notify the ulp */ 3514 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) 3515 sctp_abort_notification(stcb, error); 3516 /* notify the peer */ 3517 sctp_send_abort_tcb(stcb, op_err); 3518 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 3519 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 3520 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 3521 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 3522 } 3523 /* now free the asoc */ 3524 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 3525 } 3526 3527 void 3528 sctp_handle_ootb(struct mbuf *m, 
int iphlen, int offset, struct sctphdr *sh, 3529 struct sctp_inpcb *inp, struct mbuf *op_err) 3530 { 3531 struct sctp_chunkhdr *ch, chunk_buf; 3532 unsigned int chk_length; 3533 3534 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 3535 /* Generate a TO address for future reference */ 3536 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 3537 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3538 sctp_inpcb_free(inp, 1, 0); 3539 } 3540 } 3541 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3542 sizeof(*ch), (uint8_t *) & chunk_buf); 3543 while (ch != NULL) { 3544 chk_length = ntohs(ch->chunk_length); 3545 if (chk_length < sizeof(*ch)) { 3546 /* break to abort land */ 3547 break; 3548 } 3549 switch (ch->chunk_type) { 3550 case SCTP_PACKET_DROPPED: 3551 /* we don't respond to pkt-dropped */ 3552 return; 3553 case SCTP_ABORT_ASSOCIATION: 3554 /* we don't respond with an ABORT to an ABORT */ 3555 return; 3556 case SCTP_SHUTDOWN_COMPLETE: 3557 /* 3558 * we ignore it since we are not waiting for it and 3559 * peer is gone 3560 */ 3561 return; 3562 case SCTP_SHUTDOWN_ACK: 3563 sctp_send_shutdown_complete2(m, iphlen, sh); 3564 return; 3565 default: 3566 break; 3567 } 3568 offset += SCTP_SIZE32(chk_length); 3569 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3570 sizeof(*ch), (uint8_t *) & chunk_buf); 3571 } 3572 sctp_send_abort(m, iphlen, sh, 0, op_err); 3573 } 3574 3575 /* 3576 * check the inbound datagram to make sure there is not an abort inside it, 3577 * if there is return 1, else return 0. 3578 */ 3579 int 3580 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill) 3581 { 3582 struct sctp_chunkhdr *ch; 3583 struct sctp_init_chunk *init_chk, chunk_buf; 3584 int offset; 3585 unsigned int chk_length; 3586 3587 offset = iphlen + sizeof(struct sctphdr); 3588 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 3589 (uint8_t *) & chunk_buf); 3590 while (ch != NULL) { 3591 chk_length = ntohs(ch->chunk_length); 3592 if (chk_length < sizeof(*ch)) { 3593 /* packet is probably corrupt */ 3594 break; 3595 } 3596 /* we seem to be ok, is it an abort? */ 3597 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 3598 /* yep, tell them */ 3599 return (1); 3600 } 3601 if (ch->chunk_type == SCTP_INITIATION) { 3602 /* need to update the Vtag */ 3603 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 3604 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf); 3605 if (init_chk != NULL) { 3606 *vtagfill = ntohl(init_chk->init.initiate_tag); 3607 } 3608 } 3609 /* Nope, move to the next chunk */ 3610 offset += SCTP_SIZE32(chk_length); 3611 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3612 sizeof(*ch), (uint8_t *) & chunk_buf); 3613 } 3614 return (0); 3615 } 3616 3617 /* 3618 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 3619 * set (i.e. 
it's 0) so, create this function to compare link local scopes 3620 */ 3621 uint32_t 3622 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 3623 { 3624 struct sockaddr_in6 a, b; 3625 3626 /* save copies */ 3627 a = *addr1; 3628 b = *addr2; 3629 3630 if (a.sin6_scope_id == 0) 3631 if (sa6_recoverscope(&a)) { 3632 /* can't get scope, so can't match */ 3633 return (0); 3634 } 3635 if (b.sin6_scope_id == 0) 3636 if (sa6_recoverscope(&b)) { 3637 /* can't get scope, so can't match */ 3638 return (0); 3639 } 3640 if (a.sin6_scope_id != b.sin6_scope_id) 3641 return (0); 3642 3643 return (1); 3644 } 3645 3646 /* 3647 * returns a sockaddr_in6 with embedded scope recovered and removed 3648 */ 3649 struct sockaddr_in6 * 3650 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 3651 { 3652 /* check and strip embedded scope junk */ 3653 if (addr->sin6_family == AF_INET6) { 3654 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 3655 if (addr->sin6_scope_id == 0) { 3656 *store = *addr; 3657 if (!sa6_recoverscope(store)) { 3658 /* use the recovered scope */ 3659 addr = store; 3660 } 3661 } else { 3662 /* else, return the original "to" addr */ 3663 in6_clearscope(&addr->sin6_addr); 3664 } 3665 } 3666 } 3667 return (addr); 3668 } 3669 3670 /* 3671 * are the two addresses the same? currently a "scopeless" check returns: 1 3672 * if same, 0 if not 3673 */ 3674 __inline int 3675 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 3676 { 3677 3678 /* must be valid */ 3679 if (sa1 == NULL || sa2 == NULL) 3680 return (0); 3681 3682 /* must be the same family */ 3683 if (sa1->sa_family != sa2->sa_family) 3684 return (0); 3685 3686 if (sa1->sa_family == AF_INET6) { 3687 /* IPv6 addresses */ 3688 struct sockaddr_in6 *sin6_1, *sin6_2; 3689 3690 sin6_1 = (struct sockaddr_in6 *)sa1; 3691 sin6_2 = (struct sockaddr_in6 *)sa2; 3692 return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr, 3693 &sin6_2->sin6_addr)); 3694 } else if (sa1->sa_family == AF_INET) { 3695 /* IPv4 addresses */ 3696 struct sockaddr_in *sin_1, *sin_2; 3697 3698 sin_1 = (struct sockaddr_in *)sa1; 3699 sin_2 = (struct sockaddr_in *)sa2; 3700 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 3701 } else { 3702 /* we don't do these... 
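 * (only AF_INET and AF_INET6 are compared; any other address family is treated as not equal)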
*/ 3703 return (0); 3704 } 3705 } 3706 3707 void 3708 sctp_print_address(struct sockaddr *sa) 3709 { 3710 3711 if (sa->sa_family == AF_INET6) { 3712 struct sockaddr_in6 *sin6; 3713 char ip6buf[INET6_ADDRSTRLEN]; 3714 3715 sin6 = (struct sockaddr_in6 *)sa; 3716 printf("IPv6 address: %s:port:%d scope:%u\n", 3717 ip6_sprintf(ip6buf, &sin6->sin6_addr), 3718 ntohs(sin6->sin6_port), 3719 sin6->sin6_scope_id); 3720 } else if (sa->sa_family == AF_INET) { 3721 struct sockaddr_in *sin; 3722 unsigned char *p; 3723 3724 sin = (struct sockaddr_in *)sa; 3725 p = (unsigned char *)&sin->sin_addr; 3726 printf("IPv4 address: %u.%u.%u.%u:%d\n", 3727 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 3728 } else { 3729 printf("?\n"); 3730 } 3731 } 3732 3733 void 3734 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh) 3735 { 3736 if (iph->ip_v == IPVERSION) { 3737 struct sockaddr_in lsa, fsa; 3738 3739 bzero(&lsa, sizeof(lsa)); 3740 lsa.sin_len = sizeof(lsa); 3741 lsa.sin_family = AF_INET; 3742 lsa.sin_addr = iph->ip_src; 3743 lsa.sin_port = sh->src_port; 3744 bzero(&fsa, sizeof(fsa)); 3745 fsa.sin_len = sizeof(fsa); 3746 fsa.sin_family = AF_INET; 3747 fsa.sin_addr = iph->ip_dst; 3748 fsa.sin_port = sh->dest_port; 3749 printf("src: "); 3750 sctp_print_address((struct sockaddr *)&lsa); 3751 printf("dest: "); 3752 sctp_print_address((struct sockaddr *)&fsa); 3753 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 3754 struct ip6_hdr *ip6; 3755 struct sockaddr_in6 lsa6, fsa6; 3756 3757 ip6 = (struct ip6_hdr *)iph; 3758 bzero(&lsa6, sizeof(lsa6)); 3759 lsa6.sin6_len = sizeof(lsa6); 3760 lsa6.sin6_family = AF_INET6; 3761 lsa6.sin6_addr = ip6->ip6_src; 3762 lsa6.sin6_port = sh->src_port; 3763 bzero(&fsa6, sizeof(fsa6)); 3764 fsa6.sin6_len = sizeof(fsa6); 3765 fsa6.sin6_family = AF_INET6; 3766 fsa6.sin6_addr = ip6->ip6_dst; 3767 fsa6.sin6_port = sh->dest_port; 3768 printf("src: "); 3769 sctp_print_address((struct sockaddr *)&lsa6); 3770 printf("dest: "); 3771 sctp_print_address((struct sockaddr *)&fsa6); 3772 } 3773 } 3774 3775 void 3776 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 3777 struct sctp_inpcb *new_inp, 3778 struct sctp_tcb *stcb) 3779 { 3780 /* 3781 * go through our old INP and pull off any control structures that 3782 * belong to stcb and move then to the new inp. 3783 */ 3784 struct socket *old_so, *new_so; 3785 struct sctp_queued_to_read *control, *nctl; 3786 struct sctp_readhead tmp_queue; 3787 struct mbuf *m; 3788 int error = 0; 3789 3790 old_so = old_inp->sctp_socket; 3791 new_so = new_inp->sctp_socket; 3792 TAILQ_INIT(&tmp_queue); 3793 3794 SOCKBUF_LOCK(&(old_so->so_rcv)); 3795 3796 error = sblock(&old_so->so_rcv, 0); 3797 3798 SOCKBUF_UNLOCK(&(old_so->so_rcv)); 3799 if (error) { 3800 /* 3801 * Gak, can't get sblock, we have a problem. data will be 3802 * left stranded.. and we don't dare look at it since the 3803 * other thread may be reading something. Oh well, its a 3804 * screwed up app that does a peeloff OR a accept while 3805 * reading from the main socket... actually its only the 3806 * peeloff() case, since I think read will fail on a 3807 * listening socket.. 
3808 */ 3809 return; 3810 } 3811 /* lock the socket buffers */ 3812 SCTP_INP_READ_LOCK(old_inp); 3813 control = TAILQ_FIRST(&old_inp->read_queue); 3814 /* Pull off all for our target stcb */ 3815 while (control) { 3816 nctl = TAILQ_NEXT(control, next); 3817 if (control->stcb == stcb) { 3818 /* remove it, we want it */ 3819 TAILQ_REMOVE(&old_inp->read_queue, control, next); 3820 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 3821 m = control->data; 3822 while (m) { 3823 #ifdef SCTP_SB_LOGGING 3824 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 3825 #endif 3826 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 3827 #ifdef SCTP_SB_LOGGING 3828 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3829 #endif 3830 m = SCTP_BUF_NEXT(m); 3831 } 3832 } 3833 control = nctl; 3834 } 3835 SCTP_INP_READ_UNLOCK(old_inp); 3836 3837 /* Remove the sb-lock on the old socket */ 3838 SOCKBUF_LOCK(&(old_so->so_rcv)); 3839 3840 sbunlock(&old_so->so_rcv); 3841 SOCKBUF_UNLOCK(&(old_so->so_rcv)); 3842 3843 /* Now we move them over to the new socket buffer */ 3844 control = TAILQ_FIRST(&tmp_queue); 3845 SCTP_INP_READ_LOCK(new_inp); 3846 while (control) { 3847 nctl = TAILQ_NEXT(control, next); 3848 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 3849 m = control->data; 3850 while (m) { 3851 #ifdef SCTP_SB_LOGGING 3852 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 3853 #endif 3854 sctp_sballoc(stcb, &new_so->so_rcv, m); 3855 #ifdef SCTP_SB_LOGGING 3856 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3857 #endif 3858 m = SCTP_BUF_NEXT(m); 3859 } 3860 control = nctl; 3861 } 3862 SCTP_INP_READ_UNLOCK(new_inp); 3863 } 3864 3865 3866 void 3867 sctp_add_to_readq(struct sctp_inpcb *inp, 3868 struct sctp_tcb *stcb, 3869 struct sctp_queued_to_read *control, 3870 struct sockbuf *sb, 3871 int end) 3872 { 3873 /* 3874 * Here we must place the control on the end of the socket read 3875 * queue AND increment sb_cc so that select will work properly on 3876 * read. 3877 */ 3878 struct mbuf *m, *prev = NULL; 3879 3880 if (inp == NULL) { 3881 /* Gak, TSNH!! */ 3882 #ifdef INVARIANTS 3883 panic("Gak, inp NULL on add_to_readq"); 3884 #endif 3885 return; 3886 } 3887 SCTP_INP_READ_LOCK(inp); 3888 if (!(control->spec_flags & M_NOTIFICATION)) { 3889 atomic_add_int(&inp->total_recvs, 1); 3890 if (!control->do_not_ref_stcb) { 3891 atomic_add_int(&stcb->total_recvs, 1); 3892 } 3893 } 3894 m = control->data; 3895 control->held_length = 0; 3896 control->length = 0; 3897 while (m) { 3898 if (SCTP_BUF_LEN(m) == 0) { 3899 /* Skip mbufs with NO length */ 3900 if (prev == NULL) { 3901 /* First one */ 3902 control->data = sctp_m_free(m); 3903 m = control->data; 3904 } else { 3905 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 3906 m = SCTP_BUF_NEXT(prev); 3907 } 3908 if (m == NULL) { 3909 control->tail_mbuf = prev; 3910 } 3911 continue; 3912 } 3913 prev = m; 3914 #ifdef SCTP_SB_LOGGING 3915 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 3916 #endif 3917 sctp_sballoc(stcb, sb, m); 3918 #ifdef SCTP_SB_LOGGING 3919 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3920 #endif 3921 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 3922 m = SCTP_BUF_NEXT(m); 3923 } 3924 if (prev != NULL) { 3925 control->tail_mbuf = prev; 3926 } else { 3927 /* Everything got collapsed out??
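All the mbufs were zero length and were freed above, so there is nothing left to queue. XXX note that this early return leaves the inp read lock, taken above, held.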
*/ 3928 return; 3929 } 3930 if (end) { 3931 control->end_added = 1; 3932 } 3933 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 3934 SCTP_INP_READ_UNLOCK(inp); 3935 if (inp && inp->sctp_socket) { 3936 sctp_sorwakeup(inp, inp->sctp_socket); 3937 } 3938 } 3939 3940 3941 int 3942 sctp_append_to_readq(struct sctp_inpcb *inp, 3943 struct sctp_tcb *stcb, 3944 struct sctp_queued_to_read *control, 3945 struct mbuf *m, 3946 int end, 3947 int ctls_cumack, 3948 struct sockbuf *sb) 3949 { 3950 /* 3951 * A partial delivery API event is underway. OR we are appending on 3952 * the reassembly queue. 3953 * 3954 * If PDAPI this means we need to add m to the end of the data. 3955 * Increase the length in the control AND increment the sb_cc. 3956 * Otherwise sb is NULL and all we need to do is put it at the end 3957 * of the mbuf chain. 3958 */ 3959 int len = 0; 3960 struct mbuf *mm, *tail = NULL, *prev = NULL; 3961 3962 if (inp) { 3963 SCTP_INP_READ_LOCK(inp); 3964 } 3965 if (control == NULL) { 3966 get_out: 3967 if (inp) { 3968 SCTP_INP_READ_UNLOCK(inp); 3969 } 3970 return (-1); 3971 } 3972 if (control->end_added) { 3973 /* huh this one is complete? */ 3974 goto get_out; 3975 } 3976 mm = m; 3977 if (mm == NULL) { 3978 goto get_out; 3979 } 3980 while (mm) { 3981 if (SCTP_BUF_LEN(mm) == 0) { 3982 /* Skip mbufs with NO lenght */ 3983 if (prev == NULL) { 3984 /* First one */ 3985 m = sctp_m_free(mm); 3986 mm = m; 3987 } else { 3988 SCTP_BUF_NEXT(prev) = sctp_m_free(mm); 3989 mm = SCTP_BUF_NEXT(prev); 3990 } 3991 continue; 3992 } 3993 prev = mm; 3994 len += SCTP_BUF_LEN(mm); 3995 if (sb) { 3996 #ifdef SCTP_SB_LOGGING 3997 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm)); 3998 #endif 3999 sctp_sballoc(stcb, sb, mm); 4000 #ifdef SCTP_SB_LOGGING 4001 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4002 #endif 4003 } 4004 mm = SCTP_BUF_NEXT(mm); 4005 } 4006 if (prev) { 4007 tail = prev; 4008 } else { 4009 /* Really there should always be a prev */ 4010 if (m == NULL) { 4011 /* Huh nothing left? */ 4012 #ifdef INVARIANTS 4013 panic("Nothing left to add?"); 4014 #else 4015 goto get_out; 4016 #endif 4017 } 4018 tail = m; 4019 } 4020 if (end) { 4021 /* message is complete */ 4022 if (control == stcb->asoc.control_pdapi) { 4023 stcb->asoc.control_pdapi = NULL; 4024 } 4025 control->held_length = 0; 4026 control->end_added = 1; 4027 } 4028 atomic_add_int(&control->length, len); 4029 if (control->tail_mbuf) { 4030 /* append */ 4031 SCTP_BUF_NEXT(control->tail_mbuf) = m; 4032 control->tail_mbuf = tail; 4033 } else { 4034 /* nothing there */ 4035 #ifdef INVARIANTS 4036 if (control->data != NULL) { 4037 panic("This should NOT happen"); 4038 } 4039 #endif 4040 control->data = m; 4041 control->tail_mbuf = tail; 4042 } 4043 /* 4044 * When we are appending in partial delivery, the cum-ack is used 4045 * for the actual pd-api highest tsn on this mbuf. The true cum-ack 4046 * is populated in the outbound sinfo structure from the true cumack 4047 * if the association exists... 
4048 */ 4049 control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack; 4050 if (inp) { 4051 SCTP_INP_READ_UNLOCK(inp); 4052 } 4053 if (inp && inp->sctp_socket) { 4054 sctp_sorwakeup(inp, inp->sctp_socket); 4055 } 4056 return (0); 4057 } 4058 4059 4060 4061 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4062 *************ALTERNATE ROUTING CODE 4063 */ 4064 4065 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4066 *************ALTERNATE ROUTING CODE 4067 */ 4068 4069 struct mbuf * 4070 sctp_generate_invmanparam(int err) 4071 { 4072 /* Return a MBUF with a invalid mandatory parameter */ 4073 struct mbuf *m; 4074 4075 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA); 4076 if (m) { 4077 struct sctp_paramhdr *ph; 4078 4079 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr); 4080 ph = mtod(m, struct sctp_paramhdr *); 4081 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 4082 ph->param_type = htons(err); 4083 } 4084 return (m); 4085 } 4086 4087 #ifdef SCTP_MBCNT_LOGGING 4088 void 4089 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4090 struct sctp_tmit_chunk *tp1, int chk_cnt) 4091 { 4092 if (tp1->data == NULL) { 4093 return; 4094 } 4095 asoc->chunks_on_out_queue -= chk_cnt; 4096 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4097 asoc->total_output_queue_size, 4098 tp1->book_size, 4099 0, 4100 tp1->mbcnt); 4101 if (asoc->total_output_queue_size >= tp1->book_size) { 4102 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4103 } else { 4104 asoc->total_output_queue_size = 0; 4105 } 4106 4107 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4108 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4109 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 4110 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 4111 } else { 4112 stcb->sctp_socket->so_snd.sb_cc = 0; 4113 4114 } 4115 } 4116 } 4117 4118 #endif 4119 4120 int 4121 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 4122 int reason, struct sctpchunk_listhead *queue) 4123 { 4124 int ret_sz = 0; 4125 int notdone; 4126 uint8_t foundeom = 0; 4127 4128 do { 4129 ret_sz += tp1->book_size; 4130 tp1->sent = SCTP_FORWARD_TSN_SKIP; 4131 if (tp1->data) { 4132 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 4133 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1); 4134 sctp_m_freem(tp1->data); 4135 tp1->data = NULL; 4136 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 4137 } 4138 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4139 stcb->asoc.sent_queue_cnt_removeable--; 4140 } 4141 if (queue == &stcb->asoc.send_queue) { 4142 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 4143 /* on to the sent queue */ 4144 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 4145 sctp_next); 4146 stcb->asoc.sent_queue_cnt++; 4147 } 4148 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 4149 SCTP_DATA_NOT_FRAG) { 4150 /* not frag'ed we ae done */ 4151 notdone = 0; 4152 foundeom = 1; 4153 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 4154 /* end of frag, we are done */ 4155 notdone = 0; 4156 foundeom = 1; 4157 } else { 4158 /* 4159 * Its a begin or middle piece, we must mark all of 4160 * it 4161 */ 4162 notdone = 1; 4163 tp1 = TAILQ_NEXT(tp1, sctp_next); 4164 } 4165 } while (tp1 && notdone); 4166 if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) { 4167 /* 4168 * The multi-part message was scattered across the send and 4169 * sent queue. 
4170 */ 4171 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 4172 /* 4173 * recurse throught the send_queue too, starting at the 4174 * beginning. 4175 */ 4176 if (tp1) { 4177 ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason, 4178 &stcb->asoc.send_queue); 4179 } else { 4180 printf("hmm, nothing on the send queue and no EOM?\n"); 4181 } 4182 } 4183 return (ret_sz); 4184 } 4185 4186 /* 4187 * checks to see if the given address, sa, is one that is currently known by 4188 * the kernel note: can't distinguish the same address on multiple interfaces 4189 * and doesn't handle multiple addresses with different zone/scope id's note: 4190 * ifa_ifwithaddr() compares the entire sockaddr struct 4191 */ 4192 struct sctp_ifa * 4193 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, int holds_lock) 4194 { 4195 struct sctp_laddr *laddr; 4196 4197 if (holds_lock == 0) 4198 SCTP_INP_RLOCK(inp); 4199 4200 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4201 if (laddr->ifa == NULL) 4202 continue; 4203 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 4204 continue; 4205 if (addr->sa_family == AF_INET) { 4206 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 4207 laddr->ifa->address.sin.sin_addr.s_addr) { 4208 /* found him. */ 4209 if (holds_lock == 0) 4210 SCTP_INP_RUNLOCK(inp); 4211 return (laddr->ifa); 4212 break; 4213 } 4214 } else if (addr->sa_family == AF_INET6) { 4215 if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr, 4216 &laddr->ifa->address.sin6.sin6_addr)) { 4217 /* found him. */ 4218 if (holds_lock == 0) 4219 SCTP_INP_RUNLOCK(inp); 4220 return (laddr->ifa); 4221 break; 4222 } 4223 } 4224 } 4225 if (holds_lock == 0) 4226 SCTP_INP_RUNLOCK(inp); 4227 return (NULL); 4228 } 4229 4230 struct sctp_ifa * 4231 sctp_find_ifa_in_ifn(struct sctp_ifn *sctp_ifnp, struct sockaddr *addr, 4232 int holds_lock) 4233 { 4234 struct sctp_ifa *sctp_ifap; 4235 4236 if (holds_lock == 0) 4237 SCTP_IPI_ADDR_LOCK(); 4238 4239 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) { 4240 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 4241 continue; 4242 if (addr->sa_family == AF_INET) { 4243 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 4244 sctp_ifap->address.sin.sin_addr.s_addr) { 4245 /* found him. */ 4246 if (holds_lock == 0) 4247 SCTP_IPI_ADDR_UNLOCK(); 4248 return (sctp_ifap); 4249 break; 4250 } 4251 } else if (addr->sa_family == AF_INET6) { 4252 if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr, 4253 &sctp_ifap->address.sin6.sin6_addr)) { 4254 /* found him. 
*/ 4255 if (holds_lock == 0) 4256 SCTP_IPI_ADDR_UNLOCK(); 4257 return (sctp_ifap); 4258 break; 4259 } 4260 } 4261 } 4262 if (holds_lock == 0) 4263 SCTP_IPI_ADDR_UNLOCK(); 4264 return (NULL); 4265 } 4266 4267 uint32_t 4268 sctp_get_ifa_hash_val(struct sockaddr *addr) 4269 { 4270 4271 if (addr->sa_family == AF_INET) { 4272 struct sockaddr_in *sin; 4273 4274 sin = (struct sockaddr_in *)addr; 4275 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 4276 } else if (addr->sa_family == AF_INET6) { 4277 struct sockaddr_in6 *sin6; 4278 uint32_t hash_of_addr; 4279 4280 sin6 = (struct sockaddr_in6 *)addr; 4281 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 4282 sin6->sin6_addr.s6_addr32[1] + 4283 sin6->sin6_addr.s6_addr32[2] + 4284 sin6->sin6_addr.s6_addr32[3]); 4285 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 4286 return (hash_of_addr); 4287 } 4288 return (0); 4289 } 4290 4291 struct sctp_ifa * 4292 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 4293 { 4294 struct sctp_ifa *sctp_ifap; 4295 struct sctp_vrf *vrf; 4296 struct sctp_ifalist *hash_head; 4297 uint32_t hash_of_addr; 4298 4299 if (holds_lock == 0) 4300 SCTP_IPI_ADDR_LOCK(); 4301 4302 vrf = sctp_find_vrf(vrf_id); 4303 if (vrf == NULL) { 4304 if (holds_lock == 0) 4305 SCTP_IPI_ADDR_UNLOCK(); 4306 return (NULL); 4307 } 4308 hash_of_addr = sctp_get_ifa_hash_val(addr); 4309 4310 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_hashmark)]; 4311 if (hash_head == NULL) { 4312 printf("hash_of_addr:%x mask:%x table:%x - ", 4313 (u_int)hash_of_addr, (u_int)vrf->vrf_hashmark, 4314 (u_int)(hash_of_addr & vrf->vrf_hashmark)); 4315 sctp_print_address(addr); 4316 printf("No such bucket for address\n"); 4317 if (holds_lock == 0) 4318 SCTP_IPI_ADDR_UNLOCK(); 4319 4320 return (NULL); 4321 } 4322 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 4323 if (sctp_ifap == NULL) { 4324 panic("Huh LIST_FOREACH corrupt"); 4325 } 4326 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 4327 continue; 4328 if (addr->sa_family == AF_INET) { 4329 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 4330 sctp_ifap->address.sin.sin_addr.s_addr) { 4331 /* found him. */ 4332 if (holds_lock == 0) 4333 SCTP_IPI_ADDR_UNLOCK(); 4334 return (sctp_ifap); 4335 break; 4336 } 4337 } else if (addr->sa_family == AF_INET6) { 4338 if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr, 4339 &sctp_ifap->address.sin6.sin6_addr)) { 4340 /* found him. */ 4341 if (holds_lock == 0) 4342 SCTP_IPI_ADDR_UNLOCK(); 4343 return (sctp_ifap); 4344 break; 4345 } 4346 } 4347 } 4348 if (holds_lock == 0) 4349 SCTP_IPI_ADDR_UNLOCK(); 4350 return (NULL); 4351 } 4352 4353 static void 4354 sctp_user_rcvd(struct sctp_tcb *stcb, int *freed_so_far, int hold_rlock, 4355 uint32_t rwnd_req) 4356 { 4357 /* User pulled some data, do we need a rwnd update? 
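The caller only invokes this once it has freed at least rwnd_req bytes (a fraction of the socket receive buffer, floored at SCTP_MIN_RWND). If the recomputed rwnd has not grown by at least rwnd_req we just remember the amount in freed_by_sorcv_sincelast; otherwise we send a window-update SACK right away.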
*/ 4358 int r_unlocked = 0; 4359 uint32_t dif, rwnd; 4360 struct socket *so = NULL; 4361 4362 if (stcb == NULL) 4363 return; 4364 4365 atomic_add_int(&stcb->asoc.refcnt, 1); 4366 4367 if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | 4368 SCTP_STATE_SHUTDOWN_RECEIVED | 4369 SCTP_STATE_SHUTDOWN_ACK_SENT) 4370 ) { 4371 /* Pre-check: if we are freeing, no update is needed */ 4372 goto no_lock; 4373 } 4374 SCTP_INP_INCR_REF(stcb->sctp_ep); 4375 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4376 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 4377 goto out; 4378 } 4379 so = stcb->sctp_socket; 4380 if (so == NULL) { 4381 goto out; 4382 } 4383 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 4384 /* Have you freed enough to look? */ 4385 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4386 sctp_misc_ints(SCTP_ENTER_USER_RECV, 4387 (stcb->asoc.my_rwnd - stcb->asoc.my_last_reported_rwnd), 4388 *freed_so_far, 4389 stcb->freed_by_sorcv_sincelast, 4390 rwnd_req); 4391 #endif 4392 *freed_so_far = 0; 4393 /* Yep, it's worth a look and the lock overhead */ 4394 4395 /* Figure out what the rwnd would be */ 4396 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 4397 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 4398 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 4399 } else { 4400 dif = 0; 4401 } 4402 if (dif >= rwnd_req) { 4403 if (hold_rlock) { 4404 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 4405 r_unlocked = 1; 4406 } 4407 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4408 /* 4409 * One last check before we allow the guy possibly 4410 * to get in. There is a race, where the guy has not 4411 * reached the gate. In that case we simply bail out. 4412 */ 4413 goto out; 4414 } 4415 SCTP_TCB_LOCK(stcb); 4416 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4417 /* No reports here */ 4418 SCTP_TCB_UNLOCK(stcb); 4419 goto out; 4420 } 4421 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4422 sctp_misc_ints(SCTP_USER_RECV_SACKS, 4423 stcb->asoc.my_rwnd, 4424 stcb->asoc.my_last_reported_rwnd, 4425 stcb->freed_by_sorcv_sincelast, 4426 dif); 4427 #endif 4428 SCTP_STAT_INCR(sctps_wu_sacks_sent); 4429 sctp_send_sack(stcb); 4430 sctp_chunk_output(stcb->sctp_ep, stcb, 4431 SCTP_OUTPUT_FROM_USR_RCVD); 4432 /* make sure no timer is running */ 4433 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 4434 SCTP_TCB_UNLOCK(stcb); 4435 } else { 4436 /* Update how much we have pending */ 4437 stcb->freed_by_sorcv_sincelast = dif; 4438 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4439 sctp_misc_ints(SCTP_USER_RECV_SACKS, 4440 stcb->asoc.my_rwnd, 4441 stcb->asoc.my_last_reported_rwnd, 4442 stcb->freed_by_sorcv_sincelast, 4443 0); 4444 #endif 4445 } 4446 out: 4447 if (so && r_unlocked && hold_rlock) { 4448 SCTP_INP_READ_LOCK(stcb->sctp_ep); 4449 } 4450 SCTP_INP_DECR_REF(stcb->sctp_ep); 4451 no_lock: 4452 atomic_add_int(&stcb->asoc.refcnt, -1); 4453 return; 4454 } 4455 4456 int 4457 sctp_sorecvmsg(struct socket *so, 4458 struct uio *uio, 4459 struct mbuf **mp, 4460 struct sockaddr *from, 4461 int fromlen, 4462 int *msg_flags, 4463 struct sctp_sndrcvinfo *sinfo, 4464 int filling_sinfo) 4465 { 4466 /* 4467 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO. 4468 * MSG_PEEK - Look, don't touch :-D (only valid with OUT mbuf copy 4469 * mp=NULL, thus uio is the copy method to userland). MSG_WAITALL - ??
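(as the code stands, MSG_WAITALL is only explicitly checked on the external-mbuf, mp != NULL, path below).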
4470 * On the way out we may send out any combination of: 4471 * MSG_NOTIFICATION MSG_EOR 4472 * 4473 */ 4474 struct sctp_inpcb *inp = NULL; 4475 int my_len = 0; 4476 int cp_len = 0, error = 0; 4477 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 4478 struct mbuf *m = NULL, *embuf = NULL; 4479 struct sctp_tcb *stcb = NULL; 4480 int wakeup_read_socket = 0; 4481 int freecnt_applied = 0; 4482 int out_flags = 0, in_flags = 0; 4483 int block_allowed = 1; 4484 int freed_so_far = 0; 4485 int copied_so_far = 0; 4486 int in_eeor_mode = 0; 4487 int no_rcv_needed = 0; 4488 uint32_t rwnd_req = 0; 4489 int hold_sblock = 0; 4490 int hold_rlock = 0; 4491 int alen = 0; 4492 int slen = 0; 4493 int held_length = 0; 4494 4495 if (msg_flags) { 4496 in_flags = *msg_flags; 4497 if (in_flags & MSG_PEEK) 4498 SCTP_STAT_INCR(sctps_read_peeks); 4499 } else { 4500 in_flags = 0; 4501 } 4502 slen = uio->uio_resid; 4503 /* Pull in and set up our int flags */ 4504 if (in_flags & MSG_OOB) { 4505 /* Out of band's NOT supported */ 4506 return (EOPNOTSUPP); 4507 } 4508 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 4509 return (EINVAL); 4510 } 4511 if ((in_flags & (MSG_DONTWAIT 4512 | MSG_NBIO 4513 )) || 4514 SCTP_SO_IS_NBIO(so)) { 4515 block_allowed = 0; 4516 } 4517 /* setup the endpoint */ 4518 inp = (struct sctp_inpcb *)so->so_pcb; 4519 if (inp == NULL) { 4520 return (EFAULT); 4521 } 4522 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 4523 /* Must be at least a MTU's worth */ 4524 if (rwnd_req < SCTP_MIN_RWND) 4525 rwnd_req = SCTP_MIN_RWND; 4526 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 4527 #ifdef SCTP_RECV_RWND_LOGGING 4528 sctp_misc_ints(SCTP_SORECV_ENTER, 4529 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid); 4530 #endif 4531 SOCKBUF_LOCK(&so->so_rcv); 4532 hold_sblock = 1; 4533 #ifdef SCTP_RECV_RWND_LOGGING 4534 sctp_misc_ints(SCTP_SORECV_ENTERPL, 4535 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid); 4536 #endif 4537 4538 4539 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0)); 4540 if (error) { 4541 goto release_unlocked; 4542 } 4543 restart: 4544 if (hold_sblock == 0) { 4545 SOCKBUF_LOCK(&so->so_rcv); 4546 hold_sblock = 1; 4547 } 4548 sbunlock(&so->so_rcv); 4549 4550 restart_nosblocks: 4551 if (hold_sblock == 0) { 4552 SOCKBUF_LOCK(&so->so_rcv); 4553 hold_sblock = 1; 4554 } 4555 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4556 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 4557 goto out; 4558 } 4559 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 4560 if (so->so_error) { 4561 error = so->so_error; 4562 if ((in_flags & MSG_PEEK) == 0) 4563 so->so_error = 0; 4564 } else { 4565 error = ENOTCONN; 4566 } 4567 goto out; 4568 } 4569 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) { 4570 /* we need to wait for data */ 4571 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4572 sctp_misc_ints(SCTP_SORECV_BLOCKSA, 4573 0, 0, so->so_rcv.sb_cc, uio->uio_resid); 4574 #endif 4575 if ((so->so_rcv.sb_cc == 0) && 4576 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 4577 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 4578 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 4579 /* 4580 * For active open side clear flags for 4581 * re-use passive open is blocked by 4582 * connect. 
4583 */ 4584 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 4585 /* 4586 * You were aborted, passive side 4587 * always hits here 4588 */ 4589 error = ECONNRESET; 4590 /* 4591 * You get this once if you are 4592 * active open side 4593 */ 4594 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4595 /* 4596 * Remove flag if on the 4597 * active open side 4598 */ 4599 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED; 4600 } 4601 } 4602 so->so_state &= ~(SS_ISCONNECTING | 4603 SS_ISDISCONNECTING | 4604 SS_ISCONFIRMING | 4605 SS_ISCONNECTED); 4606 if (error == 0) { 4607 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 4608 error = ENOTCONN; 4609 } else { 4610 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED; 4611 } 4612 } 4613 goto out; 4614 } 4615 } 4616 error = sbwait(&so->so_rcv); 4617 if (error) { 4618 goto out; 4619 } 4620 held_length = 0; 4621 goto restart_nosblocks; 4622 } else if (so->so_rcv.sb_cc == 0) { 4623 if (so->so_error) { 4624 error = so->so_error; 4625 if ((in_flags & MSG_PEEK) == 0) 4626 so->so_error = 0; 4627 } else { 4628 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 4629 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4630 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 4631 /* 4632 * For active open side clear flags 4633 * for re-use passive open is 4634 * blocked by connect. 4635 */ 4636 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 4637 /* 4638 * You were aborted, passive 4639 * side always hits here 4640 */ 4641 error = ECONNRESET; 4642 /* 4643 * You get this once if you 4644 * are active open side 4645 */ 4646 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4647 /* 4648 * Remove flag if on 4649 * the active open 4650 * side 4651 */ 4652 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED; 4653 } 4654 } 4655 so->so_state &= ~(SS_ISCONNECTING | 4656 SS_ISDISCONNECTING | 4657 SS_ISCONFIRMING | 4658 SS_ISCONNECTED); 4659 if (error == 0) { 4660 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 4661 error = ENOTCONN; 4662 } else { 4663 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED; 4664 } 4665 } 4666 goto out; 4667 } 4668 } 4669 error = EWOULDBLOCK; 4670 } 4671 goto out; 4672 } 4673 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0)); 4674 /* we possibly have data we can read */ 4675 control = TAILQ_FIRST(&inp->read_queue); 4676 if (control == NULL) { 4677 /* 4678 * This could be happening since the appender did the 4679 * increment but as not yet did the tailq insert onto the 4680 * read_queue 4681 */ 4682 if (hold_rlock == 0) { 4683 SCTP_INP_READ_LOCK(inp); 4684 hold_rlock = 1; 4685 } 4686 control = TAILQ_FIRST(&inp->read_queue); 4687 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 4688 #ifdef INVARIANTS 4689 panic("Huh, its non zero and nothing on control?"); 4690 #endif 4691 so->so_rcv.sb_cc = 0; 4692 } 4693 SCTP_INP_READ_UNLOCK(inp); 4694 hold_rlock = 0; 4695 goto restart; 4696 } 4697 if ((control->length == 0) && 4698 (control->do_not_ref_stcb)) { 4699 /* 4700 * Clean up code for freeing assoc that left behind a 4701 * pdapi.. maybe a peer in EEOR that just closed after 4702 * sending and never indicated a EOR. 4703 */ 4704 if (hold_rlock == 0) { 4705 hold_rlock = 1; 4706 SCTP_INP_READ_LOCK(inp); 4707 } 4708 control->held_length = 0; 4709 if (control->data) { 4710 /* Hmm there is data here .. 
fix */ 4711 struct mbuf *m; 4712 int cnt = 0; 4713 4714 m = control->data; 4715 while (m) { 4716 cnt += SCTP_BUF_LEN(m); 4717 if (SCTP_BUF_NEXT(m) == NULL) { 4718 control->tail_mbuf = m; 4719 control->end_added = 1; 4720 } 4721 m = SCTP_BUF_NEXT(m); 4722 } 4723 control->length = cnt; 4724 } else { 4725 /* remove it */ 4726 TAILQ_REMOVE(&inp->read_queue, control, next); 4727 /* Add back any hidden data */ 4728 sctp_free_remote_addr(control->whoFrom); 4729 sctp_free_a_readq(stcb, control); 4730 } 4731 if (hold_rlock) { 4732 hold_rlock = 0; 4733 SCTP_INP_READ_UNLOCK(inp); 4734 } 4735 goto restart; 4736 } 4737 if (control->length == 0) { 4738 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 4739 (filling_sinfo)) { 4740 /* find a more suitable one than this */ 4741 ctl = TAILQ_NEXT(control, next); 4742 while (ctl) { 4743 if ((ctl->stcb != control->stcb) && (ctl->length)) { 4744 /* found one */ 4745 control = ctl; 4746 goto found_one; 4747 } 4748 ctl = TAILQ_NEXT(ctl, next); 4749 } 4750 } 4751 /* 4752 * if we reach here, no suitable replacement is available 4753 * <or> fragment interleave is NOT on. So stuff the sb_cc 4754 * into our held count, and it's time to sleep again. 4755 */ 4756 held_length = so->so_rcv.sb_cc; 4757 control->held_length = so->so_rcv.sb_cc; 4758 goto restart; 4759 } 4760 /* Clear the held length since there is something to read */ 4761 control->held_length = 0; 4762 if (hold_rlock) { 4763 SCTP_INP_READ_UNLOCK(inp); 4764 hold_rlock = 0; 4765 } 4766 found_one: 4767 /* 4768 * If we reach here, control has some data for us to read off. 4769 * Note that stcb COULD be NULL. 4770 */ 4771 if (hold_sblock) { 4772 SOCKBUF_UNLOCK(&so->so_rcv); 4773 hold_sblock = 0; 4774 } 4775 stcb = control->stcb; 4776 if (stcb) { 4777 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 4778 (control->do_not_ref_stcb == 0)) { 4779 if (freecnt_applied == 0) 4780 stcb = NULL; 4781 } else if (control->do_not_ref_stcb == 0) { 4782 /* you can't free it on me please */ 4783 /* 4784 * The lock on the socket buffer protects us so the 4785 * free code will stop. But since we used the 4786 * socketbuf lock and the sender uses the tcb_lock 4787 * to increment, we need to use the atomic add to 4788 * the refcnt 4789 */ 4790 atomic_add_int(&stcb->asoc.refcnt, 1); 4791 freecnt_applied = 1; 4792 /* 4793 * Setup to remember how much we have not yet told 4794 * the peer our rwnd has opened up. Note we grab the 4795 * value from the tcb from last time. Note too that 4796 * sack sending clears this when a sack is sent.. 4797 * which is fine. Once we hit the rwnd_req, we then 4798 * will go to the sctp_user_rcvd() that will not 4799 * lock until it KNOWs it MUST send a WUP-SACK.
4800 * 4801 */ 4802 freed_so_far = stcb->freed_by_sorcv_sincelast; 4803 stcb->freed_by_sorcv_sincelast = 0; 4804 } 4805 } 4806 /* First lets get off the sinfo and sockaddr info */ 4807 if ((sinfo) && filling_sinfo) { 4808 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo)); 4809 nxt = TAILQ_NEXT(control, next); 4810 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) { 4811 struct sctp_extrcvinfo *s_extra; 4812 4813 s_extra = (struct sctp_extrcvinfo *)sinfo; 4814 if (nxt) { 4815 s_extra->next_flags = SCTP_NEXT_MSG_AVAIL; 4816 if (nxt->sinfo_flags & SCTP_UNORDERED) { 4817 s_extra->next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 4818 } 4819 if (nxt->spec_flags & M_NOTIFICATION) { 4820 s_extra->next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 4821 } 4822 s_extra->next_asocid = nxt->sinfo_assoc_id; 4823 s_extra->next_length = nxt->length; 4824 s_extra->next_ppid = nxt->sinfo_ppid; 4825 s_extra->next_stream = nxt->sinfo_stream; 4826 if (nxt->tail_mbuf != NULL) { 4827 if (nxt->end_added) { 4828 s_extra->next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 4829 } 4830 } 4831 } else { 4832 /* 4833 * we explicitly 0 this, since the memcpy 4834 * got some other things beyond the older 4835 * sinfo_ that is on the control's structure 4836 * :-D 4837 */ 4838 s_extra->next_flags = SCTP_NO_NEXT_MSG; 4839 s_extra->next_asocid = 0; 4840 s_extra->next_length = 0; 4841 s_extra->next_ppid = 0; 4842 s_extra->next_stream = 0; 4843 } 4844 } 4845 /* 4846 * update off the real current cum-ack, if we have an stcb. 4847 */ 4848 if (stcb) 4849 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 4850 /* 4851 * mask off the high bits, we keep the actual chunk bits in 4852 * there. 4853 */ 4854 sinfo->sinfo_flags &= 0x00ff; 4855 } 4856 if (fromlen && from) { 4857 struct sockaddr *to; 4858 4859 #ifdef INET 4860 cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin.sin_len); 4861 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 4862 ((struct sockaddr_in *)from)->sin_port = control->port_from; 4863 #else 4864 /* No AF_INET use AF_INET6 */ 4865 cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin6.sin6_len); 4866 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 4867 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from; 4868 #endif 4869 4870 to = from; 4871 #if defined(INET) && defined(INET6) 4872 if ((inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) && 4873 (to->sa_family == AF_INET) && 4874 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) { 4875 struct sockaddr_in *sin; 4876 struct sockaddr_in6 sin6; 4877 4878 sin = (struct sockaddr_in *)to; 4879 bzero(&sin6, sizeof(sin6)); 4880 sin6.sin6_family = AF_INET6; 4881 sin6.sin6_len = sizeof(struct sockaddr_in6); 4882 sin6.sin6_addr.s6_addr16[2] = 0xffff; 4883 bcopy(&sin->sin_addr, 4884 &sin6.sin6_addr.s6_addr16[3], 4885 sizeof(sin6.sin6_addr.s6_addr16[3])); 4886 sin6.sin6_port = sin->sin_port; 4887 memcpy(from, (caddr_t)&sin6, sizeof(sin6)); 4888 } 4889 #endif 4890 #if defined(INET6) 4891 { 4892 struct sockaddr_in6 lsa6, *to6; 4893 4894 to6 = (struct sockaddr_in6 *)to; 4895 sctp_recover_scope_mac(to6, (&lsa6)); 4896 } 4897 #endif 4898 } 4899 /* now copy out what data we can */ 4900 if (mp == NULL) { 4901 /* copy out each mbuf in the chain up to length */ 4902 get_more_data: 4903 m = control->data; 4904 while (m) { 4905 /* Move out all we can */ 4906 cp_len = (int)uio->uio_resid; 4907 my_len = (int)SCTP_BUF_LEN(m); 4908 if (cp_len > my_len) { 4909 /* not enough in this buf */ 4910 cp_len = my_len; 4911 } 4912 if (hold_rlock) { 4913 SCTP_INP_READ_UNLOCK(inp); 4914 
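/* the inp read lock must not be held across uiomove() below; copying to userland can fault and sleep */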
hold_rlock = 0; 4915 } 4916 if (cp_len > 0) 4917 error = uiomove(mtod(m, char *), cp_len, uio); 4918 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4919 sctp_misc_ints(SCTP_SORCV_DOESCPY, 4920 so->so_rcv.sb_cc, 4921 cp_len, 4922 0, 4923 0); 4924 #endif 4925 /* re-read */ 4926 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4927 goto release; 4928 } 4929 if (stcb && 4930 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4931 no_rcv_needed = 1; 4932 } 4933 if (error) { 4934 /* error we are out of here */ 4935 goto release; 4936 } 4937 if ((SCTP_BUF_NEXT(m) == NULL) && 4938 (cp_len >= SCTP_BUF_LEN(m)) && 4939 ((control->end_added == 0) || 4940 (control->end_added && (TAILQ_NEXT(control, next) == NULL))) 4941 ) { 4942 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4943 sctp_misc_ints(SCTP_SORCV_DOESLCK, 4944 so->so_rcv.sb_cc, 4945 cp_len, 4946 SCTP_BUF_LEN(m), 4947 control->length); 4948 #endif 4949 SCTP_INP_READ_LOCK(inp); 4950 hold_rlock = 1; 4951 } 4952 if (cp_len == SCTP_BUF_LEN(m)) { 4953 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4954 sctp_misc_ints(SCTP_SORCV_DOESADJ, 4955 so->so_rcv.sb_cc, 4956 control->length, 4957 cp_len, 4958 0); 4959 #endif 4960 if ((SCTP_BUF_NEXT(m) == NULL) && 4961 (control->end_added)) { 4962 out_flags |= MSG_EOR; 4963 } 4964 if (control->spec_flags & M_NOTIFICATION) { 4965 out_flags |= MSG_NOTIFICATION; 4966 } 4967 /* we ate up the mbuf */ 4968 if (in_flags & MSG_PEEK) { 4969 /* just looking */ 4970 m = SCTP_BUF_NEXT(m); 4971 copied_so_far += cp_len; 4972 } else { 4973 /* dispose of the mbuf */ 4974 #ifdef SCTP_SB_LOGGING 4975 sctp_sblog(&so->so_rcv, 4976 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4977 #endif 4978 sctp_sbfree(control, stcb, &so->so_rcv, m); 4979 #ifdef SCTP_SB_LOGGING 4980 sctp_sblog(&so->so_rcv, 4981 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4982 #endif 4983 embuf = m; 4984 copied_so_far += cp_len; 4985 freed_so_far += cp_len; 4986 alen = atomic_fetchadd_int(&control->length, -(cp_len)); 4987 if (alen < cp_len) { 4988 panic("Control length goes negative?"); 4989 } 4990 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4991 sctp_misc_ints(SCTP_SORCV_PASSBF, 4992 so->so_rcv.sb_cc, 4993 control->length, 4994 0, 4995 0); 4996 #endif 4997 control->data = sctp_m_free(m); 4998 m = control->data; 4999 /* 5000 * been through it all, must hold sb 5001 * lock ok to null tail 5002 */ 5003 if (control->data == NULL) { 5004 #ifdef INVARIANTS 5005 if ((control->end_added == 0) || 5006 (TAILQ_NEXT(control, next) == NULL)) { 5007 /* 5008 * If the end is not 5009 * added, OR the 5010 * next is NOT null 5011 * we MUST have the 5012 * lock. 5013 */ 5014 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 5015 panic("Hmm we don't own the lock?"); 5016 } 5017 } 5018 #endif 5019 control->tail_mbuf = NULL; 5020 #ifdef INVARIANTS 5021 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 5022 panic("end_added, nothing left and no MSG_EOR"); 5023 } 5024 #endif 5025 } 5026 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 5027 sctp_misc_ints(SCTP_SORCV_ADJD, 5028 so->so_rcv.sb_cc, 5029 control->length, 5030 0, 5031 0); 5032 #endif 5033 } 5034 } else { 5035 /* Do we need to trim the mbuf? */ 5036 if (control->spec_flags & M_NOTIFICATION) { 5037 out_flags |= MSG_NOTIFICATION; 5038 } 5039 if ((in_flags & MSG_PEEK) == 0) { 5040 SCTP_BUF_RESV_UF(m, cp_len); 5041 SCTP_BUF_LEN(m) -= cp_len; 5042 #ifdef SCTP_SB_LOGGING 5043 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5044 #endif 5045 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5046 if (stcb) { 5047 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5048 } 5049 copied_so_far += cp_len; 5050 embuf = m; 5051 freed_so_far += cp_len; 5052 #ifdef SCTP_SB_LOGGING 5053 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5054 SCTP_LOG_SBRESULT, 0); 5055 #endif 5056 alen = atomic_fetchadd_int(&control->length, -(cp_len)); 5057 if (alen < cp_len) { 5058 panic("Control length goes negative2?"); 5059 } 5060 } else { 5061 copied_so_far += cp_len; 5062 } 5063 } 5064 if ((out_flags & MSG_EOR) || 5065 (uio->uio_resid == 0) 5066 ) { 5067 break; 5068 } 5069 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5070 (control->do_not_ref_stcb == 0) && 5071 (freed_so_far >= rwnd_req)) { 5072 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5073 } 5074 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 5075 sctp_misc_ints(SCTP_SORCV_BOTWHILE, 5076 so->so_rcv.sb_cc, 5077 control->length, 5078 0, 5079 0); 5080 #endif 5081 5082 } /* end while(m) */ 5083 /* 5084 * At this point we have looked at it all and we either have 5085 * a MSG_EOR/or read all the user wants... <OR> 5086 * control->length == 0. 5087 */ 5088 if ((out_flags & MSG_EOR) && 5089 ((in_flags & MSG_PEEK) == 0)) { 5090 /* we are done with this control */ 5091 if (control->length == 0) { 5092 if (control->data) { 5093 #ifdef INVARIANTS 5094 panic("control->data not null at read eor?"); 5095 #else 5096 printf("Strange, data left in the control buffer .. invarients would panic?\n"); 5097 sctp_m_freem(control->data); 5098 control->data = NULL; 5099 #endif 5100 } 5101 done_with_control: 5102 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 5103 sctp_misc_ints(SCTP_SORCV_FREECTL, 5104 so->so_rcv.sb_cc, 5105 0, 5106 0, 5107 0); 5108 #endif 5109 if (TAILQ_NEXT(control, next) == NULL) { 5110 /* 5111 * If we don't have a next we need a 5112 * lock, if there is a next interupt 5113 * is filling ahead of us and we 5114 * don't need a lock to remove this 5115 * guy (which is the head of the 5116 * queue). 5117 */ 5118 if (hold_rlock == 0) { 5119 SCTP_INP_READ_LOCK(inp); 5120 hold_rlock = 1; 5121 } 5122 } 5123 TAILQ_REMOVE(&inp->read_queue, control, next); 5124 /* Add back any hiddend data */ 5125 if (control->held_length) { 5126 held_length = 0; 5127 control->held_length = 0; 5128 wakeup_read_socket = 1; 5129 } 5130 no_rcv_needed = control->do_not_ref_stcb; 5131 sctp_free_remote_addr(control->whoFrom); 5132 control->data = NULL; 5133 sctp_free_a_readq(stcb, control); 5134 control = NULL; 5135 if ((freed_so_far >= rwnd_req) && (no_rcv_needed == 0)) 5136 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5137 5138 } else { 5139 /* 5140 * The user did not read all of this 5141 * message, turn off the returned MSG_EOR 5142 * since we are leaving more behind on the 5143 * control to read. 5144 */ 5145 #ifdef INVARIANTS 5146 if (control->end_added && (control->data == NULL) && 5147 (control->tail_mbuf == NULL)) { 5148 panic("Gak, control->length is corrupt?"); 5149 } 5150 #endif 5151 no_rcv_needed = control->do_not_ref_stcb; 5152 out_flags &= ~MSG_EOR; 5153 } 5154 } 5155 if (out_flags & MSG_EOR) { 5156 goto release; 5157 } 5158 if ((uio->uio_resid == 0) || 5159 ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1))) 5160 ) { 5161 goto release; 5162 } 5163 /* 5164 * If I hit here the receiver wants more and this message is 5165 * NOT done (pd-api). So two questions. Can we block? if not 5166 * we are done. 
Did the user NOT set MSG_WAITALL? 5167 */ 5168 if (block_allowed == 0) { 5169 goto release; 5170 } 5171 /* 5172 * We need to wait for more data a few things: - We don't 5173 * sbunlock() so we don't get someone else reading. - We 5174 * must be sure to account for the case where what is added 5175 * is NOT to our control when we wakeup. 5176 */ 5177 5178 /* 5179 * Do we need to tell the transport a rwnd update might be 5180 * needed before we go to sleep? 5181 */ 5182 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5183 ((freed_so_far >= rwnd_req) && 5184 (control->do_not_ref_stcb == 0) && 5185 (no_rcv_needed == 0))) { 5186 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5187 } 5188 wait_some_more: 5189 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5190 goto release; 5191 } 5192 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5193 goto release; 5194 5195 if (hold_rlock == 1) { 5196 SCTP_INP_READ_UNLOCK(inp); 5197 hold_rlock = 0; 5198 } 5199 if (hold_sblock == 0) { 5200 SOCKBUF_LOCK(&so->so_rcv); 5201 hold_sblock = 1; 5202 } 5203 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 5204 if (stcb) 5205 sctp_misc_ints(SCTP_SORECV_BLOCKSB, 5206 freed_so_far, 5207 stcb->asoc.my_rwnd, 5208 so->so_rcv.sb_cc, 5209 uio->uio_resid); 5210 else 5211 sctp_misc_ints(SCTP_SORECV_BLOCKSB, 5212 freed_so_far, 5213 0, 5214 so->so_rcv.sb_cc, 5215 uio->uio_resid); 5216 #endif 5217 if (so->so_rcv.sb_cc <= control->held_length) { 5218 error = sbwait(&so->so_rcv); 5219 if (error) { 5220 goto release; 5221 } 5222 control->held_length = 0; 5223 } 5224 if (hold_sblock) { 5225 SOCKBUF_UNLOCK(&so->so_rcv); 5226 hold_sblock = 0; 5227 } 5228 if (control->length == 0) { 5229 /* still nothing here */ 5230 if (control->end_added == 1) { 5231 /* he aborted, or is done i.e.did a shutdown */ 5232 out_flags |= MSG_EOR; 5233 if (control->pdapi_aborted) 5234 out_flags |= MSG_TRUNC; 5235 goto done_with_control; 5236 } 5237 if (so->so_rcv.sb_cc > held_length) { 5238 control->held_length = so->so_rcv.sb_cc; 5239 held_length = 0; 5240 } 5241 goto wait_some_more; 5242 } else if (control->data == NULL) { 5243 /* 5244 * we must re-sync since data is probably being 5245 * added 5246 */ 5247 SCTP_INP_READ_LOCK(inp); 5248 if ((control->length > 0) && (control->data == NULL)) { 5249 /* 5250 * big trouble.. we have the lock and its 5251 * corrupt? 5252 */ 5253 panic("Impossible data==NULL length !=0"); 5254 } 5255 SCTP_INP_READ_UNLOCK(inp); 5256 /* We will fall around to get more data */ 5257 } 5258 goto get_more_data; 5259 } else { 5260 /* copy out the mbuf chain */ 5261 get_more_data2: 5262 /* 5263 * Do we have a uio, I doubt it if so we grab the size from 5264 * it, if not you get it all 5265 */ 5266 if (uio) 5267 cp_len = uio->uio_resid; 5268 else 5269 cp_len = control->length; 5270 5271 if ((uint32_t) cp_len >= control->length) { 5272 /* easy way */ 5273 if ((control->end_added == 0) || 5274 (TAILQ_NEXT(control, next) == NULL)) { 5275 /* Need to get rlock */ 5276 if (hold_rlock == 0) { 5277 SCTP_INP_READ_LOCK(inp); 5278 hold_rlock = 1; 5279 } 5280 } 5281 if (control->end_added) { 5282 out_flags |= MSG_EOR; 5283 } 5284 if (control->spec_flags & M_NOTIFICATION) { 5285 out_flags |= MSG_NOTIFICATION; 5286 } 5287 if (uio) 5288 uio->uio_resid -= control->length; 5289 *mp = control->data; 5290 m = control->data; 5291 while (m) { 5292 #ifdef SCTP_SB_LOGGING 5293 sctp_sblog(&so->so_rcv, 5294 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5295 #endif 5296 sctp_sbfree(control, stcb, &so->so_rcv, m); 5297 freed_so_far += SCTP_BUF_LEN(m); 5298 #ifdef SCTP_SB_LOGGING 5299 sctp_sblog(&so->so_rcv, 5300 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5301 #endif 5302 m = SCTP_BUF_NEXT(m); 5303 } 5304 control->data = control->tail_mbuf = NULL; 5305 control->length = 0; 5306 if (out_flags & MSG_EOR) { 5307 /* Done with this control */ 5308 goto done_with_control; 5309 } 5310 /* still more to do with this conntrol */ 5311 /* do we really support msg_waitall here? */ 5312 if ((block_allowed == 0) || 5313 ((in_flags & MSG_WAITALL) == 0)) { 5314 goto release; 5315 } 5316 wait_some_more2: 5317 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 5318 goto release; 5319 if (hold_rlock == 1) { 5320 SCTP_INP_READ_UNLOCK(inp); 5321 hold_rlock = 0; 5322 } 5323 if (hold_sblock == 0) { 5324 SOCKBUF_LOCK(&so->so_rcv); 5325 hold_sblock = 1; 5326 } 5327 if (so->so_rcv.sb_cc <= control->held_length) { 5328 error = sbwait(&so->so_rcv); 5329 if (error) { 5330 goto release; 5331 } 5332 } 5333 if (hold_sblock) { 5334 SOCKBUF_UNLOCK(&so->so_rcv); 5335 hold_sblock = 0; 5336 } 5337 if (control->length == 0) { 5338 /* still nothing here */ 5339 if (control->end_added == 1) { 5340 /* 5341 * he aborted, or is done i.e. 5342 * shutdown 5343 */ 5344 out_flags |= MSG_EOR; 5345 if (control->pdapi_aborted) 5346 out_flags |= MSG_TRUNC; 5347 goto done_with_control; 5348 } 5349 if (so->so_rcv.sb_cc > held_length) { 5350 control->held_length = so->so_rcv.sb_cc; 5351 /* 5352 * We don't use held_length while 5353 * getting a message 5354 */ 5355 held_length = 0; 5356 } 5357 goto wait_some_more2; 5358 } 5359 goto get_more_data2; 5360 } else { 5361 /* hard way mbuf by mbuf */ 5362 m = control->data; 5363 if (control->end_added == 0) { 5364 /* need the rlock */ 5365 if (hold_rlock == 0) { 5366 SCTP_INP_READ_LOCK(inp); 5367 hold_rlock = 1; 5368 } 5369 } 5370 if (control->spec_flags & M_NOTIFICATION) { 5371 out_flags |= MSG_NOTIFICATION; 5372 } 5373 while ((m) && (cp_len > 0)) { 5374 if (cp_len >= SCTP_BUF_LEN(m)) { 5375 *mp = m; 5376 atomic_subtract_int(&control->length, SCTP_BUF_LEN(m)); 5377 if (uio) 5378 uio->uio_resid -= SCTP_BUF_LEN(m); 5379 cp_len -= SCTP_BUF_LEN(m); 5380 control->data = SCTP_BUF_NEXT(m); 5381 SCTP_BUF_NEXT(m) = NULL; 5382 #ifdef SCTP_SB_LOGGING 5383 sctp_sblog(&so->so_rcv, 5384 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5385 #endif 5386 sctp_sbfree(control, stcb, &so->so_rcv, m); 5387 freed_so_far += SCTP_BUF_LEN(m); 5388 #ifdef SCTP_SB_LOGGING 5389 sctp_sblog(&so->so_rcv, 5390 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5391 #endif 5392 mp = &SCTP_BUF_NEXT(m); 5393 m = control->data; 5394 } else { 5395 /* 5396 * got all he wants and its part of 5397 * this mbuf only. 
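* We copy the requested bytes out with SCTP_M_COPYM() below and trim them off the front of this mbuf, leaving the remainder on the control.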
5398 */ 5399 if (uio) 5400 uio->uio_resid -= SCTP_BUF_LEN(m); 5401 cp_len -= SCTP_BUF_LEN(m); 5402 if (hold_rlock) { 5403 SCTP_INP_READ_UNLOCK(inp); 5404 hold_rlock = 0; 5405 } 5406 if (hold_sblock) { 5407 SOCKBUF_UNLOCK(&so->so_rcv); 5408 hold_sblock = 0; 5409 } 5410 *mp = SCTP_M_COPYM(m, 0, cp_len, 5411 M_TRYWAIT 5412 ); 5413 #ifdef SCTP_LOCK_LOGGING 5414 sctp_log_lock(inp, stcb, SCTP_LOG_LOCK_SOCKBUF_R); 5415 #endif 5416 if (hold_sblock == 0) { 5417 SOCKBUF_LOCK(&so->so_rcv); 5418 hold_sblock = 1; 5419 } 5420 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5421 goto release; 5422 5423 if (stcb && 5424 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5425 no_rcv_needed = 1; 5426 } 5427 SCTP_BUF_RESV_UF(m, cp_len); 5428 SCTP_BUF_LEN(m) -= cp_len; 5429 #ifdef SCTP_SB_LOGGING 5430 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5431 #endif 5432 freed_so_far += cp_len; 5433 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5434 if (stcb) { 5435 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5436 if ((freed_so_far >= rwnd_req) && 5437 (control->do_not_ref_stcb == 0) && 5438 (no_rcv_needed == 0)) 5439 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5440 } 5441 #ifdef SCTP_SB_LOGGING 5442 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5443 SCTP_LOG_SBRESULT, 0); 5444 #endif 5445 goto release; 5446 } 5447 } 5448 } 5449 } 5450 release: 5451 if (hold_rlock == 1) { 5452 SCTP_INP_READ_UNLOCK(inp); 5453 hold_rlock = 0; 5454 } 5455 if (hold_sblock == 0) { 5456 SOCKBUF_LOCK(&so->so_rcv); 5457 hold_sblock = 1; 5458 } 5459 sbunlock(&so->so_rcv); 5460 5461 release_unlocked: 5462 if (hold_sblock) { 5463 SOCKBUF_UNLOCK(&so->so_rcv); 5464 hold_sblock = 0; 5465 } 5466 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 5467 if ((freed_so_far >= rwnd_req) && 5468 (control && (control->do_not_ref_stcb == 0)) && 5469 (no_rcv_needed == 0)) 5470 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5471 } 5472 if (msg_flags) 5473 *msg_flags |= out_flags; 5474 out: 5475 if (hold_rlock == 1) { 5476 SCTP_INP_READ_UNLOCK(inp); 5477 hold_rlock = 0; 5478 } 5479 if (hold_sblock) { 5480 SOCKBUF_UNLOCK(&so->so_rcv); 5481 hold_sblock = 0; 5482 } 5483 if (freecnt_applied) { 5484 /* 5485 * The lock on the socket buffer protects us so the free 5486 * code will stop. But since we used the socketbuf lock and 5487 * the sender uses the tcb_lock to increment, we need to use 5488 * the atomic add to the refcnt. 5489 */ 5490 if (stcb == NULL) { 5491 panic("stcb for refcnt has gone NULL?"); 5492 } 5493 atomic_add_int(&stcb->asoc.refcnt, -1); 5494 freecnt_applied = 0; 5495 /* Save the value back for next time */ 5496 stcb->freed_by_sorcv_sincelast = freed_so_far; 5497 } 5498 #ifdef SCTP_RECV_RWND_LOGGING 5499 if (stcb) { 5500 sctp_misc_ints(SCTP_SORECV_DONE, 5501 freed_so_far, 5502 ((uio) ? (slen - uio->uio_resid) : slen), 5503 stcb->asoc.my_rwnd, 5504 so->so_rcv.sb_cc); 5505 } else { 5506 sctp_misc_ints(SCTP_SORECV_DONE, 5507 freed_so_far, 5508 ((uio) ? 
(slen - uio->uio_resid) : slen), 5509 0, 5510 so->so_rcv.sb_cc); 5511 } 5512 #endif 5513 if (wakeup_read_socket) { 5514 sctp_sorwakeup(inp, so); 5515 } 5516 return (error); 5517 } 5518 5519 5520 #ifdef SCTP_MBUF_LOGGING 5521 struct mbuf * 5522 sctp_m_free(struct mbuf *m) 5523 { 5524 if (SCTP_BUF_IS_EXTENDED(m)) { 5525 sctp_log_mb(m, SCTP_MBUF_IFREE); 5526 } 5527 return (m_free(m)); 5528 } 5529 5530 void 5531 sctp_m_freem(struct mbuf *mb) 5532 { 5533 while (mb != NULL) 5534 mb = sctp_m_free(mb); 5535 } 5536 5537 #endif 5538 5539 int 5540 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 5541 { 5542 /* 5543 * Given a local address. For all associations that holds the 5544 * address, request a peer-set-primary. 5545 */ 5546 struct sctp_ifa *ifa; 5547 struct sctp_laddr *wi; 5548 5549 ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0); 5550 if (ifa == NULL) { 5551 return (EADDRNOTAVAIL); 5552 } 5553 /* 5554 * Now that we have the ifa we must awaken the iterator with this 5555 * message. 5556 */ 5557 wi = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr); 5558 if (wi == NULL) { 5559 return (ENOMEM); 5560 } 5561 /* Now incr the count and int wi structure */ 5562 SCTP_INCR_LADDR_COUNT(); 5563 bzero(wi, sizeof(*wi)); 5564 wi->ifa = ifa; 5565 wi->action = SCTP_SET_PRIM_ADDR; 5566 atomic_add_int(&ifa->refcount, 1); 5567 5568 /* Now add it to the work queue */ 5569 SCTP_IPI_ITERATOR_WQ_LOCK(); 5570 /* 5571 * Should this really be a tailq? As it is we will process the 5572 * newest first :-0 5573 */ 5574 LIST_INSERT_HEAD(&sctppcbinfo.addr_wq, wi, sctp_nxt_addr); 5575 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 5576 (struct sctp_inpcb *)NULL, 5577 (struct sctp_tcb *)NULL, 5578 (struct sctp_nets *)NULL); 5579 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 5580 return (0); 5581 } 5582 5583 5584 5585 5586 int 5587 sctp_soreceive(so, psa, uio, mp0, controlp, flagsp) 5588 struct socket *so; 5589 struct sockaddr **psa; 5590 struct uio *uio; 5591 struct mbuf **mp0; 5592 struct mbuf **controlp; 5593 int *flagsp; 5594 { 5595 int error, fromlen; 5596 uint8_t sockbuf[256]; 5597 struct sockaddr *from; 5598 struct sctp_extrcvinfo sinfo; 5599 int filling_sinfo = 1; 5600 struct sctp_inpcb *inp; 5601 5602 inp = (struct sctp_inpcb *)so->so_pcb; 5603 /* pickup the assoc we are reading from */ 5604 if (inp == NULL) { 5605 return (EINVAL); 5606 } 5607 if ((sctp_is_feature_off(inp, 5608 SCTP_PCB_FLAGS_RECVDATAIOEVNT)) || 5609 (controlp == NULL)) { 5610 /* user does not want the sndrcv ctl */ 5611 filling_sinfo = 0; 5612 } 5613 if (psa) { 5614 from = (struct sockaddr *)sockbuf; 5615 fromlen = sizeof(sockbuf); 5616 from->sa_len = 0; 5617 } else { 5618 from = NULL; 5619 fromlen = 0; 5620 } 5621 5622 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp, 5623 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 5624 if ((controlp) && (filling_sinfo)) { 5625 /* copy back the sinfo in a CMSG format */ 5626 if (filling_sinfo) 5627 *controlp = sctp_build_ctl_nchunk(inp, 5628 (struct sctp_sndrcvinfo *)&sinfo); 5629 else 5630 *controlp = NULL; 5631 } 5632 if (psa) { 5633 /* copy back the address info */ 5634 if (from && from->sa_len) { 5635 *psa = sodupsockaddr(from, M_NOWAIT); 5636 } else { 5637 *psa = NULL; 5638 } 5639 } 5640 return (error); 5641 } 5642
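/*
 * Illustrative sketch (not part of the build): sctp_soreceive() above is
 * normally reached through the socket layer rather than called directly,
 * so the snippet below only shows, under that assumption, how a
 * kernel-side caller would be expected to hand it its arguments; auio
 * stands in for a uio the socket layer has already set up.
 *
 *	struct sockaddr *psa = NULL;
 *	struct mbuf *ctl = NULL;
 *	int flags = MSG_DONTWAIT;
 *	int error;
 *
 *	error = sctp_soreceive(so, &psa, &auio, NULL, &ctl, &flags);
 *
 * On return, flags carries the out_flags from sctp_sorecvmsg() (MSG_EOR,
 * MSG_NOTIFICATION, ...), *psa holds a duplicated peer address when one
 * was available, and *ctl is only filled with a sndrcvinfo control mbuf
 * when the endpoint has SCTP_PCB_FLAGS_RECVDATAIOEVNT enabled and a
 * control pointer was passed in; otherwise filling_sinfo stays 0 and it
 * is left untouched.
 */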