1 /*- 2 * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <netinet/sctp_pcb.h> 38 #include <netinet/sctputil.h> 39 #include <netinet/sctp_var.h> 40 #include <netinet/sctp_sysctl.h> 41 #ifdef INET6 42 #include <netinet6/sctp6_var.h> 43 #endif 44 #include <netinet/sctp_header.h> 45 #include <netinet/sctp_output.h> 46 #include <netinet/sctp_uio.h> 47 #include <netinet/sctp_timer.h> 48 #include <netinet/sctp_crc32.h> 49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */ 50 #include <netinet/sctp_auth.h> 51 #include <netinet/sctp_asconf.h> 52 53 #define NUMBER_OF_MTU_SIZES 18 54 55 56 #ifdef SCTP_STAT_LOGGING 57 int global_sctp_cwnd_log_at = 0; 58 int global_sctp_cwnd_log_rolled = 0; 59 struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE]; 60 61 static uint32_t 62 sctp_get_time_of_event(void) 63 { 64 struct timeval now; 65 uint32_t timeval; 66 67 SCTP_GETPTIME_TIMEVAL(&now); 68 timeval = (now.tv_sec % 0x00000fff); 69 timeval <<= 20; 70 timeval |= now.tv_usec & 0xfffff; 71 return (timeval); 72 } 73 74 75 void 76 sctp_clr_stat_log(void) 77 { 78 global_sctp_cwnd_log_at = 0; 79 global_sctp_cwnd_log_rolled = 0; 80 } 81 82 83 void 84 sctp_sblog(struct sockbuf *sb, 85 struct sctp_tcb *stcb, int from, int incr) 86 { 87 int sctp_cwnd_log_at; 88 89 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 90 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 91 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 92 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SB; 93 sctp_clog[sctp_cwnd_log_at].x.sb.stcb = stcb; 94 sctp_clog[sctp_cwnd_log_at].x.sb.so_sbcc = sb->sb_cc; 95 if (stcb) 96 sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = stcb->asoc.sb_cc; 97 else 98 sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = 0; 99 sctp_clog[sctp_cwnd_log_at].x.sb.incr = incr; 100 } 101 102 void 
103 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc) 104 { 105 int sctp_cwnd_log_at; 106 107 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 108 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 109 sctp_clog[sctp_cwnd_log_at].from = 0; 110 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CLOSE; 111 sctp_clog[sctp_cwnd_log_at].x.close.inp = (void *)inp; 112 sctp_clog[sctp_cwnd_log_at].x.close.sctp_flags = inp->sctp_flags; 113 if (stcb) { 114 sctp_clog[sctp_cwnd_log_at].x.close.stcb = (void *)stcb; 115 sctp_clog[sctp_cwnd_log_at].x.close.state = (uint16_t) stcb->asoc.state; 116 } else { 117 sctp_clog[sctp_cwnd_log_at].x.close.stcb = 0; 118 sctp_clog[sctp_cwnd_log_at].x.close.state = 0; 119 } 120 sctp_clog[sctp_cwnd_log_at].x.close.loc = loc; 121 } 122 123 124 void 125 rto_logging(struct sctp_nets *net, int from) 126 { 127 int sctp_cwnd_log_at; 128 129 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 130 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 131 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 132 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RTT; 133 sctp_clog[sctp_cwnd_log_at].x.rto.net = (void *)net; 134 sctp_clog[sctp_cwnd_log_at].x.rto.rtt = net->prev_rtt; 135 } 136 137 void 138 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from) 139 { 140 int sctp_cwnd_log_at; 141 142 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 143 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 144 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 145 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM; 146 sctp_clog[sctp_cwnd_log_at].x.strlog.stcb = stcb; 147 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn; 148 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq; 149 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0; 150 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0; 151 sctp_clog[sctp_cwnd_log_at].x.strlog.strm = stream; 152 } 153 154 void 155 sctp_log_nagle_event(struct sctp_tcb *stcb, int action) 156 { 157 int sctp_cwnd_log_at; 158 159 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 160 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 161 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) action; 162 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_NAGLE; 163 sctp_clog[sctp_cwnd_log_at].x.nagle.stcb = (void *)stcb; 164 sctp_clog[sctp_cwnd_log_at].x.nagle.total_flight = stcb->asoc.total_flight; 165 sctp_clog[sctp_cwnd_log_at].x.nagle.total_in_queue = stcb->asoc.total_output_queue_size; 166 sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue; 167 sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_flight = stcb->asoc.total_flight_count; 168 } 169 170 171 void 172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from) 173 { 174 int sctp_cwnd_log_at; 175 176 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 177 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 178 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 179 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SACK; 180 sctp_clog[sctp_cwnd_log_at].x.sack.cumack = cumack; 181 sctp_clog[sctp_cwnd_log_at].x.sack.oldcumack = old_cumack; 182 sctp_clog[sctp_cwnd_log_at].x.sack.tsn = tsn; 183 sctp_clog[sctp_cwnd_log_at].x.sack.numGaps = gaps; 184 sctp_clog[sctp_cwnd_log_at].x.sack.numDups = dups; 185 } 186 187 void 188 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int 
from) 189 { 190 int sctp_cwnd_log_at; 191 192 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 193 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 194 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 195 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAP; 196 sctp_clog[sctp_cwnd_log_at].x.map.base = map; 197 sctp_clog[sctp_cwnd_log_at].x.map.cum = cum; 198 sctp_clog[sctp_cwnd_log_at].x.map.high = high; 199 } 200 201 void 202 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, 203 int from) 204 { 205 int sctp_cwnd_log_at; 206 207 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 208 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 209 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 210 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_FR; 211 sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn; 212 sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn; 213 sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn; 214 } 215 216 217 void 218 sctp_log_mb(struct mbuf *m, int from) 219 { 220 int sctp_cwnd_log_at; 221 222 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 223 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 224 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 225 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBUF; 226 sctp_clog[sctp_cwnd_log_at].x.mb.mp = m; 227 sctp_clog[sctp_cwnd_log_at].x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m)); 228 sctp_clog[sctp_cwnd_log_at].x.mb.size = (uint16_t) (SCTP_BUF_LEN(m)); 229 sctp_clog[sctp_cwnd_log_at].x.mb.data = SCTP_BUF_AT(m, 0); 230 if (SCTP_BUF_IS_EXTENDED(m)) { 231 sctp_clog[sctp_cwnd_log_at].x.mb.ext = SCTP_BUF_EXTEND_BASE(m); 232 sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m)); 233 } else { 234 sctp_clog[sctp_cwnd_log_at].x.mb.ext = 0; 235 sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = 0; 236 } 237 } 238 239 240 void 241 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, 242 int from) 243 { 244 int sctp_cwnd_log_at; 245 246 if (control == NULL) { 247 printf("Gak log of NULL?\n"); 248 return; 249 } 250 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 251 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 252 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 253 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM; 254 sctp_clog[sctp_cwnd_log_at].x.strlog.stcb = control->stcb; 255 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = control->sinfo_tsn; 256 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = control->sinfo_ssn; 257 sctp_clog[sctp_cwnd_log_at].x.strlog.strm = control->sinfo_stream; 258 if (poschk != NULL) { 259 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = poschk->sinfo_tsn; 260 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = poschk->sinfo_ssn; 261 } else { 262 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0; 263 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0; 264 } 265 } 266 267 void 268 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from) 269 { 270 int sctp_cwnd_log_at; 271 272 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 273 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 274 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 275 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CWND; 276 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net; 277 if (stcb->asoc.send_queue_cnt > 255) 278 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255; 279 else 280 
sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 281 if (stcb->asoc.stream_queue_cnt > 255) 282 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255; 283 else 284 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 285 286 if (net) { 287 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd; 288 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size; 289 sctp_clog[sctp_cwnd_log_at].x.cwnd.pseudo_cumack = net->pseudo_cumack; 290 sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack; 291 sctp_clog[sctp_cwnd_log_at].x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack; 292 } 293 if (SCTP_CWNDLOG_PRESEND == from) { 294 sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd; 295 } 296 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment; 297 } 298 299 void 300 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from) 301 { 302 int sctp_cwnd_log_at; 303 304 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 305 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 306 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 307 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_LOCK_EVENT; 308 if (inp) { 309 sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)inp->sctp_socket; 310 311 } else { 312 sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)NULL; 313 } 314 sctp_clog[sctp_cwnd_log_at].x.lock.inp = (void *)inp; 315 if (stcb) { 316 sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx); 317 } else { 318 sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = SCTP_LOCK_UNKNOWN; 319 } 320 if (inp) { 321 sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = mtx_owned(&inp->inp_mtx); 322 sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = mtx_owned(&inp->inp_create_mtx); 323 } else { 324 sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = SCTP_LOCK_UNKNOWN; 325 sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = SCTP_LOCK_UNKNOWN; 326 } 327 sctp_clog[sctp_cwnd_log_at].x.lock.info_lock = mtx_owned(&sctppcbinfo.ipi_ep_mtx); 328 if (inp->sctp_socket) { 329 sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 330 sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); 331 sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx)); 332 } else { 333 sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = SCTP_LOCK_UNKNOWN; 334 sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN; 335 sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN; 336 } 337 } 338 339 void 340 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from) 341 { 342 int sctp_cwnd_log_at; 343 344 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 345 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 346 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 347 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAXBURST; 348 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net; 349 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error; 350 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size; 351 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst; 352 if (stcb->asoc.send_queue_cnt > 255) 353 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255; 354 else 355 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt; 356 if (stcb->asoc.stream_queue_cnt > 255) 357 
sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255; 358 else 359 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt; 360 } 361 362 void 363 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead) 364 { 365 int sctp_cwnd_log_at; 366 367 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 368 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 369 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 370 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND; 371 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd; 372 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size; 373 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead; 374 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0; 375 } 376 377 void 378 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval) 379 { 380 int sctp_cwnd_log_at; 381 382 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 383 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 384 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 385 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND; 386 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd; 387 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size; 388 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead; 389 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval; 390 } 391 392 void 393 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt) 394 { 395 int sctp_cwnd_log_at; 396 397 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 398 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 399 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 400 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBCNT; 401 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq; 402 sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change = book; 403 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q; 404 sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt; 405 } 406 407 void 408 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d) 409 { 410 int sctp_cwnd_log_at; 411 412 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 413 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 414 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 415 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_MISC_EVENT; 416 sctp_clog[sctp_cwnd_log_at].x.misc.log1 = a; 417 sctp_clog[sctp_cwnd_log_at].x.misc.log2 = b; 418 sctp_clog[sctp_cwnd_log_at].x.misc.log3 = c; 419 sctp_clog[sctp_cwnd_log_at].x.misc.log4 = d; 420 } 421 422 void 423 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from) 424 { 425 int sctp_cwnd_log_at; 426 427 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 428 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 429 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 430 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_WAKE; 431 sctp_clog[sctp_cwnd_log_at].x.wake.stcb = (void *)stcb; 432 sctp_clog[sctp_cwnd_log_at].x.wake.wake_cnt = wake_cnt; 433 sctp_clog[sctp_cwnd_log_at].x.wake.flight = stcb->asoc.total_flight_count; 434 sctp_clog[sctp_cwnd_log_at].x.wake.send_q = stcb->asoc.send_queue_cnt; 435 sctp_clog[sctp_cwnd_log_at].x.wake.sent_q = stcb->asoc.sent_queue_cnt; 436 437 if (stcb->asoc.stream_queue_cnt < 0xff) 438 sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = (uint8_t) 
stcb->asoc.stream_queue_cnt; 439 else 440 sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = 0xff; 441 442 if (stcb->asoc.chunks_on_out_queue < 0xff) 443 sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue; 444 else 445 sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = 0xff; 446 447 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags = 0; 448 /* set in the defered mode stuff */ 449 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) 450 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 1; 451 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) 452 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 2; 453 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) 454 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 4; 455 /* what about the sb */ 456 if (stcb->sctp_socket) { 457 struct socket *so = stcb->sctp_socket; 458 459 sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff)); 460 } else { 461 sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = 0xff; 462 } 463 } 464 465 void 466 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen) 467 { 468 int sctp_cwnd_log_at; 469 470 SCTP_STATLOG_GETREF(sctp_cwnd_log_at); 471 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from; 472 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event(); 473 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_BLOCK; 474 sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size; 475 sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt); 476 sctp_clog[sctp_cwnd_log_at].x.blk.peer_rwnd = asoc->peers_rwnd; 477 sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt; 478 sctp_clog[sctp_cwnd_log_at].x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue; 479 sctp_clog[sctp_cwnd_log_at].x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024); 480 sctp_clog[sctp_cwnd_log_at].x.blk.sndlen = sendlen; 481 } 482 483 int 484 sctp_fill_stat_log(void *optval, size_t *optsize) 485 { 486 int sctp_cwnd_log_at; 487 struct sctp_cwnd_log_req *req; 488 size_t size_limit; 489 int num, i, at, cnt_out = 0; 490 491 if (*optsize < sizeof(struct sctp_cwnd_log_req)) { 492 return (EINVAL); 493 } 494 size_limit = (*optsize - sizeof(struct sctp_cwnd_log_req)); 495 if (size_limit < sizeof(struct sctp_cwnd_log)) { 496 return (EINVAL); 497 } 498 sctp_cwnd_log_at = global_sctp_cwnd_log_at; 499 req = (struct sctp_cwnd_log_req *)optval; 500 num = size_limit / sizeof(struct sctp_cwnd_log); 501 if (global_sctp_cwnd_log_rolled) { 502 req->num_in_log = SCTP_STAT_LOG_SIZE; 503 } else { 504 req->num_in_log = sctp_cwnd_log_at; 505 /* 506 * if the log has not rolled, we don't let you have old 507 * data. 508 */ 509 if (req->end_at > sctp_cwnd_log_at) { 510 req->end_at = sctp_cwnd_log_at; 511 } 512 } 513 if ((num < SCTP_STAT_LOG_SIZE) && 514 ((global_sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) { 515 /* we can't return all of it */ 516 if (((req->start_at == 0) && (req->end_at == 0)) || 517 (req->start_at >= SCTP_STAT_LOG_SIZE) || 518 (req->end_at >= SCTP_STAT_LOG_SIZE)) { 519 /* No user request or user is wacked. 
*/ 520 req->num_ret = num; 521 req->end_at = sctp_cwnd_log_at - 1; 522 if ((sctp_cwnd_log_at - num) < 0) { 523 int cc; 524 525 cc = num - sctp_cwnd_log_at; 526 req->start_at = SCTP_STAT_LOG_SIZE - cc; 527 } else { 528 req->start_at = sctp_cwnd_log_at - num; 529 } 530 } else { 531 /* a user request */ 532 int cc; 533 534 if (req->start_at > req->end_at) { 535 cc = (SCTP_STAT_LOG_SIZE - req->start_at) + 536 (req->end_at + 1); 537 } else { 538 539 cc = (req->end_at - req->start_at) + 1; 540 } 541 if (cc < num) { 542 num = cc; 543 } 544 req->num_ret = num; 545 } 546 } else { 547 /* We can return all of it */ 548 req->start_at = 0; 549 req->end_at = sctp_cwnd_log_at - 1; 550 req->num_ret = sctp_cwnd_log_at; 551 } 552 #ifdef INVARIANTS 553 if (req->num_ret > num) { 554 panic("Bad statlog get?"); 555 } 556 #endif 557 for (i = 0, at = req->start_at; i < req->num_ret; i++) { 558 req->log[i] = sctp_clog[at]; 559 cnt_out++; 560 at++; 561 if (at >= SCTP_STAT_LOG_SIZE) 562 at = 0; 563 } 564 *optsize = (cnt_out * sizeof(struct sctp_cwnd_log)) + sizeof(struct sctp_cwnd_log_req); 565 return (0); 566 } 567 568 #endif 569 570 #ifdef SCTP_AUDITING_ENABLED 571 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2]; 572 static int sctp_audit_indx = 0; 573 574 static 575 void 576 sctp_print_audit_report(void) 577 { 578 int i; 579 int cnt; 580 581 cnt = 0; 582 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) { 583 if ((sctp_audit_data[i][0] == 0xe0) && 584 (sctp_audit_data[i][1] == 0x01)) { 585 cnt = 0; 586 printf("\n"); 587 } else if (sctp_audit_data[i][0] == 0xf0) { 588 cnt = 0; 589 printf("\n"); 590 } else if ((sctp_audit_data[i][0] == 0xc0) && 591 (sctp_audit_data[i][1] == 0x01)) { 592 printf("\n"); 593 cnt = 0; 594 } 595 printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 596 (uint32_t) sctp_audit_data[i][1]); 597 cnt++; 598 if ((cnt % 14) == 0) 599 printf("\n"); 600 } 601 for (i = 0; i < sctp_audit_indx; i++) { 602 if ((sctp_audit_data[i][0] == 0xe0) && 603 (sctp_audit_data[i][1] == 0x01)) { 604 cnt = 0; 605 printf("\n"); 606 } else if (sctp_audit_data[i][0] == 0xf0) { 607 cnt = 0; 608 printf("\n"); 609 } else if ((sctp_audit_data[i][0] == 0xc0) && 610 (sctp_audit_data[i][1] == 0x01)) { 611 printf("\n"); 612 cnt = 0; 613 } 614 printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0], 615 (uint32_t) sctp_audit_data[i][1]); 616 cnt++; 617 if ((cnt % 14) == 0) 618 printf("\n"); 619 } 620 printf("\n"); 621 } 622 623 void 624 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 625 struct sctp_nets *net) 626 { 627 int resend_cnt, tot_out, rep, tot_book_cnt; 628 struct sctp_nets *lnet; 629 struct sctp_tmit_chunk *chk; 630 631 sctp_audit_data[sctp_audit_indx][0] = 0xAA; 632 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from; 633 sctp_audit_indx++; 634 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 635 sctp_audit_indx = 0; 636 } 637 if (inp == NULL) { 638 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 639 sctp_audit_data[sctp_audit_indx][1] = 0x01; 640 sctp_audit_indx++; 641 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 642 sctp_audit_indx = 0; 643 } 644 return; 645 } 646 if (stcb == NULL) { 647 sctp_audit_data[sctp_audit_indx][0] = 0xAF; 648 sctp_audit_data[sctp_audit_indx][1] = 0x02; 649 sctp_audit_indx++; 650 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 651 sctp_audit_indx = 0; 652 } 653 return; 654 } 655 sctp_audit_data[sctp_audit_indx][0] = 0xA1; 656 sctp_audit_data[sctp_audit_indx][1] = 657 (0x000000ff & stcb->asoc.sent_queue_retran_cnt); 658 sctp_audit_indx++; 659 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) { 
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		printf("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		printf("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		printf("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		printf("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				printf("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size, tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * A list of sizes based on typical MTUs, used only if the next hop size is
 * not returned.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
	SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
		SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
	}
}

int
find_next_best_mtu(int totsz)
{
	int i, prefer;

	/*
	 * If we are in here we must find the next best fit based on the
	 * size of the datagram that failed to be sent.
	 */
	prefer = 0;
	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
		if (totsz < sctp_mtu_sizes[i]) {
			prefer = i - 1;
			if (prefer < 0)
				prefer = 0;
			break;
		}
	}
	return (sctp_mtu_sizes[prefer]);
}
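
/*
 * Editorial sketch, not part of the original sources: find_next_best_mtu()
 * above returns the table entry just below the first size that exceeds the
 * datagram that failed, clamping at the smallest entry; if totsz is at or
 * above the largest entry the loop falls through and the smallest entry
 * (68) is returned.  The local below is hypothetical and only illustrates
 * the lookup:
 *
 *	int next_mtu;
 *
 *	next_mtu = find_next_best_mtu(1400);	// 1400 < 1492, so 1006
 *	next_mtu = find_next_best_mtu(100);	// 100 < 296, so 68
 */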

void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use MD5/SHA-1 to hash our good random numbers together
	 * with our counter. The result becomes our good random numbers and
	 * we then set up to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbledygook in the random store, which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
	m->store_at = 0;
	sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}

uint32_t
sctp_select_initial_TSN(struct sctp_pcb *m)
{
	/*
	 * A true implementation should use a random selection process to
	 * get the initial stream sequence number, using RFC 1750 as a good
	 * guideline.
	 */
	uint32_t x, *xp;
	uint8_t *p;

	if (m->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = m->initial_sequence_debug;
		m->initial_sequence_debug++;
		return (ret);
	}
	if ((m->store_at + sizeof(u_long)) > SCTP_SIGNATURE_SIZE) {
		/* Refill the random store */
		sctp_fill_random_store(m);
	}
	p = &m->random_store[(int)m->store_at];
	xp = (uint32_t *) p;
	x = *xp;
	m->store_at += sizeof(uint32_t);
	return (x);
}

uint32_t
sctp_select_a_tag(struct sctp_inpcb *m)
{
	u_long x, not_done;
	struct timeval now;

	SCTP_GETTIME_TIMEVAL(&now);
	not_done = 1;
	while (not_done) {
		x = sctp_select_initial_TSN(&m->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (sctp_is_vtag_good(m, x, &now)) {
			not_done = 0;
		}
	}
	return (x);
}
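
/*
 * Editorial sketch, not part of the original sources: the three routines
 * above cooperate as follows.  sctp_fill_random_store() re-hashes
 * random_numbers together with an incrementing counter into random_store,
 * sctp_select_initial_TSN() hands that store out 4 bytes at a time and
 * refills it once fewer than sizeof(u_long) bytes remain, and
 * sctp_select_a_tag() keeps drawing values until one is non-zero and
 * passes sctp_is_vtag_good().  A hypothetical caller holding an inpcb
 * pointer "inp" would do:
 *
 *	uint32_t init_tsn = sctp_select_initial_TSN(&inp->sctp_ep);
 *	uint32_t vtag = sctp_select_a_tag(inp);
 */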
these right now, we will need to come up 914 * with a way to set them. We may need to pass them through from the 915 * caller in the sctp_aloc_assoc() function. 916 */ 917 int i; 918 919 /* init all variables to a known value. */ 920 asoc->state = SCTP_STATE_INUSE; 921 asoc->max_burst = m->sctp_ep.max_burst; 922 asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); 923 asoc->cookie_life = m->sctp_ep.def_cookie_life; 924 asoc->sctp_cmt_on_off = (uint8_t) sctp_cmt_on_off; 925 #ifdef INET 926 asoc->default_tos = m->ip_inp.inp.inp_ip_tos; 927 #else 928 asoc->default_tos = 0; 929 #endif 930 931 #ifdef INET6 932 asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo; 933 #else 934 asoc->default_flowlabel = 0; 935 #endif 936 if (override_tag) { 937 struct timeval now; 938 939 SCTP_GETTIME_TIMEVAL(&now); 940 if (sctp_is_vtag_good(m, override_tag, &now)) { 941 asoc->my_vtag = override_tag; 942 } else { 943 return (ENOMEM); 944 } 945 946 } else { 947 asoc->my_vtag = sctp_select_a_tag(m); 948 } 949 /* Get the nonce tags */ 950 asoc->my_vtag_nonce = sctp_select_a_tag(m); 951 asoc->peer_vtag_nonce = sctp_select_a_tag(m); 952 asoc->vrf_id = vrf_id; 953 954 if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) 955 asoc->hb_is_disabled = 1; 956 else 957 asoc->hb_is_disabled = 0; 958 959 asoc->refcnt = 0; 960 asoc->assoc_up_sent = 0; 961 asoc->assoc_id = asoc->my_vtag; 962 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq = 963 sctp_select_initial_TSN(&m->sctp_ep); 964 /* we are optimisitic here */ 965 asoc->peer_supports_pktdrop = 1; 966 967 asoc->sent_queue_retran_cnt = 0; 968 969 /* for CMT */ 970 asoc->last_net_data_came_from = NULL; 971 972 /* This will need to be adjusted */ 973 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 974 asoc->last_acked_seq = asoc->init_seq_number - 1; 975 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 976 asoc->asconf_seq_in = asoc->last_acked_seq; 977 978 /* here we are different, we hold the next one we expect */ 979 asoc->str_reset_seq_in = asoc->last_acked_seq + 1; 980 981 asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max; 982 asoc->initial_rto = m->sctp_ep.initial_rto; 983 984 asoc->max_init_times = m->sctp_ep.max_init_times; 985 asoc->max_send_times = m->sctp_ep.max_send_times; 986 asoc->def_net_failure = m->sctp_ep.def_net_failure; 987 asoc->free_chunk_cnt = 0; 988 989 asoc->iam_blocking = 0; 990 /* ECN Nonce initialization */ 991 asoc->context = m->sctp_context; 992 asoc->def_send = m->def_send; 993 asoc->ecn_nonce_allowed = 0; 994 asoc->receiver_nonce_sum = 1; 995 asoc->nonce_sum_expect_base = 1; 996 asoc->nonce_sum_check = 1; 997 asoc->nonce_resync_tsn = 0; 998 asoc->nonce_wait_for_ecne = 0; 999 asoc->nonce_wait_tsn = 0; 1000 asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); 1001 asoc->sack_freq = m->sctp_ep.sctp_sack_freq; 1002 asoc->pr_sctp_cnt = 0; 1003 asoc->total_output_queue_size = 0; 1004 1005 if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1006 struct in6pcb *inp6; 1007 1008 /* Its a V6 socket */ 1009 inp6 = (struct in6pcb *)m; 1010 asoc->ipv6_addr_legal = 1; 1011 /* Now look at the binding flag to see if V4 will be legal */ 1012 if (SCTP_IPV6_V6ONLY(inp6) == 0) { 1013 asoc->ipv4_addr_legal = 1; 1014 } else { 1015 /* V4 addresses are NOT legal on the association */ 1016 asoc->ipv4_addr_legal = 0; 1017 } 1018 } else { 1019 /* Its a V4 socket, no - V6 */ 1020 asoc->ipv4_addr_legal = 1; 1021 asoc->ipv6_addr_legal = 0; 
1022 } 1023 1024 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND); 1025 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket); 1026 1027 asoc->smallest_mtu = m->sctp_frag_point; 1028 asoc->minrto = m->sctp_ep.sctp_minrto; 1029 asoc->maxrto = m->sctp_ep.sctp_maxrto; 1030 1031 asoc->locked_on_sending = NULL; 1032 asoc->stream_locked_on = 0; 1033 asoc->ecn_echo_cnt_onq = 0; 1034 asoc->stream_locked = 0; 1035 1036 asoc->send_sack = 1; 1037 1038 LIST_INIT(&asoc->sctp_restricted_addrs); 1039 1040 TAILQ_INIT(&asoc->nets); 1041 TAILQ_INIT(&asoc->pending_reply_queue); 1042 asoc->last_asconf_ack_sent = NULL; 1043 /* Setup to fill the hb random cache at first HB */ 1044 asoc->hb_random_idx = 4; 1045 1046 asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time; 1047 1048 /* 1049 * Now the stream parameters, here we allocate space for all streams 1050 * that we request by default. 1051 */ 1052 asoc->streamoutcnt = asoc->pre_open_streams = 1053 m->sctp_ep.pre_open_stream_count; 1054 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *, 1055 asoc->streamoutcnt * sizeof(struct sctp_stream_out), 1056 "StreamsOut"); 1057 if (asoc->strmout == NULL) { 1058 /* big trouble no memory */ 1059 return (ENOMEM); 1060 } 1061 for (i = 0; i < asoc->streamoutcnt; i++) { 1062 /* 1063 * inbound side must be set to 0xffff, also NOTE when we get 1064 * the INIT-ACK back (for INIT sender) we MUST reduce the 1065 * count (streamoutcnt) but first check if we sent to any of 1066 * the upper streams that were dropped (if some were). Those 1067 * that were dropped must be notified to the upper layer as 1068 * failed to send. 1069 */ 1070 asoc->strmout[i].next_sequence_sent = 0x0; 1071 TAILQ_INIT(&asoc->strmout[i].outqueue); 1072 asoc->strmout[i].stream_no = i; 1073 asoc->strmout[i].last_msg_incomplete = 0; 1074 asoc->strmout[i].next_spoke.tqe_next = 0; 1075 asoc->strmout[i].next_spoke.tqe_prev = 0; 1076 } 1077 /* Now the mapping array */ 1078 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY; 1079 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size, 1080 "MappingArray"); 1081 if (asoc->mapping_array == NULL) { 1082 SCTP_FREE(asoc->strmout); 1083 return (ENOMEM); 1084 } 1085 memset(asoc->mapping_array, 0, asoc->mapping_array_size); 1086 /* Now the init of the other outqueues */ 1087 TAILQ_INIT(&asoc->free_chunks); 1088 TAILQ_INIT(&asoc->free_strmoq); 1089 TAILQ_INIT(&asoc->out_wheel); 1090 TAILQ_INIT(&asoc->control_send_queue); 1091 TAILQ_INIT(&asoc->send_queue); 1092 TAILQ_INIT(&asoc->sent_queue); 1093 TAILQ_INIT(&asoc->reasmqueue); 1094 TAILQ_INIT(&asoc->resetHead); 1095 asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome; 1096 TAILQ_INIT(&asoc->asconf_queue); 1097 /* authentication fields */ 1098 asoc->authinfo.random = NULL; 1099 asoc->authinfo.assoc_key = NULL; 1100 asoc->authinfo.assoc_keyid = 0; 1101 asoc->authinfo.recv_key = NULL; 1102 asoc->authinfo.recv_keyid = 0; 1103 LIST_INIT(&asoc->shared_keys); 1104 asoc->marked_retrans = 0; 1105 asoc->timoinit = 0; 1106 asoc->timodata = 0; 1107 asoc->timosack = 0; 1108 asoc->timoshutdown = 0; 1109 asoc->timoheartbeat = 0; 1110 asoc->timocookie = 0; 1111 asoc->timoshutdownack = 0; 1112 SCTP_GETTIME_TIMEVAL(&asoc->start_time); 1113 SCTP_GETTIME_TIMEVAL(&asoc->discontinuity_time); 1114 1115 return (0); 1116 } 1117 1118 int 1119 sctp_expand_mapping_array(struct sctp_association *asoc) 1120 { 1121 /* mapping array needs to grow */ 1122 uint8_t *new_array; 1123 uint16_t new_size; 1124 1125 new_size = asoc->mapping_array_size + 
SCTP_MAPPING_ARRAY_INCR; 1126 SCTP_MALLOC(new_array, uint8_t *, new_size, "MappingArray"); 1127 if (new_array == NULL) { 1128 /* can't get more, forget it */ 1129 printf("No memory for expansion of SCTP mapping array %d\n", 1130 new_size); 1131 return (-1); 1132 } 1133 memset(new_array, 0, new_size); 1134 memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size); 1135 SCTP_FREE(asoc->mapping_array); 1136 asoc->mapping_array = new_array; 1137 asoc->mapping_array_size = new_size; 1138 return (0); 1139 } 1140 1141 #if defined(SCTP_USE_THREAD_BASED_ITERATOR) 1142 static void 1143 sctp_iterator_work(struct sctp_iterator *it) 1144 { 1145 int iteration_count = 0; 1146 int inp_skip = 0; 1147 1148 SCTP_ITERATOR_LOCK(); 1149 if (it->inp) 1150 SCTP_INP_DECR_REF(it->inp); 1151 1152 if (it->inp == NULL) { 1153 /* iterator is complete */ 1154 done_with_iterator: 1155 SCTP_ITERATOR_UNLOCK(); 1156 if (it->function_atend != NULL) { 1157 (*it->function_atend) (it->pointer, it->val); 1158 } 1159 SCTP_FREE(it); 1160 return; 1161 } 1162 select_a_new_ep: 1163 SCTP_INP_WLOCK(it->inp); 1164 while (((it->pcb_flags) && 1165 ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) || 1166 ((it->pcb_features) && 1167 ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) { 1168 /* endpoint flags or features don't match, so keep looking */ 1169 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1170 SCTP_INP_WUNLOCK(it->inp); 1171 goto done_with_iterator; 1172 } 1173 SCTP_INP_WUNLOCK(it->inp); 1174 it->inp = LIST_NEXT(it->inp, sctp_list); 1175 if (it->inp == NULL) { 1176 goto done_with_iterator; 1177 } 1178 SCTP_INP_WLOCK(it->inp); 1179 } 1180 1181 /* mark the current iterator on the endpoint */ 1182 it->inp->inp_starting_point_for_iterator = it; 1183 SCTP_INP_WUNLOCK(it->inp); 1184 SCTP_INP_RLOCK(it->inp); 1185 1186 /* now go through each assoc which is in the desired state */ 1187 if (it->done_current_ep == 0) { 1188 if (it->function_inp != NULL) 1189 inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val); 1190 it->done_current_ep = 1; 1191 } 1192 if (it->stcb == NULL) { 1193 /* run the per instance function */ 1194 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list); 1195 } 1196 if ((inp_skip) || it->stcb == NULL) { 1197 if (it->function_inp_end != NULL) { 1198 inp_skip = (*it->function_inp_end) (it->inp, 1199 it->pointer, 1200 it->val); 1201 } 1202 SCTP_INP_RUNLOCK(it->inp); 1203 goto no_stcb; 1204 } 1205 if ((it->stcb) && 1206 (it->stcb->asoc.stcb_starting_point_for_iterator == it)) { 1207 it->stcb->asoc.stcb_starting_point_for_iterator = NULL; 1208 } 1209 while (it->stcb) { 1210 SCTP_TCB_LOCK(it->stcb); 1211 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) { 1212 /* not in the right state... 
keep looking */ 1213 SCTP_TCB_UNLOCK(it->stcb); 1214 goto next_assoc; 1215 } 1216 /* mark the current iterator on the assoc */ 1217 it->stcb->asoc.stcb_starting_point_for_iterator = it; 1218 /* see if we have limited out the iterator loop */ 1219 iteration_count++; 1220 if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) { 1221 /* Pause to let others grab the lock */ 1222 atomic_add_int(&it->stcb->asoc.refcnt, 1); 1223 SCTP_TCB_UNLOCK(it->stcb); 1224 SCTP_INP_RUNLOCK(it->inp); 1225 SCTP_ITERATOR_UNLOCK(); 1226 SCTP_ITERATOR_LOCK(); 1227 SCTP_INP_RLOCK(it->inp); 1228 SCTP_TCB_LOCK(it->stcb); 1229 atomic_add_int(&it->stcb->asoc.refcnt, -1); 1230 iteration_count = 0; 1231 } 1232 /* run function on this one */ 1233 (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val); 1234 1235 /* 1236 * we lie here, it really needs to have its own type but 1237 * first I must verify that this won't effect things :-0 1238 */ 1239 if (it->no_chunk_output == 0) 1240 sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3); 1241 1242 SCTP_TCB_UNLOCK(it->stcb); 1243 next_assoc: 1244 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist); 1245 if (it->stcb == NULL) { 1246 /* Run last function */ 1247 if (it->function_inp_end != NULL) { 1248 inp_skip = (*it->function_inp_end) (it->inp, 1249 it->pointer, 1250 it->val); 1251 } 1252 } 1253 } 1254 SCTP_INP_RUNLOCK(it->inp); 1255 no_stcb: 1256 /* done with all assocs on this endpoint, move on to next endpoint */ 1257 it->done_current_ep = 0; 1258 SCTP_INP_WLOCK(it->inp); 1259 it->inp->inp_starting_point_for_iterator = NULL; 1260 SCTP_INP_WUNLOCK(it->inp); 1261 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1262 it->inp = NULL; 1263 } else { 1264 SCTP_INP_INFO_RLOCK(); 1265 it->inp = LIST_NEXT(it->inp, sctp_list); 1266 SCTP_INP_INFO_RUNLOCK(); 1267 } 1268 if (it->inp == NULL) { 1269 goto done_with_iterator; 1270 } 1271 goto select_a_new_ep; 1272 } 1273 1274 void 1275 sctp_iterator_worker(void) 1276 { 1277 struct sctp_iterator *it = NULL; 1278 1279 /* This function is called with the WQ lock in place */ 1280 1281 sctppcbinfo.iterator_running = 1; 1282 again: 1283 it = TAILQ_FIRST(&sctppcbinfo.iteratorhead); 1284 while (it) { 1285 /* now lets work on this one */ 1286 TAILQ_REMOVE(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr); 1287 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1288 sctp_iterator_work(it); 1289 SCTP_IPI_ITERATOR_WQ_LOCK(); 1290 it = TAILQ_FIRST(&sctppcbinfo.iteratorhead); 1291 } 1292 if (TAILQ_FIRST(&sctppcbinfo.iteratorhead)) { 1293 goto again; 1294 } 1295 sctppcbinfo.iterator_running = 0; 1296 return; 1297 } 1298 1299 #endif 1300 1301 1302 static void 1303 sctp_handle_addr_wq(void) 1304 { 1305 /* deal with the ADDR wq from the rtsock calls */ 1306 struct sctp_laddr *wi; 1307 struct sctp_asconf_iterator *asc; 1308 1309 SCTP_MALLOC(asc, struct sctp_asconf_iterator *, 1310 sizeof(struct sctp_asconf_iterator), "SCTP_ASCONF_ITERATOR"); 1311 if (asc == NULL) { 1312 /* Try later, no memory */ 1313 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 1314 (struct sctp_inpcb *)NULL, 1315 (struct sctp_tcb *)NULL, 1316 (struct sctp_nets *)NULL); 1317 return; 1318 } 1319 LIST_INIT(&asc->list_of_work); 1320 asc->cnt = 0; 1321 SCTP_IPI_ITERATOR_WQ_LOCK(); 1322 wi = LIST_FIRST(&sctppcbinfo.addr_wq); 1323 while (wi != NULL) { 1324 LIST_REMOVE(wi, sctp_nxt_addr); 1325 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr); 1326 asc->cnt++; 1327 wi = LIST_FIRST(&sctppcbinfo.addr_wq); 1328 } 1329 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1330 if (asc->cnt == 0) { 1331 SCTP_FREE(asc); 1332 } else { 1333 
sctp_initiate_iterator(sctp_iterator_ep, 1334 sctp_iterator_stcb, 1335 NULL, /* No ep end for boundall */ 1336 SCTP_PCB_FLAGS_BOUNDALL, 1337 SCTP_PCB_ANY_FEATURES, 1338 SCTP_ASOC_ANY_STATE, (void *)asc, 0, 1339 sctp_iterator_end, NULL, 0); 1340 } 1341 1342 } 1343 1344 void 1345 sctp_timeout_handler(void *t) 1346 { 1347 struct sctp_inpcb *inp; 1348 struct sctp_tcb *stcb; 1349 struct sctp_nets *net; 1350 struct sctp_timer *tmr; 1351 int did_output; 1352 struct sctp_iterator *it = NULL; 1353 1354 1355 tmr = (struct sctp_timer *)t; 1356 inp = (struct sctp_inpcb *)tmr->ep; 1357 stcb = (struct sctp_tcb *)tmr->tcb; 1358 net = (struct sctp_nets *)tmr->net; 1359 did_output = 1; 1360 1361 #ifdef SCTP_AUDITING_ENABLED 1362 sctp_audit_log(0xF0, (uint8_t) tmr->type); 1363 sctp_auditing(3, inp, stcb, net); 1364 #endif 1365 1366 /* sanity checks... */ 1367 if (tmr->self != (void *)tmr) { 1368 /* 1369 * printf("Stale SCTP timer fired (%p), ignoring...\n", 1370 * tmr); 1371 */ 1372 return; 1373 } 1374 tmr->stopped_from = 0xa001; 1375 if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) { 1376 /* 1377 * printf("SCTP timer fired with invalid type: 0x%x\n", 1378 * tmr->type); 1379 */ 1380 return; 1381 } 1382 tmr->stopped_from = 0xa002; 1383 if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) { 1384 return; 1385 } 1386 /* if this is an iterator timeout, get the struct and clear inp */ 1387 tmr->stopped_from = 0xa003; 1388 if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) { 1389 it = (struct sctp_iterator *)inp; 1390 inp = NULL; 1391 } 1392 if (inp) { 1393 SCTP_INP_INCR_REF(inp); 1394 if ((inp->sctp_socket == 0) && 1395 ((tmr->type != SCTP_TIMER_TYPE_INPKILL) && 1396 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) && 1397 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) && 1398 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) && 1399 (tmr->type != SCTP_TIMER_TYPE_ASOCKILL)) 1400 ) { 1401 SCTP_INP_DECR_REF(inp); 1402 return; 1403 } 1404 } 1405 tmr->stopped_from = 0xa004; 1406 if (stcb) { 1407 if (stcb->asoc.state == 0) { 1408 if (inp) { 1409 SCTP_INP_DECR_REF(inp); 1410 } 1411 return; 1412 } 1413 } 1414 tmr->stopped_from = 0xa005; 1415 #ifdef SCTP_DEBUG 1416 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1417 printf("Timer type %d goes off\n", tmr->type); 1418 } 1419 #endif /* SCTP_DEBUG */ 1420 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1421 if (inp) { 1422 SCTP_INP_DECR_REF(inp); 1423 } 1424 return; 1425 } 1426 tmr->stopped_from = 0xa006; 1427 1428 if (stcb) { 1429 atomic_add_int(&stcb->asoc.refcnt, 1); 1430 SCTP_TCB_LOCK(stcb); 1431 atomic_add_int(&stcb->asoc.refcnt, -1); 1432 } 1433 /* record in stopped what t-o occured */ 1434 tmr->stopped_from = tmr->type; 1435 1436 /* mark as being serviced now */ 1437 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 1438 /* 1439 * Callout has been rescheduled. 1440 */ 1441 goto get_out; 1442 } 1443 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { 1444 /* 1445 * Not active, so no action. 
1446 */ 1447 goto get_out; 1448 } 1449 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer); 1450 1451 /* call the handler for the appropriate timer type */ 1452 switch (tmr->type) { 1453 case SCTP_TIMER_TYPE_ADDR_WQ: 1454 sctp_handle_addr_wq(); 1455 break; 1456 case SCTP_TIMER_TYPE_ITERATOR: 1457 SCTP_STAT_INCR(sctps_timoiterator); 1458 sctp_iterator_timer(it); 1459 break; 1460 case SCTP_TIMER_TYPE_SEND: 1461 SCTP_STAT_INCR(sctps_timodata); 1462 stcb->asoc.timodata++; 1463 stcb->asoc.num_send_timers_up--; 1464 if (stcb->asoc.num_send_timers_up < 0) { 1465 stcb->asoc.num_send_timers_up = 0; 1466 } 1467 if (sctp_t3rxt_timer(inp, stcb, net)) { 1468 /* no need to unlock on tcb its gone */ 1469 1470 goto out_decr; 1471 } 1472 #ifdef SCTP_AUDITING_ENABLED 1473 sctp_auditing(4, inp, stcb, net); 1474 #endif 1475 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3); 1476 if ((stcb->asoc.num_send_timers_up == 0) && 1477 (stcb->asoc.sent_queue_cnt > 0) 1478 ) { 1479 struct sctp_tmit_chunk *chk; 1480 1481 /* 1482 * safeguard. If there on some on the sent queue 1483 * somewhere but no timers running something is 1484 * wrong... so we start a timer on the first chunk 1485 * on the send queue on whatever net it is sent to. 1486 */ 1487 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 1488 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, 1489 chk->whoTo); 1490 } 1491 break; 1492 case SCTP_TIMER_TYPE_INIT: 1493 SCTP_STAT_INCR(sctps_timoinit); 1494 stcb->asoc.timoinit++; 1495 if (sctp_t1init_timer(inp, stcb, net)) { 1496 /* no need to unlock on tcb its gone */ 1497 goto out_decr; 1498 } 1499 /* We do output but not here */ 1500 did_output = 0; 1501 break; 1502 case SCTP_TIMER_TYPE_RECV: 1503 SCTP_STAT_INCR(sctps_timosack); 1504 stcb->asoc.timosack++; 1505 sctp_send_sack(stcb); 1506 #ifdef SCTP_AUDITING_ENABLED 1507 sctp_auditing(4, inp, stcb, net); 1508 #endif 1509 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR); 1510 break; 1511 case SCTP_TIMER_TYPE_SHUTDOWN: 1512 if (sctp_shutdown_timer(inp, stcb, net)) { 1513 /* no need to unlock on tcb its gone */ 1514 goto out_decr; 1515 } 1516 SCTP_STAT_INCR(sctps_timoshutdown); 1517 stcb->asoc.timoshutdown++; 1518 #ifdef SCTP_AUDITING_ENABLED 1519 sctp_auditing(4, inp, stcb, net); 1520 #endif 1521 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR); 1522 break; 1523 case SCTP_TIMER_TYPE_HEARTBEAT: 1524 { 1525 struct sctp_nets *net; 1526 int cnt_of_unconf = 0; 1527 1528 SCTP_STAT_INCR(sctps_timoheartbeat); 1529 stcb->asoc.timoheartbeat++; 1530 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1531 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 1532 (net->dest_state & SCTP_ADDR_REACHABLE)) { 1533 cnt_of_unconf++; 1534 } 1535 } 1536 if (cnt_of_unconf == 0) { 1537 if (sctp_heartbeat_timer(inp, stcb, net, cnt_of_unconf)) { 1538 /* no need to unlock on tcb its gone */ 1539 goto out_decr; 1540 } 1541 } 1542 #ifdef SCTP_AUDITING_ENABLED 1543 sctp_auditing(4, inp, stcb, net); 1544 #endif 1545 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, 1546 stcb, net); 1547 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR); 1548 } 1549 break; 1550 case SCTP_TIMER_TYPE_COOKIE: 1551 if (sctp_cookie_timer(inp, stcb, net)) { 1552 /* no need to unlock on tcb its gone */ 1553 goto out_decr; 1554 } 1555 SCTP_STAT_INCR(sctps_timocookie); 1556 stcb->asoc.timocookie++; 1557 #ifdef SCTP_AUDITING_ENABLED 1558 sctp_auditing(4, inp, stcb, net); 1559 #endif 1560 /* 1561 * We consider T3 and Cookie timer pretty much the same with 1562 * respect to where from in chunk_output. 
1563 */ 1564 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3); 1565 break; 1566 case SCTP_TIMER_TYPE_NEWCOOKIE: 1567 { 1568 struct timeval tv; 1569 int i, secret; 1570 1571 SCTP_STAT_INCR(sctps_timosecret); 1572 SCTP_GETTIME_TIMEVAL(&tv); 1573 SCTP_INP_WLOCK(inp); 1574 inp->sctp_ep.time_of_secret_change = tv.tv_sec; 1575 inp->sctp_ep.last_secret_number = 1576 inp->sctp_ep.current_secret_number; 1577 inp->sctp_ep.current_secret_number++; 1578 if (inp->sctp_ep.current_secret_number >= 1579 SCTP_HOW_MANY_SECRETS) { 1580 inp->sctp_ep.current_secret_number = 0; 1581 } 1582 secret = (int)inp->sctp_ep.current_secret_number; 1583 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1584 inp->sctp_ep.secret_key[secret][i] = 1585 sctp_select_initial_TSN(&inp->sctp_ep); 1586 } 1587 SCTP_INP_WUNLOCK(inp); 1588 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net); 1589 } 1590 did_output = 0; 1591 break; 1592 case SCTP_TIMER_TYPE_PATHMTURAISE: 1593 SCTP_STAT_INCR(sctps_timopathmtu); 1594 sctp_pathmtu_timer(inp, stcb, net); 1595 did_output = 0; 1596 break; 1597 case SCTP_TIMER_TYPE_SHUTDOWNACK: 1598 if (sctp_shutdownack_timer(inp, stcb, net)) { 1599 /* no need to unlock on tcb its gone */ 1600 goto out_decr; 1601 } 1602 SCTP_STAT_INCR(sctps_timoshutdownack); 1603 stcb->asoc.timoshutdownack++; 1604 #ifdef SCTP_AUDITING_ENABLED 1605 sctp_auditing(4, inp, stcb, net); 1606 #endif 1607 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR); 1608 break; 1609 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 1610 SCTP_STAT_INCR(sctps_timoshutdownguard); 1611 sctp_abort_an_association(inp, stcb, 1612 SCTP_SHUTDOWN_GUARD_EXPIRES, NULL); 1613 /* no need to unlock on tcb its gone */ 1614 goto out_decr; 1615 break; 1616 1617 case SCTP_TIMER_TYPE_STRRESET: 1618 if (sctp_strreset_timer(inp, stcb, net)) { 1619 /* no need to unlock on tcb its gone */ 1620 goto out_decr; 1621 } 1622 SCTP_STAT_INCR(sctps_timostrmrst); 1623 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR); 1624 break; 1625 case SCTP_TIMER_TYPE_EARLYFR: 1626 /* Need to do FR of things for net */ 1627 SCTP_STAT_INCR(sctps_timoearlyfr); 1628 sctp_early_fr_timer(inp, stcb, net); 1629 break; 1630 case SCTP_TIMER_TYPE_ASCONF: 1631 if (sctp_asconf_timer(inp, stcb, net)) { 1632 /* no need to unlock on tcb its gone */ 1633 goto out_decr; 1634 } 1635 SCTP_STAT_INCR(sctps_timoasconf); 1636 #ifdef SCTP_AUDITING_ENABLED 1637 sctp_auditing(4, inp, stcb, net); 1638 #endif 1639 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR); 1640 break; 1641 1642 case SCTP_TIMER_TYPE_AUTOCLOSE: 1643 SCTP_STAT_INCR(sctps_timoautoclose); 1644 sctp_autoclose_timer(inp, stcb, net); 1645 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR); 1646 did_output = 0; 1647 break; 1648 case SCTP_TIMER_TYPE_ASOCKILL: 1649 SCTP_STAT_INCR(sctps_timoassockill); 1650 /* Can we free it yet? 
*/ 1651 SCTP_INP_DECR_REF(inp); 1652 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1); 1653 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2); 1654 /* 1655 * free asoc, always unlocks (or destroy's) so prevent 1656 * duplicate unlock or unlock of a free mtx :-0 1657 */ 1658 stcb = NULL; 1659 goto out_no_decr; 1660 break; 1661 case SCTP_TIMER_TYPE_INPKILL: 1662 SCTP_STAT_INCR(sctps_timoinpkill); 1663 /* 1664 * special case, take away our increment since WE are the 1665 * killer 1666 */ 1667 SCTP_INP_DECR_REF(inp); 1668 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3); 1669 sctp_inpcb_free(inp, 1, 0); 1670 goto out_no_decr; 1671 break; 1672 default: 1673 #ifdef SCTP_DEBUG 1674 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1675 printf("sctp_timeout_handler:unknown timer %d\n", 1676 tmr->type); 1677 } 1678 #endif /* SCTP_DEBUG */ 1679 break; 1680 }; 1681 #ifdef SCTP_AUDITING_ENABLED 1682 sctp_audit_log(0xF1, (uint8_t) tmr->type); 1683 if (inp) 1684 sctp_auditing(5, inp, stcb, net); 1685 #endif 1686 if ((did_output) && stcb) { 1687 /* 1688 * Now we need to clean up the control chunk chain if an 1689 * ECNE is on it. It must be marked as UNSENT again so next 1690 * call will continue to send it until such time that we get 1691 * a CWR, to remove it. It is, however, less likely that we 1692 * will find a ecn echo on the chain though. 1693 */ 1694 sctp_fix_ecn_echo(&stcb->asoc); 1695 } 1696 get_out: 1697 if (stcb) { 1698 SCTP_TCB_UNLOCK(stcb); 1699 } 1700 out_decr: 1701 if (inp) { 1702 SCTP_INP_DECR_REF(inp); 1703 } 1704 out_no_decr: 1705 1706 #ifdef SCTP_DEBUG 1707 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 1708 printf("Timer now complete (type %d)\n", tmr->type); 1709 } 1710 #endif /* SCTP_DEBUG */ 1711 if (inp) { 1712 } 1713 } 1714 1715 int 1716 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1717 struct sctp_nets *net) 1718 { 1719 int to_ticks; 1720 struct sctp_timer *tmr; 1721 1722 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) 1723 return (EFAULT); 1724 1725 to_ticks = 0; 1726 1727 tmr = NULL; 1728 if (stcb) { 1729 SCTP_TCB_LOCK_ASSERT(stcb); 1730 } 1731 switch (t_type) { 1732 case SCTP_TIMER_TYPE_ADDR_WQ: 1733 /* Only 1 tick away :-) */ 1734 tmr = &sctppcbinfo.addr_wq_timer; 1735 to_ticks = SCTP_ADDRESS_TICK_DELAY; 1736 break; 1737 case SCTP_TIMER_TYPE_ITERATOR: 1738 { 1739 struct sctp_iterator *it; 1740 1741 it = (struct sctp_iterator *)inp; 1742 tmr = &it->tmr; 1743 to_ticks = SCTP_ITERATOR_TICKS; 1744 } 1745 break; 1746 case SCTP_TIMER_TYPE_SEND: 1747 /* Here we use the RTO timer */ 1748 { 1749 int rto_val; 1750 1751 if ((stcb == NULL) || (net == NULL)) { 1752 return (EFAULT); 1753 } 1754 tmr = &net->rxt_timer; 1755 if (net->RTO == 0) { 1756 rto_val = stcb->asoc.initial_rto; 1757 } else { 1758 rto_val = net->RTO; 1759 } 1760 to_ticks = MSEC_TO_TICKS(rto_val); 1761 } 1762 break; 1763 case SCTP_TIMER_TYPE_INIT: 1764 /* 1765 * Here we use the INIT timer default usually about 1 1766 * minute. 1767 */ 1768 if ((stcb == NULL) || (net == NULL)) { 1769 return (EFAULT); 1770 } 1771 tmr = &net->rxt_timer; 1772 if (net->RTO == 0) { 1773 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1774 } else { 1775 to_ticks = MSEC_TO_TICKS(net->RTO); 1776 } 1777 break; 1778 case SCTP_TIMER_TYPE_RECV: 1779 /* 1780 * Here we use the Delayed-Ack timer value from the inp 1781 * ususually about 200ms. 
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		} {
			uint32_t rndval;
			uint8_t this_random;
			int cnt_of_unconf = 0;
			struct sctp_nets *lnet;

			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf) {
				lnet = NULL;
				sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
			}
			if (stcb->asoc.hb_random_idx > 3) {
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				memcpy(stcb->asoc.hb_random_values, &rndval,
				    sizeof(stcb->asoc.hb_random_values));
				stcb->asoc.hb_random_idx = 0;
			}
			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
			stcb->asoc.hb_random_idx++;
			stcb->asoc.hb_ect_randombit = 0;
			/*
			 * this_random will be 0 - 255 ms; RTO is in ms.
			 */
			if ((stcb->asoc.hb_is_disabled) &&
			    (cnt_of_unconf == 0)) {
				return (0);
			}
			if (net) {
				struct sctp_nets *lnet;
				int delay;

				delay = stcb->asoc.heart_beat_delay;
				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
						delay = 0;
					}
				}
				if (net->RTO == 0) {
					/* Never been checked */
					to_ticks = this_random + stcb->asoc.initial_rto + delay;
				} else {
					/* set rto_val to the ms */
					to_ticks = delay + net->RTO + this_random;
				}
			} else {
				if (cnt_of_unconf) {
					to_ticks = this_random + stcb->asoc.initial_rto;
				} else {
					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
				}
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &stcb->asoc.hb_timer;
		}
		break;
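	/*
	 * Worked example for the HEARTBEAT case above (illustrative values
	 * only): this_random is one byte taken from hb_random_values, so it
	 * falls in the range 0..255 ms.  For a destination with
	 * net->RTO = 1000 ms on an association whose heart_beat_delay is
	 * 30000 ms and which has no unconfirmed-but-reachable addresses (so
	 * delay is not forced to 0), drawing this_random = 200 gives
	 *
	 *	to_ticks = delay + net->RTO + this_random
	 *		 = 30000 + 1000 + 200 = 31200 ms
	 *
	 * which MSEC_TO_TICKS() then converts before hb_timer is armed.
	 */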
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was complete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here, usually about 60
		 * minutes.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU, usually
		 * about 10 minutes.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		if (net == NULL) {
			return (EFAULT);
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoint's shutdown guard timer, usually
		 * about 3 minutes.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the inp but its value is from
		 * the RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_EARLYFR:
		{
			unsigned int msec;

			if ((stcb == NULL) || (net == NULL)) {
				return (EFAULT);
			}
			if (net->flight_size > net->cwnd) {
				/* no need to start */
				return (0);
			}
			SCTP_STAT_INCR(sctps_earlyfrstart);
			if (net->lastsa == 0) {
				/* Hmm no rtt estimate yet? */
				msec = stcb->asoc.initial_rto >> 2;
			} else {
				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
			}
			if (msec < sctp_early_fr_msec) {
				msec = sctp_early_fr_msec;
				if (msec < SCTP_MINFR_MSEC_FLOOR) {
					msec = SCTP_MINFR_MSEC_FLOOR;
				}
			}
			to_ticks = MSEC_TO_TICKS(msec);
			tmr = &net->fr_timer;
		}
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the inp but its value is from
		 * the RTO.
2000 */ 2001 if ((stcb == NULL) || (net == NULL)) { 2002 return (EFAULT); 2003 } 2004 if (net->RTO == 0) { 2005 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 2006 } else { 2007 to_ticks = MSEC_TO_TICKS(net->RTO); 2008 } 2009 tmr = &stcb->asoc.asconf_timer; 2010 break; 2011 case SCTP_TIMER_TYPE_AUTOCLOSE: 2012 if (stcb == NULL) { 2013 return (EFAULT); 2014 } 2015 if (stcb->asoc.sctp_autoclose_ticks == 0) { 2016 /* 2017 * Really an error since stcb is NOT set to 2018 * autoclose 2019 */ 2020 return (0); 2021 } 2022 to_ticks = stcb->asoc.sctp_autoclose_ticks; 2023 tmr = &stcb->asoc.autoclose_timer; 2024 break; 2025 default: 2026 #ifdef SCTP_DEBUG 2027 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 2028 printf("sctp_timer_start:Unknown timer type %d\n", 2029 t_type); 2030 } 2031 #endif /* SCTP_DEBUG */ 2032 return (EFAULT); 2033 break; 2034 }; 2035 if ((to_ticks <= 0) || (tmr == NULL)) { 2036 #ifdef SCTP_DEBUG 2037 if (sctp_debug_on & SCTP_DEBUG_TIMER1) { 2038 printf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n", 2039 t_type, to_ticks, tmr); 2040 } 2041 #endif /* SCTP_DEBUG */ 2042 return (EFAULT); 2043 } 2044 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { 2045 /* 2046 * we do NOT allow you to have it already running. if it is 2047 * we leave the current one up unchanged 2048 */ 2049 return (EALREADY); 2050 } 2051 /* At this point we can proceed */ 2052 if (t_type == SCTP_TIMER_TYPE_SEND) { 2053 stcb->asoc.num_send_timers_up++; 2054 } 2055 tmr->stopped_from = 0; 2056 tmr->type = t_type; 2057 tmr->ep = (void *)inp; 2058 tmr->tcb = (void *)stcb; 2059 tmr->net = (void *)net; 2060 tmr->self = (void *)tmr; 2061 tmr->ticks = ticks; 2062 SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr); 2063 return (0); 2064 } 2065 2066 int 2067 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2068 struct sctp_nets *net, uint32_t from) 2069 { 2070 struct sctp_timer *tmr; 2071 2072 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && 2073 (inp == NULL)) 2074 return (EFAULT); 2075 2076 tmr = NULL; 2077 if (stcb) { 2078 SCTP_TCB_LOCK_ASSERT(stcb); 2079 } 2080 switch (t_type) { 2081 case SCTP_TIMER_TYPE_ADDR_WQ: 2082 tmr = &sctppcbinfo.addr_wq_timer; 2083 break; 2084 case SCTP_TIMER_TYPE_EARLYFR: 2085 if ((stcb == NULL) || (net == NULL)) { 2086 return (EFAULT); 2087 } 2088 tmr = &net->fr_timer; 2089 SCTP_STAT_INCR(sctps_earlyfrstop); 2090 break; 2091 case SCTP_TIMER_TYPE_ITERATOR: 2092 { 2093 struct sctp_iterator *it; 2094 2095 it = (struct sctp_iterator *)inp; 2096 tmr = &it->tmr; 2097 } 2098 break; 2099 case SCTP_TIMER_TYPE_SEND: 2100 if ((stcb == NULL) || (net == NULL)) { 2101 return (EFAULT); 2102 } 2103 tmr = &net->rxt_timer; 2104 break; 2105 case SCTP_TIMER_TYPE_INIT: 2106 if ((stcb == NULL) || (net == NULL)) { 2107 return (EFAULT); 2108 } 2109 tmr = &net->rxt_timer; 2110 break; 2111 case SCTP_TIMER_TYPE_RECV: 2112 if (stcb == NULL) { 2113 return (EFAULT); 2114 } 2115 tmr = &stcb->asoc.dack_timer; 2116 break; 2117 case SCTP_TIMER_TYPE_SHUTDOWN: 2118 if ((stcb == NULL) || (net == NULL)) { 2119 return (EFAULT); 2120 } 2121 tmr = &net->rxt_timer; 2122 break; 2123 case SCTP_TIMER_TYPE_HEARTBEAT: 2124 if (stcb == NULL) { 2125 return (EFAULT); 2126 } 2127 tmr = &stcb->asoc.hb_timer; 2128 break; 2129 case SCTP_TIMER_TYPE_COOKIE: 2130 if ((stcb == NULL) || (net == NULL)) { 2131 return (EFAULT); 2132 } 2133 tmr = &net->rxt_timer; 2134 break; 2135 case SCTP_TIMER_TYPE_NEWCOOKIE: 2136 /* nothing needed but the endpoint here */ 2137 tmr = &inp->sctp_ep.signature_change; 2138 
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			printf("sctp_timer_stop:Unknown timer type %d\n",
			    t_type);
		}
#endif /* SCTP_DEBUG */
		break;
	};
	if (tmr == NULL) {
		return (EFAULT);
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * perchance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped. So just
		 * return.
		 */
		return (0);
	}
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	SCTP_OS_TIMER_STOP(&tmr->timer);
	return (0);
}
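/*
 * Illustrative usage sketch (hypothetical caller; the from-location tag is
 * just an example): these two routines are normally used as a pair, e.g.
 * for the T3-rtx timer of a destination
 *
 *	(void)sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
 *	...
 *	(void)sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
 *	    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
 *
 * Several timer types share storage (STRRESET and ASOCKILL both use
 * stcb->asoc.strreset_timer, NEWCOOKIE and INPKILL both use
 * inp->sctp_ep.signature_change), which is why sctp_timer_stop() above only
 * cancels the underlying timer when tmr->type matches the requested t_type.
 */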
#ifdef SCTP_STAT_LOGGING

#endif

#ifdef SCTP_USE_ADLER32
static uint32_t
update_adler32(uint32_t adler, uint8_t * buf, int32_t len)
{
	uint32_t s1 = adler & 0xffff;
	uint32_t s2 = (adler >> 16) & 0xffff;
	int n;

	for (n = 0; n < len; n++, buf++) {
		/* s1 = (s1 + buf[n]) % BASE */
		/* first we add */
		s1 = (s1 + *buf);
		/*
		 * now if we need to, we do a mod by subtracting. It seems a
		 * bit faster since I really will only ever do one subtract
		 * at the MOST, since buf[n] is a max of 255.
		 */
		if (s1 >= SCTP_ADLER32_BASE) {
			s1 -= SCTP_ADLER32_BASE;
		}
		/* s2 = (s2 + s1) % BASE */
		/* first we add */
		s2 = (s2 + s1);
		/*
		 * again, it is more efficient (it seems) to subtract since
		 * the most s2 will ever be is (BASE-1 + BASE-1) in the
		 * worst case. This would then be (2 * BASE) - 2, which will
		 * still only do one subtract. On Intel this is much better
		 * to do this way and avoid the divide. Have not -pg'd on
		 * sparc.
		 */
		if (s2 >= SCTP_ADLER32_BASE) {
			s2 -= SCTP_ADLER32_BASE;
		}
	}
	/* Return the adler32 of the bytes buf[0..len-1] */
	return ((s2 << 16) + s1);
}

#endif
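/*
 * Worked example (illustrative input, only meaningful when built with
 * SCTP_USE_ADLER32): starting from the usual Adler-32 seed of 1
 * (s1 = 1, s2 = 0) and feeding the two bytes { 0x01, 0x02 } to
 * update_adler32() gives
 *
 *	byte 0x01:  s1 = 1 + 1 = 2,  s2 = 0 + 2 = 2
 *	byte 0x02:  s1 = 2 + 2 = 4,  s2 = 2 + 4 = 6
 *
 * and a return value of (s2 << 16) + s1 = 0x00060004.  Neither sum comes
 * anywhere near SCTP_ADLER32_BASE here, so the subtract-instead-of-modulo
 * shortcut above never fires.
 */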
uint32_t
sctp_calculate_len(struct mbuf *m)
{
	uint32_t tlen = 0;
	struct mbuf *at;

	at = m;
	while (at) {
		tlen += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}
	return (tlen);
}

#if defined(SCTP_WITH_NO_CSUM)

uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through the chain
	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is
	 * currently Adler32 but will change to CRC32x soon. Also has a side
	 * bonus calculate the total length of the mbuf chain. Note: if
	 * offset is greater than the total mbuf length, checksum=1,
	 * pktlen=0 is returned (ie. no real error code)
	 */
	if (pktlen == NULL)
		return (0);
	*pktlen = sctp_calculate_len(m);
	return (0);
}

#elif defined(SCTP_USE_INCHKSUM)

#include <machine/in_cksum.h>

uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through the chain
	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is
	 * currently Adler32 but will change to CRC32x soon. Also has a side
	 * bonus calculate the total length of the mbuf chain. Note: if
	 * offset is greater than the total mbuf length, checksum=1,
	 * pktlen=0 is returned (ie. no real error code)
	 */
	int32_t tlen = 0;
	struct mbuf *at;
	uint32_t the_sum, retsum;

	at = m;
	while (at) {
		tlen += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}
	the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset));
	if (pktlen != NULL)
		*pktlen = (tlen - offset);
	retsum = htons(the_sum);
	return (the_sum);
}

#else

uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through the chain
	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is
	 * currently Adler32 but will change to CRC32x soon. Also has a side
	 * bonus calculate the total length of the mbuf chain. Note: if
	 * offset is greater than the total mbuf length, checksum=1,
	 * pktlen=0 is returned (ie. no real error code)
	 */
	int32_t tlen = 0;

#ifdef SCTP_USE_ADLER32
	uint32_t base = 1L;

#else
	uint32_t base = 0xffffffff;

#endif /* SCTP_USE_ADLER32 */
	struct mbuf *at;

	at = m;
	/* find the correct mbuf and offset into mbuf */
	while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) {
		offset -= SCTP_BUF_LEN(at);	/* update remaining offset
						 * left */
		at = SCTP_BUF_NEXT(at);
	}
	while (at != NULL) {
		if ((SCTP_BUF_LEN(at) - offset) > 0) {
#ifdef SCTP_USE_ADLER32
			base = update_adler32(base,
			    (unsigned char *)(SCTP_BUF_AT(at, offset)),
			    (unsigned int)(SCTP_BUF_LEN(at) - offset));
#else
			if ((SCTP_BUF_LEN(at) - offset) < 4) {
				/* Use old method if less than 4 bytes */
				base = old_update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			} else {
				base = update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			}
#endif /* SCTP_USE_ADLER32 */
			tlen += SCTP_BUF_LEN(at) - offset;
			/* we only offset once into the first mbuf */
		}
		if (offset) {
			if (offset < SCTP_BUF_LEN(at))
				offset = 0;
			else
				offset -= SCTP_BUF_LEN(at);
		}
		at = SCTP_BUF_NEXT(at);
	}
	if (pktlen != NULL) {
		*pktlen = tlen;
	}
#ifdef SCTP_USE_ADLER32
	/* Adler32 */
	base = htonl(base);
#else
	/* CRC-32c */
	base = sctp_csum_finalize(base);
#endif
	return (base);
}


#endif

void
sctp_mtu_size_reset(struct sctp_inpcb *inp,
    struct sctp_association *asoc, uint32_t mtu)
{
	/*
	 * Reset the P-MTU size on this association, this involves changing
	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
	 * allow the DF flag to be cleared.
	 */
	struct sctp_tmit_chunk *chk;
	unsigned int eff_mtu, ovh;

	asoc->smallest_mtu = mtu;
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ovh = SCTP_MIN_OVERHEAD;
	} else {
		ovh = SCTP_MIN_V4_OVERHEAD;
	}
	eff_mtu = mtu - ovh;
	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {

		if (chk->send_size > eff_mtu) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->send_size > eff_mtu) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
}
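/*
 * Informal sketch of the smoothing done in sctp_calculate_rto() below:
 * net->lastsa and net->lastsv are kept as scaled estimators (roughly
 * 8 * SRTT and 4 * RTTVAR) and are updated with the Van Jacobson integer
 * recurrences
 *
 *	err     = R - (lastsa >> 3);	lastsa += err;
 *	lastsv += |err| - (lastsv >> 2);
 *
 * for a new measurement R in ms.  The value handed back is
 * ((lastsa >> 2) + lastsv) >> 1, clamped to [asoc.minrto, asoc.maxrto].
 * On the very first measurement, e.g. R = 100 ms, lastsa = 100 and
 * lastsv = 50, so the pre-clamp result is ((100 >> 2) + 50) >> 1 = 37.
 */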
/*
 * given an association and starting time of the current RTT period return
 * RTO in number of msecs net should point to the current network
 */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *old)
{
	/*
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now;

	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO) {
		calc_time -= (net->lastsa >> 3);
#ifdef SCTP_RTTVAR_LOGGING
		rto_logging(net, SCTP_LOG_RTTVAR);
#endif
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> 2);
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurement */
		net->lastsa = calc_time;
		net->lastsv = calc_time >> 1;
		first_measure = 1;
		net->prev_rtt = o_calctime;
#ifdef SCTP_RTTVAR_LOGGING
		rto_logging(net, SCTP_LOG_INITIAL_RTT);
#endif
	}
calc_rto:
	new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}

/*
 * return a pointer to a contiguous piece of data from the given mbuf chain
 * starting at 'off' for 'len' bytes. If the desired piece spans more than
 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
 * is >= 'len'. Returns NULL if there isn't 'len' bytes in the chain.
2563 */ 2564 __inline caddr_t 2565 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr) 2566 { 2567 uint32_t count; 2568 uint8_t *ptr; 2569 2570 ptr = in_ptr; 2571 if ((off < 0) || (len <= 0)) 2572 return (NULL); 2573 2574 /* find the desired start location */ 2575 while ((m != NULL) && (off > 0)) { 2576 if (off < SCTP_BUF_LEN(m)) 2577 break; 2578 off -= SCTP_BUF_LEN(m); 2579 m = SCTP_BUF_NEXT(m); 2580 } 2581 if (m == NULL) 2582 return (NULL); 2583 2584 /* is the current mbuf large enough (eg. contiguous)? */ 2585 if ((SCTP_BUF_LEN(m) - off) >= len) { 2586 return (mtod(m, caddr_t)+off); 2587 } else { 2588 /* else, it spans more than one mbuf, so save a temp copy... */ 2589 while ((m != NULL) && (len > 0)) { 2590 count = min(SCTP_BUF_LEN(m) - off, len); 2591 bcopy(mtod(m, caddr_t)+off, ptr, count); 2592 len -= count; 2593 ptr += count; 2594 off = 0; 2595 m = SCTP_BUF_NEXT(m); 2596 } 2597 if ((m == NULL) && (len > 0)) 2598 return (NULL); 2599 else 2600 return ((caddr_t)in_ptr); 2601 } 2602 } 2603 2604 2605 2606 struct sctp_paramhdr * 2607 sctp_get_next_param(struct mbuf *m, 2608 int offset, 2609 struct sctp_paramhdr *pull, 2610 int pull_limit) 2611 { 2612 /* This just provides a typed signature to Peter's Pull routine */ 2613 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 2614 (uint8_t *) pull)); 2615 } 2616 2617 2618 int 2619 sctp_add_pad_tombuf(struct mbuf *m, int padlen) 2620 { 2621 /* 2622 * add padlen bytes of 0 filled padding to the end of the mbuf. If 2623 * padlen is > 3 this routine will fail. 2624 */ 2625 uint8_t *dp; 2626 int i; 2627 2628 if (padlen > 3) { 2629 return (ENOBUFS); 2630 } 2631 if (M_TRAILINGSPACE(m)) { 2632 /* 2633 * The easy way. We hope the majority of the time we hit 2634 * here :) 2635 */ 2636 dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 2637 SCTP_BUF_LEN(m) += padlen; 2638 } else { 2639 /* Hard way we must grow the mbuf */ 2640 struct mbuf *tmp; 2641 2642 tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA); 2643 if (tmp == NULL) { 2644 /* Out of space GAK! we are in big trouble. */ 2645 return (ENOSPC); 2646 } 2647 /* setup and insert in middle */ 2648 SCTP_BUF_NEXT(tmp) = SCTP_BUF_NEXT(m); 2649 SCTP_BUF_LEN(tmp) = padlen; 2650 SCTP_BUF_NEXT(m) = tmp; 2651 dp = mtod(tmp, uint8_t *); 2652 } 2653 /* zero out the pad */ 2654 for (i = 0; i < padlen; i++) { 2655 *dp = 0; 2656 dp++; 2657 } 2658 return (0); 2659 } 2660 2661 int 2662 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 2663 { 2664 /* find the last mbuf in chain and pad it */ 2665 struct mbuf *m_at; 2666 2667 m_at = m; 2668 if (last_mbuf) { 2669 return (sctp_add_pad_tombuf(last_mbuf, padval)); 2670 } else { 2671 while (m_at) { 2672 if (SCTP_BUF_NEXT(m_at) == NULL) { 2673 return (sctp_add_pad_tombuf(m_at, padval)); 2674 } 2675 m_at = SCTP_BUF_NEXT(m_at); 2676 } 2677 } 2678 return (EFAULT); 2679 } 2680 2681 int sctp_asoc_change_wake = 0; 2682 2683 static void 2684 sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb, 2685 uint32_t error, void *data) 2686 { 2687 struct mbuf *m_notify; 2688 struct sctp_assoc_change *sac; 2689 struct sctp_queued_to_read *control; 2690 2691 /* 2692 * First if we are are going down dump everything we can to the 2693 * socket rcv queue. 
2694 */ 2695 2696 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 2697 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 2698 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) 2699 ) { 2700 /* If the socket is gone we are out of here */ 2701 return; 2702 } 2703 /* 2704 * For TCP model AND UDP connected sockets we will send an error up 2705 * when an ABORT comes in. 2706 */ 2707 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2708 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2709 (event == SCTP_COMM_LOST)) { 2710 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) 2711 stcb->sctp_socket->so_error = ECONNREFUSED; 2712 else 2713 stcb->sctp_socket->so_error = ECONNRESET; 2714 /* Wake ANY sleepers */ 2715 sorwakeup(stcb->sctp_socket); 2716 sowwakeup(stcb->sctp_socket); 2717 sctp_asoc_change_wake++; 2718 } 2719 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 2720 /* event not enabled */ 2721 return; 2722 } 2723 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA); 2724 if (m_notify == NULL) 2725 /* no space left */ 2726 return; 2727 SCTP_BUF_LEN(m_notify) = 0; 2728 2729 sac = mtod(m_notify, struct sctp_assoc_change *); 2730 sac->sac_type = SCTP_ASSOC_CHANGE; 2731 sac->sac_flags = 0; 2732 sac->sac_length = sizeof(struct sctp_assoc_change); 2733 sac->sac_state = event; 2734 sac->sac_error = error; 2735 /* XXX verify these stream counts */ 2736 sac->sac_outbound_streams = stcb->asoc.streamoutcnt; 2737 sac->sac_inbound_streams = stcb->asoc.streamincnt; 2738 sac->sac_assoc_id = sctp_get_associd(stcb); 2739 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change); 2740 SCTP_BUF_NEXT(m_notify) = NULL; 2741 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2742 0, 0, 0, 0, 0, 0, 2743 m_notify); 2744 if (control == NULL) { 2745 /* no memory */ 2746 sctp_m_freem(m_notify); 2747 return; 2748 } 2749 control->length = SCTP_BUF_LEN(m_notify); 2750 /* not that we need this */ 2751 control->tail_mbuf = m_notify; 2752 control->spec_flags = M_NOTIFICATION; 2753 sctp_add_to_readq(stcb->sctp_ep, stcb, 2754 control, 2755 &stcb->sctp_socket->so_rcv, 1); 2756 if (event == SCTP_COMM_LOST) { 2757 /* Wake up any sleeper */ 2758 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 2759 } 2760 } 2761 2762 static void 2763 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, 2764 struct sockaddr *sa, uint32_t error) 2765 { 2766 struct mbuf *m_notify; 2767 struct sctp_paddr_change *spc; 2768 struct sctp_queued_to_read *control; 2769 2770 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) 2771 /* event not enabled */ 2772 return; 2773 2774 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA); 2775 if (m_notify == NULL) 2776 return; 2777 SCTP_BUF_LEN(m_notify) = 0; 2778 spc = mtod(m_notify, struct sctp_paddr_change *); 2779 spc->spc_type = SCTP_PEER_ADDR_CHANGE; 2780 spc->spc_flags = 0; 2781 spc->spc_length = sizeof(struct sctp_paddr_change); 2782 if (sa->sa_family == AF_INET) { 2783 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in)); 2784 } else { 2785 struct sockaddr_in6 *sin6; 2786 2787 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6)); 2788 2789 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr; 2790 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { 2791 if (sin6->sin6_scope_id == 0) { 2792 /* recover scope_id for user */ 2793 (void)sa6_recoverscope(sin6); 2794 } else { 2795 /* clear embedded scope_id 
for user */ 2796 in6_clearscope(&sin6->sin6_addr); 2797 } 2798 } 2799 } 2800 spc->spc_state = state; 2801 spc->spc_error = error; 2802 spc->spc_assoc_id = sctp_get_associd(stcb); 2803 2804 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 2805 SCTP_BUF_NEXT(m_notify) = NULL; 2806 2807 /* append to socket */ 2808 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2809 0, 0, 0, 0, 0, 0, 2810 m_notify); 2811 if (control == NULL) { 2812 /* no memory */ 2813 sctp_m_freem(m_notify); 2814 return; 2815 } 2816 control->length = SCTP_BUF_LEN(m_notify); 2817 control->spec_flags = M_NOTIFICATION; 2818 /* not that we need this */ 2819 control->tail_mbuf = m_notify; 2820 sctp_add_to_readq(stcb->sctp_ep, stcb, 2821 control, 2822 &stcb->sctp_socket->so_rcv, 1); 2823 } 2824 2825 2826 static void 2827 sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error, 2828 struct sctp_tmit_chunk *chk) 2829 { 2830 struct mbuf *m_notify; 2831 struct sctp_send_failed *ssf; 2832 struct sctp_queued_to_read *control; 2833 int length; 2834 2835 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 2836 /* event not enabled */ 2837 return; 2838 2839 length = sizeof(struct sctp_send_failed) + chk->send_size; 2840 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA); 2841 if (m_notify == NULL) 2842 /* no space left */ 2843 return; 2844 SCTP_BUF_LEN(m_notify) = 0; 2845 ssf = mtod(m_notify, struct sctp_send_failed *); 2846 ssf->ssf_type = SCTP_SEND_FAILED; 2847 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT) 2848 ssf->ssf_flags = SCTP_DATA_UNSENT; 2849 else 2850 ssf->ssf_flags = SCTP_DATA_SENT; 2851 ssf->ssf_length = length; 2852 ssf->ssf_error = error; 2853 /* not exactly what the user sent in, but should be close :) */ 2854 ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number; 2855 ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq; 2856 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 2857 ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype; 2858 ssf->ssf_info.sinfo_context = chk->rec.data.context; 2859 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2860 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2861 SCTP_BUF_NEXT(m_notify) = chk->data; 2862 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed); 2863 2864 /* Steal off the mbuf */ 2865 chk->data = NULL; 2866 /* 2867 * For this case, we check the actual socket buffer, since the assoc 2868 * is going away we don't want to overfill the socket buffer for a 2869 * non-reader 2870 */ 2871 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 2872 sctp_m_freem(m_notify); 2873 return; 2874 } 2875 /* append to socket */ 2876 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2877 0, 0, 0, 0, 0, 0, 2878 m_notify); 2879 if (control == NULL) { 2880 /* no memory */ 2881 sctp_m_freem(m_notify); 2882 return; 2883 } 2884 control->spec_flags = M_NOTIFICATION; 2885 sctp_add_to_readq(stcb->sctp_ep, stcb, 2886 control, 2887 &stcb->sctp_socket->so_rcv, 1); 2888 } 2889 2890 2891 static void 2892 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, 2893 struct sctp_stream_queue_pending *sp) 2894 { 2895 struct mbuf *m_notify; 2896 struct sctp_send_failed *ssf; 2897 struct sctp_queued_to_read *control; 2898 int length; 2899 2900 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) 2901 /* event not enabled */ 2902 return; 2903 2904 length = sizeof(struct sctp_send_failed) + sp->length; 2905 m_notify = 
sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA); 2906 if (m_notify == NULL) 2907 /* no space left */ 2908 return; 2909 SCTP_BUF_LEN(m_notify) = 0; 2910 ssf = mtod(m_notify, struct sctp_send_failed *); 2911 ssf->ssf_type = SCTP_SEND_FAILED; 2912 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT) 2913 ssf->ssf_flags = SCTP_DATA_UNSENT; 2914 else 2915 ssf->ssf_flags = SCTP_DATA_SENT; 2916 ssf->ssf_length = length; 2917 ssf->ssf_error = error; 2918 /* not exactly what the user sent in, but should be close :) */ 2919 ssf->ssf_info.sinfo_stream = sp->stream; 2920 ssf->ssf_info.sinfo_ssn = sp->strseq; 2921 ssf->ssf_info.sinfo_flags = sp->sinfo_flags; 2922 ssf->ssf_info.sinfo_ppid = sp->ppid; 2923 ssf->ssf_info.sinfo_context = sp->context; 2924 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2925 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2926 SCTP_BUF_NEXT(m_notify) = sp->data; 2927 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed); 2928 2929 /* Steal off the mbuf */ 2930 sp->data = NULL; 2931 /* 2932 * For this case, we check the actual socket buffer, since the assoc 2933 * is going away we don't want to overfill the socket buffer for a 2934 * non-reader 2935 */ 2936 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 2937 sctp_m_freem(m_notify); 2938 return; 2939 } 2940 /* append to socket */ 2941 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2942 0, 0, 0, 0, 0, 0, 2943 m_notify); 2944 if (control == NULL) { 2945 /* no memory */ 2946 sctp_m_freem(m_notify); 2947 return; 2948 } 2949 control->spec_flags = M_NOTIFICATION; 2950 sctp_add_to_readq(stcb->sctp_ep, stcb, 2951 control, 2952 &stcb->sctp_socket->so_rcv, 1); 2953 } 2954 2955 2956 2957 static void 2958 sctp_notify_adaptation_layer(struct sctp_tcb *stcb, 2959 uint32_t error) 2960 { 2961 struct mbuf *m_notify; 2962 struct sctp_adaptation_event *sai; 2963 struct sctp_queued_to_read *control; 2964 2965 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) 2966 /* event not enabled */ 2967 return; 2968 2969 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA); 2970 if (m_notify == NULL) 2971 /* no space left */ 2972 return; 2973 SCTP_BUF_LEN(m_notify) = 0; 2974 sai = mtod(m_notify, struct sctp_adaptation_event *); 2975 sai->sai_type = SCTP_ADAPTATION_INDICATION; 2976 sai->sai_flags = 0; 2977 sai->sai_length = sizeof(struct sctp_adaptation_event); 2978 sai->sai_adaptation_ind = error; 2979 sai->sai_assoc_id = sctp_get_associd(stcb); 2980 2981 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event); 2982 SCTP_BUF_NEXT(m_notify) = NULL; 2983 2984 /* append to socket */ 2985 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2986 0, 0, 0, 0, 0, 0, 2987 m_notify); 2988 if (control == NULL) { 2989 /* no memory */ 2990 sctp_m_freem(m_notify); 2991 return; 2992 } 2993 control->length = SCTP_BUF_LEN(m_notify); 2994 control->spec_flags = M_NOTIFICATION; 2995 /* not that we need this */ 2996 control->tail_mbuf = m_notify; 2997 sctp_add_to_readq(stcb->sctp_ep, stcb, 2998 control, 2999 &stcb->sctp_socket->so_rcv, 1); 3000 } 3001 3002 /* This always must be called with the read-queue LOCKED in the INP */ 3003 void 3004 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, 3005 uint32_t error, int nolock) 3006 { 3007 struct mbuf *m_notify; 3008 struct sctp_pdapi_event *pdapi; 3009 struct sctp_queued_to_read *control; 3010 struct sockbuf *sb; 3011 3012 if 
(sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) 3013 /* event not enabled */ 3014 return; 3015 3016 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA); 3017 if (m_notify == NULL) 3018 /* no space left */ 3019 return; 3020 SCTP_BUF_LEN(m_notify) = 0; 3021 pdapi = mtod(m_notify, struct sctp_pdapi_event *); 3022 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; 3023 pdapi->pdapi_flags = 0; 3024 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event); 3025 pdapi->pdapi_indication = error; 3026 pdapi->pdapi_assoc_id = sctp_get_associd(stcb); 3027 3028 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event); 3029 SCTP_BUF_NEXT(m_notify) = NULL; 3030 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3031 0, 0, 0, 0, 0, 0, 3032 m_notify); 3033 if (control == NULL) { 3034 /* no memory */ 3035 sctp_m_freem(m_notify); 3036 return; 3037 } 3038 control->spec_flags = M_NOTIFICATION; 3039 control->length = SCTP_BUF_LEN(m_notify); 3040 /* not that we need this */ 3041 control->tail_mbuf = m_notify; 3042 control->held_length = 0; 3043 control->length = 0; 3044 if (nolock == 0) { 3045 SCTP_INP_READ_LOCK(stcb->sctp_ep); 3046 } 3047 sb = &stcb->sctp_socket->so_rcv; 3048 #ifdef SCTP_SB_LOGGING 3049 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify)); 3050 #endif 3051 sctp_sballoc(stcb, sb, m_notify); 3052 #ifdef SCTP_SB_LOGGING 3053 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3054 #endif 3055 atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify)); 3056 control->end_added = 1; 3057 if (stcb->asoc.control_pdapi) 3058 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next); 3059 else { 3060 /* we really should not see this case */ 3061 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next); 3062 } 3063 if (nolock == 0) { 3064 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 3065 } 3066 if (stcb->sctp_ep && stcb->sctp_socket) { 3067 /* This should always be the case */ 3068 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 3069 } 3070 } 3071 3072 static void 3073 sctp_notify_shutdown_event(struct sctp_tcb *stcb) 3074 { 3075 struct mbuf *m_notify; 3076 struct sctp_shutdown_event *sse; 3077 struct sctp_queued_to_read *control; 3078 3079 /* 3080 * For TCP model AND UDP connected sockets we will send an error up 3081 * when an SHUTDOWN completes 3082 */ 3083 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3084 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3085 /* mark socket closed for read/write and wakeup! 
*/ 3086 socantsendmore(stcb->sctp_socket); 3087 } 3088 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) 3089 /* event not enabled */ 3090 return; 3091 3092 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA); 3093 if (m_notify == NULL) 3094 /* no space left */ 3095 return; 3096 sse = mtod(m_notify, struct sctp_shutdown_event *); 3097 sse->sse_type = SCTP_SHUTDOWN_EVENT; 3098 sse->sse_flags = 0; 3099 sse->sse_length = sizeof(struct sctp_shutdown_event); 3100 sse->sse_assoc_id = sctp_get_associd(stcb); 3101 3102 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event); 3103 SCTP_BUF_NEXT(m_notify) = NULL; 3104 3105 /* append to socket */ 3106 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3107 0, 0, 0, 0, 0, 0, 3108 m_notify); 3109 if (control == NULL) { 3110 /* no memory */ 3111 sctp_m_freem(m_notify); 3112 return; 3113 } 3114 control->spec_flags = M_NOTIFICATION; 3115 control->length = SCTP_BUF_LEN(m_notify); 3116 /* not that we need this */ 3117 control->tail_mbuf = m_notify; 3118 sctp_add_to_readq(stcb->sctp_ep, stcb, 3119 control, 3120 &stcb->sctp_socket->so_rcv, 1); 3121 } 3122 3123 static void 3124 sctp_notify_stream_reset(struct sctp_tcb *stcb, 3125 int number_entries, uint16_t * list, int flag) 3126 { 3127 struct mbuf *m_notify; 3128 struct sctp_queued_to_read *control; 3129 struct sctp_stream_reset_event *strreset; 3130 int len; 3131 3132 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) 3133 /* event not enabled */ 3134 return; 3135 3136 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 3137 if (m_notify == NULL) 3138 /* no space left */ 3139 return; 3140 SCTP_BUF_LEN(m_notify) = 0; 3141 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3142 if (len > M_TRAILINGSPACE(m_notify)) { 3143 /* never enough room */ 3144 sctp_m_freem(m_notify); 3145 return; 3146 } 3147 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3148 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3149 if (number_entries == 0) { 3150 strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS; 3151 } else { 3152 strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST; 3153 } 3154 strreset->strreset_length = len; 3155 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3156 if (number_entries) { 3157 int i; 3158 3159 for (i = 0; i < number_entries; i++) { 3160 strreset->strreset_list[i] = ntohs(list[i]); 3161 } 3162 } 3163 SCTP_BUF_LEN(m_notify) = len; 3164 SCTP_BUF_NEXT(m_notify) = NULL; 3165 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3166 /* no space */ 3167 sctp_m_freem(m_notify); 3168 return; 3169 } 3170 /* append to socket */ 3171 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3172 0, 0, 0, 0, 0, 0, 3173 m_notify); 3174 if (control == NULL) { 3175 /* no memory */ 3176 sctp_m_freem(m_notify); 3177 return; 3178 } 3179 control->spec_flags = M_NOTIFICATION; 3180 control->length = SCTP_BUF_LEN(m_notify); 3181 /* not that we need this */ 3182 control->tail_mbuf = m_notify; 3183 sctp_add_to_readq(stcb->sctp_ep, stcb, 3184 control, 3185 &stcb->sctp_socket->so_rcv, 1); 3186 } 3187 3188 3189 void 3190 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 3191 uint32_t error, void *data) 3192 { 3193 if (stcb == NULL) { 3194 /* unlikely but */ 3195 return; 3196 } 3197 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3198 
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3199 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) 3200 ) { 3201 /* No notifications up when we are in a no socket state */ 3202 return; 3203 } 3204 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3205 /* Can't send up to a closed socket any notifications */ 3206 return; 3207 } 3208 if (stcb && (stcb->asoc.assoc_up_sent == 0) && (notification != SCTP_NOTIFY_ASSOC_UP)) { 3209 if ((notification != SCTP_NOTIFY_ASSOC_DOWN) && 3210 (notification != SCTP_NOTIFY_ASSOC_ABORTED) && 3211 (notification != SCTP_NOTIFY_SPECIAL_SP_FAIL) && 3212 (notification != SCTP_NOTIFY_DG_FAIL) && 3213 (notification != SCTP_NOTIFY_PEER_SHUTDOWN)) { 3214 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, 0, NULL); 3215 stcb->asoc.assoc_up_sent = 1; 3216 } 3217 } 3218 switch (notification) { 3219 case SCTP_NOTIFY_ASSOC_UP: 3220 if (stcb->asoc.assoc_up_sent == 0) { 3221 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL); 3222 stcb->asoc.assoc_up_sent = 1; 3223 } 3224 break; 3225 case SCTP_NOTIFY_ASSOC_DOWN: 3226 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL); 3227 break; 3228 case SCTP_NOTIFY_INTERFACE_DOWN: 3229 { 3230 struct sctp_nets *net; 3231 3232 net = (struct sctp_nets *)data; 3233 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 3234 (struct sockaddr *)&net->ro._l_addr, error); 3235 break; 3236 } 3237 case SCTP_NOTIFY_INTERFACE_UP: 3238 { 3239 struct sctp_nets *net; 3240 3241 net = (struct sctp_nets *)data; 3242 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 3243 (struct sockaddr *)&net->ro._l_addr, error); 3244 break; 3245 } 3246 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 3247 { 3248 struct sctp_nets *net; 3249 3250 net = (struct sctp_nets *)data; 3251 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 3252 (struct sockaddr *)&net->ro._l_addr, error); 3253 break; 3254 } 3255 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 3256 sctp_notify_send_failed2(stcb, error, 3257 (struct sctp_stream_queue_pending *)data); 3258 break; 3259 case SCTP_NOTIFY_DG_FAIL: 3260 sctp_notify_send_failed(stcb, error, 3261 (struct sctp_tmit_chunk *)data); 3262 break; 3263 case SCTP_NOTIFY_ADAPTATION_INDICATION: 3264 /* Here the error is the adaptation indication */ 3265 sctp_notify_adaptation_layer(stcb, error); 3266 break; 3267 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 3268 sctp_notify_partial_delivery_indication(stcb, error, 0); 3269 break; 3270 case SCTP_NOTIFY_STRDATA_ERR: 3271 break; 3272 case SCTP_NOTIFY_ASSOC_ABORTED: 3273 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL); 3274 break; 3275 case SCTP_NOTIFY_PEER_OPENED_STREAM: 3276 break; 3277 case SCTP_NOTIFY_STREAM_OPENED_OK: 3278 break; 3279 case SCTP_NOTIFY_ASSOC_RESTART: 3280 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data); 3281 break; 3282 case SCTP_NOTIFY_HB_RESP: 3283 break; 3284 case SCTP_NOTIFY_STR_RESET_SEND: 3285 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR); 3286 break; 3287 case SCTP_NOTIFY_STR_RESET_RECV: 3288 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR); 3289 break; 3290 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 3291 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_INBOUND_STR)); 3292 break; 3293 3294 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 3295 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_INBOUND_STR)); 3296 break; 3297 3298 case SCTP_NOTIFY_ASCONF_ADD_IP: 3299 
sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 3300 error); 3301 break; 3302 case SCTP_NOTIFY_ASCONF_DELETE_IP: 3303 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 3304 error); 3305 break; 3306 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 3307 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 3308 error); 3309 break; 3310 case SCTP_NOTIFY_ASCONF_SUCCESS: 3311 break; 3312 case SCTP_NOTIFY_ASCONF_FAILED: 3313 break; 3314 case SCTP_NOTIFY_PEER_SHUTDOWN: 3315 sctp_notify_shutdown_event(stcb); 3316 break; 3317 case SCTP_NOTIFY_AUTH_NEW_KEY: 3318 sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error, 3319 (uint16_t) (uintptr_t) data); 3320 break; 3321 #if 0 3322 case SCTP_NOTIFY_AUTH_KEY_CONFLICT: 3323 sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT, 3324 error, (uint16_t) (uintptr_t) data); 3325 break; 3326 #endif /* not yet? remove? */ 3327 3328 3329 default: 3330 #ifdef SCTP_DEBUG 3331 if (sctp_debug_on & SCTP_DEBUG_UTIL1) { 3332 printf("NOTIFY: unknown notification %xh (%u)\n", 3333 notification, notification); 3334 } 3335 #endif /* SCTP_DEBUG */ 3336 break; 3337 } /* end switch */ 3338 } 3339 3340 void 3341 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock) 3342 { 3343 struct sctp_association *asoc; 3344 struct sctp_stream_out *outs; 3345 struct sctp_tmit_chunk *chk; 3346 struct sctp_stream_queue_pending *sp; 3347 int i; 3348 3349 asoc = &stcb->asoc; 3350 3351 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3352 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3353 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3354 return; 3355 } 3356 /* now through all the gunk freeing chunks */ 3357 if (holds_lock == 0) 3358 SCTP_TCB_SEND_LOCK(stcb); 3359 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3360 /* For each stream */ 3361 outs = &stcb->asoc.strmout[i]; 3362 /* clean up any sends there */ 3363 stcb->asoc.locked_on_sending = NULL; 3364 sp = TAILQ_FIRST(&outs->outqueue); 3365 while (sp) { 3366 stcb->asoc.stream_queue_cnt--; 3367 TAILQ_REMOVE(&outs->outqueue, sp, next); 3368 sctp_free_spbufspace(stcb, asoc, sp); 3369 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 3370 SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp); 3371 if (sp->data) { 3372 sctp_m_freem(sp->data); 3373 sp->data = NULL; 3374 } 3375 if (sp->net) 3376 sctp_free_remote_addr(sp->net); 3377 sp->net = NULL; 3378 /* Free the chunk */ 3379 sctp_free_a_strmoq(stcb, sp); 3380 sp = TAILQ_FIRST(&outs->outqueue); 3381 } 3382 } 3383 3384 /* pending send queue SHOULD be empty */ 3385 if (!TAILQ_EMPTY(&asoc->send_queue)) { 3386 chk = TAILQ_FIRST(&asoc->send_queue); 3387 while (chk) { 3388 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 3389 asoc->send_queue_cnt--; 3390 if (chk->data) { 3391 /* 3392 * trim off the sctp chunk header(it should 3393 * be there) 3394 */ 3395 if (chk->send_size >= sizeof(struct sctp_data_chunk)) { 3396 m_adj(chk->data, sizeof(struct sctp_data_chunk)); 3397 sctp_mbuf_crush(chk->data); 3398 } 3399 } 3400 sctp_free_bufspace(stcb, asoc, chk, 1); 3401 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk); 3402 if (chk->data) { 3403 sctp_m_freem(chk->data); 3404 chk->data = NULL; 3405 } 3406 if (chk->whoTo) 3407 sctp_free_remote_addr(chk->whoTo); 3408 chk->whoTo = NULL; 3409 sctp_free_a_chunk(stcb, chk); 3410 chk = TAILQ_FIRST(&asoc->send_queue); 3411 } 3412 } 3413 /* sent queue SHOULD be empty */ 3414 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3415 chk = TAILQ_FIRST(&asoc->sent_queue); 3416 while (chk) { 3417 
TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 3418 asoc->sent_queue_cnt--; 3419 if (chk->data) { 3420 /* 3421 * trim off the sctp chunk header(it should 3422 * be there) 3423 */ 3424 if (chk->send_size >= sizeof(struct sctp_data_chunk)) { 3425 m_adj(chk->data, sizeof(struct sctp_data_chunk)); 3426 sctp_mbuf_crush(chk->data); 3427 } 3428 } 3429 sctp_free_bufspace(stcb, asoc, chk, 1); 3430 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 3431 SCTP_NOTIFY_DATAGRAM_SENT, chk); 3432 if (chk->data) { 3433 sctp_m_freem(chk->data); 3434 chk->data = NULL; 3435 } 3436 if (chk->whoTo) 3437 sctp_free_remote_addr(chk->whoTo); 3438 chk->whoTo = NULL; 3439 sctp_free_a_chunk(stcb, chk); 3440 chk = TAILQ_FIRST(&asoc->sent_queue); 3441 } 3442 } 3443 if (holds_lock == 0) 3444 SCTP_TCB_SEND_UNLOCK(stcb); 3445 } 3446 3447 void 3448 sctp_abort_notification(struct sctp_tcb *stcb, int error) 3449 { 3450 3451 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3452 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3453 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3454 return; 3455 } 3456 /* Tell them we lost the asoc */ 3457 sctp_report_all_outbound(stcb, 1); 3458 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 3459 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 3460 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 3461 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 3462 } 3463 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL); 3464 } 3465 3466 void 3467 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3468 struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err) 3469 { 3470 uint32_t vtag; 3471 3472 vtag = 0; 3473 if (stcb != NULL) { 3474 /* We have a TCB to abort, send notification too */ 3475 vtag = stcb->asoc.peer_vtag; 3476 sctp_abort_notification(stcb, 0); 3477 } 3478 sctp_send_abort(m, iphlen, sh, vtag, op_err); 3479 if (stcb != NULL) { 3480 /* Ok, now lets free it */ 3481 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 3482 } else { 3483 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3484 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3485 sctp_inpcb_free(inp, 1, 0); 3486 } 3487 } 3488 } 3489 } 3490 3491 void 3492 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3493 int error, struct mbuf *op_err) 3494 { 3495 uint32_t vtag; 3496 3497 if (stcb == NULL) { 3498 /* Got to have a TCB */ 3499 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3500 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3501 sctp_inpcb_free(inp, 1, 0); 3502 } 3503 } 3504 return; 3505 } 3506 vtag = stcb->asoc.peer_vtag; 3507 /* notify the ulp */ 3508 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) 3509 sctp_abort_notification(stcb, error); 3510 /* notify the peer */ 3511 sctp_send_abort_tcb(stcb, op_err); 3512 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 3513 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 3514 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 3515 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 3516 } 3517 /* now free the asoc */ 3518 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 3519 } 3520 3521 void 3522 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh, 3523 struct sctp_inpcb *inp, struct mbuf *op_err) 3524 { 3525 struct sctp_chunkhdr *ch, chunk_buf; 3526 unsigned int chk_length; 3527 3528 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 3529 /* Generate a TO address 
for future reference */ 3530 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 3531 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) { 3532 sctp_inpcb_free(inp, 1, 0); 3533 } 3534 } 3535 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3536 sizeof(*ch), (uint8_t *) & chunk_buf); 3537 while (ch != NULL) { 3538 chk_length = ntohs(ch->chunk_length); 3539 if (chk_length < sizeof(*ch)) { 3540 /* break to abort land */ 3541 break; 3542 } 3543 switch (ch->chunk_type) { 3544 case SCTP_PACKET_DROPPED: 3545 /* we don't respond to pkt-dropped */ 3546 return; 3547 case SCTP_ABORT_ASSOCIATION: 3548 /* we don't respond with an ABORT to an ABORT */ 3549 return; 3550 case SCTP_SHUTDOWN_COMPLETE: 3551 /* 3552 * we ignore it since we are not waiting for it and 3553 * peer is gone 3554 */ 3555 return; 3556 case SCTP_SHUTDOWN_ACK: 3557 sctp_send_shutdown_complete2(m, iphlen, sh); 3558 return; 3559 default: 3560 break; 3561 } 3562 offset += SCTP_SIZE32(chk_length); 3563 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3564 sizeof(*ch), (uint8_t *) & chunk_buf); 3565 } 3566 sctp_send_abort(m, iphlen, sh, 0, op_err); 3567 } 3568 3569 /* 3570 * check the inbound datagram to make sure there is not an abort inside it, 3571 * if there is return 1, else return 0. 3572 */ 3573 int 3574 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill) 3575 { 3576 struct sctp_chunkhdr *ch; 3577 struct sctp_init_chunk *init_chk, chunk_buf; 3578 int offset; 3579 unsigned int chk_length; 3580 3581 offset = iphlen + sizeof(struct sctphdr); 3582 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 3583 (uint8_t *) & chunk_buf); 3584 while (ch != NULL) { 3585 chk_length = ntohs(ch->chunk_length); 3586 if (chk_length < sizeof(*ch)) { 3587 /* packet is probably corrupt */ 3588 break; 3589 } 3590 /* we seem to be ok, is it an abort? */ 3591 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 3592 /* yep, tell them */ 3593 return (1); 3594 } 3595 if (ch->chunk_type == SCTP_INITIATION) { 3596 /* need to update the Vtag */ 3597 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 3598 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf); 3599 if (init_chk != NULL) { 3600 *vtagfill = ntohl(init_chk->init.initiate_tag); 3601 } 3602 } 3603 /* Nope, move to the next chunk */ 3604 offset += SCTP_SIZE32(chk_length); 3605 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 3606 sizeof(*ch), (uint8_t *) & chunk_buf); 3607 } 3608 return (0); 3609 } 3610 3611 /* 3612 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 3613 * set (i.e. 
it's 0) so, create this function to compare link local scopes 3614 */ 3615 uint32_t 3616 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 3617 { 3618 struct sockaddr_in6 a, b; 3619 3620 /* save copies */ 3621 a = *addr1; 3622 b = *addr2; 3623 3624 if (a.sin6_scope_id == 0) 3625 if (sa6_recoverscope(&a)) { 3626 /* can't get scope, so can't match */ 3627 return (0); 3628 } 3629 if (b.sin6_scope_id == 0) 3630 if (sa6_recoverscope(&b)) { 3631 /* can't get scope, so can't match */ 3632 return (0); 3633 } 3634 if (a.sin6_scope_id != b.sin6_scope_id) 3635 return (0); 3636 3637 return (1); 3638 } 3639 3640 /* 3641 * returns a sockaddr_in6 with embedded scope recovered and removed 3642 */ 3643 struct sockaddr_in6 * 3644 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 3645 { 3646 /* check and strip embedded scope junk */ 3647 if (addr->sin6_family == AF_INET6) { 3648 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 3649 if (addr->sin6_scope_id == 0) { 3650 *store = *addr; 3651 if (!sa6_recoverscope(store)) { 3652 /* use the recovered scope */ 3653 addr = store; 3654 } 3655 } else { 3656 /* else, return the original "to" addr */ 3657 in6_clearscope(&addr->sin6_addr); 3658 } 3659 } 3660 } 3661 return (addr); 3662 } 3663 3664 /* 3665 * are the two addresses the same? currently a "scopeless" check returns: 1 3666 * if same, 0 if not 3667 */ 3668 __inline int 3669 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 3670 { 3671 3672 /* must be valid */ 3673 if (sa1 == NULL || sa2 == NULL) 3674 return (0); 3675 3676 /* must be the same family */ 3677 if (sa1->sa_family != sa2->sa_family) 3678 return (0); 3679 3680 if (sa1->sa_family == AF_INET6) { 3681 /* IPv6 addresses */ 3682 struct sockaddr_in6 *sin6_1, *sin6_2; 3683 3684 sin6_1 = (struct sockaddr_in6 *)sa1; 3685 sin6_2 = (struct sockaddr_in6 *)sa2; 3686 return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr, 3687 &sin6_2->sin6_addr)); 3688 } else if (sa1->sa_family == AF_INET) { 3689 /* IPv4 addresses */ 3690 struct sockaddr_in *sin_1, *sin_2; 3691 3692 sin_1 = (struct sockaddr_in *)sa1; 3693 sin_2 = (struct sockaddr_in *)sa2; 3694 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 3695 } else { 3696 /* we don't do these... 
*/ 3697 return (0); 3698 } 3699 } 3700 3701 void 3702 sctp_print_address(struct sockaddr *sa) 3703 { 3704 3705 if (sa->sa_family == AF_INET6) { 3706 struct sockaddr_in6 *sin6; 3707 char ip6buf[INET6_ADDRSTRLEN]; 3708 3709 sin6 = (struct sockaddr_in6 *)sa; 3710 printf("IPv6 address: %s:port:%d scope:%u\n", 3711 ip6_sprintf(ip6buf, &sin6->sin6_addr), 3712 ntohs(sin6->sin6_port), 3713 sin6->sin6_scope_id); 3714 } else if (sa->sa_family == AF_INET) { 3715 struct sockaddr_in *sin; 3716 unsigned char *p; 3717 3718 sin = (struct sockaddr_in *)sa; 3719 p = (unsigned char *)&sin->sin_addr; 3720 printf("IPv4 address: %u.%u.%u.%u:%d\n", 3721 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 3722 } else { 3723 printf("?\n"); 3724 } 3725 } 3726 3727 void 3728 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh) 3729 { 3730 if (iph->ip_v == IPVERSION) { 3731 struct sockaddr_in lsa, fsa; 3732 3733 bzero(&lsa, sizeof(lsa)); 3734 lsa.sin_len = sizeof(lsa); 3735 lsa.sin_family = AF_INET; 3736 lsa.sin_addr = iph->ip_src; 3737 lsa.sin_port = sh->src_port; 3738 bzero(&fsa, sizeof(fsa)); 3739 fsa.sin_len = sizeof(fsa); 3740 fsa.sin_family = AF_INET; 3741 fsa.sin_addr = iph->ip_dst; 3742 fsa.sin_port = sh->dest_port; 3743 printf("src: "); 3744 sctp_print_address((struct sockaddr *)&lsa); 3745 printf("dest: "); 3746 sctp_print_address((struct sockaddr *)&fsa); 3747 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 3748 struct ip6_hdr *ip6; 3749 struct sockaddr_in6 lsa6, fsa6; 3750 3751 ip6 = (struct ip6_hdr *)iph; 3752 bzero(&lsa6, sizeof(lsa6)); 3753 lsa6.sin6_len = sizeof(lsa6); 3754 lsa6.sin6_family = AF_INET6; 3755 lsa6.sin6_addr = ip6->ip6_src; 3756 lsa6.sin6_port = sh->src_port; 3757 bzero(&fsa6, sizeof(fsa6)); 3758 fsa6.sin6_len = sizeof(fsa6); 3759 fsa6.sin6_family = AF_INET6; 3760 fsa6.sin6_addr = ip6->ip6_dst; 3761 fsa6.sin6_port = sh->dest_port; 3762 printf("src: "); 3763 sctp_print_address((struct sockaddr *)&lsa6); 3764 printf("dest: "); 3765 sctp_print_address((struct sockaddr *)&fsa6); 3766 } 3767 } 3768 3769 void 3770 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 3771 struct sctp_inpcb *new_inp, 3772 struct sctp_tcb *stcb) 3773 { 3774 /* 3775 * go through our old INP and pull off any control structures that 3776 * belong to stcb and move then to the new inp. 3777 */ 3778 struct socket *old_so, *new_so; 3779 struct sctp_queued_to_read *control, *nctl; 3780 struct sctp_readhead tmp_queue; 3781 struct mbuf *m; 3782 int error = 0; 3783 3784 old_so = old_inp->sctp_socket; 3785 new_so = new_inp->sctp_socket; 3786 TAILQ_INIT(&tmp_queue); 3787 3788 SOCKBUF_LOCK(&(old_so->so_rcv)); 3789 3790 error = sblock(&old_so->so_rcv, 0); 3791 3792 SOCKBUF_UNLOCK(&(old_so->so_rcv)); 3793 if (error) { 3794 /* 3795 * Gak, can't get sblock, we have a problem. data will be 3796 * left stranded.. and we don't dare look at it since the 3797 * other thread may be reading something. Oh well, its a 3798 * screwed up app that does a peeloff OR a accept while 3799 * reading from the main socket... actually its only the 3800 * peeloff() case, since I think read will fail on a 3801 * listening socket.. 
3802 */ 3803 return; 3804 } 3805 /* lock the socket buffers */ 3806 SCTP_INP_READ_LOCK(old_inp); 3807 control = TAILQ_FIRST(&old_inp->read_queue); 3808 /* Pull off all for out target stcb */ 3809 while (control) { 3810 nctl = TAILQ_NEXT(control, next); 3811 if (control->stcb == stcb) { 3812 /* remove it we want it */ 3813 TAILQ_REMOVE(&old_inp->read_queue, control, next); 3814 TAILQ_INSERT_TAIL(&tmp_queue, control, next); 3815 m = control->data; 3816 while (m) { 3817 #ifdef SCTP_SB_LOGGING 3818 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 3819 #endif 3820 sctp_sbfree(control, stcb, &old_so->so_rcv, m); 3821 #ifdef SCTP_SB_LOGGING 3822 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3823 #endif 3824 m = SCTP_BUF_NEXT(m); 3825 } 3826 } 3827 control = nctl; 3828 } 3829 SCTP_INP_READ_UNLOCK(old_inp); 3830 3831 /* Remove the sb-lock on the old socket */ 3832 SOCKBUF_LOCK(&(old_so->so_rcv)); 3833 3834 sbunlock(&old_so->so_rcv); 3835 SOCKBUF_UNLOCK(&(old_so->so_rcv)); 3836 3837 /* Now we move them over to the new socket buffer */ 3838 control = TAILQ_FIRST(&tmp_queue); 3839 SCTP_INP_READ_LOCK(new_inp); 3840 while (control) { 3841 nctl = TAILQ_NEXT(control, next); 3842 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next); 3843 m = control->data; 3844 while (m) { 3845 #ifdef SCTP_SB_LOGGING 3846 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 3847 #endif 3848 sctp_sballoc(stcb, &new_so->so_rcv, m); 3849 #ifdef SCTP_SB_LOGGING 3850 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3851 #endif 3852 m = SCTP_BUF_NEXT(m); 3853 } 3854 control = nctl; 3855 } 3856 SCTP_INP_READ_UNLOCK(new_inp); 3857 } 3858 3859 3860 void 3861 sctp_add_to_readq(struct sctp_inpcb *inp, 3862 struct sctp_tcb *stcb, 3863 struct sctp_queued_to_read *control, 3864 struct sockbuf *sb, 3865 int end) 3866 { 3867 /* 3868 * Here we must place the control on the end of the socket read 3869 * queue AND increment sb_cc so that select will work properly on 3870 * read. 3871 */ 3872 struct mbuf *m, *prev = NULL; 3873 3874 if (inp == NULL) { 3875 /* Gak, TSNH!! */ 3876 #ifdef INVARIANTS 3877 panic("Gak, inp NULL on add_to_readq"); 3878 #endif 3879 return; 3880 } 3881 SCTP_INP_READ_LOCK(inp); 3882 if (!(control->spec_flags & M_NOTIFICATION)) { 3883 atomic_add_int(&inp->total_recvs, 1); 3884 if (!control->do_not_ref_stcb) { 3885 atomic_add_int(&stcb->total_recvs, 1); 3886 } 3887 } 3888 m = control->data; 3889 control->held_length = 0; 3890 control->length = 0; 3891 while (m) { 3892 if (SCTP_BUF_LEN(m) == 0) { 3893 /* Skip mbufs with NO length */ 3894 if (prev == NULL) { 3895 /* First one */ 3896 control->data = sctp_m_free(m); 3897 m = control->data; 3898 } else { 3899 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 3900 m = SCTP_BUF_NEXT(prev); 3901 } 3902 if (m == NULL) { 3903 control->tail_mbuf = prev;; 3904 } 3905 continue; 3906 } 3907 prev = m; 3908 #ifdef SCTP_SB_LOGGING 3909 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m)); 3910 #endif 3911 sctp_sballoc(stcb, sb, m); 3912 #ifdef SCTP_SB_LOGGING 3913 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3914 #endif 3915 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 3916 m = SCTP_BUF_NEXT(m); 3917 } 3918 if (prev != NULL) { 3919 control->tail_mbuf = prev; 3920 } else { 3921 /* Everything got collapsed out?? 
*/ 3922 return; 3923 } 3924 if (end) { 3925 control->end_added = 1; 3926 } 3927 TAILQ_INSERT_TAIL(&inp->read_queue, control, next); 3928 SCTP_INP_READ_UNLOCK(inp); 3929 if (inp && inp->sctp_socket) { 3930 sctp_sorwakeup(inp, inp->sctp_socket); 3931 } 3932 } 3933 3934 3935 int 3936 sctp_append_to_readq(struct sctp_inpcb *inp, 3937 struct sctp_tcb *stcb, 3938 struct sctp_queued_to_read *control, 3939 struct mbuf *m, 3940 int end, 3941 int ctls_cumack, 3942 struct sockbuf *sb) 3943 { 3944 /* 3945 * A partial delivery API event is underway. OR we are appending on 3946 * the reassembly queue. 3947 * 3948 * If PDAPI this means we need to add m to the end of the data. 3949 * Increase the length in the control AND increment the sb_cc. 3950 * Otherwise sb is NULL and all we need to do is put it at the end 3951 * of the mbuf chain. 3952 */ 3953 int len = 0; 3954 struct mbuf *mm, *tail = NULL, *prev = NULL; 3955 3956 if (inp) { 3957 SCTP_INP_READ_LOCK(inp); 3958 } 3959 if (control == NULL) { 3960 get_out: 3961 if (inp) { 3962 SCTP_INP_READ_UNLOCK(inp); 3963 } 3964 return (-1); 3965 } 3966 if (control->end_added) { 3967 /* huh this one is complete? */ 3968 goto get_out; 3969 } 3970 mm = m; 3971 if (mm == NULL) { 3972 goto get_out; 3973 } 3974 while (mm) { 3975 if (SCTP_BUF_LEN(mm) == 0) { 3976 /* Skip mbufs with NO lenght */ 3977 if (prev == NULL) { 3978 /* First one */ 3979 m = sctp_m_free(mm); 3980 mm = m; 3981 } else { 3982 SCTP_BUF_NEXT(prev) = sctp_m_free(mm); 3983 mm = SCTP_BUF_NEXT(prev); 3984 } 3985 continue; 3986 } 3987 prev = mm; 3988 len += SCTP_BUF_LEN(mm); 3989 if (sb) { 3990 #ifdef SCTP_SB_LOGGING 3991 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm)); 3992 #endif 3993 sctp_sballoc(stcb, sb, mm); 3994 #ifdef SCTP_SB_LOGGING 3995 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 3996 #endif 3997 } 3998 mm = SCTP_BUF_NEXT(mm); 3999 } 4000 if (prev) { 4001 tail = prev; 4002 } else { 4003 /* Really there should always be a prev */ 4004 if (m == NULL) { 4005 /* Huh nothing left? */ 4006 #ifdef INVARIANTS 4007 panic("Nothing left to add?"); 4008 #else 4009 goto get_out; 4010 #endif 4011 } 4012 tail = m; 4013 } 4014 if (end) { 4015 /* message is complete */ 4016 if (control == stcb->asoc.control_pdapi) { 4017 stcb->asoc.control_pdapi = NULL; 4018 } 4019 control->held_length = 0; 4020 control->end_added = 1; 4021 } 4022 atomic_add_int(&control->length, len); 4023 if (control->tail_mbuf) { 4024 /* append */ 4025 SCTP_BUF_NEXT(control->tail_mbuf) = m; 4026 control->tail_mbuf = tail; 4027 } else { 4028 /* nothing there */ 4029 #ifdef INVARIANTS 4030 if (control->data != NULL) { 4031 panic("This should NOT happen"); 4032 } 4033 #endif 4034 control->data = m; 4035 control->tail_mbuf = tail; 4036 } 4037 /* 4038 * When we are appending in partial delivery, the cum-ack is used 4039 * for the actual pd-api highest tsn on this mbuf. The true cum-ack 4040 * is populated in the outbound sinfo structure from the true cumack 4041 * if the association exists... 
4042 */ 4043 control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack; 4044 if (inp) { 4045 SCTP_INP_READ_UNLOCK(inp); 4046 } 4047 if (inp && inp->sctp_socket) { 4048 sctp_sorwakeup(inp, inp->sctp_socket); 4049 } 4050 return (0); 4051 } 4052 4053 4054 4055 /*************HOLD THIS COMMENT FOR PATCH FILE OF 4056 *************ALTERNATE ROUTING CODE 4057 */ 4058 4059 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF 4060 *************ALTERNATE ROUTING CODE 4061 */ 4062 4063 struct mbuf * 4064 sctp_generate_invmanparam(int err) 4065 { 4066 /* Return a MBUF with a invalid mandatory parameter */ 4067 struct mbuf *m; 4068 4069 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA); 4070 if (m) { 4071 struct sctp_paramhdr *ph; 4072 4073 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr); 4074 ph = mtod(m, struct sctp_paramhdr *); 4075 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 4076 ph->param_type = htons(err); 4077 } 4078 return (m); 4079 } 4080 4081 #ifdef SCTP_MBCNT_LOGGING 4082 void 4083 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, 4084 struct sctp_tmit_chunk *tp1, int chk_cnt) 4085 { 4086 if (tp1->data == NULL) { 4087 return; 4088 } 4089 asoc->chunks_on_out_queue -= chk_cnt; 4090 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE, 4091 asoc->total_output_queue_size, 4092 tp1->book_size, 4093 0, 4094 tp1->mbcnt); 4095 if (asoc->total_output_queue_size >= tp1->book_size) { 4096 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size); 4097 } else { 4098 asoc->total_output_queue_size = 0; 4099 } 4100 4101 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) || 4102 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) { 4103 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { 4104 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; 4105 } else { 4106 stcb->sctp_socket->so_snd.sb_cc = 0; 4107 4108 } 4109 } 4110 } 4111 4112 #endif 4113 4114 int 4115 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, 4116 int reason, struct sctpchunk_listhead *queue) 4117 { 4118 int ret_sz = 0; 4119 int notdone; 4120 uint8_t foundeom = 0; 4121 4122 do { 4123 ret_sz += tp1->book_size; 4124 tp1->sent = SCTP_FORWARD_TSN_SKIP; 4125 if (tp1->data) { 4126 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 4127 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1); 4128 sctp_m_freem(tp1->data); 4129 tp1->data = NULL; 4130 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 4131 } 4132 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4133 stcb->asoc.sent_queue_cnt_removeable--; 4134 } 4135 if (queue == &stcb->asoc.send_queue) { 4136 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 4137 /* on to the sent queue */ 4138 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 4139 sctp_next); 4140 stcb->asoc.sent_queue_cnt++; 4141 } 4142 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 4143 SCTP_DATA_NOT_FRAG) { 4144 /* not frag'ed we ae done */ 4145 notdone = 0; 4146 foundeom = 1; 4147 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 4148 /* end of frag, we are done */ 4149 notdone = 0; 4150 foundeom = 1; 4151 } else { 4152 /* 4153 * Its a begin or middle piece, we must mark all of 4154 * it 4155 */ 4156 notdone = 1; 4157 tp1 = TAILQ_NEXT(tp1, sctp_next); 4158 } 4159 } while (tp1 && notdone); 4160 if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) { 4161 /* 4162 * The multi-part message was scattered across the send and 4163 * sent queue. 
4164 */ 4165 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 4166 /* 4167 * recurse throught the send_queue too, starting at the 4168 * beginning. 4169 */ 4170 if (tp1) { 4171 ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason, 4172 &stcb->asoc.send_queue); 4173 } else { 4174 printf("hmm, nothing on the send queue and no EOM?\n"); 4175 } 4176 } 4177 return (ret_sz); 4178 } 4179 4180 /* 4181 * checks to see if the given address, sa, is one that is currently known by 4182 * the kernel note: can't distinguish the same address on multiple interfaces 4183 * and doesn't handle multiple addresses with different zone/scope id's note: 4184 * ifa_ifwithaddr() compares the entire sockaddr struct 4185 */ 4186 struct sctp_ifa * 4187 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, int holds_lock) 4188 { 4189 struct sctp_laddr *laddr; 4190 4191 if (holds_lock == 0) 4192 SCTP_INP_RLOCK(inp); 4193 4194 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4195 if (laddr->ifa == NULL) 4196 continue; 4197 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 4198 continue; 4199 if (addr->sa_family == AF_INET) { 4200 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 4201 laddr->ifa->address.sin.sin_addr.s_addr) { 4202 /* found him. */ 4203 if (holds_lock == 0) 4204 SCTP_INP_RUNLOCK(inp); 4205 return (laddr->ifa); 4206 break; 4207 } 4208 } else if (addr->sa_family == AF_INET6) { 4209 if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr, 4210 &laddr->ifa->address.sin6.sin6_addr)) { 4211 /* found him. */ 4212 if (holds_lock == 0) 4213 SCTP_INP_RUNLOCK(inp); 4214 return (laddr->ifa); 4215 break; 4216 } 4217 } 4218 } 4219 if (holds_lock == 0) 4220 SCTP_INP_RUNLOCK(inp); 4221 return (NULL); 4222 } 4223 4224 struct sctp_ifa * 4225 sctp_find_ifa_in_ifn(struct sctp_ifn *sctp_ifnp, struct sockaddr *addr, 4226 int holds_lock) 4227 { 4228 struct sctp_ifa *sctp_ifap; 4229 4230 if (holds_lock == 0) 4231 SCTP_IPI_ADDR_LOCK(); 4232 4233 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) { 4234 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 4235 continue; 4236 if (addr->sa_family == AF_INET) { 4237 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 4238 sctp_ifap->address.sin.sin_addr.s_addr) { 4239 /* found him. */ 4240 if (holds_lock == 0) 4241 SCTP_IPI_ADDR_UNLOCK(); 4242 return (sctp_ifap); 4243 break; 4244 } 4245 } else if (addr->sa_family == AF_INET6) { 4246 if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr, 4247 &sctp_ifap->address.sin6.sin6_addr)) { 4248 /* found him. 
*/ 4249 if (holds_lock == 0) 4250 SCTP_IPI_ADDR_UNLOCK(); 4251 return (sctp_ifap); 4252 break; 4253 } 4254 } 4255 } 4256 if (holds_lock == 0) 4257 SCTP_IPI_ADDR_UNLOCK(); 4258 return (NULL); 4259 } 4260 4261 uint32_t 4262 sctp_get_ifa_hash_val(struct sockaddr *addr) 4263 { 4264 4265 if (addr->sa_family == AF_INET) { 4266 struct sockaddr_in *sin; 4267 4268 sin = (struct sockaddr_in *)addr; 4269 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16)); 4270 } else if (addr->sa_family == AF_INET6) { 4271 struct sockaddr_in6 *sin6; 4272 uint32_t hash_of_addr; 4273 4274 sin6 = (struct sockaddr_in6 *)addr; 4275 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + 4276 sin6->sin6_addr.s6_addr32[1] + 4277 sin6->sin6_addr.s6_addr32[2] + 4278 sin6->sin6_addr.s6_addr32[3]); 4279 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16)); 4280 return (hash_of_addr); 4281 } 4282 return (0); 4283 } 4284 4285 struct sctp_ifa * 4286 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) 4287 { 4288 struct sctp_ifa *sctp_ifap; 4289 struct sctp_vrf *vrf; 4290 struct sctp_ifalist *hash_head; 4291 uint32_t hash_of_addr; 4292 4293 if (holds_lock == 0) 4294 SCTP_IPI_ADDR_LOCK(); 4295 4296 vrf = sctp_find_vrf(vrf_id); 4297 if (vrf == NULL) { 4298 if (holds_lock == 0) 4299 SCTP_IPI_ADDR_UNLOCK(); 4300 return (NULL); 4301 } 4302 hash_of_addr = sctp_get_ifa_hash_val(addr); 4303 4304 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_hashmark)]; 4305 if (hash_head == NULL) { 4306 printf("hash_of_addr:%x mask:%x table:%x - ", 4307 (u_int)hash_of_addr, (u_int)vrf->vrf_hashmark, 4308 (u_int)(hash_of_addr & vrf->vrf_hashmark)); 4309 sctp_print_address(addr); 4310 printf("No such bucket for address\n"); 4311 if (holds_lock == 0) 4312 SCTP_IPI_ADDR_UNLOCK(); 4313 4314 return (NULL); 4315 } 4316 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) { 4317 if (sctp_ifap == NULL) { 4318 panic("Huh LIST_FOREACH corrupt"); 4319 } 4320 if (addr->sa_family != sctp_ifap->address.sa.sa_family) 4321 continue; 4322 if (addr->sa_family == AF_INET) { 4323 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 4324 sctp_ifap->address.sin.sin_addr.s_addr) { 4325 /* found him. */ 4326 if (holds_lock == 0) 4327 SCTP_IPI_ADDR_UNLOCK(); 4328 return (sctp_ifap); 4329 break; 4330 } 4331 } else if (addr->sa_family == AF_INET6) { 4332 if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr, 4333 &sctp_ifap->address.sin6.sin6_addr)) { 4334 /* found him. */ 4335 if (holds_lock == 0) 4336 SCTP_IPI_ADDR_UNLOCK(); 4337 return (sctp_ifap); 4338 break; 4339 } 4340 } 4341 } 4342 if (holds_lock == 0) 4343 SCTP_IPI_ADDR_UNLOCK(); 4344 return (NULL); 4345 } 4346 4347 static void 4348 sctp_user_rcvd(struct sctp_tcb *stcb, int *freed_so_far, int hold_rlock, 4349 uint32_t rwnd_req) 4350 { 4351 /* User pulled some data, do we need a rwnd update? 
*/ 4352 int r_unlocked = 0; 4353 uint32_t dif, rwnd; 4354 struct socket *so = NULL; 4355 4356 if (stcb == NULL) 4357 return; 4358 4359 atomic_add_int(&stcb->asoc.refcnt, 1); 4360 4361 if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | 4362 SCTP_STATE_SHUTDOWN_RECEIVED | 4363 SCTP_STATE_SHUTDOWN_ACK_SENT) 4364 ) { 4365 /* Pre-check If we are freeing no update */ 4366 goto no_lock; 4367 } 4368 SCTP_INP_INCR_REF(stcb->sctp_ep); 4369 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4370 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 4371 goto out; 4372 } 4373 so = stcb->sctp_socket; 4374 if (so == NULL) { 4375 goto out; 4376 } 4377 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far); 4378 /* Have you have freed enough to look */ 4379 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4380 sctp_misc_ints(SCTP_ENTER_USER_RECV, 4381 (stcb->asoc.my_rwnd - stcb->asoc.my_last_reported_rwnd), 4382 *freed_so_far, 4383 stcb->freed_by_sorcv_sincelast, 4384 rwnd_req); 4385 #endif 4386 *freed_so_far = 0; 4387 /* Yep, its worth a look and the lock overhead */ 4388 4389 /* Figure out what the rwnd would be */ 4390 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc); 4391 if (rwnd >= stcb->asoc.my_last_reported_rwnd) { 4392 dif = rwnd - stcb->asoc.my_last_reported_rwnd; 4393 } else { 4394 dif = 0; 4395 } 4396 if (dif >= rwnd_req) { 4397 if (hold_rlock) { 4398 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 4399 r_unlocked = 1; 4400 } 4401 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4402 /* 4403 * One last check before we allow the guy possibly 4404 * to get in. There is a race, where the guy has not 4405 * reached the gate. In that case 4406 */ 4407 goto out; 4408 } 4409 SCTP_TCB_LOCK(stcb); 4410 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4411 /* No reports here */ 4412 SCTP_TCB_UNLOCK(stcb); 4413 goto out; 4414 } 4415 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4416 sctp_misc_ints(SCTP_USER_RECV_SACKS, 4417 stcb->asoc.my_rwnd, 4418 stcb->asoc.my_last_reported_rwnd, 4419 stcb->freed_by_sorcv_sincelast, 4420 dif); 4421 #endif 4422 SCTP_STAT_INCR(sctps_wu_sacks_sent); 4423 sctp_send_sack(stcb); 4424 sctp_chunk_output(stcb->sctp_ep, stcb, 4425 SCTP_OUTPUT_FROM_USR_RCVD); 4426 /* make sure no timer is running */ 4427 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6); 4428 SCTP_TCB_UNLOCK(stcb); 4429 } else { 4430 /* Update how much we have pending */ 4431 stcb->freed_by_sorcv_sincelast = dif; 4432 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4433 sctp_misc_ints(SCTP_USER_RECV_SACKS, 4434 stcb->asoc.my_rwnd, 4435 stcb->asoc.my_last_reported_rwnd, 4436 stcb->freed_by_sorcv_sincelast, 4437 0); 4438 #endif 4439 } 4440 out: 4441 if (so && r_unlocked && hold_rlock) { 4442 SCTP_INP_READ_LOCK(stcb->sctp_ep); 4443 } 4444 SCTP_INP_DECR_REF(stcb->sctp_ep); 4445 no_lock: 4446 atomic_add_int(&stcb->asoc.refcnt, -1); 4447 return; 4448 } 4449 4450 int 4451 sctp_sorecvmsg(struct socket *so, 4452 struct uio *uio, 4453 struct mbuf **mp, 4454 struct sockaddr *from, 4455 int fromlen, 4456 int *msg_flags, 4457 struct sctp_sndrcvinfo *sinfo, 4458 int filling_sinfo) 4459 { 4460 /* 4461 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO. 4462 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy 4463 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ?? 
4464 * On the way out we may send out any combination of: 4465 * MSG_NOTIFICATION MSG_EOR 4466 * 4467 */ 4468 struct sctp_inpcb *inp = NULL; 4469 int my_len = 0; 4470 int cp_len = 0, error = 0; 4471 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 4472 struct mbuf *m = NULL, *embuf = NULL; 4473 struct sctp_tcb *stcb = NULL; 4474 int wakeup_read_socket = 0; 4475 int freecnt_applied = 0; 4476 int out_flags = 0, in_flags = 0; 4477 int block_allowed = 1; 4478 int freed_so_far = 0; 4479 int copied_so_far = 0; 4480 int in_eeor_mode = 0; 4481 int no_rcv_needed = 0; 4482 uint32_t rwnd_req = 0; 4483 int hold_sblock = 0; 4484 int hold_rlock = 0; 4485 int alen = 0; 4486 int slen = 0; 4487 int held_length = 0; 4488 4489 if (msg_flags) { 4490 in_flags = *msg_flags; 4491 } else { 4492 in_flags = 0; 4493 } 4494 slen = uio->uio_resid; 4495 /* Pull in and set up our int flags */ 4496 if (in_flags & MSG_OOB) { 4497 /* Out of band's NOT supported */ 4498 return (EOPNOTSUPP); 4499 } 4500 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 4501 return (EINVAL); 4502 } 4503 if ((in_flags & (MSG_DONTWAIT 4504 | MSG_NBIO 4505 )) || 4506 SCTP_SO_IS_NBIO(so)) { 4507 block_allowed = 0; 4508 } 4509 /* setup the endpoint */ 4510 inp = (struct sctp_inpcb *)so->so_pcb; 4511 if (inp == NULL) { 4512 return (EFAULT); 4513 } 4514 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 4515 /* Must be at least a MTU's worth */ 4516 if (rwnd_req < SCTP_MIN_RWND) 4517 rwnd_req = SCTP_MIN_RWND; 4518 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 4519 #ifdef SCTP_RECV_RWND_LOGGING 4520 sctp_misc_ints(SCTP_SORECV_ENTER, 4521 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid); 4522 #endif 4523 SOCKBUF_LOCK(&so->so_rcv); 4524 hold_sblock = 1; 4525 #ifdef SCTP_RECV_RWND_LOGGING 4526 sctp_misc_ints(SCTP_SORECV_ENTERPL, 4527 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid); 4528 #endif 4529 4530 4531 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0)); 4532 if (error) { 4533 goto release_unlocked; 4534 } 4535 restart: 4536 if (hold_sblock == 0) { 4537 SOCKBUF_LOCK(&so->so_rcv); 4538 hold_sblock = 1; 4539 } 4540 sbunlock(&so->so_rcv); 4541 4542 restart_nosblocks: 4543 if (hold_sblock == 0) { 4544 SOCKBUF_LOCK(&so->so_rcv); 4545 hold_sblock = 1; 4546 } 4547 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4548 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 4549 goto out; 4550 } 4551 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 4552 if (so->so_error) { 4553 error = so->so_error; 4554 if ((in_flags & MSG_PEEK) == 0) 4555 so->so_error = 0; 4556 } else { 4557 error = ENOTCONN; 4558 } 4559 goto out; 4560 } 4561 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) { 4562 /* we need to wait for data */ 4563 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4564 sctp_misc_ints(SCTP_SORECV_BLOCKSA, 4565 0, 0, so->so_rcv.sb_cc, uio->uio_resid); 4566 #endif 4567 if ((so->so_rcv.sb_cc == 0) && 4568 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 4569 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 4570 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 4571 /* 4572 * For active open side clear flags for 4573 * re-use passive open is blocked by 4574 * connect. 
4575 */ 4576 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 4577 /* 4578 * You were aborted, passive side 4579 * always hits here 4580 */ 4581 error = ECONNRESET; 4582 /* 4583 * You get this once if you are 4584 * active open side 4585 */ 4586 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4587 /* 4588 * Remove flag if on the 4589 * active open side 4590 */ 4591 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED; 4592 } 4593 } 4594 so->so_state &= ~(SS_ISCONNECTING | 4595 SS_ISDISCONNECTING | 4596 SS_ISCONFIRMING | 4597 SS_ISCONNECTED); 4598 if (error == 0) { 4599 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 4600 error = ENOTCONN; 4601 } else { 4602 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED; 4603 } 4604 } 4605 goto out; 4606 } 4607 } 4608 error = sbwait(&so->so_rcv); 4609 if (error) { 4610 goto out; 4611 } 4612 held_length = 0; 4613 goto restart_nosblocks; 4614 } else if (so->so_rcv.sb_cc == 0) { 4615 if (so->so_error) { 4616 error = so->so_error; 4617 if ((in_flags & MSG_PEEK) == 0) 4618 so->so_error = 0; 4619 } else { 4620 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 4621 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4622 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 4623 /* 4624 * For active open side clear flags 4625 * for re-use passive open is 4626 * blocked by connect. 4627 */ 4628 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 4629 /* 4630 * You were aborted, passive 4631 * side always hits here 4632 */ 4633 error = ECONNRESET; 4634 /* 4635 * You get this once if you 4636 * are active open side 4637 */ 4638 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4639 /* 4640 * Remove flag if on 4641 * the active open 4642 * side 4643 */ 4644 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED; 4645 } 4646 } 4647 so->so_state &= ~(SS_ISCONNECTING | 4648 SS_ISDISCONNECTING | 4649 SS_ISCONFIRMING | 4650 SS_ISCONNECTED); 4651 if (error == 0) { 4652 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 4653 error = ENOTCONN; 4654 } else { 4655 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED; 4656 } 4657 } 4658 goto out; 4659 } 4660 } 4661 error = EWOULDBLOCK; 4662 } 4663 goto out; 4664 } 4665 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0)); 4666 /* we possibly have data we can read */ 4667 control = TAILQ_FIRST(&inp->read_queue); 4668 if (control == NULL) { 4669 /* 4670 * This could be happening since the appender did the 4671 * increment but as not yet did the tailq insert onto the 4672 * read_queue 4673 */ 4674 if (hold_rlock == 0) { 4675 SCTP_INP_READ_LOCK(inp); 4676 hold_rlock = 1; 4677 } 4678 control = TAILQ_FIRST(&inp->read_queue); 4679 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 4680 #ifdef INVARIANTS 4681 panic("Huh, its non zero and nothing on control?"); 4682 #endif 4683 so->so_rcv.sb_cc = 0; 4684 } 4685 SCTP_INP_READ_UNLOCK(inp); 4686 hold_rlock = 0; 4687 goto restart; 4688 } 4689 if ((control->length == 0) && 4690 (control->do_not_ref_stcb)) { 4691 /* 4692 * Clean up code for freeing assoc that left behind a 4693 * pdapi.. maybe a peer in EEOR that just closed after 4694 * sending and never indicated a EOR. 4695 */ 4696 if (hold_rlock == 0) { 4697 hold_rlock = 1; 4698 SCTP_INP_READ_LOCK(inp); 4699 } 4700 control->held_length = 0; 4701 if (control->data) { 4702 /* Hmm there is data here .. 
fix */ 4703 struct mbuf *m; 4704 int cnt = 0; 4705 4706 m = control->data; 4707 while (m) { 4708 cnt += SCTP_BUF_LEN(m); 4709 if (SCTP_BUF_NEXT(m) == NULL) { 4710 control->tail_mbuf = m; 4711 control->end_added = 1; 4712 } 4713 m = SCTP_BUF_NEXT(m); 4714 } 4715 control->length = cnt; 4716 } else { 4717 /* remove it */ 4718 TAILQ_REMOVE(&inp->read_queue, control, next); 4719 /* Add back any hidden data */ 4720 sctp_free_remote_addr(control->whoFrom); 4721 sctp_free_a_readq(stcb, control); 4722 } 4723 if (hold_rlock) { 4724 hold_rlock = 0; 4725 SCTP_INP_READ_UNLOCK(inp); 4726 } 4727 goto restart; 4728 } 4729 if (control->length == 0) { 4730 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 4731 (filling_sinfo)) { 4732 /* find a more suitable one than this */ 4733 ctl = TAILQ_NEXT(control, next); 4734 while (ctl) { 4735 if ((ctl->stcb != control->stcb) && (ctl->length)) { 4736 /* found one */ 4737 control = ctl; 4738 goto found_one; 4739 } 4740 ctl = TAILQ_NEXT(ctl, next); 4741 } 4742 } 4743 /* 4744 * if we reach here, no suitable replacement is available 4745 * <or> fragment interleave is NOT on. So stuff the sb_cc 4746 * into our held count, and it's time to sleep again. 4747 */ 4748 held_length = so->so_rcv.sb_cc; 4749 control->held_length = so->so_rcv.sb_cc; 4750 goto restart; 4751 } 4752 /* Clear the held length since there is something to read */ 4753 control->held_length = 0; 4754 if (hold_rlock) { 4755 SCTP_INP_READ_UNLOCK(inp); 4756 hold_rlock = 0; 4757 } 4758 found_one: 4759 /* 4760 * If we reach here, control has some data for us to read off. 4761 * Note that stcb COULD be NULL. 4762 */ 4763 if (hold_sblock) { 4764 SOCKBUF_UNLOCK(&so->so_rcv); 4765 hold_sblock = 0; 4766 } 4767 stcb = control->stcb; 4768 if (stcb) { 4769 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 4770 (control->do_not_ref_stcb == 0)) { 4771 if (freecnt_applied == 0) 4772 stcb = NULL; 4773 } else if (control->do_not_ref_stcb == 0) { 4774 /* you can't free it on me, please */ 4775 /* 4776 * The lock on the socket buffer protects us so the 4777 * free code will stop. But since we used the 4778 * socketbuf lock and the sender uses the tcb_lock 4779 * to increment, we need to use the atomic add to 4780 * the refcnt 4781 */ 4782 atomic_add_int(&stcb->asoc.refcnt, 1); 4783 freecnt_applied = 1; 4784 /* 4785 * Setup to remember how much we have not yet told 4786 * the peer our rwnd has opened up. Note we grab the 4787 * value from the tcb from last time. Note too that 4788 * sack sending clears this when a sack is sent, 4789 * which is fine. Once we hit the rwnd_req, we then 4790 * will go to sctp_user_rcvd(), which will not 4791 * lock until it KNOWs it MUST send a WUP-SACK.
4792 * 4793 */ 4794 freed_so_far = stcb->freed_by_sorcv_sincelast; 4795 stcb->freed_by_sorcv_sincelast = 0; 4796 } 4797 } 4798 /* First lets get off the sinfo and sockaddr info */ 4799 if ((sinfo) && filling_sinfo) { 4800 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo)); 4801 nxt = TAILQ_NEXT(control, next); 4802 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) { 4803 struct sctp_extrcvinfo *s_extra; 4804 4805 s_extra = (struct sctp_extrcvinfo *)sinfo; 4806 if (nxt) { 4807 s_extra->next_flags = SCTP_NEXT_MSG_AVAIL; 4808 if (nxt->sinfo_flags & SCTP_UNORDERED) { 4809 s_extra->next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 4810 } 4811 if (nxt->spec_flags & M_NOTIFICATION) { 4812 s_extra->next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 4813 } 4814 s_extra->next_asocid = nxt->sinfo_assoc_id; 4815 s_extra->next_length = nxt->length; 4816 s_extra->next_ppid = nxt->sinfo_ppid; 4817 s_extra->next_stream = nxt->sinfo_stream; 4818 if (nxt->tail_mbuf != NULL) { 4819 if (nxt->end_added) { 4820 s_extra->next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 4821 } 4822 } 4823 } else { 4824 /* 4825 * we explicitly 0 this, since the memcpy 4826 * got some other things beyond the older 4827 * sinfo_ that is on the control's structure 4828 * :-D 4829 */ 4830 s_extra->next_flags = SCTP_NO_NEXT_MSG; 4831 s_extra->next_asocid = 0; 4832 s_extra->next_length = 0; 4833 s_extra->next_ppid = 0; 4834 s_extra->next_stream = 0; 4835 } 4836 } 4837 /* 4838 * update off the real current cum-ack, if we have an stcb. 4839 */ 4840 if (stcb) 4841 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 4842 /* 4843 * mask off the high bits, we keep the actual chunk bits in 4844 * there. 4845 */ 4846 sinfo->sinfo_flags &= 0x00ff; 4847 } 4848 if (fromlen && from) { 4849 struct sockaddr *to; 4850 4851 #ifdef INET 4852 cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin.sin_len); 4853 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 4854 ((struct sockaddr_in *)from)->sin_port = control->port_from; 4855 #else 4856 /* No AF_INET use AF_INET6 */ 4857 cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin6.sin6_len); 4858 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 4859 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from; 4860 #endif 4861 4862 to = from; 4863 #if defined(INET) && defined(INET6) 4864 if ((inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) && 4865 (to->sa_family == AF_INET) && 4866 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) { 4867 struct sockaddr_in *sin; 4868 struct sockaddr_in6 sin6; 4869 4870 sin = (struct sockaddr_in *)to; 4871 bzero(&sin6, sizeof(sin6)); 4872 sin6.sin6_family = AF_INET6; 4873 sin6.sin6_len = sizeof(struct sockaddr_in6); 4874 sin6.sin6_addr.s6_addr16[2] = 0xffff; 4875 bcopy(&sin->sin_addr, 4876 &sin6.sin6_addr.s6_addr16[3], 4877 sizeof(sin6.sin6_addr.s6_addr16[3])); 4878 sin6.sin6_port = sin->sin_port; 4879 memcpy(from, (caddr_t)&sin6, sizeof(sin6)); 4880 } 4881 #endif 4882 #if defined(INET6) 4883 { 4884 struct sockaddr_in6 lsa6, *to6; 4885 4886 to6 = (struct sockaddr_in6 *)to; 4887 sctp_recover_scope_mac(to6, (&lsa6)); 4888 } 4889 #endif 4890 } 4891 /* now copy out what data we can */ 4892 if (mp == NULL) { 4893 /* copy out each mbuf in the chain up to length */ 4894 get_more_data: 4895 m = control->data; 4896 while (m) { 4897 /* Move out all we can */ 4898 cp_len = (int)uio->uio_resid; 4899 my_len = (int)SCTP_BUF_LEN(m); 4900 if (cp_len > my_len) { 4901 /* not enough in this buf */ 4902 cp_len = my_len; 4903 } 4904 if (hold_rlock) { 4905 SCTP_INP_READ_UNLOCK(inp); 4906 
hold_rlock = 0; 4907 } 4908 if (cp_len > 0) 4909 error = uiomove(mtod(m, char *), cp_len, uio); 4910 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4911 sctp_misc_ints(SCTP_SORCV_DOESCPY, 4912 so->so_rcv.sb_cc, 4913 cp_len, 4914 0, 4915 0); 4916 #endif 4917 /* re-read */ 4918 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4919 goto release; 4920 } 4921 if (stcb && 4922 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4923 no_rcv_needed = 1; 4924 } 4925 if (error) { 4926 /* error we are out of here */ 4927 goto release; 4928 } 4929 if ((SCTP_BUF_NEXT(m) == NULL) && 4930 (cp_len >= SCTP_BUF_LEN(m)) && 4931 ((control->end_added == 0) || 4932 (control->end_added && (TAILQ_NEXT(control, next) == NULL))) 4933 ) { 4934 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4935 sctp_misc_ints(SCTP_SORCV_DOESLCK, 4936 so->so_rcv.sb_cc, 4937 cp_len, 4938 SCTP_BUF_LEN(m), 4939 control->length); 4940 #endif 4941 SCTP_INP_READ_LOCK(inp); 4942 hold_rlock = 1; 4943 } 4944 if (cp_len == SCTP_BUF_LEN(m)) { 4945 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4946 sctp_misc_ints(SCTP_SORCV_DOESADJ, 4947 so->so_rcv.sb_cc, 4948 control->length, 4949 cp_len, 4950 0); 4951 #endif 4952 if ((SCTP_BUF_NEXT(m) == NULL) && 4953 (control->end_added)) { 4954 out_flags |= MSG_EOR; 4955 } 4956 if (control->spec_flags & M_NOTIFICATION) { 4957 out_flags |= MSG_NOTIFICATION; 4958 } 4959 /* we ate up the mbuf */ 4960 if (in_flags & MSG_PEEK) { 4961 /* just looking */ 4962 m = SCTP_BUF_NEXT(m); 4963 copied_so_far += cp_len; 4964 } else { 4965 /* dispose of the mbuf */ 4966 #ifdef SCTP_SB_LOGGING 4967 sctp_sblog(&so->so_rcv, 4968 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 4969 #endif 4970 sctp_sbfree(control, stcb, &so->so_rcv, m); 4971 #ifdef SCTP_SB_LOGGING 4972 sctp_sblog(&so->so_rcv, 4973 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 4974 #endif 4975 embuf = m; 4976 copied_so_far += cp_len; 4977 freed_so_far += cp_len; 4978 alen = atomic_fetchadd_int(&control->length, -(cp_len)); 4979 if (alen < cp_len) { 4980 panic("Control length goes negative?"); 4981 } 4982 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 4983 sctp_misc_ints(SCTP_SORCV_PASSBF, 4984 so->so_rcv.sb_cc, 4985 control->length, 4986 0, 4987 0); 4988 #endif 4989 control->data = sctp_m_free(m); 4990 m = control->data; 4991 /* 4992 * been through it all, must hold sb 4993 * lock ok to null tail 4994 */ 4995 if (control->data == NULL) { 4996 #ifdef INVARIANTS 4997 if ((control->end_added == 0) || 4998 (TAILQ_NEXT(control, next) == NULL)) { 4999 /* 5000 * If the end is not 5001 * added, OR the 5002 * next is NOT null 5003 * we MUST have the 5004 * lock. 5005 */ 5006 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 5007 panic("Hmm we don't own the lock?"); 5008 } 5009 } 5010 #endif 5011 control->tail_mbuf = NULL; 5012 #ifdef INVARIANTS 5013 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 5014 panic("end_added, nothing left and no MSG_EOR"); 5015 } 5016 #endif 5017 } 5018 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 5019 sctp_misc_ints(SCTP_SORCV_ADJD, 5020 so->so_rcv.sb_cc, 5021 control->length, 5022 0, 5023 0); 5024 #endif 5025 } 5026 } else { 5027 /* Do we need to trim the mbuf? */ 5028 if (control->spec_flags & M_NOTIFICATION) { 5029 out_flags |= MSG_NOTIFICATION; 5030 } 5031 if ((in_flags & MSG_PEEK) == 0) { 5032 SCTP_BUF_RESV_UF(m, cp_len); 5033 SCTP_BUF_LEN(m) -= cp_len; 5034 #ifdef SCTP_SB_LOGGING 5035 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5036 #endif 5037 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5038 if (stcb) { 5039 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5040 } 5041 copied_so_far += cp_len; 5042 embuf = m; 5043 freed_so_far += cp_len; 5044 #ifdef SCTP_SB_LOGGING 5045 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5046 SCTP_LOG_SBRESULT, 0); 5047 #endif 5048 alen = atomic_fetchadd_int(&control->length, -(cp_len)); 5049 if (alen < cp_len) { 5050 panic("Control length goes negative2?"); 5051 } 5052 } else { 5053 copied_so_far += cp_len; 5054 } 5055 } 5056 if ((out_flags & MSG_EOR) || 5057 (uio->uio_resid == 0) 5058 ) { 5059 break; 5060 } 5061 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5062 (control->do_not_ref_stcb == 0) && 5063 (freed_so_far >= rwnd_req)) { 5064 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5065 } 5066 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 5067 sctp_misc_ints(SCTP_SORCV_BOTWHILE, 5068 so->so_rcv.sb_cc, 5069 control->length, 5070 0, 5071 0); 5072 #endif 5073 5074 } /* end while(m) */ 5075 /* 5076 * At this point we have looked at it all and we either have 5077 * a MSG_EOR/or read all the user wants... <OR> 5078 * control->length == 0. 5079 */ 5080 if ((out_flags & MSG_EOR) && 5081 ((in_flags & MSG_PEEK) == 0)) { 5082 /* we are done with this control */ 5083 if (control->length == 0) { 5084 if (control->data) { 5085 #ifdef INVARIANTS 5086 panic("control->data not null at read eor?"); 5087 #else 5088 printf("Strange, data left in the control buffer .. invarients would panic?\n"); 5089 sctp_m_freem(control->data); 5090 control->data = NULL; 5091 #endif 5092 } 5093 done_with_control: 5094 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 5095 sctp_misc_ints(SCTP_SORCV_FREECTL, 5096 so->so_rcv.sb_cc, 5097 0, 5098 0, 5099 0); 5100 #endif 5101 if (TAILQ_NEXT(control, next) == NULL) { 5102 /* 5103 * If we don't have a next we need a 5104 * lock, if there is a next interupt 5105 * is filling ahead of us and we 5106 * don't need a lock to remove this 5107 * guy (which is the head of the 5108 * queue). 5109 */ 5110 if (hold_rlock == 0) { 5111 SCTP_INP_READ_LOCK(inp); 5112 hold_rlock = 1; 5113 } 5114 } 5115 TAILQ_REMOVE(&inp->read_queue, control, next); 5116 /* Add back any hiddend data */ 5117 if (control->held_length) { 5118 held_length = 0; 5119 control->held_length = 0; 5120 wakeup_read_socket = 1; 5121 } 5122 no_rcv_needed = control->do_not_ref_stcb; 5123 sctp_free_remote_addr(control->whoFrom); 5124 control->data = NULL; 5125 sctp_free_a_readq(stcb, control); 5126 control = NULL; 5127 if ((freed_so_far >= rwnd_req) && (no_rcv_needed == 0)) 5128 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5129 5130 } else { 5131 /* 5132 * The user did not read all of this 5133 * message, turn off the returned MSG_EOR 5134 * since we are leaving more behind on the 5135 * control to read. 5136 */ 5137 #ifdef INVARIANTS 5138 if (control->end_added && (control->data == NULL) && 5139 (control->tail_mbuf == NULL)) { 5140 panic("Gak, control->length is corrupt?"); 5141 } 5142 #endif 5143 no_rcv_needed = control->do_not_ref_stcb; 5144 out_flags &= ~MSG_EOR; 5145 } 5146 } 5147 if (out_flags & MSG_EOR) { 5148 goto release; 5149 } 5150 if ((uio->uio_resid == 0) || 5151 ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1))) 5152 ) { 5153 goto release; 5154 } 5155 /* 5156 * If I hit here the receiver wants more and this message is 5157 * NOT done (pd-api). So two questions. Can we block? if not 5158 * we are done. 
Did the user NOT set MSG_WAITALL? 5159 */ 5160 if (block_allowed == 0) { 5161 goto release; 5162 } 5163 /* 5164 * We need to wait for more data a few things: - We don't 5165 * sbunlock() so we don't get someone else reading. - We 5166 * must be sure to account for the case where what is added 5167 * is NOT to our control when we wakeup. 5168 */ 5169 5170 /* 5171 * Do we need to tell the transport a rwnd update might be 5172 * needed before we go to sleep? 5173 */ 5174 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5175 ((freed_so_far >= rwnd_req) && 5176 (control->do_not_ref_stcb == 0) && 5177 (no_rcv_needed == 0))) { 5178 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5179 } 5180 wait_some_more: 5181 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5182 goto release; 5183 } 5184 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5185 goto release; 5186 5187 if (hold_rlock == 1) { 5188 SCTP_INP_READ_UNLOCK(inp); 5189 hold_rlock = 0; 5190 } 5191 if (hold_sblock == 0) { 5192 SOCKBUF_LOCK(&so->so_rcv); 5193 hold_sblock = 1; 5194 } 5195 #ifdef SCTP_RECV_DETAIL_RWND_LOGGING 5196 if (stcb) 5197 sctp_misc_ints(SCTP_SORECV_BLOCKSB, 5198 freed_so_far, 5199 stcb->asoc.my_rwnd, 5200 so->so_rcv.sb_cc, 5201 uio->uio_resid); 5202 else 5203 sctp_misc_ints(SCTP_SORECV_BLOCKSB, 5204 freed_so_far, 5205 0, 5206 so->so_rcv.sb_cc, 5207 uio->uio_resid); 5208 #endif 5209 if (so->so_rcv.sb_cc <= control->held_length) { 5210 error = sbwait(&so->so_rcv); 5211 if (error) { 5212 goto release; 5213 } 5214 control->held_length = 0; 5215 } 5216 if (hold_sblock) { 5217 SOCKBUF_UNLOCK(&so->so_rcv); 5218 hold_sblock = 0; 5219 } 5220 if (control->length == 0) { 5221 /* still nothing here */ 5222 if (control->end_added == 1) { 5223 /* he aborted, or is done i.e.did a shutdown */ 5224 out_flags |= MSG_EOR; 5225 if (control->pdapi_aborted) 5226 out_flags |= MSG_TRUNC; 5227 goto done_with_control; 5228 } 5229 if (so->so_rcv.sb_cc > held_length) { 5230 control->held_length = so->so_rcv.sb_cc; 5231 held_length = 0; 5232 } 5233 goto wait_some_more; 5234 } else if (control->data == NULL) { 5235 /* 5236 * we must re-sync since data is probably being 5237 * added 5238 */ 5239 SCTP_INP_READ_LOCK(inp); 5240 if ((control->length > 0) && (control->data == NULL)) { 5241 /* 5242 * big trouble.. we have the lock and its 5243 * corrupt? 5244 */ 5245 panic("Impossible data==NULL length !=0"); 5246 } 5247 SCTP_INP_READ_UNLOCK(inp); 5248 /* We will fall around to get more data */ 5249 } 5250 goto get_more_data; 5251 } else { 5252 /* copy out the mbuf chain */ 5253 get_more_data2: 5254 /* 5255 * Do we have a uio, I doubt it if so we grab the size from 5256 * it, if not you get it all 5257 */ 5258 if (uio) 5259 cp_len = uio->uio_resid; 5260 else 5261 cp_len = control->length; 5262 5263 if ((uint32_t) cp_len >= control->length) { 5264 /* easy way */ 5265 if ((control->end_added == 0) || 5266 (TAILQ_NEXT(control, next) == NULL)) { 5267 /* Need to get rlock */ 5268 if (hold_rlock == 0) { 5269 SCTP_INP_READ_LOCK(inp); 5270 hold_rlock = 1; 5271 } 5272 } 5273 if (control->end_added) { 5274 out_flags |= MSG_EOR; 5275 } 5276 if (control->spec_flags & M_NOTIFICATION) { 5277 out_flags |= MSG_NOTIFICATION; 5278 } 5279 if (uio) 5280 uio->uio_resid -= control->length; 5281 *mp = control->data; 5282 m = control->data; 5283 while (m) { 5284 #ifdef SCTP_SB_LOGGING 5285 sctp_sblog(&so->so_rcv, 5286 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5287 #endif 5288 sctp_sbfree(control, stcb, &so->so_rcv, m); 5289 freed_so_far += SCTP_BUF_LEN(m); 5290 #ifdef SCTP_SB_LOGGING 5291 sctp_sblog(&so->so_rcv, 5292 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5293 #endif 5294 m = SCTP_BUF_NEXT(m); 5295 } 5296 control->data = control->tail_mbuf = NULL; 5297 control->length = 0; 5298 if (out_flags & MSG_EOR) { 5299 /* Done with this control */ 5300 goto done_with_control; 5301 } 5302 /* still more to do with this conntrol */ 5303 /* do we really support msg_waitall here? */ 5304 if ((block_allowed == 0) || 5305 ((in_flags & MSG_WAITALL) == 0)) { 5306 goto release; 5307 } 5308 wait_some_more2: 5309 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 5310 goto release; 5311 if (hold_rlock == 1) { 5312 SCTP_INP_READ_UNLOCK(inp); 5313 hold_rlock = 0; 5314 } 5315 if (hold_sblock == 0) { 5316 SOCKBUF_LOCK(&so->so_rcv); 5317 hold_sblock = 1; 5318 } 5319 if (so->so_rcv.sb_cc <= control->held_length) { 5320 error = sbwait(&so->so_rcv); 5321 if (error) { 5322 goto release; 5323 } 5324 } 5325 if (hold_sblock) { 5326 SOCKBUF_UNLOCK(&so->so_rcv); 5327 hold_sblock = 0; 5328 } 5329 if (control->length == 0) { 5330 /* still nothing here */ 5331 if (control->end_added == 1) { 5332 /* 5333 * he aborted, or is done i.e. 5334 * shutdown 5335 */ 5336 out_flags |= MSG_EOR; 5337 if (control->pdapi_aborted) 5338 out_flags |= MSG_TRUNC; 5339 goto done_with_control; 5340 } 5341 if (so->so_rcv.sb_cc > held_length) { 5342 control->held_length = so->so_rcv.sb_cc; 5343 /* 5344 * We don't use held_length while 5345 * getting a message 5346 */ 5347 held_length = 0; 5348 } 5349 goto wait_some_more2; 5350 } 5351 goto get_more_data2; 5352 } else { 5353 /* hard way mbuf by mbuf */ 5354 m = control->data; 5355 if (control->end_added == 0) { 5356 /* need the rlock */ 5357 if (hold_rlock == 0) { 5358 SCTP_INP_READ_LOCK(inp); 5359 hold_rlock = 1; 5360 } 5361 } 5362 if (control->spec_flags & M_NOTIFICATION) { 5363 out_flags |= MSG_NOTIFICATION; 5364 } 5365 while ((m) && (cp_len > 0)) { 5366 if (cp_len >= SCTP_BUF_LEN(m)) { 5367 *mp = m; 5368 atomic_subtract_int(&control->length, SCTP_BUF_LEN(m)); 5369 if (uio) 5370 uio->uio_resid -= SCTP_BUF_LEN(m); 5371 cp_len -= SCTP_BUF_LEN(m); 5372 control->data = SCTP_BUF_NEXT(m); 5373 SCTP_BUF_NEXT(m) = NULL; 5374 #ifdef SCTP_SB_LOGGING 5375 sctp_sblog(&so->so_rcv, 5376 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5377 #endif 5378 sctp_sbfree(control, stcb, &so->so_rcv, m); 5379 freed_so_far += SCTP_BUF_LEN(m); 5380 #ifdef SCTP_SB_LOGGING 5381 sctp_sblog(&so->so_rcv, 5382 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5383 #endif 5384 mp = &SCTP_BUF_NEXT(m); 5385 m = control->data; 5386 } else { 5387 /* 5388 * got all he wants and its part of 5389 * this mbuf only. 
5390 */ 5391 if (uio) 5392 uio->uio_resid -= SCTP_BUF_LEN(m); 5393 cp_len -= SCTP_BUF_LEN(m); 5394 if (hold_rlock) { 5395 SCTP_INP_READ_UNLOCK(inp); 5396 hold_rlock = 0; 5397 } 5398 if (hold_sblock) { 5399 SOCKBUF_UNLOCK(&so->so_rcv); 5400 hold_sblock = 0; 5401 } 5402 *mp = SCTP_M_COPYM(m, 0, cp_len, 5403 M_TRYWAIT 5404 ); 5405 #ifdef SCTP_LOCK_LOGGING 5406 sctp_log_lock(inp, stcb, SCTP_LOG_LOCK_SOCKBUF_R); 5407 #endif 5408 if (hold_sblock == 0) { 5409 SOCKBUF_LOCK(&so->so_rcv); 5410 hold_sblock = 1; 5411 } 5412 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5413 goto release; 5414 5415 if (stcb && 5416 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5417 no_rcv_needed = 1; 5418 } 5419 SCTP_BUF_RESV_UF(m, cp_len); 5420 SCTP_BUF_LEN(m) -= cp_len; 5421 #ifdef SCTP_SB_LOGGING 5422 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5423 #endif 5424 freed_so_far += cp_len; 5425 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5426 if (stcb) { 5427 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5428 if ((freed_so_far >= rwnd_req) && 5429 (control->do_not_ref_stcb == 0) && 5430 (no_rcv_needed == 0)) 5431 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5432 } 5433 #ifdef SCTP_SB_LOGGING 5434 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5435 SCTP_LOG_SBRESULT, 0); 5436 #endif 5437 goto release; 5438 } 5439 } 5440 } 5441 } 5442 release: 5443 if (hold_rlock == 1) { 5444 SCTP_INP_READ_UNLOCK(inp); 5445 hold_rlock = 0; 5446 } 5447 if (hold_sblock == 0) { 5448 SOCKBUF_LOCK(&so->so_rcv); 5449 hold_sblock = 1; 5450 } 5451 sbunlock(&so->so_rcv); 5452 5453 release_unlocked: 5454 if (hold_sblock) { 5455 SOCKBUF_UNLOCK(&so->so_rcv); 5456 hold_sblock = 0; 5457 } 5458 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 5459 if ((freed_so_far >= rwnd_req) && 5460 (control && (control->do_not_ref_stcb == 0)) && 5461 (no_rcv_needed == 0)) 5462 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5463 } 5464 if (msg_flags) 5465 *msg_flags |= out_flags; 5466 out: 5467 if (hold_rlock == 1) { 5468 SCTP_INP_READ_UNLOCK(inp); 5469 hold_rlock = 0; 5470 } 5471 if (hold_sblock) { 5472 SOCKBUF_UNLOCK(&so->so_rcv); 5473 hold_sblock = 0; 5474 } 5475 if (freecnt_applied) { 5476 /* 5477 * The lock on the socket buffer protects us so the free 5478 * code will stop. But since we used the socketbuf lock and 5479 * the sender uses the tcb_lock to increment, we need to use 5480 * the atomic add to the refcnt. 5481 */ 5482 if (stcb == NULL) { 5483 panic("stcb for refcnt has gone NULL?"); 5484 } 5485 atomic_add_int(&stcb->asoc.refcnt, -1); 5486 freecnt_applied = 0; 5487 /* Save the value back for next time */ 5488 stcb->freed_by_sorcv_sincelast = freed_so_far; 5489 } 5490 #ifdef SCTP_RECV_RWND_LOGGING 5491 if (stcb) { 5492 sctp_misc_ints(SCTP_SORECV_DONE, 5493 freed_so_far, 5494 ((uio) ? (slen - uio->uio_resid) : slen), 5495 stcb->asoc.my_rwnd, 5496 so->so_rcv.sb_cc); 5497 } else { 5498 sctp_misc_ints(SCTP_SORECV_DONE, 5499 freed_so_far, 5500 ((uio) ? 
(slen - uio->uio_resid) : slen), 5501 0, 5502 so->so_rcv.sb_cc); 5503 } 5504 #endif 5505 if (wakeup_read_socket) { 5506 sctp_sorwakeup(inp, so); 5507 } 5508 return (error); 5509 } 5510 5511 5512 #ifdef SCTP_MBUF_LOGGING 5513 struct mbuf * 5514 sctp_m_free(struct mbuf *m) 5515 { 5516 if (SCTP_BUF_IS_EXTENDED(m)) { 5517 sctp_log_mb(m, SCTP_MBUF_IFREE); 5518 } 5519 return (m_free(m)); 5520 } 5521 5522 void 5523 sctp_m_freem(struct mbuf *mb) 5524 { 5525 while (mb != NULL) 5526 mb = sctp_m_free(mb); 5527 } 5528 5529 #endif 5530 5531 int 5532 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) 5533 { 5534 /* 5535 * Given a local address. For all associations that holds the 5536 * address, request a peer-set-primary. 5537 */ 5538 struct sctp_ifa *ifa; 5539 struct sctp_laddr *wi; 5540 5541 ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0); 5542 if (ifa == NULL) { 5543 return (EADDRNOTAVAIL); 5544 } 5545 /* 5546 * Now that we have the ifa we must awaken the iterator with this 5547 * message. 5548 */ 5549 wi = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr); 5550 if (wi == NULL) { 5551 return (ENOMEM); 5552 } 5553 /* Now incr the count and int wi structure */ 5554 SCTP_INCR_LADDR_COUNT(); 5555 bzero(wi, sizeof(*wi)); 5556 wi->ifa = ifa; 5557 wi->action = SCTP_SET_PRIM_ADDR; 5558 atomic_add_int(&ifa->refcount, 1); 5559 5560 /* Now add it to the work queue */ 5561 SCTP_IPI_ITERATOR_WQ_LOCK(); 5562 /* 5563 * Should this really be a tailq? As it is we will process the 5564 * newest first :-0 5565 */ 5566 LIST_INSERT_HEAD(&sctppcbinfo.addr_wq, wi, sctp_nxt_addr); 5567 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 5568 (struct sctp_inpcb *)NULL, 5569 (struct sctp_tcb *)NULL, 5570 (struct sctp_nets *)NULL); 5571 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 5572 return (0); 5573 } 5574 5575 5576 5577 5578 int 5579 sctp_soreceive(so, psa, uio, mp0, controlp, flagsp) 5580 struct socket *so; 5581 struct sockaddr **psa; 5582 struct uio *uio; 5583 struct mbuf **mp0; 5584 struct mbuf **controlp; 5585 int *flagsp; 5586 { 5587 int error, fromlen; 5588 uint8_t sockbuf[256]; 5589 struct sockaddr *from; 5590 struct sctp_extrcvinfo sinfo; 5591 int filling_sinfo = 1; 5592 struct sctp_inpcb *inp; 5593 5594 inp = (struct sctp_inpcb *)so->so_pcb; 5595 /* pickup the assoc we are reading from */ 5596 if (inp == NULL) { 5597 return (EINVAL); 5598 } 5599 if ((sctp_is_feature_off(inp, 5600 SCTP_PCB_FLAGS_RECVDATAIOEVNT)) || 5601 (controlp == NULL)) { 5602 /* user does not want the sndrcv ctl */ 5603 filling_sinfo = 0; 5604 } 5605 if (psa) { 5606 from = (struct sockaddr *)sockbuf; 5607 fromlen = sizeof(sockbuf); 5608 from->sa_len = 0; 5609 } else { 5610 from = NULL; 5611 fromlen = 0; 5612 } 5613 5614 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp, 5615 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 5616 if ((controlp) && (filling_sinfo)) { 5617 /* copy back the sinfo in a CMSG format */ 5618 if (filling_sinfo) 5619 *controlp = sctp_build_ctl_nchunk(inp, 5620 (struct sctp_sndrcvinfo *)&sinfo); 5621 else 5622 *controlp = NULL; 5623 } 5624 if (psa) { 5625 /* copy back the address info */ 5626 if (from && from->sa_len) { 5627 *psa = sodupsockaddr(from, M_NOWAIT); 5628 } else { 5629 *psa = NULL; 5630 } 5631 } 5632 return (error); 5633 } 5634
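/*
 * Illustrative sketch only (kept under #if 0 so it is never compiled): one
 * plausible userland view of the receive path above. A call to the
 * sctp_recvmsg(3) wrapper reaches sctp_soreceive()/sctp_sorecvmsg(), which
 * fills in the sndrcvinfo (when the RECVDATAIOEVNT feature is on, i.e.
 * filling_sinfo is set) and reports MSG_EOR/MSG_NOTIFICATION via msg_flags.
 * The descriptor "sd" and buffer size are assumptions; socket setup and
 * most error handling are intentionally omitted.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

static void
drain_one_message(int sd)
{
	char buf[2048];
	struct sockaddr_in from;
	socklen_t fromlen = sizeof(from);
	struct sctp_sndrcvinfo sinfo;
	int msg_flags = 0;
	ssize_t n;

	memset(&sinfo, 0, sizeof(sinfo));
	/* Ends up in sctp_sorecvmsg() in the kernel. */
	n = sctp_recvmsg(sd, buf, sizeof(buf),
	    (struct sockaddr *)&from, &fromlen, &sinfo, &msg_flags);
	if (n < 0) {
		perror("sctp_recvmsg");
		return;
	}
	if (msg_flags & MSG_NOTIFICATION) {
		/* A notification, not user data (spec_flags had M_NOTIFICATION). */
		printf("notification of %zd bytes\n", n);
		return;
	}
	printf("stream %u ppid 0x%x: %zd bytes%s\n",
	    sinfo.sinfo_stream, (unsigned int)ntohl(sinfo.sinfo_ppid), n,
	    (msg_flags & MSG_EOR) ? " (complete message)" : " (partial)");
}
#endif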