/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_indata.h>
#include <sys/unistd.h>

/* Declare all of our malloc named types */
MALLOC_DEFINE(SCTP_M_MAP, "sctp_map", "sctp asoc map descriptor");
MALLOC_DEFINE(SCTP_M_STRMI, "sctp_stri", "sctp stream in array");
MALLOC_DEFINE(SCTP_M_STRMO, "sctp_stro", "sctp stream out array");
MALLOC_DEFINE(SCTP_M_ASC_ADDR, "sctp_aadr", "sctp asconf address");
MALLOC_DEFINE(SCTP_M_ASC_IT, "sctp_a_it", "sctp asconf iterator");
MALLOC_DEFINE(SCTP_M_AUTH_CL, "sctp_atcl", "sctp auth chunklist");
MALLOC_DEFINE(SCTP_M_AUTH_KY, "sctp_atky", "sctp auth key");
MALLOC_DEFINE(SCTP_M_AUTH_HL, "sctp_athm", "sctp auth hmac list");
MALLOC_DEFINE(SCTP_M_AUTH_IF, "sctp_athi", "sctp auth info");
MALLOC_DEFINE(SCTP_M_STRESET, "sctp_stre", "sctp stream reset");
MALLOC_DEFINE(SCTP_M_CMSG, "sctp_cmsg", "sctp CMSG buffer");
MALLOC_DEFINE(SCTP_M_COPYAL, "sctp_cpal", "sctp copy all");
MALLOC_DEFINE(SCTP_M_VRF, "sctp_vrf", "sctp vrf struct");
MALLOC_DEFINE(SCTP_M_IFA, "sctp_ifa", "sctp ifa struct");
MALLOC_DEFINE(SCTP_M_IFN, "sctp_ifn", "sctp ifn struct");
MALLOC_DEFINE(SCTP_M_TIMW, "sctp_timw", "sctp time block");
MALLOC_DEFINE(SCTP_M_MVRF, "sctp_mvrf", "sctp mvrf pcb list");
MALLOC_DEFINE(SCTP_M_ITER, "sctp_iter", "sctp iterator control");
MALLOC_DEFINE(SCTP_M_SOCKOPT, "sctp_socko", "sctp socket option");
MALLOC_DEFINE(SCTP_M_MCORE, "sctp_mcore", "sctp mcore queue");

/* Global NON-VNET structure that controls the iterator */
struct iterator_control sctp_it_ctl;


void
sctp_wakeup_iterator(void)
{
	wakeup(&sctp_it_ctl.iterator_running);
}

static void
sctp_iterator_thread(void *v SCTP_UNUSED)
{
	SCTP_IPI_ITERATOR_WQ_LOCK();
	/* In FreeBSD this thread never terminates. */
	for (;;) {
		msleep(&sctp_it_ctl.iterator_running,
		    &sctp_it_ctl.ipi_iterator_wq_mtx,
		    0, "waiting_for_work", 0);
		sctp_iterator_worker();
	}
}

void
sctp_startup_iterator(void)
{
	static int called = 0;
	int ret;

	if (called) {
		/* You only get one */
		return;
	}
	/* init the iterator head */
	called = 1;
	sctp_it_ctl.iterator_running = 0;
	sctp_it_ctl.iterator_flags = 0;
	sctp_it_ctl.cur_it = NULL;
	SCTP_ITERATOR_LOCK_INIT();
	SCTP_IPI_ITERATOR_WQ_INIT();
	TAILQ_INIT(&sctp_it_ctl.iteratorhead);
	ret = kproc_create(sctp_iterator_thread,
	    (void *)NULL,
	    &sctp_it_ctl.thread_proc,
	    RFPROC,
	    SCTP_KTHREAD_PAGES,
	    SCTP_KTRHEAD_NAME);
}

#ifdef INET6

void
sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
{
	struct in6_ifaddr *ifa6;

	ifa6 = (struct in6_ifaddr *)ifa->ifa;
	ifa->flags = ifa6->ia6_flags;
	if (!MODULE_GLOBAL(ip6_use_deprecated)) {
		if (ifa->flags & IN6_IFF_DEPRECATED) {
			ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
		} else {
			ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
		}
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
	if (ifa->flags & (IN6_IFF_DETACHED | IN6_IFF_ANYCAST | IN6_IFF_NOTREADY)) {
		ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
}

#endif				/* INET6 */


static uint32_t
sctp_is_desired_interface_type(struct ifaddr *ifa)
{
	int result;

	/* check the interface type to see if it's one we care about */
	switch (ifa->ifa_ifp->if_type) {
	case IFT_ETHER:
	case IFT_ISO88023:
	case IFT_ISO88024:
	case IFT_ISO88025:
	case IFT_ISO88026:
	case IFT_STARLAN:
	case IFT_P10:
	case IFT_P80:
	case IFT_HY:
	case IFT_FDDI:
	case IFT_XETHER:
	case IFT_ISDNBASIC:
	case IFT_ISDNPRIMARY:
	case IFT_PTPSERIAL:
	case IFT_OTHER:
	case IFT_PPP:
	case IFT_LOOP:
	case IFT_SLIP:
	case IFT_GIF:
	case IFT_L2VLAN:
	case IFT_STF:
	case IFT_IP:
	case IFT_IPOVERCDLC:
	case IFT_IPOVERCLAW:
	case IFT_PROPVIRTUAL:	/* NetGraph Virtual too */
	case IFT_VIRTUALIPADDRESS:
		result = 1;
		break;
	default:
		result = 0;
	}

	return (result);
}


static void
sctp_init_ifns_for_vrf(int vrfid)
{
	/*
	 * Here we must apply ANY locks needed by the IFN we access and also
	 * make sure we lock any IFA that exists as we float through the
	 * list of IFA's
	 */
	struct ifnet *ifn;
	struct ifaddr *ifa;
	struct sctp_ifa *sctp_ifa;
	uint32_t ifa_flags;

#ifdef INET6
	struct in6_ifaddr *ifa6;

#endif

	IFNET_RLOCK();
	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
		IF_ADDR_RLOCK(ifn);
		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
			if (ifa->ifa_addr == NULL) {
				continue;
			}
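			/*
			 * Only IPv4/IPv6 addresses are handled; wildcard
			 * addresses (INADDR_ANY, ::) are skipped before the
			 * address is registered with the VRF below.
			 */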
			switch (ifa->ifa_addr->sa_family) {
#ifdef INET
			case AF_INET:
				if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
					continue;
				}
				break;
#endif
#ifdef INET6
			case AF_INET6:
				if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
					/* skip unspecified addresses */
					continue;
				}
				break;
#endif
			default:
				continue;
			}
			if (sctp_is_desired_interface_type(ifa) == 0) {
				/* non desired type */
				continue;
			}
			switch (ifa->ifa_addr->sa_family) {
#ifdef INET
			case AF_INET:
				ifa_flags = 0;
				break;
#endif
#ifdef INET6
			case AF_INET6:
				ifa6 = (struct in6_ifaddr *)ifa;
				ifa_flags = ifa6->ia6_flags;
				break;
#endif
			default:
				ifa_flags = 0;
				break;
			}
			sctp_ifa = sctp_add_addr_to_vrf(vrfid,
			    (void *)ifn,
			    ifn->if_index,
			    ifn->if_type,
			    ifn->if_xname,
			    (void *)ifa,
			    ifa->ifa_addr,
			    ifa_flags,
			    0);
			if (sctp_ifa) {
				sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
			}
		}
		IF_ADDR_RUNLOCK(ifn);
	}
	IFNET_RUNLOCK();
}

void
sctp_init_vrf_list(int vrfid)
{
	if (vrfid > SCTP_MAX_VRF_ID)
		/* can't do that */
		return;

	/* Don't care about return here */
	(void)sctp_allocate_vrf(vrfid);

	/*
	 * Now we need to build all the ifn's for this vrf and their
	 * addresses.
	 */
	sctp_init_ifns_for_vrf(vrfid);
}

void
sctp_addr_change(struct ifaddr *ifa, int cmd)
{
	uint32_t ifa_flags = 0;

	/*
	 * BSD only has one VRF. If this changes, we will need to hook in
	 * the right things here to get the id to pass to the address
	 * management routine.
	 */
	if (SCTP_BASE_VAR(first_time) == 0) {
		/* Special test to see if my ::1 will show up with this */
		SCTP_BASE_VAR(first_time) = 1;
		sctp_init_ifns_for_vrf(SCTP_DEFAULT_VRFID);
	}
	if ((cmd != RTM_ADD) && (cmd != RTM_DELETE)) {
		/* don't know what to do with this */
		return;
	}
	if (ifa->ifa_addr == NULL) {
		return;
	}
	switch (ifa->ifa_addr->sa_family) {
#ifdef INET
	case AF_INET:
		if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
			return;
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		ifa_flags = ((struct in6_ifaddr *)ifa)->ia6_flags;
		if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
			/* skip unspecified addresses */
			return;
		}
		break;
#endif
	default:
		/* non inet/inet6 skip */
		return;
	}

	if (sctp_is_desired_interface_type(ifa) == 0) {
		/* non desired type */
		return;
	}
	if (cmd == RTM_ADD) {
		(void)sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, (void *)ifa->ifa_ifp,
		    ifa->ifa_ifp->if_index, ifa->ifa_ifp->if_type,
		    ifa->ifa_ifp->if_xname,
		    (void *)ifa, ifa->ifa_addr, ifa_flags, 1);
	} else {
		sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr,
		    ifa->ifa_ifp->if_index,
		    ifa->ifa_ifp->if_xname);
		/*
		 * We don't bump refcount here so when it completes the
		 * final delete will happen.
		 */
	}
}

void
sctp_add_or_del_interfaces(int (*pred) (struct ifnet *), int add)
{
	struct ifnet *ifn;
	struct ifaddr *ifa;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
		if (!(*pred) (ifn)) {
			continue;
		}
		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
			sctp_addr_change(ifa, add ? RTM_ADD : RTM_DELETE);
		}
	}
	IFNET_RUNLOCK();
}

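/*
 * Allocate a single mbuf with room for space_needed bytes where possible.
 * Any additional mbufs returned by m_getm2() are released; if allonebuf is
 * set and the first mbuf (or cluster) cannot hold space_needed bytes, the
 * allocation is freed and NULL is returned.
 */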
struct mbuf *
sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
    int how, int allonebuf, int type)
{
	struct mbuf *m = NULL;

	m = m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0);
	if (m == NULL) {
		/* bad, no memory */
		return (m);
	}
	if (allonebuf) {
		int siz;

		if (SCTP_BUF_IS_EXTENDED(m)) {
			siz = SCTP_BUF_EXTEND_SIZE(m);
		} else {
			if (want_header)
				siz = MHLEN;
			else
				siz = MLEN;
		}
		if (siz < space_needed) {
			m_freem(m);
			return (NULL);
		}
	}
	if (SCTP_BUF_NEXT(m)) {
		sctp_m_freem(SCTP_BUF_NEXT(m));
		SCTP_BUF_NEXT(m) = NULL;
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		if (SCTP_BUF_IS_EXTENDED(m)) {
			sctp_log_mb(m, SCTP_MBUF_IALLOC);
		}
	}
#endif
	return (m);
}


#ifdef SCTP_PACKET_LOGGING
void
sctp_packet_log(struct mbuf *m, int length)
{
	int *lenat, thisone;
	void *copyto;
	uint32_t *tick_tock;
	int total_len;
	int grabbed_lock = 0;
	int value, newval, thisend, thisbegin;

	/*
	 * Buffer layout of each log entry:
	 *   - size of this entry (total_len)
	 *   - previous end offset (value)
	 *   - ticks at time of logging (tick_tock)
	 *   - the IP packet, as logged
	 *   - offset where this entry started (thisbegin) <-- end points here
	 */
	total_len = SCTP_SIZE32((length + (4 * sizeof(int))));
	/* Log a packet to the buffer. */
	if (total_len > SCTP_PACKET_LOG_SIZE) {
		/* Can't log this packet, the buffer is not big enough. */
		return;
	}
	if (length < (int)(SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
		return;
	}
	atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), 1);
try_again:
	if (SCTP_BASE_VAR(packet_log_writers) > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		SCTP_IP_PKTLOG_LOCK();
		grabbed_lock = 1;
again_locked:
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto again_locked;
		}
	} else {
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto try_again;
		}
	}
	/* Sanity check */
	if (thisend >= SCTP_PACKET_LOG_SIZE) {
		SCTP_PRINTF("Insanity stops a log thisbegin:%d thisend:%d writers:%d lock:%d end:%d\n",
		    thisbegin,
		    thisend,
		    SCTP_BASE_VAR(packet_log_writers),
		    grabbed_lock,
		    SCTP_BASE_VAR(packet_log_end));
		SCTP_BASE_VAR(packet_log_end) = 0;
		goto no_log;
	}
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisbegin];
	*lenat = total_len;
	lenat++;
	*lenat = value;
	lenat++;
	tick_tock = (uint32_t *)lenat;
	lenat++;
	*tick_tock = sctp_get_tick_count();
	copyto = (void *)lenat;
	thisone = thisend - sizeof(int);
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisone];
	*lenat = thisbegin;
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
		grabbed_lock = 0;
	}
	m_copydata(m, 0, length, (caddr_t)copyto);
no_log:
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
	}
	atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers), 1);
}

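/*
 * Copy the packet log out to a caller supplied buffer. The first int written
 * to target is the current end offset of the ring buffer, followed by up to
 * SCTP_PACKET_LOG_SIZE bytes of raw log contents; the return value is the
 * number of bytes placed in target.
 */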
int
sctp_copy_out_packet_log(uint8_t *target, int length)
{
	/*
	 * We wind through the packet log from the beginning, copying up to
	 * length bytes out. We return the number of bytes copied.
	 */
	int tocopy, this_copy;
	int *lenat;
	int did_delay = 0;

	tocopy = length;
	if (length < (int)(2 * sizeof(int))) {
		/* not enough room */
		return (0);
	}
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), SCTP_PKTLOG_WRITERS_NEED_LOCK);
again:
		if ((did_delay == 0) && (SCTP_BASE_VAR(packet_log_writers) != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
			/*
			 * We delay here for just a moment, hoping the
			 * writer(s) that were present when we entered will
			 * have left and we only have locking ones that will
			 * contend with us for the lock. This does not
			 * assure 100% access, but it's good enough for a
			 * logging facility like this.
			 */
			did_delay = 1;
			DELAY(10);
			goto again;
		}
	}
	SCTP_IP_PKTLOG_LOCK();
	lenat = (int *)target;
	*lenat = SCTP_BASE_VAR(packet_log_end);
	lenat++;
	this_copy = min((length - sizeof(int)), SCTP_PACKET_LOG_SIZE);
	memcpy((void *)lenat, (void *)SCTP_BASE_VAR(packet_log_buffer), this_copy);
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers),
		    SCTP_PKTLOG_WRITERS_NEED_LOCK);
	}
	SCTP_IP_PKTLOG_UNLOCK();
	return (this_copy + sizeof(int));
}

#endif