/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_indata.h>
#include <sys/unistd.h>

/* Declare all of our malloc named types */
MALLOC_DEFINE(SCTP_M_MAP, "sctp_map", "sctp asoc map descriptor");
MALLOC_DEFINE(SCTP_M_STRMI, "sctp_stri", "sctp stream in array");
MALLOC_DEFINE(SCTP_M_STRMO, "sctp_stro", "sctp stream out array");
MALLOC_DEFINE(SCTP_M_ASC_ADDR, "sctp_aadr", "sctp asconf address");
MALLOC_DEFINE(SCTP_M_ASC_IT, "sctp_a_it", "sctp asconf iterator");
MALLOC_DEFINE(SCTP_M_AUTH_CL, "sctp_atcl", "sctp auth chunklist");
MALLOC_DEFINE(SCTP_M_AUTH_KY, "sctp_atky", "sctp auth key");
MALLOC_DEFINE(SCTP_M_AUTH_HL, "sctp_athm", "sctp auth hmac list");
MALLOC_DEFINE(SCTP_M_AUTH_IF, "sctp_athi", "sctp auth info");
MALLOC_DEFINE(SCTP_M_STRESET, "sctp_stre", "sctp stream reset");
MALLOC_DEFINE(SCTP_M_CMSG, "sctp_cmsg", "sctp CMSG buffer");
MALLOC_DEFINE(SCTP_M_COPYAL, "sctp_cpal", "sctp copy all");
MALLOC_DEFINE(SCTP_M_VRF, "sctp_vrf", "sctp vrf struct");
MALLOC_DEFINE(SCTP_M_IFA, "sctp_ifa", "sctp ifa struct");
MALLOC_DEFINE(SCTP_M_IFN, "sctp_ifn", "sctp ifn struct");
MALLOC_DEFINE(SCTP_M_TIMW, "sctp_timw", "sctp time block");
MALLOC_DEFINE(SCTP_M_MVRF, "sctp_mvrf", "sctp mvrf pcb list");
MALLOC_DEFINE(SCTP_M_ITER, "sctp_iter", "sctp iterator control");
MALLOC_DEFINE(SCTP_M_SOCKOPT, "sctp_socko", "sctp socket option");
MALLOC_DEFINE(SCTP_M_MCORE, "sctp_mcore", "sctp mcore queue");

/* Global NON-VNET structure that controls the iterator */
struct iterator_control sctp_it_ctl;


void
sctp_wakeup_iterator(void)
{
    wakeup(&sctp_it_ctl.iterator_running);
}

static void
sctp_iterator_thread(void *v SCTP_UNUSED)
{
    SCTP_IPI_ITERATOR_WQ_LOCK();
    /* In FreeBSD this thread never terminates. */
    for (;;) {
        msleep(&sctp_it_ctl.iterator_running,
            &sctp_it_ctl.ipi_iterator_wq_mtx,
            0, "waiting_for_work", 0);
        sctp_iterator_worker();
    }
}

void
sctp_startup_iterator(void)
{
    if (sctp_it_ctl.thread_proc) {
        /* You only get one */
        return;
    }
    /* Initialize global locks here, thus only once. */
    SCTP_ITERATOR_LOCK_INIT();
    SCTP_IPI_ITERATOR_WQ_INIT();
    TAILQ_INIT(&sctp_it_ctl.iteratorhead);
    kproc_create(sctp_iterator_thread,
        (void *)NULL,
        &sctp_it_ctl.thread_proc,
        RFPROC,
        SCTP_KTHREAD_PAGES,
        SCTP_KTRHEAD_NAME);
}

#ifdef INET6

void
sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
{
    struct in6_ifaddr *ifa6;

    ifa6 = (struct in6_ifaddr *)ifa->ifa;
    ifa->flags = ifa6->ia6_flags;
    if (!MODULE_GLOBAL(ip6_use_deprecated)) {
        if (ifa->flags & IN6_IFF_DEPRECATED) {
            ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
        } else {
            ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
        }
    } else {
        ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
    }
    if (ifa->flags & (IN6_IFF_DETACHED |
        IN6_IFF_ANYCAST |
        IN6_IFF_NOTREADY)) {
        ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
    } else {
        ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
    }
}

#endif /* INET6 */


static uint32_t
sctp_is_desired_interface_type(struct ifnet *ifn)
{
    int result;

    /* check the interface type to see if it's one we care about */
    switch (ifn->if_type) {
    case IFT_ETHER:
    case IFT_ISO88023:
    case IFT_ISO88024:
    case IFT_ISO88025:
    case IFT_ISO88026:
    case IFT_STARLAN:
    case IFT_P10:
    case IFT_P80:
    case IFT_HY:
    case IFT_FDDI:
    case IFT_XETHER:
    case IFT_ISDNBASIC:
    case IFT_ISDNPRIMARY:
    case IFT_PTPSERIAL:
    case IFT_OTHER:
    case IFT_PPP:
    case IFT_LOOP:
    case IFT_SLIP:
    case IFT_GIF:
    case IFT_L2VLAN:
    case IFT_STF:
    case IFT_IP:
    case IFT_IPOVERCDLC:
    case IFT_IPOVERCLAW:
    case IFT_PROPVIRTUAL:	/* NetGraph Virtual too */
    case IFT_VIRTUALIPADDRESS:
        result = 1;
        break;
    default:
        result = 0;
    }

    return (result);
}


static void
sctp_init_ifns_for_vrf(int vrfid)
{
    /*
     * Here we must apply ANY locks needed by the IFN we access and also
     * make sure we lock any IFA that exists as we float through the
     * list of IFA's.
     */
    struct ifnet *ifn;
    struct ifaddr *ifa;
    struct sctp_ifa *sctp_ifa;
    uint32_t ifa_flags;

#ifdef INET6
    struct in6_ifaddr *ifa6;

#endif

    IFNET_RLOCK();
    TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
        if (sctp_is_desired_interface_type(ifn) == 0) {
            /* non desired type */
            continue;
        }
        IF_ADDR_RLOCK(ifn);
        TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
            if (ifa->ifa_addr == NULL) {
                continue;
            }
            switch (ifa->ifa_addr->sa_family) {
#ifdef INET
            case AF_INET:
                if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
                    continue;
                }
                break;
#endif
#ifdef INET6
            case AF_INET6:
                if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
                    /* skip unspecified addresses */
                    continue;
                }
                break;
#endif
            default:
                continue;
            }
            switch (ifa->ifa_addr->sa_family) {
#ifdef INET
            case AF_INET:
                ifa_flags = 0;
                break;
#endif
#ifdef INET6
            case AF_INET6:
                ifa6 = (struct in6_ifaddr *)ifa;
                ifa_flags = ifa6->ia6_flags;
                break;
#endif
            default:
                ifa_flags = 0;
                break;
            }
            sctp_ifa = sctp_add_addr_to_vrf(vrfid,
                (void *)ifn,
                ifn->if_index,
                ifn->if_type,
                ifn->if_xname,
                (void *)ifa,
                ifa->ifa_addr,
                ifa_flags,
                0);
            if (sctp_ifa) {
                sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
            }
        }
        IF_ADDR_RUNLOCK(ifn);
    }
    IFNET_RUNLOCK();
}

void
sctp_init_vrf_list(int vrfid)
{
    if (vrfid > SCTP_MAX_VRF_ID)
        /* can't do that */
        return;

    /* Don't care about return here */
    (void)sctp_allocate_vrf(vrfid);

    /*
     * Now we need to build all the ifn's for this vrf and their
     * addresses.
     */
    sctp_init_ifns_for_vrf(vrfid);
}

void
sctp_addr_change(struct ifaddr *ifa, int cmd)
{
    uint32_t ifa_flags = 0;

    /*
     * BSD only has one VRF. If this changes we will need to hook in the
     * right things here to get the id to pass to the address management
     * routine.
     */
    if (SCTP_BASE_VAR(first_time) == 0) {
        /* Special test to see if my ::1 will show up with this */
        SCTP_BASE_VAR(first_time) = 1;
        sctp_init_ifns_for_vrf(SCTP_DEFAULT_VRFID);
    }
    if ((cmd != RTM_ADD) && (cmd != RTM_DELETE)) {
        /* don't know what to do with this */
        return;
    }
    if (ifa->ifa_addr == NULL) {
        return;
    }
    if (sctp_is_desired_interface_type(ifa->ifa_ifp) == 0) {
        /* non desired type */
        return;
    }
    switch (ifa->ifa_addr->sa_family) {
#ifdef INET
    case AF_INET:
        if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
            return;
        }
        break;
#endif
#ifdef INET6
    case AF_INET6:
        ifa_flags = ((struct in6_ifaddr *)ifa)->ia6_flags;
        if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
            /* skip unspecified addresses */
            return;
        }
        break;
#endif
    default:
        /* non inet/inet6 skip */
        return;
    }
    if (cmd == RTM_ADD) {
        (void)sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, (void *)ifa->ifa_ifp,
            ifa->ifa_ifp->if_index, ifa->ifa_ifp->if_type, ifa->ifa_ifp->if_xname,
            (void *)ifa, ifa->ifa_addr, ifa_flags, 1);
    } else {

        sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr,
            ifa->ifa_ifp->if_index,
            ifa->ifa_ifp->if_xname);

        /*
         * We don't bump refcount here so when it completes the
         * final delete will happen.
         */
    }
}

void
sctp_add_or_del_interfaces(int (*pred) (struct ifnet *), int add)
{
    struct ifnet *ifn;
    struct ifaddr *ifa;

    IFNET_RLOCK();
    TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
        if (!(*pred) (ifn)) {
            continue;
        }
        TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
            sctp_addr_change(ifa, add ? RTM_ADD : RTM_DELETE);
        }
    }
    IFNET_RUNLOCK();
}
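
/*
 * sctp_get_mbuf_for_msg() below is a thin wrapper around m_getm2() that can
 * optionally insist on a single buffer large enough for the whole request.
 * Illustrative call (an assumption for documentation purposes, not taken
 * from the original sources): a caller needing one contiguous mbuf with a
 * packet header might use
 *
 *	m = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
 *
 * and must be prepared for a NULL return when no single buffer of that size
 * is available.
 */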

struct mbuf *
sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
    int how, int allonebuf, int type)
{
    struct mbuf *m = NULL;

    m = m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0);
    if (m == NULL) {
        /* bad, no memory */
        return (m);
    }
    if (allonebuf) {
        if (SCTP_BUF_SIZE(m) < space_needed) {
            m_freem(m);
            return (NULL);
        }
    }
    if (SCTP_BUF_NEXT(m)) {
        sctp_m_freem(SCTP_BUF_NEXT(m));
        SCTP_BUF_NEXT(m) = NULL;
    }
#ifdef SCTP_MBUF_LOGGING
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
        sctp_log_mb(m, SCTP_MBUF_IALLOC);
    }
#endif
    return (m);
}


#ifdef SCTP_PACKET_LOGGING
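/*
 * sctp_packet_log() below logs raw packets into the global
 * packet_log_buffer ring.  Each record it writes has the layout sketched in
 * the in-function comment: total_len, the previous end offset, a tick
 * count, the packet bytes, and finally the record's own start offset in the
 * last int.
 *
 * Worked example (illustrative only, assuming 4-byte ints; not taken from
 * the original sources): logging a 100-byte packet while packet_log_end is
 * 0 gives total_len = SCTP_SIZE32(100 + 4 * sizeof(int)) = 116.  The three
 * header ints land at offsets 0, 4, and 8, the packet bytes at 12..111, the
 * start offset (0) at 112, and packet_log_end advances to 116.
 */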
void
sctp_packet_log(struct mbuf *m)
{
    int *lenat, thisone;
    void *copyto;
    uint32_t *tick_tock;
    int length;
    int total_len;
    int grabbed_lock = 0;
    int value, newval, thisend, thisbegin;

    /*
     * Buffer layout.
     * - sizeof this entry (total_len)
     * - previous end (value)
     * - ticks of log (ticks)
     * o - ip packet
     * o - as logged
     * - where this started (thisbegin)
     * x <--end points here
     */
    length = SCTP_HEADER_LEN(m);
    total_len = SCTP_SIZE32((length + (4 * sizeof(int))));
    /* Log a packet to the buffer. */
    if (total_len > SCTP_PACKET_LOG_SIZE) {
        /* Can't log this packet, the buffer is not big enough. */
        return;
    }
    if (length < (int)(SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
        return;
    }
    atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), 1);
try_again:
    if (SCTP_BASE_VAR(packet_log_writers) > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
        SCTP_IP_PKTLOG_LOCK();
        grabbed_lock = 1;
again_locked:
        value = SCTP_BASE_VAR(packet_log_end);
        newval = SCTP_BASE_VAR(packet_log_end) + total_len;
        if (newval >= SCTP_PACKET_LOG_SIZE) {
            /* we wrapped */
            thisbegin = 0;
            thisend = total_len;
        } else {
            thisbegin = SCTP_BASE_VAR(packet_log_end);
            thisend = newval;
        }
        if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
            goto again_locked;
        }
    } else {
        value = SCTP_BASE_VAR(packet_log_end);
        newval = SCTP_BASE_VAR(packet_log_end) + total_len;
        if (newval >= SCTP_PACKET_LOG_SIZE) {
            /* we wrapped */
            thisbegin = 0;
            thisend = total_len;
        } else {
            thisbegin = SCTP_BASE_VAR(packet_log_end);
            thisend = newval;
        }
        if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
            goto try_again;
        }
    }
    /* Sanity check */
    if (thisend >= SCTP_PACKET_LOG_SIZE) {
        SCTP_PRINTF("Insanity stops a log thisbegin:%d thisend:%d writers:%d lock:%d end:%d\n",
            thisbegin,
            thisend,
            SCTP_BASE_VAR(packet_log_writers),
            grabbed_lock,
            SCTP_BASE_VAR(packet_log_end));
        SCTP_BASE_VAR(packet_log_end) = 0;
        goto no_log;

    }
    lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisbegin];
    *lenat = total_len;
    lenat++;
    *lenat = value;
    lenat++;
    tick_tock = (uint32_t *) lenat;
    lenat++;
    *tick_tock = sctp_get_tick_count();
    copyto = (void *)lenat;
    thisone = thisend - sizeof(int);
    lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisone];
    *lenat = thisbegin;
    if (grabbed_lock) {
        SCTP_IP_PKTLOG_UNLOCK();
        grabbed_lock = 0;
    }
    m_copydata(m, 0, length, (caddr_t)copyto);
no_log:
    if (grabbed_lock) {
        SCTP_IP_PKTLOG_UNLOCK();
    }
    atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers), 1);
}


int
sctp_copy_out_packet_log(uint8_t * target, int length)
{
    /*
     * We wind through the packet log, starting at the start, copying up
     * to length bytes out. We return the number of bytes copied.
     */
    int tocopy, this_copy;
    int *lenat;
    int did_delay = 0;

    tocopy = length;
    if (length < (int)(2 * sizeof(int))) {
        /* not enough room */
        return (0);
    }
    if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
        atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), SCTP_PKTLOG_WRITERS_NEED_LOCK);
again:
        if ((did_delay == 0) && (SCTP_BASE_VAR(packet_log_writers) != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
            /*
             * We delay here for just a moment hoping the
             * writer(s) that were present when we entered will
             * have left and we only have locking ones that will
             * contend with us for the lock. This does not
             * assure 100% access, but it's good enough for a
             * logging facility like this.
             */
            did_delay = 1;
            DELAY(10);
            goto again;
        }
    }
    SCTP_IP_PKTLOG_LOCK();
    lenat = (int *)target;
    *lenat = SCTP_BASE_VAR(packet_log_end);
    lenat++;
    this_copy = min((length - sizeof(int)), SCTP_PACKET_LOG_SIZE);
    memcpy((void *)lenat, (void *)SCTP_BASE_VAR(packet_log_buffer), this_copy);
    if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
        atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers),
            SCTP_PKTLOG_WRITERS_NEED_LOCK);
    }
    SCTP_IP_PKTLOG_UNLOCK();
    return (this_copy + sizeof(int));
}

#endif