/*
 * IP multicast forwarding procedures
 *
 * Written by David Waitzman, BBN Labs, August 1988.
 * Modified by Steve Deering, Stanford, February 1989.
 * Modified by Mark J. Steiglitz, Stanford, May, 1991
 * Modified by Van Jacobson, LBL, January 1993
 * Modified by Ajit Thyagarajan, PARC, August 1993
 * Modified by Bill Fenner, PARC, April 1995
 * Modified by Ahmed Helmy, SGI, June 1996
 * Modified by George Edmond Eddy (Rusty), ISI, February 1998
 * Modified by Pavlin Radoslavov, USC/ISI, May 1998, August 1999, October 2000
 * Modified by Hitoshi Asaeda, WIDE, August 2000
 * Modified by Pavlin Radoslavov, ICSI, October 2002
 *
 * MROUTING Revision: 3.5
 * and PIM-SMv2 and PIM-DM support, advanced API support,
 * bandwidth metering and signaling
 *
 * $FreeBSD$
 */

#include "opt_mac.h"
#include "opt_mrouting.h"

#ifdef PIM
#define _PIM_VT 1
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <net/if.h>
#include <net/netisr.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/igmp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_encap.h>
#include <netinet/ip_mroute.h>
#include <netinet/ip_var.h>
#ifdef PIM
#include <netinet/pim.h>
#include <netinet/pim_var.h>
#endif
#include <netinet/udp.h>
#include <machine/in_cksum.h>

/*
 * Control debugging code for rsvp and multicast routing code.
 * Can only set them with the debugger.
 */
static u_int rsvpdebug;		/* non-zero enables debugging */

static u_int mrtdebug;		/* any set of the flags below */
#define DEBUG_MFC	0x02
#define DEBUG_FORWARD	0x04
#define DEBUG_EXPIRE	0x08
#define DEBUG_XMIT	0x10
#define DEBUG_PIM	0x20

#define VIFI_INVALID	((vifi_t) -1)

#define M_HASCL(m)	((m)->m_flags & M_EXT)

static MALLOC_DEFINE(M_MRTABLE, "mroutetbl", "multicast routing tables");

/*
 * Locking.  We use two locks: one for the virtual interface table and
 * one for the forwarding table.  These locks may be nested in which case
 * the VIF lock must always be taken first.  Note that each lock is used
 * to cover not only the specific data structure but also related data
 * structures.  It may be better to add more fine-grained locking later;
 * it's not clear how performance-critical this code is.
 */
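/*
 * Illustrative note (an addition, not original code): any path that needs
 * both locks must honor the VIF-before-MFC order described above, e.g.
 *
 *	VIF_LOCK();
 *	MFC_LOCK();
 *	... look up the (S,G) entry, then transmit on a vif ...
 *	MFC_UNLOCK();
 *	VIF_UNLOCK();
 *
 * add_mfc() and X_ip_mforward() below follow exactly this pattern.
 */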

static struct mrtstat mrtstat;
SYSCTL_STRUCT(_net_inet_ip, OID_AUTO, mrtstat, CTLFLAG_RW,
    &mrtstat, mrtstat,
    "Multicast Routing Statistics (struct mrtstat, netinet/ip_mroute.h)");

static struct mfc *mfctable[MFCTBLSIZ];
SYSCTL_OPAQUE(_net_inet_ip, OID_AUTO, mfctable, CTLFLAG_RD,
    &mfctable, sizeof(mfctable), "S,*mfc[MFCTBLSIZ]",
    "Multicast Forwarding Table (struct *mfc[MFCTBLSIZ], netinet/ip_mroute.h)");

static struct mtx mfc_mtx;
#define MFC_LOCK()	mtx_lock(&mfc_mtx)
#define MFC_UNLOCK()	mtx_unlock(&mfc_mtx)
#define MFC_LOCK_ASSERT()	do {				\
	mtx_assert(&mfc_mtx, MA_OWNED);				\
	NET_ASSERT_GIANT();					\
} while (0)
#define MFC_LOCK_INIT()	mtx_init(&mfc_mtx, "mroute mfc table", NULL, MTX_DEF)
#define MFC_LOCK_DESTROY()	mtx_destroy(&mfc_mtx)

static struct vif viftable[MAXVIFS];
SYSCTL_OPAQUE(_net_inet_ip, OID_AUTO, viftable, CTLFLAG_RD,
    &viftable, sizeof(viftable), "S,vif[MAXVIFS]",
    "Multicast Virtual Interfaces (struct vif[MAXVIFS], netinet/ip_mroute.h)");

static struct mtx vif_mtx;
#define VIF_LOCK()	mtx_lock(&vif_mtx)
#define VIF_UNLOCK()	mtx_unlock(&vif_mtx)
#define VIF_LOCK_ASSERT()	mtx_assert(&vif_mtx, MA_OWNED)
#define VIF_LOCK_INIT()	mtx_init(&vif_mtx, "mroute vif table", NULL, MTX_DEF)
#define VIF_LOCK_DESTROY()	mtx_destroy(&vif_mtx)

static u_char nexpire[MFCTBLSIZ];

static struct callout expire_upcalls_ch;

#define EXPIRE_TIMEOUT	(hz / 4)	/* 4x / second */
#define UPCALL_EXPIRE	6		/* number of timeouts */

/*
 * Define the token bucket filter structures
 * tbftable -> each vif has one of these for storing info
 */

static struct tbf tbftable[MAXVIFS];
#define TBF_REPROCESS	(hz / 100)	/* 100x / second */

/*
 * 'Interfaces' associated with decapsulator (so we can tell
 * packets that went through it from ones that get reflected
 * by a broken gateway).  These interfaces are never linked into
 * the system ifnet list & no routes point to them.  I.e., packets
 * can't be sent this way.  They only exist as a placeholder for
 * multicast source verification.
 */
static struct ifnet multicast_decap_if[MAXVIFS];

#define ENCAP_TTL	64
#define ENCAP_PROTO	IPPROTO_IPIP	/* 4 */

/* prototype IP hdr for encapsulated packets */
static struct ip multicast_encap_iphdr = {
#if BYTE_ORDER == LITTLE_ENDIAN
	sizeof(struct ip) >> 2, IPVERSION,
#else
	IPVERSION, sizeof(struct ip) >> 2,
#endif
	0,				/* tos */
	sizeof(struct ip),		/* total length */
	0,				/* id */
	0,				/* frag offset */
	ENCAP_TTL, ENCAP_PROTO,
	0,				/* checksum */
};

/*
 * Bandwidth meter variables and constants
 */
static MALLOC_DEFINE(M_BWMETER, "bwmeter", "multicast upcall bw meters");
/*
 * Pending timeouts are stored in a hash table, the key being the
 * expiration time.  Periodically, the entries are analysed and processed.
 */
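/*
 * Illustrative sketch only (an addition; this scheme is an assumption for
 * exposition, the actual hashing macro lives with the bw_meter code further
 * below): an entry due to fire at time 'tv' could be bucketed as
 *
 *	hash = tv.tv_sec & (BW_METER_BUCKETS - 1);
 *
 * so each pass of the periodic callout only has to examine the buckets
 * whose time slots have come due, not every active meter.
 */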

#define BW_METER_BUCKETS	1024
static struct bw_meter *bw_meter_timers[BW_METER_BUCKETS];
static struct callout bw_meter_ch;
#define BW_METER_PERIOD (hz)		/* periodical handling of bw meters */

/*
 * Pending upcalls are stored in a vector which is flushed when
 * full, or periodically
 */
static struct bw_upcall bw_upcalls[BW_UPCALLS_MAX];
static u_int bw_upcalls_n;		/* # of pending upcalls */
static struct callout bw_upcalls_ch;
#define BW_UPCALLS_PERIOD (hz)		/* periodical flush of bw upcalls */

#ifdef PIM
static struct pimstat pimstat;
SYSCTL_STRUCT(_net_inet_pim, PIMCTL_STATS, stats, CTLFLAG_RD,
    &pimstat, pimstat,
    "PIM Statistics (struct pimstat, netinet/pim_var.h)");

/*
 * Note: the PIM Register encapsulation adds the following in front of a
 * data packet:
 *
 * struct pim_encap_hdr {
 *    struct ip ip;
 *    struct pim_encap_pimhdr pim;
 * }
 *
 */

struct pim_encap_pimhdr {
	struct pim pim;
	uint32_t flags;
};

static struct ip pim_encap_iphdr = {
#if BYTE_ORDER == LITTLE_ENDIAN
	sizeof(struct ip) >> 2,
	IPVERSION,
#else
	IPVERSION,
	sizeof(struct ip) >> 2,
#endif
	0,			/* tos */
	sizeof(struct ip),	/* total length */
	0,			/* id */
	0,			/* frag offset */
	ENCAP_TTL,
	IPPROTO_PIM,
	0,			/* checksum */
};

static struct pim_encap_pimhdr pim_encap_pimhdr = {
    {
	PIM_MAKE_VT(PIM_VERSION, PIM_REGISTER), /* PIM vers and message type */
	0,			/* reserved */
	0,			/* checksum */
    },
    0				/* flags */
};

static struct ifnet multicast_register_if;
static vifi_t reg_vif_num = VIFI_INVALID;
#endif /* PIM */

/*
 * Private variables.
 */
static vifi_t numvifs;
static const struct encaptab *encap_cookie;

/*
 * one-back cache used by mroute_encapcheck to locate a tunnel's vif
 * given a datagram's src ip address.
 */
static u_long last_encap_src;
static struct vif *last_encap_vif;

/*
 * Callout for queue processing.
 */
static struct callout tbf_reprocess_ch;

static u_long	X_ip_mcast_src(int vifi);
static int	X_ip_mforward(struct ip *ip, struct ifnet *ifp,
			struct mbuf *m, struct ip_moptions *imo);
static int	X_ip_mrouter_done(void);
static int	X_ip_mrouter_get(struct socket *so, struct sockopt *m);
static int	X_ip_mrouter_set(struct socket *so, struct sockopt *m);
static int	X_legal_vif_num(int vif);
static int	X_mrt_ioctl(int cmd, caddr_t data);

static int get_sg_cnt(struct sioc_sg_req *);
static int get_vif_cnt(struct sioc_vif_req *);
static int ip_mrouter_init(struct socket *, int);
static int add_vif(struct vifctl *);
static int del_vif(vifi_t);
static int add_mfc(struct mfcctl2 *);
static int del_mfc(struct mfcctl2 *);
static int set_api_config(uint32_t *); /* choose API capabilities */
static int socket_send(struct socket *, struct mbuf *, struct sockaddr_in *);
static int set_assert(int);
static void expire_upcalls(void *);
static int ip_mdq(struct mbuf *, struct ifnet *, struct mfc *, vifi_t);
static void phyint_send(struct ip *, struct vif *, struct mbuf *);
static void encap_send(struct ip *, struct vif *, struct mbuf *);
static void tbf_control(struct vif *, struct mbuf *, struct ip *, u_long);
static void tbf_queue(struct vif *, struct mbuf *);
static void tbf_process_q(struct vif *);
static void tbf_reprocess_q(void *);
static int tbf_dq_sel(struct vif *, struct ip *);
static void tbf_send_packet(struct vif *, struct mbuf *);
static void tbf_update_tokens(struct vif *);
static int priority(struct vif *, struct ip *);

/*
 * Bandwidth monitoring
 */
static void free_bw_list(struct bw_meter *list);
static int add_bw_upcall(struct bw_upcall *);
static int del_bw_upcall(struct bw_upcall *);
static void bw_meter_receive_packet(struct bw_meter *x, int plen,
		struct timeval *nowp);
static void bw_meter_prepare_upcall(struct bw_meter *x, struct timeval *nowp);
static void bw_upcalls_send(void);
static void schedule_bw_meter(struct bw_meter *x, struct timeval *nowp);
static void unschedule_bw_meter(struct bw_meter *x);
static void bw_meter_process(void);
static void expire_bw_upcalls_send(void *);
static void expire_bw_meter_process(void *);

#ifdef PIM
static int pim_register_send(struct ip *, struct vif *,
		struct mbuf *, struct mfc *);
static int pim_register_send_rp(struct ip *, struct vif *,
		struct mbuf *, struct mfc *);
static int pim_register_send_upcall(struct ip *, struct vif *,
		struct mbuf *, struct mfc *);
static struct mbuf *pim_register_prepare(struct ip *, struct mbuf *);
#endif

/*
 * whether or not special PIM assert processing is enabled.
 */
static int pim_assert;
/*
 * Rate limit for assert notification messages, in usec
 */
#define ASSERT_MSG_TIME		3000000

/*
 * Kernel multicast routing API capabilities and setup.
 * If more API capabilities are added to the kernel, they should be
 * recorded in `mrt_api_support'.
 */
static const uint32_t mrt_api_support = (MRT_MFC_FLAGS_DISABLE_WRONGVIF |
					 MRT_MFC_FLAGS_BORDER_VIF |
					 MRT_MFC_RP |
					 MRT_MFC_BW_UPCALL);
static uint32_t mrt_api_config = 0;

/*
 * Hash function for a source, group entry
 */
#define MFCHASH(a, g) MFCHASHMOD(((a) >> 20) ^ ((a) >> 10) ^ (a) ^ \
			((g) >> 20) ^ ((g) >> 10) ^ (g))
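/*
 * Illustrative note (an addition, not original code): MFCHASH folds the
 * high, middle and low bits of both the origin and the group address
 * together before reducing modulo the table size, so two sources that
 * differ only in their low-order bits (say 10.0.0.1 and 10.0.0.2 sending
 * to the same group) will normally land in different hash buckets.
 */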

/*
 * Find a route for a given origin IP address and Multicast group address
 * Type of service parameter to be added in the future!!!
 * Statistics are updated by the caller if needed
 * (mrtstat.mrts_mfc_lookups and mrtstat.mrts_mfc_misses)
 */
static struct mfc *
mfc_find(in_addr_t o, in_addr_t g)
{
    struct mfc *rt;

    MFC_LOCK_ASSERT();

    for (rt = mfctable[MFCHASH(o,g)]; rt; rt = rt->mfc_next)
	if ((rt->mfc_origin.s_addr == o) &&
	    (rt->mfc_mcastgrp.s_addr == g) && (rt->mfc_stall == NULL))
	    break;
    return rt;
}

/*
 * Macros to compute elapsed time efficiently
 * Borrowed from Van Jacobson's scheduling code
 */
#define TV_DELTA(a, b, delta) {				\
	int xxs;					\
	delta = (a).tv_usec - (b).tv_usec;		\
	if ((xxs = (a).tv_sec - (b).tv_sec)) {		\
	    switch (xxs) {				\
	    case 2:					\
		delta += 1000000;			\
		/* FALLTHROUGH */			\
	    case 1:					\
		delta += 1000000;			\
		break;					\
	    default:					\
		delta += (1000000 * xxs);		\
	    }						\
	}						\
}

#define TV_LT(a, b) (((a).tv_usec < (b).tv_usec && \
	      (a).tv_sec <= (b).tv_sec) || (a).tv_sec < (b).tv_sec)

/*
 * Handle MRT setsockopt commands to modify the multicast routing tables.
 */
static int
X_ip_mrouter_set(struct socket *so, struct sockopt *sopt)
{
    int	error, optval;
    vifi_t	vifi;
    struct	vifctl vifc;
    struct	mfcctl2 mfc;
    struct	bw_upcall bw_upcall;
    uint32_t	i;

    if (so != ip_mrouter && sopt->sopt_name != MRT_INIT)
	return EPERM;

    error = 0;
    switch (sopt->sopt_name) {
    case MRT_INIT:
	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
	if (error)
	    break;
	error = ip_mrouter_init(so, optval);
	break;

    case MRT_DONE:
	error = ip_mrouter_done();
	break;

    case MRT_ADD_VIF:
	error = sooptcopyin(sopt, &vifc, sizeof vifc, sizeof vifc);
	if (error)
	    break;
	error = add_vif(&vifc);
	break;

    case MRT_DEL_VIF:
	error = sooptcopyin(sopt, &vifi, sizeof vifi, sizeof vifi);
	if (error)
	    break;
	error = del_vif(vifi);
	break;

    case MRT_ADD_MFC:
    case MRT_DEL_MFC:
	/*
	 * select data size depending on API version.
	 */
	if (sopt->sopt_name == MRT_ADD_MFC &&
		mrt_api_config & MRT_API_FLAGS_ALL) {
	    error = sooptcopyin(sopt, &mfc, sizeof(struct mfcctl2),
				sizeof(struct mfcctl2));
	} else {
	    error = sooptcopyin(sopt, &mfc, sizeof(struct mfcctl),
				sizeof(struct mfcctl));
	    bzero((caddr_t)&mfc + sizeof(struct mfcctl),
			sizeof(mfc) - sizeof(struct mfcctl));
	}
	if (error)
	    break;
	if (sopt->sopt_name == MRT_ADD_MFC)
	    error = add_mfc(&mfc);
	else
	    error = del_mfc(&mfc);
	break;

    case MRT_ASSERT:
	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
	if (error)
	    break;
	set_assert(optval);
	break;

    case MRT_API_CONFIG:
	error = sooptcopyin(sopt, &i, sizeof i, sizeof i);
	if (!error)
	    error = set_api_config(&i);
	if (!error)
	    error = sooptcopyout(sopt, &i, sizeof i);
	break;

    case MRT_ADD_BW_UPCALL:
    case MRT_DEL_BW_UPCALL:
	error = sooptcopyin(sopt, &bw_upcall, sizeof bw_upcall,
				sizeof bw_upcall);
	if (error)
	    break;
	if (sopt->sopt_name == MRT_ADD_BW_UPCALL)
	    error = add_bw_upcall(&bw_upcall);
	else
	    error = del_bw_upcall(&bw_upcall);
	break;

    default:
	error = EOPNOTSUPP;
	break;
    }
    return error;
}
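/*
 * Userland sketch (an addition, illustrative only; error handling and the
 * daemon's real logic omitted): a routing daemon such as mrouted drives
 * the setsockopt interface above roughly as follows:
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int on = 1;
 *	setsockopt(s, IPPROTO_IP, MRT_INIT, &on, sizeof(on));
 *
 *	struct vifctl vc;
 *	bzero(&vc, sizeof(vc));
 *	vc.vifc_vifi = 0;
 *	vc.vifc_threshold = 1;
 *	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");  // example address
 *	setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *
 * Only one socket may hold the ip_mrouter role at a time; see
 * ip_mrouter_init() below.
 */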

/*
 * Handle MRT getsockopt commands
 */
static int
X_ip_mrouter_get(struct socket *so, struct sockopt *sopt)
{
    int error;
    static int version = 0x0305; /* !!! why is this here? XXX */

    switch (sopt->sopt_name) {
    case MRT_VERSION:
	error = sooptcopyout(sopt, &version, sizeof version);
	break;

    case MRT_ASSERT:
	error = sooptcopyout(sopt, &pim_assert, sizeof pim_assert);
	break;

    case MRT_API_SUPPORT:
	error = sooptcopyout(sopt, &mrt_api_support, sizeof mrt_api_support);
	break;

    case MRT_API_CONFIG:
	error = sooptcopyout(sopt, &mrt_api_config, sizeof mrt_api_config);
	break;

    default:
	error = EOPNOTSUPP;
	break;
    }
    return error;
}

/*
 * Handle ioctl commands to obtain information from the cache
 */
static int
X_mrt_ioctl(int cmd, caddr_t data)
{
    int error = 0;

    /*
     * Currently the only function calling this ioctl routine is rtioctl().
     * Typically, only root can create the raw socket in order to execute
     * this ioctl method, however the request might be coming from a prison.
     */
    error = suser(curthread);
    if (error)
	return (error);
    switch (cmd) {
    case (SIOCGETVIFCNT):
	error = get_vif_cnt((struct sioc_vif_req *)data);
	break;

    case (SIOCGETSGCNT):
	error = get_sg_cnt((struct sioc_sg_req *)data);
	break;

    default:
	error = EINVAL;
	break;
    }
    return error;
}

/*
 * returns the packet, byte, rpf-failure count for the source group provided
 */
static int
get_sg_cnt(struct sioc_sg_req *req)
{
    struct mfc *rt;

    MFC_LOCK();
    rt = mfc_find(req->src.s_addr, req->grp.s_addr);
    if (rt == NULL) {
	MFC_UNLOCK();
	req->pktcnt = req->bytecnt = req->wrong_if = 0xffffffff;
	return EADDRNOTAVAIL;
    }
    req->pktcnt = rt->mfc_pkt_cnt;
    req->bytecnt = rt->mfc_byte_cnt;
    req->wrong_if = rt->mfc_wrong_if;
    MFC_UNLOCK();
    return 0;
}

/*
 * returns the input and output packet and byte counts on the vif provided
 */
static int
get_vif_cnt(struct sioc_vif_req *req)
{
    vifi_t vifi = req->vifi;

    VIF_LOCK();
    if (vifi >= numvifs) {
	VIF_UNLOCK();
	return EINVAL;
    }

    req->icount = viftable[vifi].v_pkt_in;
    req->ocount = viftable[vifi].v_pkt_out;
    req->ibytes = viftable[vifi].v_bytes_in;
    req->obytes = viftable[vifi].v_bytes_out;
    VIF_UNLOCK();

    return 0;
}
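/*
 * Userland sketch (an addition, illustrative only): the per-(S,G) counters
 * above are read with an ioctl on the routing socket, e.g.
 *
 *	struct sioc_sg_req sg;
 *	bzero(&sg, sizeof(sg));
 *	sg.src.s_addr = inet_addr("192.0.2.1");     // example source
 *	sg.grp.s_addr = inet_addr("233.252.0.1");   // example group
 *	ioctl(s, SIOCGETSGCNT, &sg);
 *	// on success sg.pktcnt, sg.bytecnt and sg.wrong_if hold the counts
 *
 * SIOCGETVIFCNT works the same way with a struct sioc_vif_req.
 */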

static void
ip_mrouter_reset(void)
{
    bzero((caddr_t)mfctable, sizeof(mfctable));
    bzero((caddr_t)nexpire, sizeof(nexpire));

    pim_assert = 0;
    mrt_api_config = 0;

    callout_init(&expire_upcalls_ch, CALLOUT_MPSAFE);

    bw_upcalls_n = 0;
    bzero((caddr_t)bw_meter_timers, sizeof(bw_meter_timers));
    callout_init(&bw_upcalls_ch, CALLOUT_MPSAFE);
    callout_init(&bw_meter_ch, CALLOUT_MPSAFE);

    callout_init(&tbf_reprocess_ch, CALLOUT_MPSAFE);
}

static struct mtx mrouter_mtx;		/* used to synch init/done work */

/*
 * Enable multicast routing
 */
static int
ip_mrouter_init(struct socket *so, int version)
{
    if (mrtdebug)
	log(LOG_DEBUG, "ip_mrouter_init: so_type = %d, pr_protocol = %d\n",
	    so->so_type, so->so_proto->pr_protocol);

    if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_IGMP)
	return EOPNOTSUPP;

    if (version != 1)
	return ENOPROTOOPT;

    mtx_lock(&mrouter_mtx);

    if (ip_mrouter != NULL) {
	mtx_unlock(&mrouter_mtx);
	return EADDRINUSE;
    }

    callout_reset(&expire_upcalls_ch, EXPIRE_TIMEOUT, expire_upcalls, NULL);

    callout_reset(&bw_upcalls_ch, BW_UPCALLS_PERIOD,
	expire_bw_upcalls_send, NULL);
    callout_reset(&bw_meter_ch, BW_METER_PERIOD, expire_bw_meter_process, NULL);

    ip_mrouter = so;

    mtx_unlock(&mrouter_mtx);

    if (mrtdebug)
	log(LOG_DEBUG, "ip_mrouter_init\n");

    return 0;
}

/*
 * Disable multicast routing
 */
static int
X_ip_mrouter_done(void)
{
    vifi_t vifi;
    int i;
    struct ifnet *ifp;
    struct ifreq ifr;
    struct mfc *rt;
    struct rtdetq *rte;

    mtx_lock(&mrouter_mtx);

    if (ip_mrouter == NULL) {
	mtx_unlock(&mrouter_mtx);
	return EINVAL;
    }

    /*
     * Detach/disable hooks to the rest of the system.
     */
    ip_mrouter = NULL;
    mrt_api_config = 0;

    VIF_LOCK();
    if (encap_cookie) {
	const struct encaptab *c = encap_cookie;
	encap_cookie = NULL;
	encap_detach(c);
    }
    VIF_UNLOCK();

    callout_stop(&tbf_reprocess_ch);

    VIF_LOCK();
    /*
     * For each phyint in use, disable promiscuous reception of all IP
     * multicasts.
     */
    for (vifi = 0; vifi < numvifs; vifi++) {
	if (viftable[vifi].v_lcl_addr.s_addr != 0 &&
		!(viftable[vifi].v_flags & (VIFF_TUNNEL | VIFF_REGISTER))) {
	    struct sockaddr_in *so = (struct sockaddr_in *)&(ifr.ifr_addr);

	    so->sin_len = sizeof(struct sockaddr_in);
	    so->sin_family = AF_INET;
	    so->sin_addr.s_addr = INADDR_ANY;
	    ifp = viftable[vifi].v_ifp;
	    if_allmulti(ifp, 0);
	}
    }
    bzero((caddr_t)tbftable, sizeof(tbftable));
    bzero((caddr_t)viftable, sizeof(viftable));
    numvifs = 0;
    pim_assert = 0;
    VIF_UNLOCK();

    /*
     * Free all multicast forwarding cache entries.
     */
    callout_stop(&expire_upcalls_ch);
    callout_stop(&bw_upcalls_ch);
    callout_stop(&bw_meter_ch);

    MFC_LOCK();
    for (i = 0; i < MFCTBLSIZ; i++) {
	for (rt = mfctable[i]; rt != NULL; ) {
	    struct mfc *nr = rt->mfc_next;

	    for (rte = rt->mfc_stall; rte != NULL; ) {
		struct rtdetq *n = rte->next;

		m_freem(rte->m);
		free(rte, M_MRTABLE);
		rte = n;
	    }
	    free_bw_list(rt->mfc_bw_meter);
	    free(rt, M_MRTABLE);
	    rt = nr;
	}
    }
    bzero((caddr_t)mfctable, sizeof(mfctable));
    bzero((caddr_t)nexpire, sizeof(nexpire));
    bw_upcalls_n = 0;
    bzero(bw_meter_timers, sizeof(bw_meter_timers));
    MFC_UNLOCK();

    /*
     * Reset de-encapsulation cache
     */
    last_encap_src = INADDR_ANY;
    last_encap_vif = NULL;
#ifdef PIM
    reg_vif_num = VIFI_INVALID;
#endif

    mtx_unlock(&mrouter_mtx);

    if (mrtdebug)
	log(LOG_DEBUG, "ip_mrouter_done\n");

    return 0;
}

/*
 * Set PIM assert processing global
 */
static int
set_assert(int i)
{
    if ((i != 1) && (i != 0))
	return EINVAL;

    pim_assert = i;

    return 0;
}

/*
 * Configure API capabilities
 */
int
set_api_config(uint32_t *apival)
{
    int i;

    /*
     * We can set the API capabilities only if it is the first operation
     * after MRT_INIT. I.e.:
     *  - there are no vifs installed
     *  - pim_assert is not enabled
     *  - the MFC table is empty
     */
    if (numvifs > 0) {
	*apival = 0;
	return EPERM;
    }
    if (pim_assert) {
	*apival = 0;
	return EPERM;
    }
    for (i = 0; i < MFCTBLSIZ; i++) {
	if (mfctable[i] != NULL) {
	    *apival = 0;
	    return EPERM;
	}
    }

    mrt_api_config = *apival & mrt_api_support;
    *apival = mrt_api_config;

    return 0;
}
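/*
 * Illustrative note (an addition, not original code): MRT_API_CONFIG is a
 * negotiation, and it must be the first call after MRT_INIT (no vifs, no
 * pim_assert, empty MFC table, as checked above).  The daemon asks for the
 * capabilities it wants and is handed back the granted subset:
 *
 *	uint32_t v = MRT_MFC_FLAGS_DISABLE_WRONGVIF | MRT_MFC_BW_UPCALL;
 *	setsockopt(s, IPPROTO_IP, MRT_API_CONFIG, &v, sizeof(v));
 *	// X_ip_mrouter_set() copies the granted subset back out into v
 */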

/*
 * Decide if a packet is from a tunnelled peer.
 * Return 0 if not, 64 if so.  XXX yuck.. 64 ???
 */
static int
mroute_encapcheck(const struct mbuf *m, int off, int proto, void *arg)
{
    struct ip *ip = mtod(m, struct ip *);
    int hlen = ip->ip_hl << 2;

    /*
     * don't claim the packet if it's not to a multicast destination or if
     * we don't have an encapsulating tunnel with the source.
     * Note:  This code assumes that the remote site IP address
     * uniquely identifies the tunnel (i.e., that this site has
     * at most one tunnel with the remote site).
     */
    if (!IN_MULTICAST(ntohl(((struct ip *)((char *)ip+hlen))->ip_dst.s_addr)))
	return 0;
    if (ip->ip_src.s_addr != last_encap_src) {
	struct vif *vifp = viftable;
	struct vif *vife = vifp + numvifs;

	last_encap_src = ip->ip_src.s_addr;
	last_encap_vif = NULL;
	for ( ; vifp < vife; ++vifp)
	    if (vifp->v_rmt_addr.s_addr == ip->ip_src.s_addr) {
		if ((vifp->v_flags & (VIFF_TUNNEL|VIFF_SRCRT)) == VIFF_TUNNEL)
		    last_encap_vif = vifp;
		break;
	    }
    }
    if (last_encap_vif == NULL) {
	last_encap_src = INADDR_ANY;
	return 0;
    }
    return 64;
}

/*
 * De-encapsulate a packet and feed it back through ip input (this
 * routine is called whenever IP gets a packet that mroute_encap_func()
 * claimed).
 */
static void
mroute_encap_input(struct mbuf *m, int off)
{
    struct ip *ip = mtod(m, struct ip *);
    int hlen = ip->ip_hl << 2;

    if (hlen > sizeof(struct ip))
	ip_stripoptions(m, (struct mbuf *) 0);
    m->m_data += sizeof(struct ip);
    m->m_len -= sizeof(struct ip);
    m->m_pkthdr.len -= sizeof(struct ip);

    m->m_pkthdr.rcvif = last_encap_vif->v_ifp;

    netisr_queue(NETISR_IP, m);		/* mbuf is free'd on failure. */
    /*
     * normally we would need a "schednetisr(NETISR_IP)"
     * here but we were called by ip_input and it is going
     * to loop back & try to dequeue the packet we just
     * queued as soon as we return so we avoid the
     * unnecessary software interrupt.
     *
     * XXX
     * This no longer holds - we may have direct-dispatched the packet,
     * or there may be a queue processing limit.
     */
}

extern struct domain inetdomain;
static struct protosw mroute_encap_protosw =
{ SOCK_RAW,	&inetdomain,	IPPROTO_IPV4,	PR_ATOMIC|PR_ADDR,
  mroute_encap_input,	0,	0,		rip_ctloutput,
  0,
  0,		0,		0,		0,
  &rip_usrreqs
};
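/*
 * Illustrative layout (an addition, not original code): a packet arriving
 * on an encapsulating tunnel looks like
 *
 *	+-----------------+-----------------+-------------+
 *	| outer struct ip | inner struct ip |   payload   |
 *	| ENCAP_PROTO,    | dst = multicast |             |
 *	| unicast peer    | group           |             |
 *	+-----------------+-----------------+-------------+
 *
 * mroute_encapcheck() above peeks at the inner header's destination to
 * decide whether to claim the packet; mroute_encap_input() then strips
 * the outer header and requeues the rest for normal IP input.
 */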

/*
 * Add a vif to the vif table
 */
static int
add_vif(struct vifctl *vifcp)
{
    struct vif *vifp = viftable + vifcp->vifc_vifi;
    struct sockaddr_in sin = {sizeof sin, AF_INET};
    struct ifaddr *ifa;
    struct ifnet *ifp;
    int error;
    struct tbf *v_tbf = tbftable + vifcp->vifc_vifi;

    VIF_LOCK();
    if (vifcp->vifc_vifi >= MAXVIFS) {
	VIF_UNLOCK();
	return EINVAL;
    }
    if (vifp->v_lcl_addr.s_addr != INADDR_ANY) {
	VIF_UNLOCK();
	return EADDRINUSE;
    }
    if (vifcp->vifc_lcl_addr.s_addr == INADDR_ANY) {
	VIF_UNLOCK();
	return EADDRNOTAVAIL;
    }

    /* Find the interface with an address in AF_INET family */
#ifdef PIM
    if (vifcp->vifc_flags & VIFF_REGISTER) {
	/*
	 * XXX: Because VIFF_REGISTER does not really need a valid
	 * local interface (e.g. it could be 127.0.0.2), we don't
	 * check its address.
	 */
	ifp = NULL;
    } else
#endif
    {
	sin.sin_addr = vifcp->vifc_lcl_addr;
	ifa = ifa_ifwithaddr((struct sockaddr *)&sin);
	if (ifa == NULL) {
	    VIF_UNLOCK();
	    return EADDRNOTAVAIL;
	}
	ifp = ifa->ifa_ifp;
    }

    if (vifcp->vifc_flags & VIFF_TUNNEL) {
	if ((vifcp->vifc_flags & VIFF_SRCRT) == 0) {
	    /*
	     * An encapsulating tunnel is wanted.  Tell
	     * mroute_encap_input() to start paying attention
	     * to encapsulated packets.
	     */
	    if (encap_cookie == NULL) {
		int i;

		encap_cookie = encap_attach_func(AF_INET, IPPROTO_IPV4,
				mroute_encapcheck,
				(struct protosw *)&mroute_encap_protosw, NULL);

		if (encap_cookie == NULL) {
		    printf("ip_mroute: unable to attach encap\n");
		    VIF_UNLOCK();
		    return EIO;	/* XXX */
		}
		for (i = 0; i < MAXVIFS; ++i) {
		    if_initname(&multicast_decap_if[i], "mdecap", i);
		}
	    }
	    /*
	     * Set interface to fake encapsulator interface
	     */
	    ifp = &multicast_decap_if[vifcp->vifc_vifi];
	    /*
	     * Prepare cached route entry
	     */
	    bzero(&vifp->v_route, sizeof(vifp->v_route));
	} else {
	    log(LOG_ERR, "source routed tunnels not supported\n");
	    VIF_UNLOCK();
	    return EOPNOTSUPP;
	}
#ifdef PIM
    } else if (vifcp->vifc_flags & VIFF_REGISTER) {
	ifp = &multicast_register_if;
	if (mrtdebug)
	    log(LOG_DEBUG, "Adding a register vif, ifp: %p\n",
		(void *)&multicast_register_if);
	if (reg_vif_num == VIFI_INVALID) {
	    if_initname(&multicast_register_if, "register_vif", 0);
	    multicast_register_if.if_flags = IFF_LOOPBACK;
	    bzero(&vifp->v_route, sizeof(vifp->v_route));
	    reg_vif_num = vifcp->vifc_vifi;
	}
#endif
    } else {		/* Make sure the interface supports multicast */
	if ((ifp->if_flags & IFF_MULTICAST) == 0) {
	    VIF_UNLOCK();
	    return EOPNOTSUPP;
	}

	/* Enable promiscuous reception of all IP multicasts from the if */
	error = if_allmulti(ifp, 1);
	if (error) {
	    VIF_UNLOCK();
	    return error;
	}
    }

    /* define parameters for the tbf structure */
    vifp->v_tbf = v_tbf;
    GET_TIME(vifp->v_tbf->tbf_last_pkt_t);
    vifp->v_tbf->tbf_n_tok = 0;
    vifp->v_tbf->tbf_q_len = 0;
    vifp->v_tbf->tbf_max_q_len = MAXQSIZE;
    vifp->v_tbf->tbf_q = vifp->v_tbf->tbf_t = NULL;

    vifp->v_flags     = vifcp->vifc_flags;
    vifp->v_threshold = vifcp->vifc_threshold;
    vifp->v_lcl_addr  = vifcp->vifc_lcl_addr;
    vifp->v_rmt_addr  = vifcp->vifc_rmt_addr;
    vifp->v_ifp       = ifp;
    /* scaling up here allows division by 1024 in critical code */
    vifp->v_rate_limit = vifcp->vifc_rate_limit * 1024 / 1000;
    vifp->v_rsvp_on   = 0;
    vifp->v_rsvpd     = NULL;
    /* initialize per vif pkt counters */
    vifp->v_pkt_in    = 0;
    vifp->v_pkt_out   = 0;
    vifp->v_bytes_in  = 0;
    vifp->v_bytes_out = 0;

    /* Adjust numvifs up if the vifi is higher than numvifs */
    if (numvifs <= vifcp->vifc_vifi)
	numvifs = vifcp->vifc_vifi + 1;

    VIF_UNLOCK();

    if (mrtdebug)
	log(LOG_DEBUG, "add_vif #%d, lcladdr %lx, %s %lx, thresh %x, rate %d\n",
	    vifcp->vifc_vifi,
	    (u_long)ntohl(vifcp->vifc_lcl_addr.s_addr),
	    (vifcp->vifc_flags & VIFF_TUNNEL) ? "rmtaddr" : "mask",
	    (u_long)ntohl(vifcp->vifc_rmt_addr.s_addr),
	    vifcp->vifc_threshold,
	    vifcp->vifc_rate_limit);

    return 0;
}
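/*
 * Userland sketch (an addition, illustrative only): a tunnel vif differs
 * from the phyint example shown earlier in that the remote endpoint is
 * supplied as well, e.g.
 *
 *	vc.vifc_flags = VIFF_TUNNEL;
 *	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");    // our end
 *	vc.vifc_rmt_addr.s_addr = inet_addr("198.51.100.7"); // remote end
 *	setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *
 * The first such vif makes add_vif() attach the encap handler and point
 * the vif at one of the fake "mdecap" interfaces.
 */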
"rmtaddr" : "mask", 1039 (u_long)ntohl(vifcp->vifc_rmt_addr.s_addr), 1040 vifcp->vifc_threshold, 1041 vifcp->vifc_rate_limit); 1042 1043 return 0; 1044 } 1045 1046 /* 1047 * Delete a vif from the vif table 1048 */ 1049 static int 1050 del_vif(vifi_t vifi) 1051 { 1052 struct vif *vifp; 1053 1054 VIF_LOCK(); 1055 1056 if (vifi >= numvifs) { 1057 VIF_UNLOCK(); 1058 return EINVAL; 1059 } 1060 vifp = &viftable[vifi]; 1061 if (vifp->v_lcl_addr.s_addr == INADDR_ANY) { 1062 VIF_UNLOCK(); 1063 return EADDRNOTAVAIL; 1064 } 1065 1066 if (!(vifp->v_flags & (VIFF_TUNNEL | VIFF_REGISTER))) 1067 if_allmulti(vifp->v_ifp, 0); 1068 1069 if (vifp == last_encap_vif) { 1070 last_encap_vif = NULL; 1071 last_encap_src = INADDR_ANY; 1072 } 1073 1074 /* 1075 * Free packets queued at the interface 1076 */ 1077 while (vifp->v_tbf->tbf_q) { 1078 struct mbuf *m = vifp->v_tbf->tbf_q; 1079 1080 vifp->v_tbf->tbf_q = m->m_act; 1081 m_freem(m); 1082 } 1083 1084 #ifdef PIM 1085 if (vifp->v_flags & VIFF_REGISTER) 1086 reg_vif_num = VIFI_INVALID; 1087 #endif 1088 1089 bzero((caddr_t)vifp->v_tbf, sizeof(*(vifp->v_tbf))); 1090 bzero((caddr_t)vifp, sizeof (*vifp)); 1091 1092 if (mrtdebug) 1093 log(LOG_DEBUG, "del_vif %d, numvifs %d\n", vifi, numvifs); 1094 1095 /* Adjust numvifs down */ 1096 for (vifi = numvifs; vifi > 0; vifi--) 1097 if (viftable[vifi-1].v_lcl_addr.s_addr != INADDR_ANY) 1098 break; 1099 numvifs = vifi; 1100 1101 VIF_UNLOCK(); 1102 1103 return 0; 1104 } 1105 1106 /* 1107 * update an mfc entry without resetting counters and S,G addresses. 1108 */ 1109 static void 1110 update_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp) 1111 { 1112 int i; 1113 1114 rt->mfc_parent = mfccp->mfcc_parent; 1115 for (i = 0; i < numvifs; i++) { 1116 rt->mfc_ttls[i] = mfccp->mfcc_ttls[i]; 1117 rt->mfc_flags[i] = mfccp->mfcc_flags[i] & mrt_api_config & 1118 MRT_MFC_FLAGS_ALL; 1119 } 1120 /* set the RP address */ 1121 if (mrt_api_config & MRT_MFC_RP) 1122 rt->mfc_rp = mfccp->mfcc_rp; 1123 else 1124 rt->mfc_rp.s_addr = INADDR_ANY; 1125 } 1126 1127 /* 1128 * fully initialize an mfc entry from the parameter. 

/*
 * Add an mfc entry
 */
static int
add_mfc(struct mfcctl2 *mfccp)
{
    struct mfc *rt;
    u_long hash;
    struct rtdetq *rte;
    u_short nstl;

    VIF_LOCK();
    MFC_LOCK();

    rt = mfc_find(mfccp->mfcc_origin.s_addr, mfccp->mfcc_mcastgrp.s_addr);

    /* If an entry already exists, just update the fields */
    if (rt) {
	if (mrtdebug & DEBUG_MFC)
	    log(LOG_DEBUG,"add_mfc update o %lx g %lx p %x\n",
		(u_long)ntohl(mfccp->mfcc_origin.s_addr),
		(u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
		mfccp->mfcc_parent);

	update_mfc_params(rt, mfccp);
	MFC_UNLOCK();
	VIF_UNLOCK();
	return 0;
    }

    /*
     * Find the entry for which the upcall was made and update
     */
    hash = MFCHASH(mfccp->mfcc_origin.s_addr, mfccp->mfcc_mcastgrp.s_addr);
    for (rt = mfctable[hash], nstl = 0; rt; rt = rt->mfc_next) {

	if ((rt->mfc_origin.s_addr == mfccp->mfcc_origin.s_addr) &&
		(rt->mfc_mcastgrp.s_addr == mfccp->mfcc_mcastgrp.s_addr) &&
		(rt->mfc_stall != NULL)) {

	    if (nstl++)
		log(LOG_ERR, "add_mfc %s o %lx g %lx p %x dbx %p\n",
		    "multiple kernel entries",
		    (u_long)ntohl(mfccp->mfcc_origin.s_addr),
		    (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
		    mfccp->mfcc_parent, (void *)rt->mfc_stall);

	    if (mrtdebug & DEBUG_MFC)
		log(LOG_DEBUG,"add_mfc o %lx g %lx p %x dbg %p\n",
		    (u_long)ntohl(mfccp->mfcc_origin.s_addr),
		    (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
		    mfccp->mfcc_parent, (void *)rt->mfc_stall);

	    init_mfc_params(rt, mfccp);

	    rt->mfc_expire = 0;	/* Don't clean this guy up */
	    nexpire[hash]--;

	    /* free packets queued at the end of this entry */
	    for (rte = rt->mfc_stall; rte != NULL; ) {
		struct rtdetq *n = rte->next;

		ip_mdq(rte->m, rte->ifp, rt, -1);
		m_freem(rte->m);
		free(rte, M_MRTABLE);
		rte = n;
	    }
	    rt->mfc_stall = NULL;
	}
    }

    /*
     * It is possible that an entry is being inserted without an upcall
     */
    if (nstl == 0) {
	if (mrtdebug & DEBUG_MFC)
	    log(LOG_DEBUG,"add_mfc no upcall h %lu o %lx g %lx p %x\n",
		hash, (u_long)ntohl(mfccp->mfcc_origin.s_addr),
		(u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
		mfccp->mfcc_parent);

	for (rt = mfctable[hash]; rt != NULL; rt = rt->mfc_next) {
	    if ((rt->mfc_origin.s_addr == mfccp->mfcc_origin.s_addr) &&
		    (rt->mfc_mcastgrp.s_addr == mfccp->mfcc_mcastgrp.s_addr)) {
		init_mfc_params(rt, mfccp);
		if (rt->mfc_expire)
		    nexpire[hash]--;
		rt->mfc_expire = 0;
		break; /* XXX */
	    }
	}
	if (rt == NULL) {	/* no upcall, so make a new entry */
	    rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
	    if (rt == NULL) {
		MFC_UNLOCK();
		VIF_UNLOCK();
		return ENOBUFS;
	    }

	    init_mfc_params(rt, mfccp);
	    rt->mfc_expire = 0;
	    rt->mfc_stall  = NULL;

	    rt->mfc_bw_meter = NULL;
	    /* insert new entry at head of hash chain */
	    rt->mfc_next   = mfctable[hash];
	    mfctable[hash] = rt;
	}
    }
    MFC_UNLOCK();
    VIF_UNLOCK();
    return 0;
}

/*
 * Delete an mfc entry
 */
static int
del_mfc(struct mfcctl2 *mfccp)
{
    struct in_addr	origin;
    struct in_addr	mcastgrp;
    struct mfc		*rt;
    struct mfc		**nptr;
    u_long		hash;
    struct bw_meter	*list;

    origin = mfccp->mfcc_origin;
    mcastgrp = mfccp->mfcc_mcastgrp;

    if (mrtdebug & DEBUG_MFC)
	log(LOG_DEBUG,"del_mfc orig %lx mcastgrp %lx\n",
	    (u_long)ntohl(origin.s_addr), (u_long)ntohl(mcastgrp.s_addr));

    MFC_LOCK();

    hash = MFCHASH(origin.s_addr, mcastgrp.s_addr);
    for (nptr = &mfctable[hash]; (rt = *nptr) != NULL; nptr = &rt->mfc_next)
	if (origin.s_addr == rt->mfc_origin.s_addr &&
		mcastgrp.s_addr == rt->mfc_mcastgrp.s_addr &&
		rt->mfc_stall == NULL)
	    break;
    if (rt == NULL) {
	MFC_UNLOCK();
	return EADDRNOTAVAIL;
    }

    *nptr = rt->mfc_next;

    /*
     * free the bw_meter entries
     */
    list = rt->mfc_bw_meter;
    rt->mfc_bw_meter = NULL;

    free(rt, M_MRTABLE);

    free_bw_list(list);

    MFC_UNLOCK();

    return 0;
}

/*
 * Send a message to mrouted on the multicast routing socket
 */
static int
socket_send(struct socket *s, struct mbuf *mm, struct sockaddr_in *src)
{
    if (s) {
	SOCKBUF_LOCK(&s->so_rcv);
	if (sbappendaddr_locked(&s->so_rcv, (struct sockaddr *)src, mm,
		NULL) != 0) {
	    sorwakeup_locked(s);
	    return 0;
	}
	SOCKBUF_UNLOCK(&s->so_rcv);
    }
    m_freem(mm);
    return -1;
}
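/*
 * Illustrative note (an addition, not original code): the upcalls that
 * socket_send() delivers overlay a struct igmpmsg on the start of the
 * copied IP header, so the daemon's read loop is shaped roughly like
 *
 *	struct igmpmsg im;
 *	read(s, &im, sizeof(im));       // sketch; real daemons read the
 *	switch (im.im_msgtype) {        // whole packet into a buffer
 *	case IGMPMSG_NOCACHE:           // unknown (S,G): reply MRT_ADD_MFC
 *	case IGMPMSG_WRONGVIF:          // PIM assert candidate
 *	    ...
 *	}
 *
 * The two producers are in X_ip_mforward() and ip_mdq() below.
 */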

/*
 * IP multicast forwarding function. This function assumes that the packet
 * pointed to by "ip" has arrived on (or is about to be sent to) the interface
 * pointed to by "ifp", and the packet is to be relayed to other networks
 * that have members of the packet's destination IP multicast group.
 *
 * The packet is returned unscathed to the caller, unless it is
 * erroneous, in which case a non-zero return value tells the caller to
 * discard it.
 */

#define TUNNEL_LEN  12  /* # bytes of IP option for tunnel encapsulation  */

static int
X_ip_mforward(struct ip *ip, struct ifnet *ifp, struct mbuf *m,
    struct ip_moptions *imo)
{
    struct mfc *rt;
    int error;
    vifi_t vifi;

    if (mrtdebug & DEBUG_FORWARD)
	log(LOG_DEBUG, "ip_mforward: src %lx, dst %lx, ifp %p\n",
	    (u_long)ntohl(ip->ip_src.s_addr), (u_long)ntohl(ip->ip_dst.s_addr),
	    (void *)ifp);

    if (ip->ip_hl < (sizeof(struct ip) + TUNNEL_LEN) >> 2 ||
		((u_char *)(ip + 1))[1] != IPOPT_LSRR ) {
	/*
	 * Packet arrived via a physical interface or
	 * an encapsulated tunnel or a register_vif.
	 */
    } else {
	/*
	 * Packet arrived through a source-route tunnel.
	 * Source-route tunnels are no longer supported.
	 */
	static int last_log;
	if (last_log != time_second) {
	    last_log = time_second;
	    log(LOG_ERR,
		"ip_mforward: received source-routed packet from %lx\n",
		(u_long)ntohl(ip->ip_src.s_addr));
	}
	return 1;
    }

    VIF_LOCK();
    MFC_LOCK();
    if (imo && ((vifi = imo->imo_multicast_vif) < numvifs)) {
	if (ip->ip_ttl < 255)
	    ip->ip_ttl++;	/* compensate for -1 in *_send routines */
	if (rsvpdebug && ip->ip_p == IPPROTO_RSVP) {
	    struct vif *vifp = viftable + vifi;

	    printf("Sending IPPROTO_RSVP from %lx to %lx on vif %d (%s%s)\n",
		(long)ntohl(ip->ip_src.s_addr), (long)ntohl(ip->ip_dst.s_addr),
		vifi,
		(vifp->v_flags & VIFF_TUNNEL) ? "tunnel on " : "",
		vifp->v_ifp->if_xname);
	}
	error = ip_mdq(m, ifp, NULL, vifi);
	MFC_UNLOCK();
	VIF_UNLOCK();
	return error;
    }
    if (rsvpdebug && ip->ip_p == IPPROTO_RSVP) {
	printf("Warning: IPPROTO_RSVP from %lx to %lx without vif option\n",
	    (long)ntohl(ip->ip_src.s_addr), (long)ntohl(ip->ip_dst.s_addr));
	if (!imo)
	    printf("In fact, no options were specified at all\n");
    }

    /*
     * Don't forward a packet with time-to-live of zero or one,
     * or a packet destined to a local-only group.
     */
    if (ip->ip_ttl <= 1 || ntohl(ip->ip_dst.s_addr) <= INADDR_MAX_LOCAL_GROUP) {
	MFC_UNLOCK();
	VIF_UNLOCK();
	return 0;
    }

    /*
     * Determine forwarding vifs from the forwarding cache table
     */
    ++mrtstat.mrts_mfc_lookups;
    rt = mfc_find(ip->ip_src.s_addr, ip->ip_dst.s_addr);

    /* Entry exists, so forward if necessary */
    if (rt != NULL) {
	error = ip_mdq(m, ifp, rt, -1);
	MFC_UNLOCK();
	VIF_UNLOCK();
	return error;
    } else {
	/*
	 * If we don't have a route for packet's origin,
	 * Make a copy of the packet & send message to routing daemon
	 */

	struct mbuf *mb0;
	struct rtdetq *rte;
	u_long hash;
	int hlen = ip->ip_hl << 2;

	++mrtstat.mrts_mfc_misses;

	mrtstat.mrts_no_route++;
	if (mrtdebug & (DEBUG_FORWARD | DEBUG_MFC))
	    log(LOG_DEBUG, "ip_mforward: no rte s %lx g %lx\n",
		(u_long)ntohl(ip->ip_src.s_addr),
		(u_long)ntohl(ip->ip_dst.s_addr));

	/*
	 * Allocate mbufs early so that we don't do extra work if we are
	 * just going to fail anyway.  Make sure to pullup the header so
	 * that other people can't step on it.
	 */
	rte = (struct rtdetq *)malloc((sizeof *rte), M_MRTABLE, M_NOWAIT);
	if (rte == NULL) {
	    MFC_UNLOCK();
	    VIF_UNLOCK();
	    return ENOBUFS;
	}
	mb0 = m_copypacket(m, M_DONTWAIT);
	if (mb0 && (M_HASCL(mb0) || mb0->m_len < hlen))
	    mb0 = m_pullup(mb0, hlen);
	if (mb0 == NULL) {
	    free(rte, M_MRTABLE);
	    MFC_UNLOCK();
	    VIF_UNLOCK();
	    return ENOBUFS;
	}

	/* is there an upcall waiting for this flow? */
	hash = MFCHASH(ip->ip_src.s_addr, ip->ip_dst.s_addr);
	for (rt = mfctable[hash]; rt; rt = rt->mfc_next) {
	    if ((ip->ip_src.s_addr == rt->mfc_origin.s_addr) &&
		(ip->ip_dst.s_addr == rt->mfc_mcastgrp.s_addr) &&
		(rt->mfc_stall != NULL))
		break;
	}

	if (rt == NULL) {
	    int i;
	    struct igmpmsg *im;
	    struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
	    struct mbuf *mm;

	    /*
	     * Locate the vifi for the incoming interface for this packet.
	     * If none found, drop packet.
	     */
	    for (vifi=0; vifi < numvifs && viftable[vifi].v_ifp != ifp; vifi++)
		;
	    if (vifi >= numvifs)	/* vif not found, drop packet */
		goto non_fatal;

	    /* no upcall, so make a new entry */
	    rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
	    if (rt == NULL)
		goto fail;
	    /* Make a copy of the header to send to the user level process */
	    mm = m_copy(mb0, 0, hlen);
	    if (mm == NULL)
		goto fail1;

	    /*
	     * Send message to routing daemon to install
	     * a route into the kernel table
	     */

	    im = mtod(mm, struct igmpmsg *);
	    im->im_msgtype = IGMPMSG_NOCACHE;
	    im->im_mbz = 0;
	    im->im_vif = vifi;

	    mrtstat.mrts_upcalls++;

	    k_igmpsrc.sin_addr = ip->ip_src;
	    if (socket_send(ip_mrouter, mm, &k_igmpsrc) < 0) {
		log(LOG_WARNING, "ip_mforward: ip_mrouter socket queue full\n");
		++mrtstat.mrts_upq_sockfull;
fail1:
		free(rt, M_MRTABLE);
fail:
		free(rte, M_MRTABLE);
		m_freem(mb0);
		MFC_UNLOCK();
		VIF_UNLOCK();
		return ENOBUFS;
	    }

	    /* insert new entry at head of hash chain */
	    rt->mfc_origin.s_addr   = ip->ip_src.s_addr;
	    rt->mfc_mcastgrp.s_addr = ip->ip_dst.s_addr;
	    rt->mfc_expire	    = UPCALL_EXPIRE;
	    nexpire[hash]++;
	    for (i = 0; i < numvifs; i++) {
		rt->mfc_ttls[i] = 0;
		rt->mfc_flags[i] = 0;
	    }
	    rt->mfc_parent = -1;

	    rt->mfc_rp.s_addr = INADDR_ANY; /* clear the RP address */

	    rt->mfc_bw_meter = NULL;

	    /* link into table */
	    rt->mfc_next   = mfctable[hash];
	    mfctable[hash] = rt;
	    rt->mfc_stall = rte;

	} else {
	    /* determine if q has overflowed */
	    int npkts = 0;
	    struct rtdetq **p;

	    /*
	     * XXX ouch! we need to append to the list, but we
	     * only have a pointer to the front, so we have to
	     * scan the entire list every time.
	     */
	    for (p = &rt->mfc_stall; *p != NULL; p = &(*p)->next)
		npkts++;

	    if (npkts > MAX_UPQ) {
		mrtstat.mrts_upq_ovflw++;
non_fatal:
		free(rte, M_MRTABLE);
		m_freem(mb0);
		MFC_UNLOCK();
		VIF_UNLOCK();
		return 0;
	    }

	    /* Add this entry to the end of the queue */
	    *p = rte;
	}

	rte->m    = mb0;
	rte->ifp  = ifp;
	rte->next = NULL;

	MFC_UNLOCK();
	VIF_UNLOCK();

	return 0;
    }
}

/*
 * Clean up the cache entry if upcall is not serviced
 */
static void
expire_upcalls(void *unused)
{
    struct rtdetq *rte;
    struct mfc *mfc, **nptr;
    int i;

    MFC_LOCK();
    for (i = 0; i < MFCTBLSIZ; i++) {
	if (nexpire[i] == 0)
	    continue;
	nptr = &mfctable[i];
	for (mfc = *nptr; mfc != NULL; mfc = *nptr) {
	    /*
	     * Skip real cache entries
	     * Make sure it wasn't marked to not expire (shouldn't happen)
	     * If it expires now
	     */
	    if (mfc->mfc_stall != NULL && mfc->mfc_expire != 0 &&
		    --mfc->mfc_expire == 0) {
		if (mrtdebug & DEBUG_EXPIRE)
		    log(LOG_DEBUG, "expire_upcalls: expiring (%lx %lx)\n",
			(u_long)ntohl(mfc->mfc_origin.s_addr),
			(u_long)ntohl(mfc->mfc_mcastgrp.s_addr));
		/*
		 * drop all the packets
		 * free the mbuf with the pkt, if, timing info
		 */
		for (rte = mfc->mfc_stall; rte; ) {
		    struct rtdetq *n = rte->next;

		    m_freem(rte->m);
		    free(rte, M_MRTABLE);
		    rte = n;
		}
		++mrtstat.mrts_cache_cleanups;
		nexpire[i]--;

		/*
		 * free the bw_meter entries
		 */
		while (mfc->mfc_bw_meter != NULL) {
		    struct bw_meter *x = mfc->mfc_bw_meter;

		    mfc->mfc_bw_meter = x->bm_mfc_next;
		    free(x, M_BWMETER);
		}

		*nptr = mfc->mfc_next;
		free(mfc, M_MRTABLE);
	    } else {
		nptr = &mfc->mfc_next;
	    }
	}
    }
    MFC_UNLOCK();

    callout_reset(&expire_upcalls_ch, EXPIRE_TIMEOUT, expire_upcalls, NULL);
}

/*
 * Packet forwarding routine once entry in the cache is made
 */
static int
ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt, vifi_t xmt_vif)
{
    struct ip *ip = mtod(m, struct ip *);
    vifi_t vifi;
    int plen = ip->ip_len;

    VIF_LOCK_ASSERT();
    /*
     * Macro to send packet on vif.  Since RSVP packets don't get counted on
     * input, they shouldn't get counted on output, so statistics keeping is
     * separate.
     */
#define MC_SEND(ip,vifp,m) {				\
		if ((vifp)->v_flags & VIFF_TUNNEL)	\
		    encap_send((ip), (vifp), (m));	\
		else					\
		    phyint_send((ip), (vifp), (m));	\
}

    /*
     * If xmt_vif is not -1, send on only the requested vif.
     *
     * (since vifi_t is u_short, -1 becomes MAXUSHORT, which > numvifs.)
     */
    if (xmt_vif < numvifs) {
#ifdef PIM
	if (viftable[xmt_vif].v_flags & VIFF_REGISTER)
	    pim_register_send(ip, viftable + xmt_vif, m, rt);
	else
#endif
	MC_SEND(ip, viftable + xmt_vif, m);
	return 1;
    }

    /*
     * Don't forward if it didn't arrive from the parent vif for its origin.
     */
    vifi = rt->mfc_parent;
    if ((vifi >= numvifs) || (viftable[vifi].v_ifp != ifp)) {
	/* came in the wrong interface */
	if (mrtdebug & DEBUG_FORWARD)
	    log(LOG_DEBUG, "wrong if: ifp %p vifi %d vififp %p\n",
		(void *)ifp, vifi, (void *)viftable[vifi].v_ifp);
	++mrtstat.mrts_wrong_if;
	++rt->mfc_wrong_if;
	/*
	 * If we are doing PIM assert processing, send a message
	 * to the routing daemon.
	 *
	 * XXX: A PIM-SM router needs the WRONGVIF detection so it
	 * can complete the SPT switch, regardless of the type
	 * of the iif (broadcast media, GRE tunnel, etc).
	 */
	if (pim_assert && (vifi < numvifs) && viftable[vifi].v_ifp) {
	    struct timeval now;
	    u_long delta;

#ifdef PIM
	    if (ifp == &multicast_register_if)
		pimstat.pims_rcv_registers_wrongiif++;
#endif

	    /* Get vifi for the incoming packet */
	    for (vifi=0; vifi < numvifs && viftable[vifi].v_ifp != ifp; vifi++)
		;
	    if (vifi >= numvifs)
		return 0;	/* The iif is not found: ignore the packet. */

	    if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_DISABLE_WRONGVIF)
		return 0;	/* WRONGVIF disabled: ignore the packet */

	    GET_TIME(now);

	    TV_DELTA(rt->mfc_last_assert, now, delta);

	    if (delta > ASSERT_MSG_TIME) {
		struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
		struct igmpmsg *im;
		int hlen = ip->ip_hl << 2;
		struct mbuf *mm = m_copy(m, 0, hlen);

		if (mm && (M_HASCL(mm) || mm->m_len < hlen))
		    mm = m_pullup(mm, hlen);
		if (mm == NULL)
		    return ENOBUFS;

		rt->mfc_last_assert = now;

		im = mtod(mm, struct igmpmsg *);
		im->im_msgtype = IGMPMSG_WRONGVIF;
		im->im_mbz     = 0;
		im->im_vif     = vifi;

		mrtstat.mrts_upcalls++;

		k_igmpsrc.sin_addr = im->im_src;
		if (socket_send(ip_mrouter, mm, &k_igmpsrc) < 0) {
		    log(LOG_WARNING,
			"ip_mforward: ip_mrouter socket queue full\n");
		    ++mrtstat.mrts_upq_sockfull;
		    return ENOBUFS;
		}
	    }
	}
	return 0;
    }

    /* If I sourced this packet, it counts as output, else it was input. */
    if (ip->ip_src.s_addr == viftable[vifi].v_lcl_addr.s_addr) {
	viftable[vifi].v_pkt_out++;
	viftable[vifi].v_bytes_out += plen;
    } else {
	viftable[vifi].v_pkt_in++;
	viftable[vifi].v_bytes_in += plen;
    }
    rt->mfc_pkt_cnt++;
    rt->mfc_byte_cnt += plen;

    /*
     * For each vif, decide if a copy of the packet should be forwarded.
     * Forward if:
     *		- the ttl exceeds the vif's threshold
     *		- there are group members downstream on interface
     */
    for (vifi = 0; vifi < numvifs; vifi++)
	if ((rt->mfc_ttls[vifi] > 0) && (ip->ip_ttl > rt->mfc_ttls[vifi])) {
	    viftable[vifi].v_pkt_out++;
	    viftable[vifi].v_bytes_out += plen;
#ifdef PIM
	    if (viftable[vifi].v_flags & VIFF_REGISTER)
		pim_register_send(ip, viftable + vifi, m, rt);
	    else
#endif
	    MC_SEND(ip, viftable+vifi, m);
	}

    /*
     * Perform upcall-related bw measuring.
     */
    if (rt->mfc_bw_meter != NULL) {
	struct bw_meter *x;
	struct timeval now;

	GET_TIME(now);
	MFC_LOCK_ASSERT();
	for (x = rt->mfc_bw_meter; x != NULL; x = x->bm_mfc_next)
	    bw_meter_receive_packet(x, plen, &now);
    }

    return 0;
}
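/*
 * Illustrative note (an addition, not original code): the per-vif TTL in
 * mfc_ttls[] acts as a scope threshold, since the test above forwards only
 * when ip_ttl > threshold.  A threshold of 1 passes nearly everything,
 * while a border vif configured with, say, threshold 64 passes only
 * packets sent with a TTL above 64, keeping low-TTL traffic inside the
 * site.
 */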

/*
 * check if a vif number is legal/ok. This is used by ip_output.
 */
static int
X_legal_vif_num(int vif)
{
    /* XXX unlocked, matter? */
    return (vif >= 0 && vif < numvifs);
}

/*
 * Return the local address used by this vif
 */
static u_long
X_ip_mcast_src(int vifi)
{
    /* XXX unlocked, matter? */
    if (vifi >= 0 && vifi < numvifs)
	return viftable[vifi].v_lcl_addr.s_addr;
    else
	return INADDR_ANY;
}

static void
phyint_send(struct ip *ip, struct vif *vifp, struct mbuf *m)
{
    struct mbuf *mb_copy;
    int hlen = ip->ip_hl << 2;

    VIF_LOCK_ASSERT();

    /*
     * Make a new reference to the packet; make sure that
     * the IP header is actually copied, not just referenced,
     * so that ip_output() only scribbles on the copy.
     */
    mb_copy = m_copypacket(m, M_DONTWAIT);
    if (mb_copy && (M_HASCL(mb_copy) || mb_copy->m_len < hlen))
	mb_copy = m_pullup(mb_copy, hlen);
    if (mb_copy == NULL)
	return;

    if (vifp->v_rate_limit == 0)
	tbf_send_packet(vifp, mb_copy);
    else
	tbf_control(vifp, mb_copy, mtod(mb_copy, struct ip *), ip->ip_len);
}

static void
encap_send(struct ip *ip, struct vif *vifp, struct mbuf *m)
{
    struct mbuf *mb_copy;
    struct ip *ip_copy;
    int i, len = ip->ip_len;

    VIF_LOCK_ASSERT();

    /* Take care of delayed checksums */
    if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
	in_delayed_cksum(m);
	m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
    }

    /*
     * copy the old packet & pullup its IP header into the
     * new mbuf so we can modify it.  Try to fill the new
     * mbuf since if we don't the ethernet driver will.
     */
    MGETHDR(mb_copy, M_DONTWAIT, MT_HEADER);
    if (mb_copy == NULL)
	return;
#ifdef MAC
    mac_create_mbuf_multicast_encap(m, vifp->v_ifp, mb_copy);
#endif
    mb_copy->m_data += max_linkhdr;
    mb_copy->m_len = sizeof(multicast_encap_iphdr);

    if ((mb_copy->m_next = m_copypacket(m, M_DONTWAIT)) == NULL) {
	m_freem(mb_copy);
	return;
    }
    i = MHLEN - M_LEADINGSPACE(mb_copy);
    if (i > len)
	i = len;
    mb_copy = m_pullup(mb_copy, i);
    if (mb_copy == NULL)
	return;
    mb_copy->m_pkthdr.len = len + sizeof(multicast_encap_iphdr);

    /*
     * fill in the encapsulating IP header.
     */
    ip_copy = mtod(mb_copy, struct ip *);
    *ip_copy = multicast_encap_iphdr;
    ip_copy->ip_id = ip_newid();
    ip_copy->ip_len += len;
    ip_copy->ip_src = vifp->v_lcl_addr;
    ip_copy->ip_dst = vifp->v_rmt_addr;

    /*
     * turn the encapsulated IP header back into a valid one.
     */
    ip = (struct ip *)((caddr_t)ip_copy + sizeof(multicast_encap_iphdr));
    --ip->ip_ttl;
    ip->ip_len = htons(ip->ip_len);
    ip->ip_off = htons(ip->ip_off);
    ip->ip_sum = 0;
    mb_copy->m_data += sizeof(multicast_encap_iphdr);
    ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2);
    mb_copy->m_data -= sizeof(multicast_encap_iphdr);

    if (vifp->v_rate_limit == 0)
	tbf_send_packet(vifp, mb_copy);
    else
	tbf_control(vifp, mb_copy, ip, ip_copy->ip_len);
}

/*
 * Token bucket filter module
 */

static void
tbf_control(struct vif *vifp, struct mbuf *m, struct ip *ip, u_long p_len)
{
    struct tbf *t = vifp->v_tbf;

    VIF_LOCK_ASSERT();

    if (p_len > MAX_BKT_SIZE) {		/* drop if packet is too large */
	mrtstat.mrts_pkt2large++;
	m_freem(m);
	return;
    }

    tbf_update_tokens(vifp);

    if (t->tbf_q_len == 0) {		/* queue empty...		*/
	if (p_len <= t->tbf_n_tok) {	/* send packet if enough tokens */
	    t->tbf_n_tok -= p_len;
	    tbf_send_packet(vifp, m);
	} else {			/* no, queue packet and try later */
	    tbf_queue(vifp, m);
	    callout_reset(&tbf_reprocess_ch, TBF_REPROCESS,
		tbf_reprocess_q, vifp);
	}
    } else if (t->tbf_q_len < t->tbf_max_q_len) {
	/* finite queue length, so queue pkts and process queue */
	tbf_queue(vifp, m);
	tbf_process_q(vifp);
    } else {
	/* queue full, try to dq and queue and process */
	if (!tbf_dq_sel(vifp, ip)) {
	    mrtstat.mrts_q_overflow++;
	    m_freem(m);
	} else {
	    tbf_queue(vifp, m);
	    tbf_process_q(vifp);
	}
    }
}

/*
 * adds a packet to the queue at the interface
 */
static void
tbf_queue(struct vif *vifp, struct mbuf *m)
{
    struct tbf *t = vifp->v_tbf;

    VIF_LOCK_ASSERT();

    if (t->tbf_t == NULL)	/* Queue was empty */
	t->tbf_q = m;
    else			/* Insert at tail */
	t->tbf_t->m_act = m;

    t->tbf_t = m;		/* Set new tail pointer */

#ifdef DIAGNOSTIC
    /* Make sure we didn't get fed a bogus mbuf */
    if (m->m_act)
	panic("tbf_queue: m_act");
#endif
    m->m_act = NULL;

    t->tbf_q_len++;
}

/*
 * processes the queue at the interface
 */
static void
tbf_process_q(struct vif *vifp)
{
    struct tbf *t = vifp->v_tbf;

    VIF_LOCK_ASSERT();

    /* loop through the queue at the interface and send as many packets
     * as possible
     */
    while (t->tbf_q_len > 0) {
	struct mbuf *m = t->tbf_q;
	int len = mtod(m, struct ip *)->ip_len;

	/* determine if the packet can be sent */
	if (len > t->tbf_n_tok)	/* not enough tokens, we are done */
	    break;
	/* ok, reduce no of tokens, dequeue and send the packet. */
	t->tbf_n_tok -= len;

	t->tbf_q = m->m_act;
	if (--t->tbf_q_len == 0)
	    t->tbf_t = NULL;

	m->m_act = NULL;
	tbf_send_packet(vifp, m);
    }
}

static void
tbf_reprocess_q(void *xvifp)
{
    struct vif *vifp = xvifp;

    if (ip_mrouter == NULL)
	return;
    VIF_LOCK();
    tbf_update_tokens(vifp);
    tbf_process_q(vifp);
    if (vifp->v_tbf->tbf_q_len)
	callout_reset(&tbf_reprocess_ch, TBF_REPROCESS, tbf_reprocess_q, vifp);
    VIF_UNLOCK();
}

/* function that will selectively discard a member of the queue
 * based on the precedence value and the priority
 */
static int
tbf_dq_sel(struct vif *vifp, struct ip *ip)
{
    u_int p;
    struct mbuf *m, *last;
    struct mbuf **np;
    struct tbf *t = vifp->v_tbf;

    VIF_LOCK_ASSERT();

    p = priority(vifp, ip);

    np = &t->tbf_q;
    last = NULL;
    while ((m = *np) != NULL) {
	if (p > priority(vifp, mtod(m, struct ip *))) {
	    *np = m->m_act;
	    /* If we're removing the last packet, fix the tail pointer */
	    if (m == t->tbf_t)
		t->tbf_t = last;
	    m_freem(m);
	    /* It's impossible for the queue to be empty, but check anyway. */
	    if (--t->tbf_q_len == 0)
		t->tbf_t = NULL;
	    mrtstat.mrts_drop_sel++;
	    return 1;
	}
	np = &m->m_act;
	last = m;
    }
    return 0;
}

static void
tbf_send_packet(struct vif *vifp, struct mbuf *m)
{
    VIF_LOCK_ASSERT();

    if (vifp->v_flags & VIFF_TUNNEL)	/* If tunnel options */
	ip_output(m, NULL, &vifp->v_route, IP_FORWARDING, NULL, NULL);
    else {
	struct ip_moptions imo;
	int error;
	static struct route ro; /* XXX check this */

	imo.imo_multicast_ifp  = vifp->v_ifp;
	imo.imo_multicast_ttl  = mtod(m, struct ip *)->ip_ttl - 1;
	imo.imo_multicast_loop = 1;
	imo.imo_multicast_vif  = -1;

	/*
	 * Re-entrancy should not be a problem here, because
	 * the packets that we send out and are looped back at us
	 * should get rejected because they appear to come from
	 * the loopback interface, thus preventing looping.
	 */
	error = ip_output(m, NULL, &ro, IP_FORWARDING, &imo, NULL);

	if (mrtdebug & DEBUG_XMIT)
	    log(LOG_DEBUG, "phyint_send on vif %d err %d\n",
		(int)(vifp - viftable), error);
    }
}

/* determine the current time and then
 * the elapsed time (between the last time and time now)
 * in microseconds & update the no. of tokens in the bucket
 */
static void
tbf_update_tokens(struct vif *vifp)
{
    struct timeval tp;
    u_long tm;
    struct tbf *t = vifp->v_tbf;

    VIF_LOCK_ASSERT();

    GET_TIME(tp);

    TV_DELTA(tp, t->tbf_last_pkt_t, tm);

    /*
     * This formula is actually
     * "time in seconds" * "bytes/second".
     *
     * (tm / 1000000) * (v_rate_limit * 1000 * (1000/1024) / 8)
     *
     * The (1000/1024) was introduced in add_vif to optimize
     * this divide into a shift.
     */
    t->tbf_n_tok += tm * vifp->v_rate_limit / 1024 / 8;
    t->tbf_last_pkt_t = tp;

    if (t->tbf_n_tok > MAX_BKT_SIZE)
	t->tbf_n_tok = MAX_BKT_SIZE;
}
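/*
 * Worked example (an addition, not original code): vifc_rate_limit is in
 * kbits/s and add_vif() stored v_rate_limit = rate * 1024 / 1000.  For a
 * 1000 kbit/s vif that is 1024, so after tm = 10000 usec (10 ms) the
 * update above adds
 *
 *	10000 * 1024 / 1024 / 8 = 1250 bytes
 *
 * which matches 1 Mbit/s = 125000 bytes/s over 10 ms.
 */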
    t->tbf_n_tok += tm * vifp->v_rate_limit / 1024 / 8;
    t->tbf_last_pkt_t = tp;

    if (t->tbf_n_tok > MAX_BKT_SIZE)
	t->tbf_n_tok = MAX_BKT_SIZE;
}

static int
priority(struct vif *vifp, struct ip *ip)
{
    int prio = 50; /* the lowest priority -- default case */

    /* temporary hack; may add general packet classifier some day */

    /*
     * The UDP port space is divided up into four priority ranges:
     * [0, 16384)     : unclassified - lowest priority
     * [16384, 32768) : audio - highest priority
     * [32768, 49152) : whiteboard - medium priority
     * [49152, 65536) : video - low priority
     *
     * Everything else gets lowest priority.
     */
    if (ip->ip_p == IPPROTO_UDP) {
	struct udphdr *udp = (struct udphdr *)(((char *)ip) + (ip->ip_hl << 2));
	switch (ntohs(udp->uh_dport) & 0xc000) {
	case 0x4000:
	    prio = 70;
	    break;
	case 0x8000:
	    prio = 60;
	    break;
	case 0xc000:
	    prio = 55;
	    break;
	}
    }
    return prio;
}

/*
 * End of token bucket filter modifications
 */

static int
X_ip_rsvp_vif(struct socket *so, struct sockopt *sopt)
{
    int error, vifi;

    if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_RSVP)
	return EOPNOTSUPP;

    error = sooptcopyin(sopt, &vifi, sizeof vifi, sizeof vifi);
    if (error)
	return error;

    VIF_LOCK();

    if (vifi < 0 || vifi >= numvifs) {	/* Error if vif is invalid */
	VIF_UNLOCK();
	return EADDRNOTAVAIL;
    }

    if (sopt->sopt_name == IP_RSVP_VIF_ON) {
	/* Check if socket is available. */
	if (viftable[vifi].v_rsvpd != NULL) {
	    VIF_UNLOCK();
	    return EADDRINUSE;
	}

	viftable[vifi].v_rsvpd = so;
	/* This may seem silly, but we need to be sure we don't over-increment
	 * the RSVP counter, in case something slips up.
	 */
	if (!viftable[vifi].v_rsvp_on) {
	    viftable[vifi].v_rsvp_on = 1;
	    rsvp_on++;
	}
    } else { /* must be VIF_OFF */
	/*
	 * XXX as an additional consistency check, one could make sure
	 * that viftable[vifi].v_rsvpd == so, otherwise passing so as
	 * first parameter is pretty useless.
	 */
	viftable[vifi].v_rsvpd = NULL;
	/*
	 * This may seem silly, but we need to be sure we don't over-decrement
	 * the RSVP counter, in case something slips up.
	 */
	if (viftable[vifi].v_rsvp_on) {
	    viftable[vifi].v_rsvp_on = 0;
	    rsvp_on--;
	}
    }
    VIF_UNLOCK();
    return 0;
}

static void
X_ip_rsvp_force_done(struct socket *so)
{
    int vifi;

    /* Don't bother if it is not the right type of socket. */
    if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_RSVP)
	return;

    VIF_LOCK();

    /* The socket may be attached to more than one vif...this
     * is perfectly legal.
     */
    for (vifi = 0; vifi < numvifs; vifi++) {
	if (viftable[vifi].v_rsvpd == so) {
	    viftable[vifi].v_rsvpd = NULL;
	    /* This may seem silly, but we need to be sure we don't
	     * over-decrement the RSVP counter, in case something slips up.
	     */
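	    /*
	     * rsvp_on is the global count of vifs with RSVP enabled;
	     * X_rsvp_input() below drops all RSVP packets while it is
	     * zero, so the per-vif v_rsvp_on flag keeps it exact.
	     */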
2245 */ 2246 if (viftable[vifi].v_rsvp_on) { 2247 viftable[vifi].v_rsvp_on = 0; 2248 rsvp_on--; 2249 } 2250 } 2251 } 2252 2253 VIF_UNLOCK(); 2254 } 2255 2256 static void 2257 X_rsvp_input(struct mbuf *m, int off) 2258 { 2259 int vifi; 2260 struct ip *ip = mtod(m, struct ip *); 2261 struct sockaddr_in rsvp_src = { sizeof rsvp_src, AF_INET }; 2262 struct ifnet *ifp; 2263 2264 if (rsvpdebug) 2265 printf("rsvp_input: rsvp_on %d\n",rsvp_on); 2266 2267 /* Can still get packets with rsvp_on = 0 if there is a local member 2268 * of the group to which the RSVP packet is addressed. But in this 2269 * case we want to throw the packet away. 2270 */ 2271 if (!rsvp_on) { 2272 m_freem(m); 2273 return; 2274 } 2275 2276 if (rsvpdebug) 2277 printf("rsvp_input: check vifs\n"); 2278 2279 #ifdef DIAGNOSTIC 2280 M_ASSERTPKTHDR(m); 2281 #endif 2282 2283 ifp = m->m_pkthdr.rcvif; 2284 2285 VIF_LOCK(); 2286 /* Find which vif the packet arrived on. */ 2287 for (vifi = 0; vifi < numvifs; vifi++) 2288 if (viftable[vifi].v_ifp == ifp) 2289 break; 2290 2291 if (vifi == numvifs || viftable[vifi].v_rsvpd == NULL) { 2292 /* 2293 * Drop the lock here to avoid holding it across rip_input. 2294 * This could make rsvpdebug printfs wrong. If you care, 2295 * record the state of stuff before dropping the lock. 2296 */ 2297 VIF_UNLOCK(); 2298 /* 2299 * If the old-style non-vif-associated socket is set, 2300 * then use it. Otherwise, drop packet since there 2301 * is no specific socket for this vif. 2302 */ 2303 if (ip_rsvpd != NULL) { 2304 if (rsvpdebug) 2305 printf("rsvp_input: Sending packet up old-style socket\n"); 2306 rip_input(m, off); /* xxx */ 2307 } else { 2308 if (rsvpdebug && vifi == numvifs) 2309 printf("rsvp_input: Can't find vif for packet.\n"); 2310 else if (rsvpdebug && viftable[vifi].v_rsvpd == NULL) 2311 printf("rsvp_input: No socket defined for vif %d\n",vifi); 2312 m_freem(m); 2313 } 2314 return; 2315 } 2316 rsvp_src.sin_addr = ip->ip_src; 2317 2318 if (rsvpdebug && m) 2319 printf("rsvp_input: m->m_len = %d, sbspace() = %ld\n", 2320 m->m_len,sbspace(&(viftable[vifi].v_rsvpd->so_rcv))); 2321 2322 if (socket_send(viftable[vifi].v_rsvpd, m, &rsvp_src) < 0) { 2323 if (rsvpdebug) 2324 printf("rsvp_input: Failed to append to socket\n"); 2325 } else { 2326 if (rsvpdebug) 2327 printf("rsvp_input: send packet up\n"); 2328 } 2329 VIF_UNLOCK(); 2330 } 2331 2332 /* 2333 * Code for bandwidth monitors 2334 */ 2335 2336 /* 2337 * Define common interface for timeval-related methods 2338 */ 2339 #define BW_TIMEVALCMP(tvp, uvp, cmp) timevalcmp((tvp), (uvp), cmp) 2340 #define BW_TIMEVALDECR(vvp, uvp) timevalsub((vvp), (uvp)) 2341 #define BW_TIMEVALADD(vvp, uvp) timevaladd((vvp), (uvp)) 2342 2343 static uint32_t 2344 compute_bw_meter_flags(struct bw_upcall *req) 2345 { 2346 uint32_t flags = 0; 2347 2348 if (req->bu_flags & BW_UPCALL_UNIT_PACKETS) 2349 flags |= BW_METER_UNIT_PACKETS; 2350 if (req->bu_flags & BW_UPCALL_UNIT_BYTES) 2351 flags |= BW_METER_UNIT_BYTES; 2352 if (req->bu_flags & BW_UPCALL_GEQ) 2353 flags |= BW_METER_GEQ; 2354 if (req->bu_flags & BW_UPCALL_LEQ) 2355 flags |= BW_METER_LEQ; 2356 2357 return flags; 2358 } 2359 2360 /* 2361 * Add a bw_meter entry 2362 */ 2363 static int 2364 add_bw_upcall(struct bw_upcall *req) 2365 { 2366 struct mfc *mfc; 2367 struct timeval delta = { BW_UPCALL_THRESHOLD_INTERVAL_MIN_SEC, 2368 BW_UPCALL_THRESHOLD_INTERVAL_MIN_USEC }; 2369 struct timeval now; 2370 struct bw_meter *x; 2371 uint32_t flags; 2372 2373 if (!(mrt_api_config & MRT_MFC_BW_UPCALL)) 2374 return EOPNOTSUPP; 2375 2376 /* 
    /* Test if the flags are valid */
    if (!(req->bu_flags & (BW_UPCALL_UNIT_PACKETS | BW_UPCALL_UNIT_BYTES)))
	return EINVAL;
    if (!(req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ)))
	return EINVAL;
    if ((req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
	    == (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
	return EINVAL;

    /* Test if the threshold time interval is valid */
    if (BW_TIMEVALCMP(&req->bu_threshold.b_time, &delta, <))
	return EINVAL;

    flags = compute_bw_meter_flags(req);

    /*
     * Check if we already have the same bw_meter entry
     */
    MFC_LOCK();
    mfc = mfc_find(req->bu_src.s_addr, req->bu_dst.s_addr);
    if (mfc == NULL) {
	MFC_UNLOCK();
	return EADDRNOTAVAIL;
    }
    for (x = mfc->mfc_bw_meter; x != NULL; x = x->bm_mfc_next) {
	if ((BW_TIMEVALCMP(&x->bm_threshold.b_time,
			   &req->bu_threshold.b_time, ==)) &&
	    (x->bm_threshold.b_packets == req->bu_threshold.b_packets) &&
	    (x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) &&
	    (x->bm_flags & BW_METER_USER_FLAGS) == flags) {
	    MFC_UNLOCK();
	    return 0;		/* XXX Already installed */
	}
    }

    /* Allocate the new bw_meter entry */
    x = (struct bw_meter *)malloc(sizeof(*x), M_BWMETER, M_NOWAIT);
    if (x == NULL) {
	MFC_UNLOCK();
	return ENOBUFS;
    }

    /* Set the new bw_meter entry */
    x->bm_threshold.b_time = req->bu_threshold.b_time;
    GET_TIME(now);
    x->bm_start_time = now;
    x->bm_threshold.b_packets = req->bu_threshold.b_packets;
    x->bm_threshold.b_bytes = req->bu_threshold.b_bytes;
    x->bm_measured.b_packets = 0;
    x->bm_measured.b_bytes = 0;
    x->bm_flags = flags;
    x->bm_time_next = NULL;
    x->bm_time_hash = BW_METER_BUCKETS;

    /* Add the new bw_meter entry to the front of entries for this MFC */
    x->bm_mfc = mfc;
    x->bm_mfc_next = mfc->mfc_bw_meter;
    mfc->mfc_bw_meter = x;
    schedule_bw_meter(x, &now);
    MFC_UNLOCK();

    return 0;
}

static void
free_bw_list(struct bw_meter *list)
{
    while (list != NULL) {
	struct bw_meter *x = list;

	list = list->bm_mfc_next;
	unschedule_bw_meter(x);
	free(x, M_BWMETER);
    }
}

/*
 * Delete one or multiple bw_meter entries
 */
static int
del_bw_upcall(struct bw_upcall *req)
{
    struct mfc *mfc;
    struct bw_meter *x;

    if (!(mrt_api_config & MRT_MFC_BW_UPCALL))
	return EOPNOTSUPP;

    MFC_LOCK();
    /* Find the corresponding MFC entry */
    mfc = mfc_find(req->bu_src.s_addr, req->bu_dst.s_addr);
    if (mfc == NULL) {
	MFC_UNLOCK();
	return EADDRNOTAVAIL;
    } else if (req->bu_flags & BW_UPCALL_DELETE_ALL) {
	/*
	 * Delete all bw_meter entries for this mfc
	 */
	struct bw_meter *list;

	list = mfc->mfc_bw_meter;
	mfc->mfc_bw_meter = NULL;
	free_bw_list(list);
	MFC_UNLOCK();
	return 0;
    } else {			/* Delete a single bw_meter entry */
	struct bw_meter *prev;
	uint32_t flags = 0;

	flags = compute_bw_meter_flags(req);
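	/*
	 * A meter counts as the same entry only if the threshold time,
	 * packet count, byte count and the user-visible flags all
	 * match -- the same tuple that add_bw_upcall() compares when
	 * rejecting duplicates.
	 */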
	/* Find the bw_meter entry to delete */
	for (prev = NULL, x = mfc->mfc_bw_meter; x != NULL;
	     prev = x, x = x->bm_mfc_next) {
	    if ((BW_TIMEVALCMP(&x->bm_threshold.b_time,
			       &req->bu_threshold.b_time, ==)) &&
		(x->bm_threshold.b_packets == req->bu_threshold.b_packets) &&
		(x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) &&
		(x->bm_flags & BW_METER_USER_FLAGS) == flags)
		break;
	}
	if (x != NULL) { /* Delete entry from the list for this MFC */
	    if (prev != NULL)
		prev->bm_mfc_next = x->bm_mfc_next;	/* remove from middle */
	    else
		x->bm_mfc->mfc_bw_meter = x->bm_mfc_next; /* new head of list */

	    unschedule_bw_meter(x);
	    MFC_UNLOCK();
	    /* Free the bw_meter entry */
	    free(x, M_BWMETER);
	    return 0;
	} else {
	    MFC_UNLOCK();
	    return EINVAL;
	}
    }
    /* NOTREACHED */
}

/*
 * Perform bandwidth measurement processing that may result in an upcall
 */
static void
bw_meter_receive_packet(struct bw_meter *x, int plen, struct timeval *nowp)
{
    struct timeval delta;

    MFC_LOCK_ASSERT();

    delta = *nowp;
    BW_TIMEVALDECR(&delta, &x->bm_start_time);

    if (x->bm_flags & BW_METER_GEQ) {
	/*
	 * Processing for ">=" type of bw_meter entry
	 */
	if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
	    /* Reset the bw_meter entry */
	    x->bm_start_time = *nowp;
	    x->bm_measured.b_packets = 0;
	    x->bm_measured.b_bytes = 0;
	    x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
	}

	/* Record that a packet is received */
	x->bm_measured.b_packets++;
	x->bm_measured.b_bytes += plen;

	/*
	 * Test if we should deliver an upcall
	 */
	if (!(x->bm_flags & BW_METER_UPCALL_DELIVERED)) {
	    if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
		 (x->bm_measured.b_packets >= x->bm_threshold.b_packets)) ||
		((x->bm_flags & BW_METER_UNIT_BYTES) &&
		 (x->bm_measured.b_bytes >= x->bm_threshold.b_bytes))) {
		/* Prepare an upcall for delivery */
		bw_meter_prepare_upcall(x, nowp);
		x->bm_flags |= BW_METER_UPCALL_DELIVERED;
	    }
	}
    } else if (x->bm_flags & BW_METER_LEQ) {
	/*
	 * Processing for "<=" type of bw_meter entry
	 */
	if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
	    /*
	     * We are behind time with the multicast forwarding table
	     * scanning for "<=" type of bw_meter entries, so test now
	     * if we should deliver an upcall.
	     */
	    if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
		 (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
		((x->bm_flags & BW_METER_UNIT_BYTES) &&
		 (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
		/* Prepare an upcall for delivery */
		bw_meter_prepare_upcall(x, nowp);
	    }
	    /* Reschedule the bw_meter entry */
	    unschedule_bw_meter(x);
	    schedule_bw_meter(x, nowp);
	}

	/* Record that a packet is received */
	x->bm_measured.b_packets++;
	x->bm_measured.b_bytes += plen;

	/*
	 * Test if we should restart the measuring interval
	 */
	if ((x->bm_flags & BW_METER_UNIT_PACKETS &&
	     x->bm_measured.b_packets <= x->bm_threshold.b_packets) ||
	    (x->bm_flags & BW_METER_UNIT_BYTES &&
	     x->bm_measured.b_bytes <= x->bm_threshold.b_bytes)) {
	    /* Don't restart the measuring interval */
	} else {
	    /* Do restart the measuring interval */
	    /*
	     * XXX: note that we don't unschedule and schedule, because this
	     * might be too much overhead per packet.  Instead, when we
	     * process all entries for a given timer hash bin, we check
	     * whether it is really a timeout.  If not, we reschedule at
	     * that time.
	     */
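	    /*
	     * At this point the measurement has already exceeded the
	     * "<=" threshold for every configured unit, so the current
	     * window can no longer produce an upcall; it is safe to
	     * start a fresh interval right away.
	     */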
2599 */ 2600 x->bm_start_time = *nowp; 2601 x->bm_measured.b_packets = 0; 2602 x->bm_measured.b_bytes = 0; 2603 x->bm_flags &= ~BW_METER_UPCALL_DELIVERED; 2604 } 2605 } 2606 } 2607 2608 /* 2609 * Prepare a bandwidth-related upcall 2610 */ 2611 static void 2612 bw_meter_prepare_upcall(struct bw_meter *x, struct timeval *nowp) 2613 { 2614 struct timeval delta; 2615 struct bw_upcall *u; 2616 2617 MFC_LOCK_ASSERT(); 2618 2619 /* 2620 * Compute the measured time interval 2621 */ 2622 delta = *nowp; 2623 BW_TIMEVALDECR(&delta, &x->bm_start_time); 2624 2625 /* 2626 * If there are too many pending upcalls, deliver them now 2627 */ 2628 if (bw_upcalls_n >= BW_UPCALLS_MAX) 2629 bw_upcalls_send(); 2630 2631 /* 2632 * Set the bw_upcall entry 2633 */ 2634 u = &bw_upcalls[bw_upcalls_n++]; 2635 u->bu_src = x->bm_mfc->mfc_origin; 2636 u->bu_dst = x->bm_mfc->mfc_mcastgrp; 2637 u->bu_threshold.b_time = x->bm_threshold.b_time; 2638 u->bu_threshold.b_packets = x->bm_threshold.b_packets; 2639 u->bu_threshold.b_bytes = x->bm_threshold.b_bytes; 2640 u->bu_measured.b_time = delta; 2641 u->bu_measured.b_packets = x->bm_measured.b_packets; 2642 u->bu_measured.b_bytes = x->bm_measured.b_bytes; 2643 u->bu_flags = 0; 2644 if (x->bm_flags & BW_METER_UNIT_PACKETS) 2645 u->bu_flags |= BW_UPCALL_UNIT_PACKETS; 2646 if (x->bm_flags & BW_METER_UNIT_BYTES) 2647 u->bu_flags |= BW_UPCALL_UNIT_BYTES; 2648 if (x->bm_flags & BW_METER_GEQ) 2649 u->bu_flags |= BW_UPCALL_GEQ; 2650 if (x->bm_flags & BW_METER_LEQ) 2651 u->bu_flags |= BW_UPCALL_LEQ; 2652 } 2653 2654 /* 2655 * Send the pending bandwidth-related upcalls 2656 */ 2657 static void 2658 bw_upcalls_send(void) 2659 { 2660 struct mbuf *m; 2661 int len = bw_upcalls_n * sizeof(bw_upcalls[0]); 2662 struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET }; 2663 static struct igmpmsg igmpmsg = { 0, /* unused1 */ 2664 0, /* unused2 */ 2665 IGMPMSG_BW_UPCALL,/* im_msgtype */ 2666 0, /* im_mbz */ 2667 0, /* im_vif */ 2668 0, /* unused3 */ 2669 { 0 }, /* im_src */ 2670 { 0 } }; /* im_dst */ 2671 2672 MFC_LOCK_ASSERT(); 2673 2674 if (bw_upcalls_n == 0) 2675 return; /* No pending upcalls */ 2676 2677 bw_upcalls_n = 0; 2678 2679 /* 2680 * Allocate a new mbuf, initialize it with the header and 2681 * the payload for the pending calls. 2682 */ 2683 MGETHDR(m, M_DONTWAIT, MT_HEADER); 2684 if (m == NULL) { 2685 log(LOG_WARNING, "bw_upcalls_send: cannot allocate mbuf\n"); 2686 return; 2687 } 2688 2689 m->m_len = m->m_pkthdr.len = 0; 2690 m_copyback(m, 0, sizeof(struct igmpmsg), (caddr_t)&igmpmsg); 2691 m_copyback(m, sizeof(struct igmpmsg), len, (caddr_t)&bw_upcalls[0]); 2692 2693 /* 2694 * Send the upcalls 2695 * XXX do we need to set the address in k_igmpsrc ? 2696 */ 2697 mrtstat.mrts_upcalls++; 2698 if (socket_send(ip_mrouter, m, &k_igmpsrc) < 0) { 2699 log(LOG_WARNING, "bw_upcalls_send: ip_mrouter socket queue full\n"); 2700 ++mrtstat.mrts_upq_sockfull; 2701 } 2702 } 2703 2704 /* 2705 * Compute the timeout hash value for the bw_meter entries 2706 */ 2707 #define BW_METER_TIMEHASH(bw_meter, hash) \ 2708 do { \ 2709 struct timeval next_timeval = (bw_meter)->bm_start_time; \ 2710 \ 2711 BW_TIMEVALADD(&next_timeval, &(bw_meter)->bm_threshold.b_time); \ 2712 (hash) = next_timeval.tv_sec; \ 2713 if (next_timeval.tv_usec) \ 2714 (hash)++; /* XXX: make sure we don't timeout early */ \ 2715 (hash) %= BW_METER_BUCKETS; \ 2716 } while (0) 2717 2718 /* 2719 * Schedule a timer to process periodically bw_meter entry of type "<=" 2720 * by linking the entry in the proper hash bucket. 
/*
 * Schedule a timer to periodically process "<=" type bw_meter entries,
 * by linking the entry into the proper hash bucket.
 */
static void
schedule_bw_meter(struct bw_meter *x, struct timeval *nowp)
{
    int time_hash;

    MFC_LOCK_ASSERT();

    if (!(x->bm_flags & BW_METER_LEQ))
	return;		/* XXX: we schedule timers only for "<=" entries */

    /*
     * Reset the bw_meter entry
     */
    x->bm_start_time = *nowp;
    x->bm_measured.b_packets = 0;
    x->bm_measured.b_bytes = 0;
    x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;

    /*
     * Compute the timeout hash value and insert the entry
     */
    BW_METER_TIMEHASH(x, time_hash);
    x->bm_time_next = bw_meter_timers[time_hash];
    bw_meter_timers[time_hash] = x;
    x->bm_time_hash = time_hash;
}

/*
 * Unschedule the periodic processing of a "<=" type bw_meter entry,
 * by removing the entry from the proper hash bucket.
 */
static void
unschedule_bw_meter(struct bw_meter *x)
{
    int time_hash;
    struct bw_meter *prev, *tmp;

    MFC_LOCK_ASSERT();

    if (!(x->bm_flags & BW_METER_LEQ))
	return;		/* XXX: we schedule timers only for "<=" entries */

    /*
     * Compute the timeout hash value and delete the entry
     */
    time_hash = x->bm_time_hash;
    if (time_hash >= BW_METER_BUCKETS)
	return;		/* Entry was not scheduled */

    for (prev = NULL, tmp = bw_meter_timers[time_hash];
	 tmp != NULL; prev = tmp, tmp = tmp->bm_time_next)
	if (tmp == x)
	    break;

    if (tmp == NULL)
	panic("unschedule_bw_meter: bw_meter entry not found");

    if (prev != NULL)
	prev->bm_time_next = x->bm_time_next;
    else
	bw_meter_timers[time_hash] = x->bm_time_next;

    x->bm_time_next = NULL;
    x->bm_time_hash = BW_METER_BUCKETS;
}


/*
 * Process all "<=" type bw_meter entries that are due now, and for each
 * entry prepare an upcall if necessary.  Each processed entry is
 * rescheduled again for the (periodic) processing.
 *
 * This is run periodically (once per second normally).  On each round,
 * all the potentially matching entries are in the hash slot that we are
 * looking at.
 */
static void
bw_meter_process(void)
{
    static uint32_t last_tv_sec;	/* last time we processed this */

    uint32_t loops;
    int i;
    struct timeval now, process_endtime;

    GET_TIME(now);
    if (last_tv_sec == now.tv_sec)
	return;			/* nothing to do */

    loops = now.tv_sec - last_tv_sec;
    last_tv_sec = now.tv_sec;
    if (loops > BW_METER_BUCKETS)
	loops = BW_METER_BUCKETS;

    MFC_LOCK();
    /*
     * Process all bins of bw_meter entries from the one after the last
     * processed to the current one.  On entry, i points to the last bucket
     * visited, so we need to increment i at the beginning of the loop.
     */
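    /*
     * Example: if the previous run was 3 seconds ago, loops == 3 and
     * the three buckets following the last visited one are drained;
     * after BW_METER_BUCKETS seconds of inactivity every bucket gets
     * visited exactly once.
     */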
    for (i = (now.tv_sec - loops) % BW_METER_BUCKETS; loops > 0; loops--) {
	struct bw_meter *x, *tmp_list;

	if (++i >= BW_METER_BUCKETS)
	    i = 0;

	/* Disconnect the list of bw_meter entries from the bin */
	tmp_list = bw_meter_timers[i];
	bw_meter_timers[i] = NULL;

	/* Process the list of bw_meter entries */
	while (tmp_list != NULL) {
	    x = tmp_list;
	    tmp_list = tmp_list->bm_time_next;

	    /* Test if the time interval is over */
	    process_endtime = x->bm_start_time;
	    BW_TIMEVALADD(&process_endtime, &x->bm_threshold.b_time);
	    if (BW_TIMEVALCMP(&process_endtime, &now, >)) {
		/* Not yet: reschedule, but don't reset */
		int time_hash;

		BW_METER_TIMEHASH(x, time_hash);
		if (time_hash == i && process_endtime.tv_sec == now.tv_sec) {
		    /*
		     * XXX: somehow the bin processing is a bit ahead of time.
		     * Put the entry in the next bin.
		     */
		    if (++time_hash >= BW_METER_BUCKETS)
			time_hash = 0;
		}
		x->bm_time_next = bw_meter_timers[time_hash];
		bw_meter_timers[time_hash] = x;
		x->bm_time_hash = time_hash;

		continue;
	    }

	    /*
	     * Test if we should deliver an upcall
	     */
	    if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
		 (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
		((x->bm_flags & BW_METER_UNIT_BYTES) &&
		 (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
		/* Prepare an upcall for delivery */
		bw_meter_prepare_upcall(x, &now);
	    }

	    /*
	     * Reschedule for next processing
	     */
	    schedule_bw_meter(x, &now);
	}
    }

    /* Send all upcalls that are pending delivery */
    bw_upcalls_send();

    MFC_UNLOCK();
}

/*
 * A periodic function for sending all upcalls that are pending delivery
 */
static void
expire_bw_upcalls_send(void *unused)
{
    MFC_LOCK();
    bw_upcalls_send();
    MFC_UNLOCK();

    callout_reset(&bw_upcalls_ch, BW_UPCALLS_PERIOD,
	expire_bw_upcalls_send, NULL);
}

/*
 * A periodic function for scanning the multicast forwarding table and
 * processing all "<=" bw_meter entries.
 */
static void
expire_bw_meter_process(void *unused)
{
    if (mrt_api_config & MRT_MFC_BW_UPCALL)
	bw_meter_process();

    callout_reset(&bw_meter_ch, BW_METER_PERIOD, expire_bw_meter_process, NULL);
}

/*
 * End of bandwidth monitoring code
 */

#ifdef PIM
/*
 * Send the packet up to the user daemon, or eventually do kernel encapsulation
 */
static int
pim_register_send(struct ip *ip, struct vif *vifp,
	struct mbuf *m, struct mfc *rt)
{
    struct mbuf *mb_copy, *mm;

    if (mrtdebug & DEBUG_PIM)
	log(LOG_DEBUG, "pim_register_send: ");

    mb_copy = pim_register_prepare(ip, m);
    if (mb_copy == NULL)
	return ENOBUFS;

    /*
     * Send all the fragments.  Note that the mbuf for each fragment
     * is freed by the sending machinery.
     */
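    /*
     * pim_register_prepare() may have returned a chain of IP fragments
     * linked through m_nextpkt; each one is unlinked below before it is
     * either encapsulated toward the RP or passed up to the daemon.
     */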
2936 */ 2937 for (mm = mb_copy; mm; mm = mb_copy) { 2938 mb_copy = mm->m_nextpkt; 2939 mm->m_nextpkt = 0; 2940 mm = m_pullup(mm, sizeof(struct ip)); 2941 if (mm != NULL) { 2942 ip = mtod(mm, struct ip *); 2943 if ((mrt_api_config & MRT_MFC_RP) && 2944 (rt->mfc_rp.s_addr != INADDR_ANY)) { 2945 pim_register_send_rp(ip, vifp, mm, rt); 2946 } else { 2947 pim_register_send_upcall(ip, vifp, mm, rt); 2948 } 2949 } 2950 } 2951 2952 return 0; 2953 } 2954 2955 /* 2956 * Return a copy of the data packet that is ready for PIM Register 2957 * encapsulation. 2958 * XXX: Note that in the returned copy the IP header is a valid one. 2959 */ 2960 static struct mbuf * 2961 pim_register_prepare(struct ip *ip, struct mbuf *m) 2962 { 2963 struct mbuf *mb_copy = NULL; 2964 int mtu; 2965 2966 /* Take care of delayed checksums */ 2967 if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { 2968 in_delayed_cksum(m); 2969 m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; 2970 } 2971 2972 /* 2973 * Copy the old packet & pullup its IP header into the 2974 * new mbuf so we can modify it. 2975 */ 2976 mb_copy = m_copypacket(m, M_DONTWAIT); 2977 if (mb_copy == NULL) 2978 return NULL; 2979 mb_copy = m_pullup(mb_copy, ip->ip_hl << 2); 2980 if (mb_copy == NULL) 2981 return NULL; 2982 2983 /* take care of the TTL */ 2984 ip = mtod(mb_copy, struct ip *); 2985 --ip->ip_ttl; 2986 2987 /* Compute the MTU after the PIM Register encapsulation */ 2988 mtu = 0xffff - sizeof(pim_encap_iphdr) - sizeof(pim_encap_pimhdr); 2989 2990 if (ip->ip_len <= mtu) { 2991 /* Turn the IP header into a valid one */ 2992 ip->ip_len = htons(ip->ip_len); 2993 ip->ip_off = htons(ip->ip_off); 2994 ip->ip_sum = 0; 2995 ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2); 2996 } else { 2997 /* Fragment the packet */ 2998 if (ip_fragment(ip, &mb_copy, mtu, 0, CSUM_DELAY_IP) != 0) { 2999 m_freem(mb_copy); 3000 return NULL; 3001 } 3002 } 3003 return mb_copy; 3004 } 3005 3006 /* 3007 * Send an upcall with the data packet to the user-level process. 3008 */ 3009 static int 3010 pim_register_send_upcall(struct ip *ip, struct vif *vifp, 3011 struct mbuf *mb_copy, struct mfc *rt) 3012 { 3013 struct mbuf *mb_first; 3014 int len = ntohs(ip->ip_len); 3015 struct igmpmsg *im; 3016 struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET }; 3017 3018 VIF_LOCK_ASSERT(); 3019 3020 /* 3021 * Add a new mbuf with an upcall header 3022 */ 3023 MGETHDR(mb_first, M_DONTWAIT, MT_HEADER); 3024 if (mb_first == NULL) { 3025 m_freem(mb_copy); 3026 return ENOBUFS; 3027 } 3028 mb_first->m_data += max_linkhdr; 3029 mb_first->m_pkthdr.len = len + sizeof(struct igmpmsg); 3030 mb_first->m_len = sizeof(struct igmpmsg); 3031 mb_first->m_next = mb_copy; 3032 3033 /* Send message to routing daemon */ 3034 im = mtod(mb_first, struct igmpmsg *); 3035 im->im_msgtype = IGMPMSG_WHOLEPKT; 3036 im->im_mbz = 0; 3037 im->im_vif = vifp - viftable; 3038 im->im_src = ip->ip_src; 3039 im->im_dst = ip->ip_dst; 3040 3041 k_igmpsrc.sin_addr = ip->ip_src; 3042 3043 mrtstat.mrts_upcalls++; 3044 3045 if (socket_send(ip_mrouter, mb_first, &k_igmpsrc) < 0) { 3046 if (mrtdebug & DEBUG_PIM) 3047 log(LOG_WARNING, 3048 "mcast: pim_register_send_upcall: ip_mrouter socket queue full"); 3049 ++mrtstat.mrts_upq_sockfull; 3050 return ENOBUFS; 3051 } 3052 3053 /* Keep statistics */ 3054 pimstat.pims_snd_registers_msgs++; 3055 pimstat.pims_snd_registers_bytes += len; 3056 3057 return 0; 3058 } 3059 3060 /* 3061 * Encapsulate the data packet in PIM Register message and send it to the RP. 
static int
pim_register_send_rp(struct ip *ip, struct vif *vifp,
	struct mbuf *mb_copy, struct mfc *rt)
{
    struct mbuf *mb_first;
    struct ip *ip_outer;
    struct pim_encap_pimhdr *pimhdr;
    int len = ntohs(ip->ip_len);
    vifi_t vifi = rt->mfc_parent;

    VIF_LOCK_ASSERT();

    if ((vifi >= numvifs) || (viftable[vifi].v_lcl_addr.s_addr == 0)) {
	m_freem(mb_copy);
	return EADDRNOTAVAIL;		/* The iif vif is invalid */
    }

    /*
     * Add a new mbuf with the encapsulating header
     */
    MGETHDR(mb_first, M_DONTWAIT, MT_HEADER);
    if (mb_first == NULL) {
	m_freem(mb_copy);
	return ENOBUFS;
    }
    mb_first->m_data += max_linkhdr;
    mb_first->m_len = sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr);
    mb_first->m_next = mb_copy;

    mb_first->m_pkthdr.len = len + mb_first->m_len;

    /*
     * Fill in the encapsulating IP and PIM header
     */
    ip_outer = mtod(mb_first, struct ip *);
    *ip_outer = pim_encap_iphdr;
    ip_outer->ip_id = ip_newid();
    ip_outer->ip_len = len + sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr);
    ip_outer->ip_src = viftable[vifi].v_lcl_addr;
    ip_outer->ip_dst = rt->mfc_rp;
    /*
     * Copy the inner header TOS to the outer header, and take care of the
     * IP_DF bit.
     */
    ip_outer->ip_tos = ip->ip_tos;
    if (ntohs(ip->ip_off) & IP_DF)
	ip_outer->ip_off |= IP_DF;
    pimhdr = (struct pim_encap_pimhdr *)((caddr_t)ip_outer
	+ sizeof(pim_encap_iphdr));
    *pimhdr = pim_encap_pimhdr;
    /* If the iif crosses a border, set the Border-bit */
    if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_BORDER_VIF & mrt_api_config)
	pimhdr->flags |= htonl(PIM_BORDER_REGISTER);

    mb_first->m_data += sizeof(pim_encap_iphdr);
    pimhdr->pim.pim_cksum = in_cksum(mb_first, sizeof(pim_encap_pimhdr));
    mb_first->m_data -= sizeof(pim_encap_iphdr);

    if (vifp->v_rate_limit == 0)
	tbf_send_packet(vifp, mb_first);
    else
	tbf_control(vifp, mb_first, ip, ip_outer->ip_len);

    /* Keep statistics */
    pimstat.pims_snd_registers_msgs++;
    pimstat.pims_snd_registers_bytes += len;

    return 0;
}

/*
 * PIM-SMv2 and PIM-DM messages processing.
 * Receives and verifies the PIM control messages, and passes them
 * up to the listening socket, using rip_input().
 * The only message with special processing is the PIM_REGISTER message
 * (used by PIM-SM): the PIM header is stripped off, and the inner packet
 * is passed to if_simloop().
 */
void
pim_input(struct mbuf *m, int off)
{
    struct ip *ip = mtod(m, struct ip *);
    struct pim *pim;
    int minlen;
    int datalen = ip->ip_len;
    int ip_tos;
    int iphlen = off;

    /* Keep statistics */
    pimstat.pims_rcv_total_msgs++;
    pimstat.pims_rcv_total_bytes += datalen;

    /*
     * Validate lengths
     */
    if (datalen < PIM_MINLEN) {
	pimstat.pims_rcv_tooshort++;
	log(LOG_ERR, "pim_input: packet size too small %d from %lx\n",
	    datalen, (u_long)ip->ip_src.s_addr);
	m_freem(m);
	return;
    }

    /*
     * If the packet is at least as big as a REGISTER, go ahead
     * and grab the PIM REGISTER header size, to avoid another
     * possible m_pullup() later.
     *
     * PIM_MINLEN == pimhdr + u_int32_t == 4 + 4 = 8
     * PIM_REG_MINLEN == pimhdr + reghdr + encap_iphdr == 4 + 4 + 20 = 28
     */
    minlen = iphlen + (datalen >= PIM_REG_MINLEN ? PIM_REG_MINLEN : PIM_MINLEN);
    /*
     * Get the IP and PIM headers in contiguous memory, and
     * possibly the PIM REGISTER header.
     */
    if ((m->m_flags & M_EXT || m->m_len < minlen) &&
	(m = m_pullup(m, minlen)) == 0) {
	log(LOG_ERR, "pim_input: m_pullup failure\n");
	return;
    }
    /* m_pullup() may have given us a new mbuf so reset ip. */
    ip = mtod(m, struct ip *);
    ip_tos = ip->ip_tos;

    /* adjust mbuf to point to the PIM header */
    m->m_data += iphlen;
    m->m_len  -= iphlen;
    pim = mtod(m, struct pim *);

    /*
     * Validate checksum.  If PIM REGISTER, exclude the data packet.
     *
     * XXX: some older PIMv2 implementations don't make this distinction,
     * so for compatibility reasons perform the checksum over part of the
     * message, and if error, then over the whole message.
     */
    if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER && in_cksum(m, PIM_MINLEN) == 0) {
	/* do nothing, checksum okay */
    } else if (in_cksum(m, datalen)) {
	pimstat.pims_rcv_badsum++;
	if (mrtdebug & DEBUG_PIM)
	    log(LOG_DEBUG, "pim_input: invalid checksum");
	m_freem(m);
	return;
    }

    /* PIM version check */
    if (PIM_VT_V(pim->pim_vt) < PIM_VERSION) {
	pimstat.pims_rcv_badversion++;
	log(LOG_ERR, "pim_input: incorrect version %d, expecting %d\n",
	    PIM_VT_V(pim->pim_vt), PIM_VERSION);
	m_freem(m);
	return;
    }

    /* restore mbuf back to the outer IP */
    m->m_data -= iphlen;
    m->m_len  += iphlen;

    if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER) {
	/*
	 * Since this is a REGISTER, we'll make a copy of the register
	 * headers ip + pim + u_int32 + encap_ip, to be passed up to the
	 * routing daemon.
	 */
	struct sockaddr_in dst = { sizeof(dst), AF_INET };
	struct mbuf *mcp;
	struct ip *encap_ip;
	u_int32_t *reghdr;
	struct ifnet *vifp;

	VIF_LOCK();
	if ((reg_vif_num >= numvifs) || (reg_vif_num == VIFI_INVALID)) {
	    VIF_UNLOCK();
	    if (mrtdebug & DEBUG_PIM)
		log(LOG_DEBUG,
		    "pim_input: register vif not set: %d\n", reg_vif_num);
	    m_freem(m);
	    return;
	}
	/* XXX need refcnt? */
	vifp = viftable[reg_vif_num].v_ifp;
	VIF_UNLOCK();

	/*
	 * Validate length
	 */
	if (datalen < PIM_REG_MINLEN) {
	    pimstat.pims_rcv_tooshort++;
	    pimstat.pims_rcv_badregisters++;
	    log(LOG_ERR,
		"pim_input: register packet size too small %d from %lx\n",
		datalen, (u_long)ip->ip_src.s_addr);
	    m_freem(m);
	    return;
	}

	reghdr = (u_int32_t *)(pim + 1);
	encap_ip = (struct ip *)(reghdr + 1);

	if (mrtdebug & DEBUG_PIM) {
	    log(LOG_DEBUG,
		"pim_input[register], encap_ip: %lx -> %lx, encap_ip len %d\n",
		(u_long)ntohl(encap_ip->ip_src.s_addr),
		(u_long)ntohl(encap_ip->ip_dst.s_addr),
		ntohs(encap_ip->ip_len));
	}

	/* verify the version number of the inner packet */
	if (encap_ip->ip_v != IPVERSION) {
	    pimstat.pims_rcv_badregisters++;
	    if (mrtdebug & DEBUG_PIM) {
		log(LOG_DEBUG, "pim_input: invalid IP version (%d) "
		    "of the inner packet\n", encap_ip->ip_v);
	    }
	    m_freem(m);
	    return;
	}

	/* verify the inner packet is destined to a mcast group */
	if (!IN_MULTICAST(ntohl(encap_ip->ip_dst.s_addr))) {
	    pimstat.pims_rcv_badregisters++;
	    if (mrtdebug & DEBUG_PIM)
		log(LOG_DEBUG,
		    "pim_input: inner packet of register is not "
		    "multicast %lx\n",
		    (u_long)ntohl(encap_ip->ip_dst.s_addr));
	    m_freem(m);
	    return;
	}

	/* If a NULL_REGISTER, pass it to the daemon */
	if ((ntohl(*reghdr) & PIM_NULL_REGISTER))
	    goto pim_input_to_daemon;

	/*
	 * Copy the TOS from the outer IP header to the inner IP header.
	 */
	if (encap_ip->ip_tos != ip_tos) {
	    /* Outer TOS -> inner TOS */
	    encap_ip->ip_tos = ip_tos;
	    /* Recompute the inner header checksum.  Sigh... */

	    /* adjust mbuf to point to the inner IP header */
	    m->m_data += (iphlen + PIM_MINLEN);
	    m->m_len  -= (iphlen + PIM_MINLEN);

	    encap_ip->ip_sum = 0;
	    encap_ip->ip_sum = in_cksum(m, encap_ip->ip_hl << 2);

	    /* restore mbuf to point back to the outer IP header */
	    m->m_data -= (iphlen + PIM_MINLEN);
	    m->m_len  += (iphlen + PIM_MINLEN);
	}

	/*
	 * Decapsulate the inner IP packet and loopback to forward it
	 * as a normal multicast packet.  Also, make a copy of the
	 *    outer_iphdr + pimhdr + reghdr + encap_iphdr
	 * to pass to the daemon later, so it can take the appropriate
	 * actions (e.g., send back PIM_REGISTER_STOP).
	 * XXX: here m->m_data points to the outer IP header.
	 */
	mcp = m_copy(m, 0, iphlen + PIM_REG_MINLEN);
	if (mcp == NULL) {
	    log(LOG_ERR,
		"pim_input: pim register: could not copy register head\n");
	    m_freem(m);
	    return;
	}

	/* Keep statistics */
	/* XXX: registers_bytes include only the encap. mcast pkt */
	pimstat.pims_rcv_registers_msgs++;
	pimstat.pims_rcv_registers_bytes += ntohs(encap_ip->ip_len);

	/*
	 * forward the inner ip packet; point m_data at the inner ip.
	 */
	m_adj(m, iphlen + PIM_MINLEN);

	if (mrtdebug & DEBUG_PIM) {
	    log(LOG_DEBUG,
		"pim_input: forwarding decapsulated register: "
		"src %lx, dst %lx, vif %d\n",
		(u_long)ntohl(encap_ip->ip_src.s_addr),
		(u_long)ntohl(encap_ip->ip_dst.s_addr),
		reg_vif_num);
	}
	/* NB: vifp was collected above; can it change on us? */
	if_simloop(vifp, m, dst.sin_family, 0);

	/* prepare the register head to send to the mrouting daemon */
	m = mcp;
    }

pim_input_to_daemon:
    /*
     * Pass the PIM message up to the daemon; if it is a Register message,
     * pass the 'head' only up to the daemon.  This includes the
     * outer IP header, PIM header, PIM-Register header and the
     * inner IP header.
     * XXX: the outer IP header pkt size of a Register is not adjusted to
     * reflect the fact that the inner multicast data is truncated.
     */
    rip_input(m, iphlen);

    return;
}
#endif /* PIM */

static int
ip_mroute_modevent(module_t mod, int type, void *unused)
{
    switch (type) {
    case MOD_LOAD:
	mtx_init(&mrouter_mtx, "mrouter initialization", NULL, MTX_DEF);
	MFC_LOCK_INIT();
	VIF_LOCK_INIT();
	ip_mrouter_reset();
	ip_mcast_src = X_ip_mcast_src;
	ip_mforward = X_ip_mforward;
	ip_mrouter_done = X_ip_mrouter_done;
	ip_mrouter_get = X_ip_mrouter_get;
	ip_mrouter_set = X_ip_mrouter_set;
	ip_rsvp_force_done = X_ip_rsvp_force_done;
	ip_rsvp_vif = X_ip_rsvp_vif;
	legal_vif_num = X_legal_vif_num;
	mrt_ioctl = X_mrt_ioctl;
	rsvp_input_p = X_rsvp_input;
	break;

    case MOD_UNLOAD:
	/*
	 * Typically module unload happens after the user-level
	 * process has shutdown the kernel services (the check
	 * below ensures someone can't just yank the module out
	 * from under a running process).  But if the module is
	 * just loaded and then unloaded w/o starting up a user
	 * process we still need to cleanup.
	 */
	if (ip_mrouter)
	    return EINVAL;

	X_ip_mrouter_done();
	ip_mcast_src = NULL;
	ip_mforward = NULL;
	ip_mrouter_done = NULL;
	ip_mrouter_get = NULL;
	ip_mrouter_set = NULL;
	ip_rsvp_force_done = NULL;
	ip_rsvp_vif = NULL;
	legal_vif_num = NULL;
	mrt_ioctl = NULL;
	rsvp_input_p = NULL;
	VIF_LOCK_DESTROY();
	MFC_LOCK_DESTROY();
	mtx_destroy(&mrouter_mtx);
	break;
    default:
	return EOPNOTSUPP;
    }
    return 0;
}

static moduledata_t ip_mroutemod = {
    "ip_mroute",
    ip_mroute_modevent,
    0
};
DECLARE_MODULE(ip_mroute, ip_mroutemod, SI_SUB_PSEUDO, SI_ORDER_ANY);