1 /* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */ 2 3 /* 4 * Copyright 2001 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 /* 39 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net) 40 * All rights reserved. 41 * 42 * Redistribution and use in source and binary forms, with or without 43 * modification, are permitted provided that the following conditions 44 * are met: 45 * 1. Redistributions of source code must retain the above copyright 46 * notice, this list of conditions and the following disclaimer. 47 * 2. Redistributions in binary form must reproduce the above copyright 48 * notice, this list of conditions and the following disclaimer in the 49 * documentation and/or other materials provided with the distribution. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 53 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 54 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 55 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 56 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 57 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 59 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 60 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 61 * POSSIBILITY OF SUCH DAMAGE. 62 * 63 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp 64 */ 65 66 /* 67 * Network interface bridge support. 68 * 69 * TODO: 70 * 71 * - Currently only supports Ethernet-like interfaces (Ethernet, 72 * 802.11, VLANs on Ethernet, etc.) 
Figure out a nice way 73 * to bridge other types of interfaces (FDDI-FDDI, and maybe 74 * consider heterogenous bridges). 75 */ 76 77 #include <sys/cdefs.h> 78 __FBSDID("$FreeBSD$"); 79 80 #include "opt_inet.h" 81 #include "opt_inet6.h" 82 #include "opt_carp.h" 83 84 #include <sys/param.h> 85 #include <sys/mbuf.h> 86 #include <sys/malloc.h> 87 #include <sys/protosw.h> 88 #include <sys/systm.h> 89 #include <sys/time.h> 90 #include <sys/socket.h> /* for net/if.h */ 91 #include <sys/sockio.h> 92 #include <sys/ctype.h> /* string functions */ 93 #include <sys/kernel.h> 94 #include <sys/random.h> 95 #include <sys/syslog.h> 96 #include <sys/sysctl.h> 97 #include <vm/uma.h> 98 #include <sys/module.h> 99 #include <sys/priv.h> 100 #include <sys/proc.h> 101 #include <sys/lock.h> 102 #include <sys/mutex.h> 103 #include <sys/rwlock.h> 104 105 #include <net/bpf.h> 106 #include <net/if.h> 107 #include <net/if_clone.h> 108 #include <net/if_dl.h> 109 #include <net/if_types.h> 110 #include <net/if_var.h> 111 #include <net/pfil.h> 112 #include <net/vnet.h> 113 114 #include <netinet/in.h> /* for struct arpcom */ 115 #include <netinet/in_systm.h> 116 #include <netinet/in_var.h> 117 #include <netinet/ip.h> 118 #include <netinet/ip_var.h> 119 #ifdef INET6 120 #include <netinet/ip6.h> 121 #include <netinet6/ip6_var.h> 122 #endif 123 #if defined(INET) || defined(INET6) 124 #ifdef DEV_CARP 125 #include <netinet/ip_carp.h> 126 #endif 127 #endif 128 #include <machine/in_cksum.h> 129 #include <netinet/if_ether.h> /* for struct arpcom */ 130 #include <net/bridgestp.h> 131 #include <net/if_bridgevar.h> 132 #include <net/if_llc.h> 133 #include <net/if_vlan_var.h> 134 135 #include <net/route.h> 136 #include <netinet/ip_fw.h> 137 #include <netinet/ipfw/ip_fw_private.h> 138 139 /* 140 * Size of the route hash table. Must be a power of two. 141 */ 142 #ifndef BRIDGE_RTHASH_SIZE 143 #define BRIDGE_RTHASH_SIZE 1024 144 #endif 145 146 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1) 147 148 /* 149 * Maximum number of addresses to cache. 150 */ 151 #ifndef BRIDGE_RTABLE_MAX 152 #define BRIDGE_RTABLE_MAX 100 153 #endif 154 155 /* 156 * Timeout (in seconds) for entries learned dynamically. 157 */ 158 #ifndef BRIDGE_RTABLE_TIMEOUT 159 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */ 160 #endif 161 162 /* 163 * Number of seconds between walks of the route list. 164 */ 165 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD 166 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60) 167 #endif 168 169 /* 170 * List of capabilities to possibly mask on the member interface. 171 */ 172 #define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM) 173 174 /* 175 * List of capabilities to strip 176 */ 177 #define BRIDGE_IFCAPS_STRIP IFCAP_LRO 178 179 /* 180 * Bridge interface list entry. 181 */ 182 struct bridge_iflist { 183 LIST_ENTRY(bridge_iflist) bif_next; 184 struct ifnet *bif_ifp; /* member if */ 185 struct bstp_port bif_stp; /* STP state */ 186 uint32_t bif_flags; /* member if flags */ 187 int bif_savedcaps; /* saved capabilities */ 188 uint32_t bif_addrmax; /* max # of addresses */ 189 uint32_t bif_addrcnt; /* cur. # of addresses */ 190 uint32_t bif_addrexceeded;/* # of address violations */ 191 }; 192 193 /* 194 * Bridge route node. 
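 *
 * One node is allocated (from bridge_rtnode_zone) per learned or static
 * address.  A node is reachable two ways: via sc_rthash, a chained hash
 * table keyed on the Ethernet address and VLAN, and via sc_rtlist, a plain
 * list used for aging and full flushes.  Because BRIDGE_RTHASH_SIZE is a
 * power of two, the bucket index can be derived with a mask instead of a
 * modulo, roughly (an illustrative sketch, not the exact helper used
 * later in this file):
 *
 *	bucket = hash(brt_addr) & BRIDGE_RTHASH_MASK;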
195 */ 196 struct bridge_rtnode { 197 LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */ 198 LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */ 199 struct bridge_iflist *brt_dst; /* destination if */ 200 unsigned long brt_expire; /* expiration time */ 201 uint8_t brt_flags; /* address flags */ 202 uint8_t brt_addr[ETHER_ADDR_LEN]; 203 uint16_t brt_vlan; /* vlan id */ 204 }; 205 #define brt_ifp brt_dst->bif_ifp 206 207 /* 208 * Software state for each bridge. 209 */ 210 struct bridge_softc { 211 struct ifnet *sc_ifp; /* make this an interface */ 212 LIST_ENTRY(bridge_softc) sc_list; 213 struct mtx sc_mtx; 214 struct cv sc_cv; 215 uint32_t sc_brtmax; /* max # of addresses */ 216 uint32_t sc_brtcnt; /* cur. # of addresses */ 217 uint32_t sc_brttimeout; /* rt timeout in seconds */ 218 struct callout sc_brcallout; /* bridge callout */ 219 uint32_t sc_iflist_ref; /* refcount for sc_iflist */ 220 uint32_t sc_iflist_xcnt; /* refcount for sc_iflist */ 221 LIST_HEAD(, bridge_iflist) sc_iflist; /* member interface list */ 222 LIST_HEAD(, bridge_rtnode) *sc_rthash; /* our forwarding table */ 223 LIST_HEAD(, bridge_rtnode) sc_rtlist; /* list version of above */ 224 uint32_t sc_rthash_key; /* key for hash */ 225 LIST_HEAD(, bridge_iflist) sc_spanlist; /* span ports list */ 226 struct bstp_state sc_stp; /* STP state */ 227 uint32_t sc_brtexceeded; /* # of cache drops */ 228 struct ifnet *sc_ifaddr; /* member mac copied from */ 229 u_char sc_defaddr[6]; /* Default MAC address */ 230 }; 231 232 static struct mtx bridge_list_mtx; 233 eventhandler_tag bridge_detach_cookie = NULL; 234 235 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD; 236 237 uma_zone_t bridge_rtnode_zone; 238 239 static int bridge_clone_create(struct if_clone *, int, caddr_t); 240 static void bridge_clone_destroy(struct ifnet *); 241 242 static int bridge_ioctl(struct ifnet *, u_long, caddr_t); 243 static void bridge_mutecaps(struct bridge_softc *); 244 static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *, 245 int); 246 static void bridge_ifdetach(void *arg __unused, struct ifnet *); 247 static void bridge_init(void *); 248 static void bridge_dummynet(struct mbuf *, struct ifnet *); 249 static void bridge_stop(struct ifnet *, int); 250 static void bridge_start(struct ifnet *); 251 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *); 252 static int bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *, 253 struct rtentry *); 254 static void bridge_enqueue(struct bridge_softc *, struct ifnet *, 255 struct mbuf *); 256 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int); 257 258 static void bridge_forward(struct bridge_softc *, struct bridge_iflist *, 259 struct mbuf *m); 260 261 static void bridge_timer(void *); 262 263 static void bridge_broadcast(struct bridge_softc *, struct ifnet *, 264 struct mbuf *, int); 265 static void bridge_span(struct bridge_softc *, struct mbuf *); 266 267 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *, 268 uint16_t, struct bridge_iflist *, int, uint8_t); 269 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *, 270 uint16_t); 271 static void bridge_rttrim(struct bridge_softc *); 272 static void bridge_rtage(struct bridge_softc *); 273 static void bridge_rtflush(struct bridge_softc *, int); 274 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *, 275 uint16_t); 276 277 static int bridge_rtable_init(struct bridge_softc *); 278 static void bridge_rtable_fini(struct 
bridge_softc *); 279 280 static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *); 281 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *, 282 const uint8_t *, uint16_t); 283 static int bridge_rtnode_insert(struct bridge_softc *, 284 struct bridge_rtnode *); 285 static void bridge_rtnode_destroy(struct bridge_softc *, 286 struct bridge_rtnode *); 287 static void bridge_rtable_expire(struct ifnet *, int); 288 static void bridge_state_change(struct ifnet *, int); 289 290 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *, 291 const char *name); 292 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *, 293 struct ifnet *ifp); 294 static void bridge_delete_member(struct bridge_softc *, 295 struct bridge_iflist *, int); 296 static void bridge_delete_span(struct bridge_softc *, 297 struct bridge_iflist *); 298 299 static int bridge_ioctl_add(struct bridge_softc *, void *); 300 static int bridge_ioctl_del(struct bridge_softc *, void *); 301 static int bridge_ioctl_gifflags(struct bridge_softc *, void *); 302 static int bridge_ioctl_sifflags(struct bridge_softc *, void *); 303 static int bridge_ioctl_scache(struct bridge_softc *, void *); 304 static int bridge_ioctl_gcache(struct bridge_softc *, void *); 305 static int bridge_ioctl_gifs(struct bridge_softc *, void *); 306 static int bridge_ioctl_rts(struct bridge_softc *, void *); 307 static int bridge_ioctl_saddr(struct bridge_softc *, void *); 308 static int bridge_ioctl_sto(struct bridge_softc *, void *); 309 static int bridge_ioctl_gto(struct bridge_softc *, void *); 310 static int bridge_ioctl_daddr(struct bridge_softc *, void *); 311 static int bridge_ioctl_flush(struct bridge_softc *, void *); 312 static int bridge_ioctl_gpri(struct bridge_softc *, void *); 313 static int bridge_ioctl_spri(struct bridge_softc *, void *); 314 static int bridge_ioctl_ght(struct bridge_softc *, void *); 315 static int bridge_ioctl_sht(struct bridge_softc *, void *); 316 static int bridge_ioctl_gfd(struct bridge_softc *, void *); 317 static int bridge_ioctl_sfd(struct bridge_softc *, void *); 318 static int bridge_ioctl_gma(struct bridge_softc *, void *); 319 static int bridge_ioctl_sma(struct bridge_softc *, void *); 320 static int bridge_ioctl_sifprio(struct bridge_softc *, void *); 321 static int bridge_ioctl_sifcost(struct bridge_softc *, void *); 322 static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *); 323 static int bridge_ioctl_addspan(struct bridge_softc *, void *); 324 static int bridge_ioctl_delspan(struct bridge_softc *, void *); 325 static int bridge_ioctl_gbparam(struct bridge_softc *, void *); 326 static int bridge_ioctl_grte(struct bridge_softc *, void *); 327 static int bridge_ioctl_gifsstp(struct bridge_softc *, void *); 328 static int bridge_ioctl_sproto(struct bridge_softc *, void *); 329 static int bridge_ioctl_stxhc(struct bridge_softc *, void *); 330 static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *, 331 int); 332 static int bridge_ip_checkbasic(struct mbuf **mp); 333 #ifdef INET6 334 static int bridge_ip6_checkbasic(struct mbuf **mp); 335 #endif /* INET6 */ 336 static int bridge_fragment(struct ifnet *, struct mbuf *, 337 struct ether_header *, int, struct llc *); 338 339 /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */ 340 #define VLANTAGOF(_m) \ 341 (_m->m_flags & M_VLANTAG) ? 
EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : 1 342 343 static struct bstp_cb_ops bridge_ops = { 344 .bcb_state = bridge_state_change, 345 .bcb_rtage = bridge_rtable_expire 346 }; 347 348 SYSCTL_DECL(_net_link); 349 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge"); 350 351 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */ 352 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */ 353 static int pfil_member = 1; /* run pfil hooks on the member interface */ 354 static int pfil_ipfw = 0; /* layer2 filter with ipfw */ 355 static int pfil_ipfw_arp = 0; /* layer2 filter with ipfw */ 356 static int pfil_local_phys = 0; /* run pfil hooks on the physical interface for 357 locally destined packets */ 358 static int log_stp = 0; /* log STP state changes */ 359 static int bridge_inherit_mac = 0; /* share MAC with first bridge member */ 360 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW, 361 &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled"); 362 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW, 363 &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2"); 364 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW, 365 &pfil_bridge, 0, "Packet filter on the bridge interface"); 366 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW, 367 &pfil_member, 0, "Packet filter on the member interface"); 368 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys, CTLFLAG_RW, 369 &pfil_local_phys, 0, 370 "Packet filter on the physical interface for locally destined packets"); 371 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW, 372 &log_stp, 0, "Log STP state changes"); 373 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac, CTLFLAG_RW, 374 &bridge_inherit_mac, 0, 375 "Inherit MAC address from the first bridge member"); 376 377 struct bridge_control { 378 int (*bc_func)(struct bridge_softc *, void *); 379 int bc_argsize; 380 int bc_flags; 381 }; 382 383 #define BC_F_COPYIN 0x01 /* copy arguments in */ 384 #define BC_F_COPYOUT 0x02 /* copy arguments out */ 385 #define BC_F_SUSER 0x04 /* do super-user check */ 386 387 const struct bridge_control bridge_control_table[] = { 388 { bridge_ioctl_add, sizeof(struct ifbreq), 389 BC_F_COPYIN|BC_F_SUSER }, 390 { bridge_ioctl_del, sizeof(struct ifbreq), 391 BC_F_COPYIN|BC_F_SUSER }, 392 393 { bridge_ioctl_gifflags, sizeof(struct ifbreq), 394 BC_F_COPYIN|BC_F_COPYOUT }, 395 { bridge_ioctl_sifflags, sizeof(struct ifbreq), 396 BC_F_COPYIN|BC_F_SUSER }, 397 398 { bridge_ioctl_scache, sizeof(struct ifbrparam), 399 BC_F_COPYIN|BC_F_SUSER }, 400 { bridge_ioctl_gcache, sizeof(struct ifbrparam), 401 BC_F_COPYOUT }, 402 403 { bridge_ioctl_gifs, sizeof(struct ifbifconf), 404 BC_F_COPYIN|BC_F_COPYOUT }, 405 { bridge_ioctl_rts, sizeof(struct ifbaconf), 406 BC_F_COPYIN|BC_F_COPYOUT }, 407 408 { bridge_ioctl_saddr, sizeof(struct ifbareq), 409 BC_F_COPYIN|BC_F_SUSER }, 410 411 { bridge_ioctl_sto, sizeof(struct ifbrparam), 412 BC_F_COPYIN|BC_F_SUSER }, 413 { bridge_ioctl_gto, sizeof(struct ifbrparam), 414 BC_F_COPYOUT }, 415 416 { bridge_ioctl_daddr, sizeof(struct ifbareq), 417 BC_F_COPYIN|BC_F_SUSER }, 418 419 { bridge_ioctl_flush, sizeof(struct ifbreq), 420 BC_F_COPYIN|BC_F_SUSER }, 421 422 { bridge_ioctl_gpri, sizeof(struct ifbrparam), 423 BC_F_COPYOUT }, 424 { bridge_ioctl_spri, sizeof(struct ifbrparam), 425 BC_F_COPYIN|BC_F_SUSER }, 426 427 { bridge_ioctl_ght, sizeof(struct ifbrparam), 428 BC_F_COPYOUT }, 429 { bridge_ioctl_sht, sizeof(struct ifbrparam), 
430 BC_F_COPYIN|BC_F_SUSER }, 431 432 { bridge_ioctl_gfd, sizeof(struct ifbrparam), 433 BC_F_COPYOUT }, 434 { bridge_ioctl_sfd, sizeof(struct ifbrparam), 435 BC_F_COPYIN|BC_F_SUSER }, 436 437 { bridge_ioctl_gma, sizeof(struct ifbrparam), 438 BC_F_COPYOUT }, 439 { bridge_ioctl_sma, sizeof(struct ifbrparam), 440 BC_F_COPYIN|BC_F_SUSER }, 441 442 { bridge_ioctl_sifprio, sizeof(struct ifbreq), 443 BC_F_COPYIN|BC_F_SUSER }, 444 445 { bridge_ioctl_sifcost, sizeof(struct ifbreq), 446 BC_F_COPYIN|BC_F_SUSER }, 447 448 { bridge_ioctl_addspan, sizeof(struct ifbreq), 449 BC_F_COPYIN|BC_F_SUSER }, 450 { bridge_ioctl_delspan, sizeof(struct ifbreq), 451 BC_F_COPYIN|BC_F_SUSER }, 452 453 { bridge_ioctl_gbparam, sizeof(struct ifbropreq), 454 BC_F_COPYOUT }, 455 456 { bridge_ioctl_grte, sizeof(struct ifbrparam), 457 BC_F_COPYOUT }, 458 459 { bridge_ioctl_gifsstp, sizeof(struct ifbpstpconf), 460 BC_F_COPYIN|BC_F_COPYOUT }, 461 462 { bridge_ioctl_sproto, sizeof(struct ifbrparam), 463 BC_F_COPYIN|BC_F_SUSER }, 464 465 { bridge_ioctl_stxhc, sizeof(struct ifbrparam), 466 BC_F_COPYIN|BC_F_SUSER }, 467 468 { bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq), 469 BC_F_COPYIN|BC_F_SUSER }, 470 471 }; 472 const int bridge_control_table_size = 473 sizeof(bridge_control_table) / sizeof(bridge_control_table[0]); 474 475 LIST_HEAD(, bridge_softc) bridge_list; 476 477 IFC_SIMPLE_DECLARE(bridge, 0); 478 479 static int 480 bridge_modevent(module_t mod, int type, void *data) 481 { 482 483 switch (type) { 484 case MOD_LOAD: 485 mtx_init(&bridge_list_mtx, "if_bridge list", NULL, MTX_DEF); 486 if_clone_attach(&bridge_cloner); 487 bridge_rtnode_zone = uma_zcreate("bridge_rtnode", 488 sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL, 489 UMA_ALIGN_PTR, 0); 490 LIST_INIT(&bridge_list); 491 bridge_input_p = bridge_input; 492 bridge_output_p = bridge_output; 493 bridge_dn_p = bridge_dummynet; 494 bridge_detach_cookie = EVENTHANDLER_REGISTER( 495 ifnet_departure_event, bridge_ifdetach, NULL, 496 EVENTHANDLER_PRI_ANY); 497 break; 498 case MOD_UNLOAD: 499 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 500 bridge_detach_cookie); 501 if_clone_detach(&bridge_cloner); 502 uma_zdestroy(bridge_rtnode_zone); 503 bridge_input_p = NULL; 504 bridge_output_p = NULL; 505 bridge_dn_p = NULL; 506 mtx_destroy(&bridge_list_mtx); 507 break; 508 default: 509 return (EOPNOTSUPP); 510 } 511 return (0); 512 } 513 514 static moduledata_t bridge_mod = { 515 "if_bridge", 516 bridge_modevent, 517 0 518 }; 519 520 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 521 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1); 522 523 /* 524 * handler for net.link.bridge.pfil_ipfw 525 */ 526 static int 527 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS) 528 { 529 int enable = pfil_ipfw; 530 int error; 531 532 error = sysctl_handle_int(oidp, &enable, 0, req); 533 enable = (enable) ? 1 : 0; 534 535 if (enable != pfil_ipfw) { 536 pfil_ipfw = enable; 537 538 /* 539 * Disable pfil so that ipfw doesnt run twice, if the user 540 * really wants both then they can re-enable pfil_bridge and/or 541 * pfil_member. Also allow non-ip packets as ipfw can filter by 542 * layer2 type. 543 */ 544 if (pfil_ipfw) { 545 pfil_onlyip = 0; 546 pfil_bridge = 0; 547 pfil_member = 0; 548 } 549 } 550 551 return (error); 552 } 553 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW, 554 &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW"); 555 556 /* 557 * bridge_clone_create: 558 * 559 * Create a new bridge instance. 
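 *
 *	Each clone starts with a random, locally administered MAC address
 *	(see below) and no members; members and options are configured
 *	afterwards through the SIOC[GS]DRVSPEC ioctls, typically from
 *	ifconfig(8).  Illustrative usage sketch (interface names are
 *	examples only):
 *
 *		ifconfig bridge0 create
 *		ifconfig bridge0 addm em0 addm em1 up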
560 */ 561 static int 562 bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params) 563 { 564 struct bridge_softc *sc, *sc2; 565 struct ifnet *bifp, *ifp; 566 int retry; 567 568 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO); 569 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 570 if (ifp == NULL) { 571 free(sc, M_DEVBUF); 572 return (ENOSPC); 573 } 574 575 BRIDGE_LOCK_INIT(sc); 576 sc->sc_brtmax = BRIDGE_RTABLE_MAX; 577 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT; 578 579 /* Initialize our routing table. */ 580 bridge_rtable_init(sc); 581 582 callout_init_mtx(&sc->sc_brcallout, &sc->sc_mtx, 0); 583 584 LIST_INIT(&sc->sc_iflist); 585 LIST_INIT(&sc->sc_spanlist); 586 587 ifp->if_softc = sc; 588 if_initname(ifp, ifc->ifc_name, unit); 589 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 590 ifp->if_ioctl = bridge_ioctl; 591 ifp->if_start = bridge_start; 592 ifp->if_init = bridge_init; 593 ifp->if_type = IFT_BRIDGE; 594 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 595 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 596 IFQ_SET_READY(&ifp->if_snd); 597 598 /* 599 * Generate a random ethernet address with a locally administered 600 * address. 601 * 602 * Since we are using random ethernet addresses for the bridge, it is 603 * possible that we might have address collisions, so make sure that 604 * this hardware address isn't already in use on another bridge. 605 */ 606 for (retry = 1; retry != 0;) { 607 arc4rand(sc->sc_defaddr, ETHER_ADDR_LEN, 1); 608 sc->sc_defaddr[0] &= ~1; /* clear multicast bit */ 609 sc->sc_defaddr[0] |= 2; /* set the LAA bit */ 610 retry = 0; 611 mtx_lock(&bridge_list_mtx); 612 LIST_FOREACH(sc2, &bridge_list, sc_list) { 613 bifp = sc2->sc_ifp; 614 if (memcmp(sc->sc_defaddr, 615 IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) 616 retry = 1; 617 } 618 mtx_unlock(&bridge_list_mtx); 619 } 620 621 bstp_attach(&sc->sc_stp, &bridge_ops); 622 ether_ifattach(ifp, sc->sc_defaddr); 623 /* Now undo some of the damage... */ 624 ifp->if_baudrate = 0; 625 ifp->if_type = IFT_BRIDGE; 626 627 mtx_lock(&bridge_list_mtx); 628 LIST_INSERT_HEAD(&bridge_list, sc, sc_list); 629 mtx_unlock(&bridge_list_mtx); 630 631 return (0); 632 } 633 634 /* 635 * bridge_clone_destroy: 636 * 637 * Destroy a bridge instance. 638 */ 639 static void 640 bridge_clone_destroy(struct ifnet *ifp) 641 { 642 struct bridge_softc *sc = ifp->if_softc; 643 struct bridge_iflist *bif; 644 645 BRIDGE_LOCK(sc); 646 647 bridge_stop(ifp, 1); 648 ifp->if_flags &= ~IFF_UP; 649 650 while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL) 651 bridge_delete_member(sc, bif, 0); 652 653 while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL) { 654 bridge_delete_span(sc, bif); 655 } 656 657 BRIDGE_UNLOCK(sc); 658 659 callout_drain(&sc->sc_brcallout); 660 661 mtx_lock(&bridge_list_mtx); 662 LIST_REMOVE(sc, sc_list); 663 mtx_unlock(&bridge_list_mtx); 664 665 bstp_detach(&sc->sc_stp); 666 ether_ifdetach(ifp); 667 if_free_type(ifp, IFT_ETHER); 668 669 /* Tear down the routing table. */ 670 bridge_rtable_fini(sc); 671 672 BRIDGE_LOCK_DESTROY(sc); 673 free(sc, M_DEVBUF); 674 } 675 676 /* 677 * bridge_ioctl: 678 * 679 * Handle a control request from the operator. 
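 *
 *	Bridge-specific requests arrive as SIOCGDRVSPEC/SIOCSDRVSPEC with a
 *	struct ifdrv whose ifd_cmd indexes bridge_control_table[]; arguments
 *	are copied in/out according to the entry's BC_F_COPYIN/BC_F_COPYOUT
 *	flags, and BC_F_SUSER entries require PRIV_NET_BRIDGE.  Everything
 *	else (flags, MTU, multicast) is handled here directly or passed on
 *	to ether_ioctl().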
680 */ 681 static int 682 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 683 { 684 struct bridge_softc *sc = ifp->if_softc; 685 struct ifreq *ifr = (struct ifreq *)data; 686 struct bridge_iflist *bif; 687 struct thread *td = curthread; 688 union { 689 struct ifbreq ifbreq; 690 struct ifbifconf ifbifconf; 691 struct ifbareq ifbareq; 692 struct ifbaconf ifbaconf; 693 struct ifbrparam ifbrparam; 694 struct ifbropreq ifbropreq; 695 } args; 696 struct ifdrv *ifd = (struct ifdrv *) data; 697 const struct bridge_control *bc; 698 int error = 0; 699 700 switch (cmd) { 701 702 case SIOCADDMULTI: 703 case SIOCDELMULTI: 704 break; 705 706 case SIOCGDRVSPEC: 707 case SIOCSDRVSPEC: 708 if (ifd->ifd_cmd >= bridge_control_table_size) { 709 error = EINVAL; 710 break; 711 } 712 bc = &bridge_control_table[ifd->ifd_cmd]; 713 714 if (cmd == SIOCGDRVSPEC && 715 (bc->bc_flags & BC_F_COPYOUT) == 0) { 716 error = EINVAL; 717 break; 718 } 719 else if (cmd == SIOCSDRVSPEC && 720 (bc->bc_flags & BC_F_COPYOUT) != 0) { 721 error = EINVAL; 722 break; 723 } 724 725 if (bc->bc_flags & BC_F_SUSER) { 726 error = priv_check(td, PRIV_NET_BRIDGE); 727 if (error) 728 break; 729 } 730 731 if (ifd->ifd_len != bc->bc_argsize || 732 ifd->ifd_len > sizeof(args)) { 733 error = EINVAL; 734 break; 735 } 736 737 bzero(&args, sizeof(args)); 738 if (bc->bc_flags & BC_F_COPYIN) { 739 error = copyin(ifd->ifd_data, &args, ifd->ifd_len); 740 if (error) 741 break; 742 } 743 744 BRIDGE_LOCK(sc); 745 error = (*bc->bc_func)(sc, &args); 746 BRIDGE_UNLOCK(sc); 747 if (error) 748 break; 749 750 if (bc->bc_flags & BC_F_COPYOUT) 751 error = copyout(&args, ifd->ifd_data, ifd->ifd_len); 752 753 break; 754 755 case SIOCSIFFLAGS: 756 if (!(ifp->if_flags & IFF_UP) && 757 (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 758 /* 759 * If interface is marked down and it is running, 760 * then stop and disable it. 761 */ 762 BRIDGE_LOCK(sc); 763 bridge_stop(ifp, 1); 764 BRIDGE_UNLOCK(sc); 765 } else if ((ifp->if_flags & IFF_UP) && 766 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 767 /* 768 * If interface is marked up and it is stopped, then 769 * start it. 770 */ 771 (*ifp->if_init)(sc); 772 } 773 break; 774 775 case SIOCSIFMTU: 776 if (ifr->ifr_mtu < 576) { 777 error = EINVAL; 778 break; 779 } 780 if (LIST_EMPTY(&sc->sc_iflist)) { 781 sc->sc_ifp->if_mtu = ifr->ifr_mtu; 782 break; 783 } 784 BRIDGE_LOCK(sc); 785 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 786 if (bif->bif_ifp->if_mtu != ifr->ifr_mtu) { 787 log(LOG_NOTICE, "%s: invalid MTU: %lu(%s)" 788 " != %d\n", sc->sc_ifp->if_xname, 789 bif->bif_ifp->if_mtu, 790 bif->bif_ifp->if_xname, ifr->ifr_mtu); 791 error = EINVAL; 792 break; 793 } 794 } 795 if (!error) 796 sc->sc_ifp->if_mtu = ifr->ifr_mtu; 797 BRIDGE_UNLOCK(sc); 798 break; 799 default: 800 /* 801 * drop the lock as ether_ioctl() will call bridge_start() and 802 * cause the lock to be recursed. 
		 */
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/*
 * bridge_mutecaps:
 *
 *	Clear or restore unwanted capabilities on the member interface.
 */
static void
bridge_mutecaps(struct bridge_softc *sc)
{
	struct bridge_iflist *bif;
	int enabled, mask;

	/* Initial bitmask of capabilities to test */
	mask = BRIDGE_IFCAPS_MASK;

	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		/* Every member must support it or it is disabled */
		mask &= bif->bif_savedcaps;
	}

	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		enabled = bif->bif_ifp->if_capenable;
		enabled &= ~BRIDGE_IFCAPS_STRIP;
		/* strip off mask bits and enable them again if allowed */
		enabled &= ~BRIDGE_IFCAPS_MASK;
		enabled |= mask;
		bridge_set_ifcap(sc, bif, enabled);
	}
}

static void
bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
{
	struct ifnet *ifp = bif->bif_ifp;
	struct ifreq ifr;
	int error;

	bzero(&ifr, sizeof(ifr));
	ifr.ifr_reqcap = set;

	if (ifp->if_capenable != set) {
		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
		if (error)
			if_printf(sc->sc_ifp,
			    "error setting interface capabilities on %s\n",
			    ifp->if_xname);
	}
}

/*
 * bridge_lookup_member:
 *
 *	Look up a bridge member interface.
 */
static struct bridge_iflist *
bridge_lookup_member(struct bridge_softc *sc, const char *name)
{
	struct bridge_iflist *bif;
	struct ifnet *ifp;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		ifp = bif->bif_ifp;
		if (strcmp(ifp->if_xname, name) == 0)
			return (bif);
	}

	return (NULL);
}

/*
 * bridge_lookup_member_if:
 *
 *	Look up a bridge member interface by ifnet*.
 */
static struct bridge_iflist *
bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
{
	struct bridge_iflist *bif;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		if (bif->bif_ifp == member_ifp)
			return (bif);
	}

	return (NULL);
}

/*
 * bridge_delete_member:
 *
 *	Delete the specified member interface.
 */
static void
bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
    int gone)
{
	struct ifnet *ifs = bif->bif_ifp;
	struct ifnet *fif = NULL;

	BRIDGE_LOCK_ASSERT(sc);

	if (bif->bif_flags & IFBIF_STP)
		bstp_disable(&bif->bif_stp);

	ifs->if_bridge = NULL;
	BRIDGE_XLOCK(sc);
	LIST_REMOVE(bif, bif_next);
	BRIDGE_XDROP(sc);

	/*
	 * If removing the interface that gave the bridge its MAC address, set
	 * the MAC address of the bridge to the address of the next member, or
	 * to its default address if no members are left.
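	 *
	 * This only matters when net.link.bridge.inherit_mac is enabled;
	 * sc_ifaddr tracks which member the current address was copied
	 * from.  For example (illustrative member names): with members
	 * em0 (the donor) and em1, deleting em0 copies em1's address to
	 * the bridge, while deleting the last member falls back to
	 * sc_defaddr.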
	 */
	if (bridge_inherit_mac && sc->sc_ifaddr == ifs) {
		if (LIST_EMPTY(&sc->sc_iflist)) {
			bcopy(sc->sc_defaddr,
			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
			sc->sc_ifaddr = NULL;
		} else {
			fif = LIST_FIRST(&sc->sc_iflist)->bif_ifp;
			bcopy(IF_LLADDR(fif),
			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
			sc->sc_ifaddr = fif;
		}
		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
	}

	bridge_mutecaps(sc);	/* recalculate now that this interface is removed */
	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
	KASSERT(bif->bif_addrcnt == 0,
	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));

	BRIDGE_UNLOCK(sc);
	if (!gone) {
		switch (ifs->if_type) {
		case IFT_ETHER:
		case IFT_L2VLAN:
			/*
			 * Take the interface out of promiscuous mode.
			 */
			(void) ifpromisc(ifs, 0);
			break;

		case IFT_GIF:
			break;

		default:
#ifdef DIAGNOSTIC
			panic("bridge_delete_member: impossible");
#endif
			break;
		}
		/* re-enable any interface capabilities */
		bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
	}
	bstp_destroy(&bif->bif_stp);	/* prepare to free */
	BRIDGE_LOCK(sc);
	free(bif, M_DEVBUF);
}

/*
 * bridge_delete_span:
 *
 *	Delete the specified span interface.
 */
static void
bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
{
	BRIDGE_LOCK_ASSERT(sc);

	KASSERT(bif->bif_ifp->if_bridge == NULL,
	    ("%s: not a span interface", __func__));

	LIST_REMOVE(bif, bif_next);
	free(bif, M_DEVBUF);
}

static int
bridge_ioctl_add(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif = NULL;
	struct ifnet *ifs;
	int error = 0;

	ifs = ifunit(req->ifbr_ifsname);
	if (ifs == NULL)
		return (ENOENT);
	if (ifs->if_ioctl == NULL)	/* must be supported */
		return (EINVAL);

	/* If it's in the span list, it can't be a member. */
	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
		if (ifs == bif->bif_ifp)
			return (EBUSY);

	if (ifs->if_bridge == sc)
		return (EEXIST);

	if (ifs->if_bridge != NULL)
		return (EBUSY);

	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (bif == NULL)
		return (ENOMEM);

	bif->bif_ifp = ifs;
	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
	bif->bif_savedcaps = ifs->if_capenable;

	switch (ifs->if_type) {
	case IFT_ETHER:
	case IFT_L2VLAN:
	case IFT_GIF:
		/* permitted interface types */
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Allow the first Ethernet member to define the MTU */
	if (LIST_EMPTY(&sc->sc_iflist))
		sc->sc_ifp->if_mtu = ifs->if_mtu;
	else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
		if_printf(sc->sc_ifp, "invalid MTU: %lu(%s) != %lu\n",
		    ifs->if_mtu, ifs->if_xname, sc->sc_ifp->if_mtu);
		error = EINVAL;
		goto out;
	}

	/*
	 * Assign the interface's MAC address to the bridge if it's the first
	 * member and the MAC address of the bridge has not been changed from
	 * the default randomly generated one.
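	 *
	 * As with deletion above, this is gated by the
	 * net.link.bridge.inherit_mac sysctl (off by default); sc_ifaddr
	 * remembers the donor member so bridge_delete_member() can later
	 * pick a replacement or restore sc_defaddr.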
1051 */ 1052 if (bridge_inherit_mac && LIST_EMPTY(&sc->sc_iflist) && 1053 !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN)) { 1054 bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); 1055 sc->sc_ifaddr = ifs; 1056 EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp); 1057 } 1058 1059 ifs->if_bridge = sc; 1060 bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp); 1061 /* 1062 * XXX: XLOCK HERE!?! 1063 * 1064 * NOTE: insert_***HEAD*** should be safe for the traversals. 1065 */ 1066 LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next); 1067 1068 /* Set interface capabilities to the intersection set of all members */ 1069 bridge_mutecaps(sc); 1070 1071 switch (ifs->if_type) { 1072 case IFT_ETHER: 1073 case IFT_L2VLAN: 1074 /* 1075 * Place the interface into promiscuous mode. 1076 */ 1077 BRIDGE_UNLOCK(sc); 1078 error = ifpromisc(ifs, 1); 1079 BRIDGE_LOCK(sc); 1080 break; 1081 } 1082 if (error) 1083 bridge_delete_member(sc, bif, 0); 1084 out: 1085 if (error) { 1086 if (bif != NULL) 1087 free(bif, M_DEVBUF); 1088 } 1089 return (error); 1090 } 1091 1092 static int 1093 bridge_ioctl_del(struct bridge_softc *sc, void *arg) 1094 { 1095 struct ifbreq *req = arg; 1096 struct bridge_iflist *bif; 1097 1098 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1099 if (bif == NULL) 1100 return (ENOENT); 1101 1102 bridge_delete_member(sc, bif, 0); 1103 1104 return (0); 1105 } 1106 1107 static int 1108 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg) 1109 { 1110 struct ifbreq *req = arg; 1111 struct bridge_iflist *bif; 1112 struct bstp_port *bp; 1113 1114 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1115 if (bif == NULL) 1116 return (ENOENT); 1117 1118 bp = &bif->bif_stp; 1119 req->ifbr_ifsflags = bif->bif_flags; 1120 req->ifbr_state = bp->bp_state; 1121 req->ifbr_priority = bp->bp_priority; 1122 req->ifbr_path_cost = bp->bp_path_cost; 1123 req->ifbr_portno = bif->bif_ifp->if_index & 0xfff; 1124 req->ifbr_proto = bp->bp_protover; 1125 req->ifbr_role = bp->bp_role; 1126 req->ifbr_stpflags = bp->bp_flags; 1127 req->ifbr_addrcnt = bif->bif_addrcnt; 1128 req->ifbr_addrmax = bif->bif_addrmax; 1129 req->ifbr_addrexceeded = bif->bif_addrexceeded; 1130 1131 /* Copy STP state options as flags */ 1132 if (bp->bp_operedge) 1133 req->ifbr_ifsflags |= IFBIF_BSTP_EDGE; 1134 if (bp->bp_flags & BSTP_PORT_AUTOEDGE) 1135 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE; 1136 if (bp->bp_ptp_link) 1137 req->ifbr_ifsflags |= IFBIF_BSTP_PTP; 1138 if (bp->bp_flags & BSTP_PORT_AUTOPTP) 1139 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP; 1140 if (bp->bp_flags & BSTP_PORT_ADMEDGE) 1141 req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE; 1142 if (bp->bp_flags & BSTP_PORT_ADMCOST) 1143 req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST; 1144 return (0); 1145 } 1146 1147 static int 1148 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg) 1149 { 1150 struct ifbreq *req = arg; 1151 struct bridge_iflist *bif; 1152 struct bstp_port *bp; 1153 int error; 1154 1155 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1156 if (bif == NULL) 1157 return (ENOENT); 1158 bp = &bif->bif_stp; 1159 1160 if (req->ifbr_ifsflags & IFBIF_SPAN) 1161 /* SPAN is readonly */ 1162 return (EINVAL); 1163 1164 if (req->ifbr_ifsflags & IFBIF_STP) { 1165 if ((bif->bif_flags & IFBIF_STP) == 0) { 1166 error = bstp_enable(&bif->bif_stp); 1167 if (error) 1168 return (error); 1169 } 1170 } else { 1171 if ((bif->bif_flags & IFBIF_STP) != 0) 1172 bstp_disable(&bif->bif_stp); 1173 } 1174 1175 /* Pass on STP flags */ 1176 bstp_set_edge(bp, req->ifbr_ifsflags & 
IFBIF_BSTP_EDGE ? 1 : 0); 1177 bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0); 1178 bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0); 1179 bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0); 1180 1181 /* Save the bits relating to the bridge */ 1182 bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK; 1183 1184 return (0); 1185 } 1186 1187 static int 1188 bridge_ioctl_scache(struct bridge_softc *sc, void *arg) 1189 { 1190 struct ifbrparam *param = arg; 1191 1192 sc->sc_brtmax = param->ifbrp_csize; 1193 bridge_rttrim(sc); 1194 1195 return (0); 1196 } 1197 1198 static int 1199 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg) 1200 { 1201 struct ifbrparam *param = arg; 1202 1203 param->ifbrp_csize = sc->sc_brtmax; 1204 1205 return (0); 1206 } 1207 1208 static int 1209 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg) 1210 { 1211 struct ifbifconf *bifc = arg; 1212 struct bridge_iflist *bif; 1213 struct ifbreq breq; 1214 char *buf, *outbuf; 1215 int count, buflen, len, error = 0; 1216 1217 count = 0; 1218 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) 1219 count++; 1220 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1221 count++; 1222 1223 buflen = sizeof(breq) * count; 1224 if (bifc->ifbic_len == 0) { 1225 bifc->ifbic_len = buflen; 1226 return (0); 1227 } 1228 BRIDGE_UNLOCK(sc); 1229 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1230 BRIDGE_LOCK(sc); 1231 1232 count = 0; 1233 buf = outbuf; 1234 len = min(bifc->ifbic_len, buflen); 1235 bzero(&breq, sizeof(breq)); 1236 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1237 if (len < sizeof(breq)) 1238 break; 1239 1240 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname, 1241 sizeof(breq.ifbr_ifsname)); 1242 /* Fill in the ifbreq structure */ 1243 error = bridge_ioctl_gifflags(sc, &breq); 1244 if (error) 1245 break; 1246 memcpy(buf, &breq, sizeof(breq)); 1247 count++; 1248 buf += sizeof(breq); 1249 len -= sizeof(breq); 1250 } 1251 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) { 1252 if (len < sizeof(breq)) 1253 break; 1254 1255 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname, 1256 sizeof(breq.ifbr_ifsname)); 1257 breq.ifbr_ifsflags = bif->bif_flags; 1258 breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff; 1259 memcpy(buf, &breq, sizeof(breq)); 1260 count++; 1261 buf += sizeof(breq); 1262 len -= sizeof(breq); 1263 } 1264 1265 BRIDGE_UNLOCK(sc); 1266 bifc->ifbic_len = sizeof(breq) * count; 1267 error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); 1268 BRIDGE_LOCK(sc); 1269 free(outbuf, M_TEMP); 1270 return (error); 1271 } 1272 1273 static int 1274 bridge_ioctl_rts(struct bridge_softc *sc, void *arg) 1275 { 1276 struct ifbaconf *bac = arg; 1277 struct bridge_rtnode *brt; 1278 struct ifbareq bareq; 1279 char *buf, *outbuf; 1280 int count, buflen, len, error = 0; 1281 1282 if (bac->ifbac_len == 0) 1283 return (0); 1284 1285 count = 0; 1286 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) 1287 count++; 1288 buflen = sizeof(bareq) * count; 1289 1290 BRIDGE_UNLOCK(sc); 1291 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1292 BRIDGE_LOCK(sc); 1293 1294 count = 0; 1295 buf = outbuf; 1296 len = min(bac->ifbac_len, buflen); 1297 bzero(&bareq, sizeof(bareq)); 1298 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { 1299 if (len < sizeof(bareq)) 1300 goto out; 1301 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname, 1302 sizeof(bareq.ifba_ifsname)); 1303 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr)); 1304 bareq.ifba_vlan = brt->brt_vlan; 1305 if ((brt->brt_flags & IFBAF_TYPEMASK) == 
IFBAF_DYNAMIC && 1306 time_uptime < brt->brt_expire) 1307 bareq.ifba_expire = brt->brt_expire - time_uptime; 1308 else 1309 bareq.ifba_expire = 0; 1310 bareq.ifba_flags = brt->brt_flags; 1311 1312 memcpy(buf, &bareq, sizeof(bareq)); 1313 count++; 1314 buf += sizeof(bareq); 1315 len -= sizeof(bareq); 1316 } 1317 out: 1318 BRIDGE_UNLOCK(sc); 1319 bac->ifbac_len = sizeof(bareq) * count; 1320 error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); 1321 BRIDGE_LOCK(sc); 1322 free(outbuf, M_TEMP); 1323 return (error); 1324 } 1325 1326 static int 1327 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg) 1328 { 1329 struct ifbareq *req = arg; 1330 struct bridge_iflist *bif; 1331 int error; 1332 1333 bif = bridge_lookup_member(sc, req->ifba_ifsname); 1334 if (bif == NULL) 1335 return (ENOENT); 1336 1337 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1, 1338 req->ifba_flags); 1339 1340 return (error); 1341 } 1342 1343 static int 1344 bridge_ioctl_sto(struct bridge_softc *sc, void *arg) 1345 { 1346 struct ifbrparam *param = arg; 1347 1348 sc->sc_brttimeout = param->ifbrp_ctime; 1349 return (0); 1350 } 1351 1352 static int 1353 bridge_ioctl_gto(struct bridge_softc *sc, void *arg) 1354 { 1355 struct ifbrparam *param = arg; 1356 1357 param->ifbrp_ctime = sc->sc_brttimeout; 1358 return (0); 1359 } 1360 1361 static int 1362 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg) 1363 { 1364 struct ifbareq *req = arg; 1365 1366 return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan)); 1367 } 1368 1369 static int 1370 bridge_ioctl_flush(struct bridge_softc *sc, void *arg) 1371 { 1372 struct ifbreq *req = arg; 1373 1374 bridge_rtflush(sc, req->ifbr_ifsflags); 1375 return (0); 1376 } 1377 1378 static int 1379 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg) 1380 { 1381 struct ifbrparam *param = arg; 1382 struct bstp_state *bs = &sc->sc_stp; 1383 1384 param->ifbrp_prio = bs->bs_bridge_priority; 1385 return (0); 1386 } 1387 1388 static int 1389 bridge_ioctl_spri(struct bridge_softc *sc, void *arg) 1390 { 1391 struct ifbrparam *param = arg; 1392 1393 return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio)); 1394 } 1395 1396 static int 1397 bridge_ioctl_ght(struct bridge_softc *sc, void *arg) 1398 { 1399 struct ifbrparam *param = arg; 1400 struct bstp_state *bs = &sc->sc_stp; 1401 1402 param->ifbrp_hellotime = bs->bs_bridge_htime >> 8; 1403 return (0); 1404 } 1405 1406 static int 1407 bridge_ioctl_sht(struct bridge_softc *sc, void *arg) 1408 { 1409 struct ifbrparam *param = arg; 1410 1411 return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime)); 1412 } 1413 1414 static int 1415 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg) 1416 { 1417 struct ifbrparam *param = arg; 1418 struct bstp_state *bs = &sc->sc_stp; 1419 1420 param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8; 1421 return (0); 1422 } 1423 1424 static int 1425 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg) 1426 { 1427 struct ifbrparam *param = arg; 1428 1429 return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay)); 1430 } 1431 1432 static int 1433 bridge_ioctl_gma(struct bridge_softc *sc, void *arg) 1434 { 1435 struct ifbrparam *param = arg; 1436 struct bstp_state *bs = &sc->sc_stp; 1437 1438 param->ifbrp_maxage = bs->bs_bridge_max_age >> 8; 1439 return (0); 1440 } 1441 1442 static int 1443 bridge_ioctl_sma(struct bridge_softc *sc, void *arg) 1444 { 1445 struct ifbrparam *param = arg; 1446 1447 return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage)); 1448 } 1449 1450 static int 1451 
bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg) 1452 { 1453 struct ifbreq *req = arg; 1454 struct bridge_iflist *bif; 1455 1456 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1457 if (bif == NULL) 1458 return (ENOENT); 1459 1460 return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority)); 1461 } 1462 1463 static int 1464 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg) 1465 { 1466 struct ifbreq *req = arg; 1467 struct bridge_iflist *bif; 1468 1469 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1470 if (bif == NULL) 1471 return (ENOENT); 1472 1473 return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost)); 1474 } 1475 1476 static int 1477 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg) 1478 { 1479 struct ifbreq *req = arg; 1480 struct bridge_iflist *bif; 1481 1482 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1483 if (bif == NULL) 1484 return (ENOENT); 1485 1486 bif->bif_addrmax = req->ifbr_addrmax; 1487 return (0); 1488 } 1489 1490 static int 1491 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg) 1492 { 1493 struct ifbreq *req = arg; 1494 struct bridge_iflist *bif = NULL; 1495 struct ifnet *ifs; 1496 1497 ifs = ifunit(req->ifbr_ifsname); 1498 if (ifs == NULL) 1499 return (ENOENT); 1500 1501 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1502 if (ifs == bif->bif_ifp) 1503 return (EBUSY); 1504 1505 if (ifs->if_bridge != NULL) 1506 return (EBUSY); 1507 1508 switch (ifs->if_type) { 1509 case IFT_ETHER: 1510 case IFT_GIF: 1511 case IFT_L2VLAN: 1512 break; 1513 default: 1514 return (EINVAL); 1515 } 1516 1517 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO); 1518 if (bif == NULL) 1519 return (ENOMEM); 1520 1521 bif->bif_ifp = ifs; 1522 bif->bif_flags = IFBIF_SPAN; 1523 1524 LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next); 1525 1526 return (0); 1527 } 1528 1529 static int 1530 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg) 1531 { 1532 struct ifbreq *req = arg; 1533 struct bridge_iflist *bif; 1534 struct ifnet *ifs; 1535 1536 ifs = ifunit(req->ifbr_ifsname); 1537 if (ifs == NULL) 1538 return (ENOENT); 1539 1540 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1541 if (ifs == bif->bif_ifp) 1542 break; 1543 1544 if (bif == NULL) 1545 return (ENOENT); 1546 1547 bridge_delete_span(sc, bif); 1548 1549 return (0); 1550 } 1551 1552 static int 1553 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg) 1554 { 1555 struct ifbropreq *req = arg; 1556 struct bstp_state *bs = &sc->sc_stp; 1557 struct bstp_port *root_port; 1558 1559 req->ifbop_maxage = bs->bs_bridge_max_age >> 8; 1560 req->ifbop_hellotime = bs->bs_bridge_htime >> 8; 1561 req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8; 1562 1563 root_port = bs->bs_root_port; 1564 if (root_port == NULL) 1565 req->ifbop_root_port = 0; 1566 else 1567 req->ifbop_root_port = root_port->bp_ifp->if_index; 1568 1569 req->ifbop_holdcount = bs->bs_txholdcount; 1570 req->ifbop_priority = bs->bs_bridge_priority; 1571 req->ifbop_protocol = bs->bs_protover; 1572 req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; 1573 req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; 1574 req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; 1575 req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; 1576 req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; 1577 req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; 1578 1579 return (0); 1580 } 1581 1582 static int 1583 bridge_ioctl_grte(struct bridge_softc *sc, void *arg) 1584 { 1585 struct ifbrparam *param = arg; 1586 1587 
param->ifbrp_cexceeded = sc->sc_brtexceeded; 1588 return (0); 1589 } 1590 1591 static int 1592 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg) 1593 { 1594 struct ifbpstpconf *bifstp = arg; 1595 struct bridge_iflist *bif; 1596 struct bstp_port *bp; 1597 struct ifbpstpreq bpreq; 1598 char *buf, *outbuf; 1599 int count, buflen, len, error = 0; 1600 1601 count = 0; 1602 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1603 if ((bif->bif_flags & IFBIF_STP) != 0) 1604 count++; 1605 } 1606 1607 buflen = sizeof(bpreq) * count; 1608 if (bifstp->ifbpstp_len == 0) { 1609 bifstp->ifbpstp_len = buflen; 1610 return (0); 1611 } 1612 1613 BRIDGE_UNLOCK(sc); 1614 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1615 BRIDGE_LOCK(sc); 1616 1617 count = 0; 1618 buf = outbuf; 1619 len = min(bifstp->ifbpstp_len, buflen); 1620 bzero(&bpreq, sizeof(bpreq)); 1621 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1622 if (len < sizeof(bpreq)) 1623 break; 1624 1625 if ((bif->bif_flags & IFBIF_STP) == 0) 1626 continue; 1627 1628 bp = &bif->bif_stp; 1629 bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff; 1630 bpreq.ifbp_fwd_trans = bp->bp_forward_transitions; 1631 bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost; 1632 bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id; 1633 bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; 1634 bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id; 1635 1636 memcpy(buf, &bpreq, sizeof(bpreq)); 1637 count++; 1638 buf += sizeof(bpreq); 1639 len -= sizeof(bpreq); 1640 } 1641 1642 BRIDGE_UNLOCK(sc); 1643 bifstp->ifbpstp_len = sizeof(bpreq) * count; 1644 error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); 1645 BRIDGE_LOCK(sc); 1646 free(outbuf, M_TEMP); 1647 return (error); 1648 } 1649 1650 static int 1651 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg) 1652 { 1653 struct ifbrparam *param = arg; 1654 1655 return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto)); 1656 } 1657 1658 static int 1659 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg) 1660 { 1661 struct ifbrparam *param = arg; 1662 1663 return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc)); 1664 } 1665 1666 /* 1667 * bridge_ifdetach: 1668 * 1669 * Detach an interface from a bridge. Called when a member 1670 * interface is detaching. 1671 */ 1672 static void 1673 bridge_ifdetach(void *arg __unused, struct ifnet *ifp) 1674 { 1675 struct bridge_softc *sc = ifp->if_bridge; 1676 struct bridge_iflist *bif; 1677 1678 /* Check if the interface is a bridge member */ 1679 if (sc != NULL) { 1680 BRIDGE_LOCK(sc); 1681 1682 bif = bridge_lookup_member_if(sc, ifp); 1683 if (bif != NULL) 1684 bridge_delete_member(sc, bif, 1); 1685 1686 BRIDGE_UNLOCK(sc); 1687 return; 1688 } 1689 1690 /* Check if the interface is a span port */ 1691 mtx_lock(&bridge_list_mtx); 1692 LIST_FOREACH(sc, &bridge_list, sc_list) { 1693 BRIDGE_LOCK(sc); 1694 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1695 if (ifp == bif->bif_ifp) { 1696 bridge_delete_span(sc, bif); 1697 break; 1698 } 1699 1700 BRIDGE_UNLOCK(sc); 1701 } 1702 mtx_unlock(&bridge_list_mtx); 1703 } 1704 1705 /* 1706 * bridge_init: 1707 * 1708 * Initialize a bridge interface. 
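 *
 *	Marks the interface running, arms sc_brcallout so bridge_timer()
 *	runs every bridge_rtable_prune_period seconds (5 minutes by
 *	default) to walk the route list and expire stale dynamic entries,
 *	and starts the spanning tree machinery via bstp_init().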
1709 */ 1710 static void 1711 bridge_init(void *xsc) 1712 { 1713 struct bridge_softc *sc = (struct bridge_softc *)xsc; 1714 struct ifnet *ifp = sc->sc_ifp; 1715 1716 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1717 return; 1718 1719 BRIDGE_LOCK(sc); 1720 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz, 1721 bridge_timer, sc); 1722 1723 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1724 bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */ 1725 1726 BRIDGE_UNLOCK(sc); 1727 } 1728 1729 /* 1730 * bridge_stop: 1731 * 1732 * Stop the bridge interface. 1733 */ 1734 static void 1735 bridge_stop(struct ifnet *ifp, int disable) 1736 { 1737 struct bridge_softc *sc = ifp->if_softc; 1738 1739 BRIDGE_LOCK_ASSERT(sc); 1740 1741 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1742 return; 1743 1744 callout_stop(&sc->sc_brcallout); 1745 bstp_stop(&sc->sc_stp); 1746 1747 bridge_rtflush(sc, IFBF_FLUSHDYN); 1748 1749 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1750 } 1751 1752 /* 1753 * bridge_enqueue: 1754 * 1755 * Enqueue a packet on a bridge member interface. 1756 * 1757 */ 1758 static void 1759 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m) 1760 { 1761 int len, err = 0; 1762 short mflags; 1763 struct mbuf *m0; 1764 1765 len = m->m_pkthdr.len; 1766 mflags = m->m_flags; 1767 1768 /* We may be sending a fragment so traverse the mbuf */ 1769 for (; m; m = m0) { 1770 m0 = m->m_nextpkt; 1771 m->m_nextpkt = NULL; 1772 1773 /* 1774 * If underlying interface can not do VLAN tag insertion itself 1775 * then attach a packet tag that holds it. 1776 */ 1777 if ((m->m_flags & M_VLANTAG) && 1778 (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) { 1779 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 1780 if (m == NULL) { 1781 if_printf(dst_ifp, 1782 "unable to prepend VLAN header\n"); 1783 dst_ifp->if_oerrors++; 1784 continue; 1785 } 1786 m->m_flags &= ~M_VLANTAG; 1787 } 1788 1789 if (err == 0) 1790 dst_ifp->if_transmit(dst_ifp, m); 1791 } 1792 1793 if (err == 0) { 1794 sc->sc_ifp->if_opackets++; 1795 sc->sc_ifp->if_obytes += len; 1796 if (mflags & M_MCAST) 1797 sc->sc_ifp->if_omcasts++; 1798 } 1799 } 1800 1801 /* 1802 * bridge_dummynet: 1803 * 1804 * Receive a queued packet from dummynet and pass it on to the output 1805 * interface. 1806 * 1807 * The mbuf has the Ethernet header already attached. 1808 */ 1809 static void 1810 bridge_dummynet(struct mbuf *m, struct ifnet *ifp) 1811 { 1812 struct bridge_softc *sc; 1813 1814 sc = ifp->if_bridge; 1815 1816 /* 1817 * The packet didnt originate from a member interface. This should only 1818 * ever happen if a member interface is removed while packets are 1819 * queued for it. 1820 */ 1821 if (sc == NULL) { 1822 m_freem(m); 1823 return; 1824 } 1825 1826 if (PFIL_HOOKED(&V_inet_pfil_hook) 1827 #ifdef INET6 1828 || PFIL_HOOKED(&V_inet6_pfil_hook) 1829 #endif 1830 ) { 1831 if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0) 1832 return; 1833 if (m == NULL) 1834 return; 1835 } 1836 1837 bridge_enqueue(sc, ifp, m); 1838 } 1839 1840 /* 1841 * bridge_output: 1842 * 1843 * Send output from a bridge member interface. This 1844 * performs the bridging function for locally originated 1845 * packets. 1846 * 1847 * The mbuf has the Ethernet header already attached. We must 1848 * enqueue or free the mbuf before returning. 
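 *
 *	Roughly: if the bridge itself is down, the frame is sent out the
 *	original interface unchanged; multicast frames and unicast frames
 *	with no forwarding-table entry are copied to every forwarding
 *	member (the last member consumes the original mbuf); a known
 *	unicast destination is enqueued on the single learned member.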
1849 */ 1850 static int 1851 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa, 1852 struct rtentry *rt) 1853 { 1854 struct ether_header *eh; 1855 struct ifnet *dst_if; 1856 struct bridge_softc *sc; 1857 uint16_t vlan; 1858 1859 if (m->m_len < ETHER_HDR_LEN) { 1860 m = m_pullup(m, ETHER_HDR_LEN); 1861 if (m == NULL) 1862 return (0); 1863 } 1864 1865 eh = mtod(m, struct ether_header *); 1866 sc = ifp->if_bridge; 1867 vlan = VLANTAGOF(m); 1868 1869 BRIDGE_LOCK(sc); 1870 1871 /* 1872 * If bridge is down, but the original output interface is up, 1873 * go ahead and send out that interface. Otherwise, the packet 1874 * is dropped below. 1875 */ 1876 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1877 dst_if = ifp; 1878 goto sendunicast; 1879 } 1880 1881 /* 1882 * If the packet is a multicast, or we don't know a better way to 1883 * get there, send to all interfaces. 1884 */ 1885 if (ETHER_IS_MULTICAST(eh->ether_dhost)) 1886 dst_if = NULL; 1887 else 1888 dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan); 1889 if (dst_if == NULL) { 1890 struct bridge_iflist *bif; 1891 struct mbuf *mc; 1892 int error = 0, used = 0; 1893 1894 bridge_span(sc, m); 1895 1896 BRIDGE_LOCK2REF(sc, error); 1897 if (error) { 1898 m_freem(m); 1899 return (0); 1900 } 1901 1902 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1903 dst_if = bif->bif_ifp; 1904 1905 if (dst_if->if_type == IFT_GIF) 1906 continue; 1907 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 1908 continue; 1909 1910 /* 1911 * If this is not the original output interface, 1912 * and the interface is participating in spanning 1913 * tree, make sure the port is in a state that 1914 * allows forwarding. 1915 */ 1916 if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) && 1917 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 1918 continue; 1919 1920 if (LIST_NEXT(bif, bif_next) == NULL) { 1921 used = 1; 1922 mc = m; 1923 } else { 1924 mc = m_copypacket(m, M_DONTWAIT); 1925 if (mc == NULL) { 1926 sc->sc_ifp->if_oerrors++; 1927 continue; 1928 } 1929 } 1930 1931 bridge_enqueue(sc, dst_if, mc); 1932 } 1933 if (used == 0) 1934 m_freem(m); 1935 BRIDGE_UNREF(sc); 1936 return (0); 1937 } 1938 1939 sendunicast: 1940 /* 1941 * XXX Spanning tree consideration here? 1942 */ 1943 1944 bridge_span(sc, m); 1945 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1946 m_freem(m); 1947 BRIDGE_UNLOCK(sc); 1948 return (0); 1949 } 1950 1951 BRIDGE_UNLOCK(sc); 1952 bridge_enqueue(sc, dst_if, m); 1953 return (0); 1954 } 1955 1956 /* 1957 * bridge_start: 1958 * 1959 * Start output on a bridge. 1960 * 1961 */ 1962 static void 1963 bridge_start(struct ifnet *ifp) 1964 { 1965 struct bridge_softc *sc; 1966 struct mbuf *m; 1967 struct ether_header *eh; 1968 struct ifnet *dst_if; 1969 1970 sc = ifp->if_softc; 1971 1972 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1973 for (;;) { 1974 IFQ_DEQUEUE(&ifp->if_snd, m); 1975 if (m == 0) 1976 break; 1977 ETHER_BPF_MTAP(ifp, m); 1978 1979 eh = mtod(m, struct ether_header *); 1980 dst_if = NULL; 1981 1982 BRIDGE_LOCK(sc); 1983 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) { 1984 dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1); 1985 } 1986 1987 if (dst_if == NULL) 1988 bridge_broadcast(sc, ifp, m, 0); 1989 else { 1990 BRIDGE_UNLOCK(sc); 1991 bridge_enqueue(sc, dst_if, m); 1992 } 1993 } 1994 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1995 } 1996 1997 /* 1998 * bridge_forward: 1999 * 2000 * The forwarding function of the bridge. 2001 * 2002 * NOTE: Releases the lock on return. 
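 *
 *	Sketch of the path a frame takes here: drop it if the source port
 *	is discarding or still learning (STP); learn the source address on
 *	IFBIF_LEARNING ports, dropping over-limit sources when bif_addrmax
 *	is set; never forward the reserved 802.1D group addresses
 *	01:80:C2:00:00:00-0F; then run the pfil(9) hooks and either unicast
 *	to the learned member or hand the frame to bridge_broadcast().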
2003 */ 2004 static void 2005 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif, 2006 struct mbuf *m) 2007 { 2008 struct bridge_iflist *dbif; 2009 struct ifnet *src_if, *dst_if, *ifp; 2010 struct ether_header *eh; 2011 uint16_t vlan; 2012 uint8_t *dst; 2013 int error; 2014 2015 src_if = m->m_pkthdr.rcvif; 2016 ifp = sc->sc_ifp; 2017 2018 ifp->if_ipackets++; 2019 ifp->if_ibytes += m->m_pkthdr.len; 2020 vlan = VLANTAGOF(m); 2021 2022 if ((sbif->bif_flags & IFBIF_STP) && 2023 sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2024 goto drop; 2025 2026 eh = mtod(m, struct ether_header *); 2027 dst = eh->ether_dhost; 2028 2029 /* If the interface is learning, record the address. */ 2030 if (sbif->bif_flags & IFBIF_LEARNING) { 2031 error = bridge_rtupdate(sc, eh->ether_shost, vlan, 2032 sbif, 0, IFBAF_DYNAMIC); 2033 /* 2034 * If the interface has addresses limits then deny any source 2035 * that is not in the cache. 2036 */ 2037 if (error && sbif->bif_addrmax) 2038 goto drop; 2039 } 2040 2041 if ((sbif->bif_flags & IFBIF_STP) != 0 && 2042 sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING) 2043 goto drop; 2044 2045 /* 2046 * At this point, the port either doesn't participate 2047 * in spanning tree or it is in the forwarding state. 2048 */ 2049 2050 /* 2051 * If the packet is unicast, destined for someone on 2052 * "this" side of the bridge, drop it. 2053 */ 2054 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) { 2055 dst_if = bridge_rtlookup(sc, dst, vlan); 2056 if (src_if == dst_if) 2057 goto drop; 2058 } else { 2059 /* 2060 * Check if its a reserved multicast address, any address 2061 * listed in 802.1D section 7.12.6 may not be forwarded by the 2062 * bridge. 2063 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F 2064 */ 2065 if (dst[0] == 0x01 && dst[1] == 0x80 && 2066 dst[2] == 0xc2 && dst[3] == 0x00 && 2067 dst[4] == 0x00 && dst[5] <= 0x0f) 2068 goto drop; 2069 2070 /* ...forward it to all interfaces. */ 2071 ifp->if_imcasts++; 2072 dst_if = NULL; 2073 } 2074 2075 /* 2076 * If we have a destination interface which is a member of our bridge, 2077 * OR this is a unicast packet, push it through the bpf(4) machinery. 2078 * For broadcast or multicast packets, don't bother because it will 2079 * be reinjected into ether_input. We do this before we pass the packets 2080 * through the pfil(9) framework, as it is possible that pfil(9) will 2081 * drop the packet, or possibly modify it, making it difficult to debug 2082 * firewall issues on the bridge. 2083 */ 2084 if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0) 2085 ETHER_BPF_MTAP(ifp, m); 2086 2087 /* run the packet filter */ 2088 if (PFIL_HOOKED(&V_inet_pfil_hook) 2089 #ifdef INET6 2090 || PFIL_HOOKED(&V_inet6_pfil_hook) 2091 #endif 2092 ) { 2093 BRIDGE_UNLOCK(sc); 2094 if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0) 2095 return; 2096 if (m == NULL) 2097 return; 2098 BRIDGE_LOCK(sc); 2099 } 2100 2101 if (dst_if == NULL) { 2102 bridge_broadcast(sc, src_if, m, 1); 2103 return; 2104 } 2105 2106 /* 2107 * At this point, we're dealing with a unicast frame 2108 * going to a different interface. 2109 */ 2110 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2111 goto drop; 2112 2113 dbif = bridge_lookup_member_if(sc, dst_if); 2114 if (dbif == NULL) 2115 /* Not a member of the bridge (anymore?) 
*/ 2116 goto drop; 2117 2118 /* Private segments can not talk to each other */ 2119 if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE) 2120 goto drop; 2121 2122 if ((dbif->bif_flags & IFBIF_STP) && 2123 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2124 goto drop; 2125 2126 BRIDGE_UNLOCK(sc); 2127 2128 if (PFIL_HOOKED(&V_inet_pfil_hook) 2129 #ifdef INET6 2130 || PFIL_HOOKED(&V_inet6_pfil_hook) 2131 #endif 2132 ) { 2133 if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0) 2134 return; 2135 if (m == NULL) 2136 return; 2137 } 2138 2139 bridge_enqueue(sc, dst_if, m); 2140 return; 2141 2142 drop: 2143 BRIDGE_UNLOCK(sc); 2144 m_freem(m); 2145 } 2146 2147 /* 2148 * bridge_input: 2149 * 2150 * Receive input from a member interface. Queue the packet for 2151 * bridging if it is not for us. 2152 */ 2153 static struct mbuf * 2154 bridge_input(struct ifnet *ifp, struct mbuf *m) 2155 { 2156 struct bridge_softc *sc = ifp->if_bridge; 2157 struct bridge_iflist *bif, *bif2; 2158 struct ifnet *bifp; 2159 struct ether_header *eh; 2160 struct mbuf *mc, *mc2; 2161 uint16_t vlan; 2162 int error; 2163 2164 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2165 return (m); 2166 2167 bifp = sc->sc_ifp; 2168 vlan = VLANTAGOF(m); 2169 2170 /* 2171 * Implement support for bridge monitoring. If this flag has been 2172 * set on this interface, discard the packet once we push it through 2173 * the bpf(4) machinery, but before we do, increment the byte and 2174 * packet counters associated with this interface. 2175 */ 2176 if ((bifp->if_flags & IFF_MONITOR) != 0) { 2177 m->m_pkthdr.rcvif = bifp; 2178 ETHER_BPF_MTAP(bifp, m); 2179 bifp->if_ipackets++; 2180 bifp->if_ibytes += m->m_pkthdr.len; 2181 m_freem(m); 2182 return (NULL); 2183 } 2184 BRIDGE_LOCK(sc); 2185 bif = bridge_lookup_member_if(sc, ifp); 2186 if (bif == NULL) { 2187 BRIDGE_UNLOCK(sc); 2188 return (m); 2189 } 2190 2191 eh = mtod(m, struct ether_header *); 2192 2193 bridge_span(sc, m); 2194 2195 if (m->m_flags & (M_BCAST|M_MCAST)) { 2196 /* Tap off 802.1D packets; they do not get forwarded. */ 2197 if (memcmp(eh->ether_dhost, bstp_etheraddr, 2198 ETHER_ADDR_LEN) == 0) { 2199 m = bstp_input(&bif->bif_stp, ifp, m); 2200 if (m == NULL) { 2201 BRIDGE_UNLOCK(sc); 2202 return (NULL); 2203 } 2204 } 2205 2206 if ((bif->bif_flags & IFBIF_STP) && 2207 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) { 2208 BRIDGE_UNLOCK(sc); 2209 return (m); 2210 } 2211 2212 /* 2213 * Make a deep copy of the packet and enqueue the copy 2214 * for bridge processing; return the original packet for 2215 * local processing. 2216 */ 2217 mc = m_dup(m, M_DONTWAIT); 2218 if (mc == NULL) { 2219 BRIDGE_UNLOCK(sc); 2220 return (m); 2221 } 2222 2223 /* Perform the bridge forwarding function with the copy. */ 2224 bridge_forward(sc, bif, mc); 2225 2226 /* 2227 * Reinject the mbuf as arriving on the bridge so we have a 2228 * chance at claiming multicast packets. We can not loop back 2229 * here from ether_input as a bridge is never a member of a 2230 * bridge. 2231 */ 2232 KASSERT(bifp->if_bridge == NULL, 2233 ("loop created in bridge_input")); 2234 mc2 = m_dup(m, M_DONTWAIT); 2235 if (mc2 != NULL) { 2236 /* Keep the layer3 header aligned */ 2237 int i = min(mc2->m_pkthdr.len, max_protohdr); 2238 mc2 = m_copyup(mc2, i, ETHER_ALIGN); 2239 } 2240 if (mc2 != NULL) { 2241 mc2->m_pkthdr.rcvif = bifp; 2242 (*bifp->if_input)(bifp, mc2); 2243 } 2244 2245 /* Return the original packet for local processing. 
*/ 2246 return (m); 2247 } 2248 2249 if ((bif->bif_flags & IFBIF_STP) && 2250 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) { 2251 BRIDGE_UNLOCK(sc); 2252 return (m); 2253 } 2254 2255 #if (defined(INET) || defined(INET6)) && defined(DEV_CARP) 2256 # define OR_CARP_CHECK_WE_ARE_DST(iface) \ 2257 || ((iface)->if_carp \ 2258 && carp_forus((iface)->if_carp, eh->ether_dhost)) 2259 # define OR_CARP_CHECK_WE_ARE_SRC(iface) \ 2260 || ((iface)->if_carp \ 2261 && carp_forus((iface)->if_carp, eh->ether_shost)) 2262 #else 2263 # define OR_CARP_CHECK_WE_ARE_DST(iface) 2264 # define OR_CARP_CHECK_WE_ARE_SRC(iface) 2265 #endif 2266 2267 #ifdef INET6 2268 # define OR_PFIL_HOOKED_INET6 \ 2269 || PFIL_HOOKED(&V_inet6_pfil_hook) 2270 #else 2271 # define OR_PFIL_HOOKED_INET6 2272 #endif 2273 2274 #define GRAB_OUR_PACKETS(iface) \ 2275 if ((iface)->if_type == IFT_GIF) \ 2276 continue; \ 2277 /* It is destined for us. */ \ 2278 if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, ETHER_ADDR_LEN) == 0 \ 2279 OR_CARP_CHECK_WE_ARE_DST((iface)) \ 2280 ) { \ 2281 if ((iface)->if_type == IFT_BRIDGE) { \ 2282 ETHER_BPF_MTAP(iface, m); \ 2283 iface->if_ipackets++; \ 2284 /* Filter on the physical interface. */ \ 2285 if (pfil_local_phys && \ 2286 (PFIL_HOOKED(&V_inet_pfil_hook) \ 2287 OR_PFIL_HOOKED_INET6)) { \ 2288 if (bridge_pfil(&m, NULL, ifp, \ 2289 PFIL_IN) != 0 || m == NULL) { \ 2290 BRIDGE_UNLOCK(sc); \ 2291 return (NULL); \ 2292 } \ 2293 } \ 2294 } \ 2295 if (bif->bif_flags & IFBIF_LEARNING) { \ 2296 error = bridge_rtupdate(sc, eh->ether_shost, \ 2297 vlan, bif, 0, IFBAF_DYNAMIC); \ 2298 if (error && bif->bif_addrmax) { \ 2299 BRIDGE_UNLOCK(sc); \ 2300 m_freem(m); \ 2301 return (NULL); \ 2302 } \ 2303 } \ 2304 m->m_pkthdr.rcvif = iface; \ 2305 BRIDGE_UNLOCK(sc); \ 2306 return (m); \ 2307 } \ 2308 \ 2309 /* We just received a packet that we sent out. */ \ 2310 if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \ 2311 OR_CARP_CHECK_WE_ARE_SRC((iface)) \ 2312 ) { \ 2313 BRIDGE_UNLOCK(sc); \ 2314 m_freem(m); \ 2315 return (NULL); \ 2316 } 2317 2318 /* 2319 * Unicast. Make sure it's not for the bridge. 2320 */ 2321 do { GRAB_OUR_PACKETS(bifp) } while (0); 2322 2323 /* 2324 * Give a chance for ifp at first priority. This will help when the 2325 * packet comes through the interface like VLAN's with the same MACs 2326 * on several interfaces from the same bridge. This also will save 2327 * some CPU cycles in case the destination interface and the input 2328 * interface (eq ifp) are the same. 2329 */ 2330 do { GRAB_OUR_PACKETS(ifp) } while (0); 2331 2332 /* Now check the all bridge members. */ 2333 LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) { 2334 GRAB_OUR_PACKETS(bif2->bif_ifp) 2335 } 2336 2337 #undef OR_CARP_CHECK_WE_ARE_DST 2338 #undef OR_CARP_CHECK_WE_ARE_SRC 2339 #undef OR_PFIL_HOOKED_INET6 2340 #undef GRAB_OUR_PACKETS 2341 2342 /* Perform the bridge forwarding function. */ 2343 bridge_forward(sc, bif, m); 2344 2345 return (NULL); 2346 } 2347 2348 /* 2349 * bridge_broadcast: 2350 * 2351 * Send a frame to all interfaces that are members of 2352 * the bridge, except for the one on which the packet 2353 * arrived. 2354 * 2355 * NOTE: Releases the lock on return. 
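 *
 * The loop below avoids one needless copy by handing the original
 * mbuf to the last eligible member and duplicating it for every
 * earlier one; a condensed, hedged sketch of that pattern:
 *
 *	used = 0;
 *	LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
 *		if (LIST_NEXT(dbif, bif_next) == NULL) {
 *			mc = m;		// last member takes the original
 *			used = 1;
 *		} else if ((mc = m_dup(m, M_DONTWAIT)) == NULL) {
 *			sc->sc_ifp->if_oerrors++;
 *			continue;
 *		}
 *		bridge_enqueue(sc, dbif->bif_ifp, mc);
 *	}
 *	if (used == 0)
 *		m_freem(m);		// nobody consumed the original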
2356 */ 2357 static void 2358 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if, 2359 struct mbuf *m, int runfilt) 2360 { 2361 struct bridge_iflist *dbif, *sbif; 2362 struct mbuf *mc; 2363 struct ifnet *dst_if; 2364 int error = 0, used = 0, i; 2365 2366 sbif = bridge_lookup_member_if(sc, src_if); 2367 2368 BRIDGE_LOCK2REF(sc, error); 2369 if (error) { 2370 m_freem(m); 2371 return; 2372 } 2373 2374 /* Filter on the bridge interface before broadcasting */ 2375 if (runfilt && (PFIL_HOOKED(&V_inet_pfil_hook) 2376 #ifdef INET6 2377 || PFIL_HOOKED(&V_inet6_pfil_hook) 2378 #endif 2379 )) { 2380 if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0) 2381 goto out; 2382 if (m == NULL) 2383 goto out; 2384 } 2385 2386 LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) { 2387 dst_if = dbif->bif_ifp; 2388 if (dst_if == src_if) 2389 continue; 2390 2391 /* Private segments can not talk to each other */ 2392 if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)) 2393 continue; 2394 2395 if ((dbif->bif_flags & IFBIF_STP) && 2396 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2397 continue; 2398 2399 if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 && 2400 (m->m_flags & (M_BCAST|M_MCAST)) == 0) 2401 continue; 2402 2403 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2404 continue; 2405 2406 if (LIST_NEXT(dbif, bif_next) == NULL) { 2407 mc = m; 2408 used = 1; 2409 } else { 2410 mc = m_dup(m, M_DONTWAIT); 2411 if (mc == NULL) { 2412 sc->sc_ifp->if_oerrors++; 2413 continue; 2414 } 2415 } 2416 2417 /* 2418 * Filter on the output interface. Pass a NULL bridge interface 2419 * pointer so we do not redundantly filter on the bridge for 2420 * each interface we broadcast on. 2421 */ 2422 if (runfilt && (PFIL_HOOKED(&V_inet_pfil_hook) 2423 #ifdef INET6 2424 || PFIL_HOOKED(&V_inet6_pfil_hook) 2425 #endif 2426 )) { 2427 if (used == 0) { 2428 /* Keep the layer3 header aligned */ 2429 i = min(mc->m_pkthdr.len, max_protohdr); 2430 mc = m_copyup(mc, i, ETHER_ALIGN); 2431 if (mc == NULL) { 2432 sc->sc_ifp->if_oerrors++; 2433 continue; 2434 } 2435 } 2436 if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0) 2437 continue; 2438 if (mc == NULL) 2439 continue; 2440 } 2441 2442 bridge_enqueue(sc, dst_if, mc); 2443 } 2444 if (used == 0) 2445 m_freem(m); 2446 2447 out: 2448 BRIDGE_UNREF(sc); 2449 } 2450 2451 /* 2452 * bridge_span: 2453 * 2454 * Duplicate a packet out one or more interfaces that are in span mode, 2455 * the original mbuf is unmodified. 2456 */ 2457 static void 2458 bridge_span(struct bridge_softc *sc, struct mbuf *m) 2459 { 2460 struct bridge_iflist *bif; 2461 struct ifnet *dst_if; 2462 struct mbuf *mc; 2463 2464 if (LIST_EMPTY(&sc->sc_spanlist)) 2465 return; 2466 2467 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) { 2468 dst_if = bif->bif_ifp; 2469 2470 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2471 continue; 2472 2473 mc = m_copypacket(m, M_DONTWAIT); 2474 if (mc == NULL) { 2475 sc->sc_ifp->if_oerrors++; 2476 continue; 2477 } 2478 2479 bridge_enqueue(sc, dst_if, mc); 2480 } 2481 } 2482 2483 /* 2484 * bridge_rtupdate: 2485 * 2486 * Add a bridge routing entry. 2487 */ 2488 static int 2489 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan, 2490 struct bridge_iflist *bif, int setflags, uint8_t flags) 2491 { 2492 struct bridge_rtnode *brt; 2493 int error; 2494 2495 BRIDGE_LOCK_ASSERT(sc); 2496 2497 /* Check the source address is valid and not multicast. 
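 *
 * (Learning a multicast, broadcast or all-zeros source would only
 * poison the table, since no station legitimately transmits from such
 * an address. A hedged, equivalent formulation of the test that
 * follows, using a hypothetical all-zeros constant:
 *
 *	static const uint8_t eaddr_zero[ETHER_ADDR_LEN]; // not in this file
 *	if (ETHER_IS_MULTICAST(dst) ||
 *	    memcmp(dst, eaddr_zero, ETHER_ADDR_LEN) == 0)
 *		return (EINVAL);
 *
 * The code below spells the all-zeros comparison out byte by byte.)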
*/ 2498 if (ETHER_IS_MULTICAST(dst) || 2499 (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 && 2500 dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0) 2501 return (EINVAL); 2502 2503 /* 802.1p frames map to vlan 1 */ 2504 if (vlan == 0) 2505 vlan = 1; 2506 2507 /* 2508 * A route for this destination might already exist. If so, 2509 * update it, otherwise create a new one. 2510 */ 2511 if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) { 2512 if (sc->sc_brtcnt >= sc->sc_brtmax) { 2513 sc->sc_brtexceeded++; 2514 return (ENOSPC); 2515 } 2516 /* Check per interface address limits (if enabled) */ 2517 if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) { 2518 bif->bif_addrexceeded++; 2519 return (ENOSPC); 2520 } 2521 2522 /* 2523 * Allocate a new bridge forwarding node, and 2524 * initialize the expiration time and Ethernet 2525 * address. 2526 */ 2527 brt = uma_zalloc(bridge_rtnode_zone, M_NOWAIT | M_ZERO); 2528 if (brt == NULL) 2529 return (ENOMEM); 2530 2531 if (bif->bif_flags & IFBIF_STICKY) 2532 brt->brt_flags = IFBAF_STICKY; 2533 else 2534 brt->brt_flags = IFBAF_DYNAMIC; 2535 2536 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN); 2537 brt->brt_vlan = vlan; 2538 2539 if ((error = bridge_rtnode_insert(sc, brt)) != 0) { 2540 uma_zfree(bridge_rtnode_zone, brt); 2541 return (error); 2542 } 2543 brt->brt_dst = bif; 2544 bif->bif_addrcnt++; 2545 } 2546 2547 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC && 2548 brt->brt_dst != bif) { 2549 brt->brt_dst->bif_addrcnt--; 2550 brt->brt_dst = bif; 2551 brt->brt_dst->bif_addrcnt++; 2552 } 2553 2554 if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) 2555 brt->brt_expire = time_uptime + sc->sc_brttimeout; 2556 if (setflags) 2557 brt->brt_flags = flags; 2558 2559 return (0); 2560 } 2561 2562 /* 2563 * bridge_rtlookup: 2564 * 2565 * Lookup the destination interface for an address. 2566 */ 2567 static struct ifnet * 2568 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) 2569 { 2570 struct bridge_rtnode *brt; 2571 2572 BRIDGE_LOCK_ASSERT(sc); 2573 2574 if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL) 2575 return (NULL); 2576 2577 return (brt->brt_ifp); 2578 } 2579 2580 /* 2581 * bridge_rttrim: 2582 * 2583 * Trim the routine table so that we have a number 2584 * of routing entries less than or equal to the 2585 * maximum number. 2586 */ 2587 static void 2588 bridge_rttrim(struct bridge_softc *sc) 2589 { 2590 struct bridge_rtnode *brt, *nbrt; 2591 2592 BRIDGE_LOCK_ASSERT(sc); 2593 2594 /* Make sure we actually need to do this. */ 2595 if (sc->sc_brtcnt <= sc->sc_brtmax) 2596 return; 2597 2598 /* Force an aging cycle; this might trim enough addresses. */ 2599 bridge_rtage(sc); 2600 if (sc->sc_brtcnt <= sc->sc_brtmax) 2601 return; 2602 2603 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { 2604 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { 2605 bridge_rtnode_destroy(sc, brt); 2606 if (sc->sc_brtcnt <= sc->sc_brtmax) 2607 return; 2608 } 2609 } 2610 } 2611 2612 /* 2613 * bridge_timer: 2614 * 2615 * Aging timer for the bridge. 2616 */ 2617 static void 2618 bridge_timer(void *arg) 2619 { 2620 struct bridge_softc *sc = arg; 2621 2622 BRIDGE_LOCK_ASSERT(sc); 2623 2624 bridge_rtage(sc); 2625 2626 if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) 2627 callout_reset(&sc->sc_brcallout, 2628 bridge_rtable_prune_period * hz, bridge_timer, sc); 2629 } 2630 2631 /* 2632 * bridge_rtage: 2633 * 2634 * Perform an aging cycle. 
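 *
 * For context, a hedged sketch of the life cycle that ends here,
 * pieced together from bridge_rtupdate() above and bridge_timer():
 *
 *	// on learn/refresh of a dynamic entry:
 *	brt->brt_expire = time_uptime + sc->sc_brttimeout;
 *
 *	// every bridge_rtable_prune_period seconds, via the callout:
 *	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
 *	    time_uptime >= brt->brt_expire)
 *		bridge_rtnode_destroy(sc, brt);
 *
 * Sticky and static entries are not typed IFBAF_DYNAMIC, so the aging
 * pass leaves them alone.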
2635 */
2636 static void
2637 bridge_rtage(struct bridge_softc *sc)
2638 {
2639 struct bridge_rtnode *brt, *nbrt;
2640
2641 BRIDGE_LOCK_ASSERT(sc);
2642
2643 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2644 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2645 if (time_uptime >= brt->brt_expire)
2646 bridge_rtnode_destroy(sc, brt);
2647 }
2648 }
2649 }
2650
2651 /*
2652 * bridge_rtflush:
2653 *
2654 * Remove all dynamic addresses from the bridge.
2655 */
2656 static void
2657 bridge_rtflush(struct bridge_softc *sc, int full)
2658 {
2659 struct bridge_rtnode *brt, *nbrt;
2660
2661 BRIDGE_LOCK_ASSERT(sc);
2662
2663 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2664 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2665 bridge_rtnode_destroy(sc, brt);
2666 }
2667 }
2668
2669 /*
2670 * bridge_rtdaddr:
2671 *
2672 * Remove an address from the table.
2673 */
2674 static int
2675 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2676 {
2677 struct bridge_rtnode *brt;
2678 int found = 0;
2679
2680 BRIDGE_LOCK_ASSERT(sc);
2681
2682 /*
2683 * If vlan is zero then we want to delete for all vlans so the lookup
2684 * may return more than one.
2685 */
2686 while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
2687 bridge_rtnode_destroy(sc, brt);
2688 found = 1;
2689 }
2690
2691 return (found ? 0 : ENOENT);
2692 }
2693
2694 /*
2695 * bridge_rtdelete:
2696 *
2697 * Delete routes to a specific member interface.
2698 */
2699 static void
2700 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
2701 {
2702 struct bridge_rtnode *brt, *nbrt;
2703
2704 BRIDGE_LOCK_ASSERT(sc);
2705
2706 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2707 if (brt->brt_ifp == ifp && (full ||
2708 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
2709 bridge_rtnode_destroy(sc, brt);
2710 }
2711 }
2712
2713 /*
2714 * bridge_rtable_init:
2715 *
2716 * Initialize the route table for this bridge.
2717 */
2718 static int
2719 bridge_rtable_init(struct bridge_softc *sc)
2720 {
2721 int i;
2722
2723 sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2724 M_DEVBUF, M_NOWAIT);
2725 if (sc->sc_rthash == NULL)
2726 return (ENOMEM);
2727
2728 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2729 LIST_INIT(&sc->sc_rthash[i]);
2730
2731 sc->sc_rthash_key = arc4random();
2732
2733 LIST_INIT(&sc->sc_rtlist);
2734
2735 return (0);
2736 }
2737
2738 /*
2739 * bridge_rtable_fini:
2740 *
2741 * Deconstruct the route table for this bridge.
2742 */
2743 static void
2744 bridge_rtable_fini(struct bridge_softc *sc)
2745 {
2746
2747 KASSERT(sc->sc_brtcnt == 0,
2748 ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
2749 free(sc->sc_rthash, M_DEVBUF);
2750 }
2751
2752 /*
2753 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2754 * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
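 *
 * A hedged sketch of how the result is consumed by the lookup and
 * insert routines further down (illustrative only):
 *
 *	uint32_t hash = bridge_rthash(sc, addr); // 0 .. BRIDGE_RTHASH_MASK
 *	LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
 *		if (bridge_rtnode_addr_cmp(addr, brt->brt_addr) == 0)
 *			break;		// same MAC; vlan still to check
 *	}
 *
 * Folding the per-bridge sc_rthash_key (set from arc4random() in
 * bridge_rtable_init()) into the initial state keeps remote hosts from
 * deliberately choosing addresses that all collide in one bucket.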
2755 */ 2756 #define mix(a, b, c) \ 2757 do { \ 2758 a -= b; a -= c; a ^= (c >> 13); \ 2759 b -= c; b -= a; b ^= (a << 8); \ 2760 c -= a; c -= b; c ^= (b >> 13); \ 2761 a -= b; a -= c; a ^= (c >> 12); \ 2762 b -= c; b -= a; b ^= (a << 16); \ 2763 c -= a; c -= b; c ^= (b >> 5); \ 2764 a -= b; a -= c; a ^= (c >> 3); \ 2765 b -= c; b -= a; b ^= (a << 10); \ 2766 c -= a; c -= b; c ^= (b >> 15); \ 2767 } while (/*CONSTCOND*/0) 2768 2769 static __inline uint32_t 2770 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr) 2771 { 2772 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key; 2773 2774 b += addr[5] << 8; 2775 b += addr[4]; 2776 a += addr[3] << 24; 2777 a += addr[2] << 16; 2778 a += addr[1] << 8; 2779 a += addr[0]; 2780 2781 mix(a, b, c); 2782 2783 return (c & BRIDGE_RTHASH_MASK); 2784 } 2785 2786 #undef mix 2787 2788 static int 2789 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b) 2790 { 2791 int i, d; 2792 2793 for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) { 2794 d = ((int)a[i]) - ((int)b[i]); 2795 } 2796 2797 return (d); 2798 } 2799 2800 /* 2801 * bridge_rtnode_lookup: 2802 * 2803 * Look up a bridge route node for the specified destination. Compare the 2804 * vlan id or if zero then just return the first match. 2805 */ 2806 static struct bridge_rtnode * 2807 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) 2808 { 2809 struct bridge_rtnode *brt; 2810 uint32_t hash; 2811 int dir; 2812 2813 BRIDGE_LOCK_ASSERT(sc); 2814 2815 hash = bridge_rthash(sc, addr); 2816 LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) { 2817 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr); 2818 if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0)) 2819 return (brt); 2820 if (dir > 0) 2821 return (NULL); 2822 } 2823 2824 return (NULL); 2825 } 2826 2827 /* 2828 * bridge_rtnode_insert: 2829 * 2830 * Insert the specified bridge node into the route table. We 2831 * assume the entry is not already in the table. 2832 */ 2833 static int 2834 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt) 2835 { 2836 struct bridge_rtnode *lbrt; 2837 uint32_t hash; 2838 int dir; 2839 2840 BRIDGE_LOCK_ASSERT(sc); 2841 2842 hash = bridge_rthash(sc, brt->brt_addr); 2843 2844 lbrt = LIST_FIRST(&sc->sc_rthash[hash]); 2845 if (lbrt == NULL) { 2846 LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash); 2847 goto out; 2848 } 2849 2850 do { 2851 dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr); 2852 if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan) 2853 return (EEXIST); 2854 if (dir > 0) { 2855 LIST_INSERT_BEFORE(lbrt, brt, brt_hash); 2856 goto out; 2857 } 2858 if (LIST_NEXT(lbrt, brt_hash) == NULL) { 2859 LIST_INSERT_AFTER(lbrt, brt, brt_hash); 2860 goto out; 2861 } 2862 lbrt = LIST_NEXT(lbrt, brt_hash); 2863 } while (lbrt != NULL); 2864 2865 #ifdef DIAGNOSTIC 2866 panic("bridge_rtnode_insert: impossible"); 2867 #endif 2868 2869 out: 2870 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list); 2871 sc->sc_brtcnt++; 2872 2873 return (0); 2874 } 2875 2876 /* 2877 * bridge_rtnode_destroy: 2878 * 2879 * Destroy a bridge rtnode. 2880 */ 2881 static void 2882 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt) 2883 { 2884 BRIDGE_LOCK_ASSERT(sc); 2885 2886 LIST_REMOVE(brt, brt_hash); 2887 2888 LIST_REMOVE(brt, brt_list); 2889 sc->sc_brtcnt--; 2890 brt->brt_dst->bif_addrcnt--; 2891 uma_zfree(bridge_rtnode_zone, brt); 2892 } 2893 2894 /* 2895 * bridge_rtable_expire: 2896 * 2897 * Set the expiry time for all routes on an interface. 
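 *
 * A hedged summary of the 'age' argument handled below: zero flushes
 * the dynamic entries learned on the interface outright, while a
 * non-zero value merely caps their remaining lifetime. Hypothetical
 * calls, for illustration only:
 *
 *	bridge_rtable_expire(ifp, 0);	// forget everything learned on ifp
 *	bridge_rtable_expire(ifp, 4);	// let it age out within 4 seconds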
2898 */ 2899 static void 2900 bridge_rtable_expire(struct ifnet *ifp, int age) 2901 { 2902 struct bridge_softc *sc = ifp->if_bridge; 2903 struct bridge_rtnode *brt; 2904 2905 BRIDGE_LOCK(sc); 2906 2907 /* 2908 * If the age is zero then flush, otherwise set all the expiry times to 2909 * age for the interface 2910 */ 2911 if (age == 0) 2912 bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN); 2913 else { 2914 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { 2915 /* Cap the expiry time to 'age' */ 2916 if (brt->brt_ifp == ifp && 2917 brt->brt_expire > time_uptime + age && 2918 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) 2919 brt->brt_expire = time_uptime + age; 2920 } 2921 } 2922 BRIDGE_UNLOCK(sc); 2923 } 2924 2925 /* 2926 * bridge_state_change: 2927 * 2928 * Callback from the bridgestp code when a port changes states. 2929 */ 2930 static void 2931 bridge_state_change(struct ifnet *ifp, int state) 2932 { 2933 struct bridge_softc *sc = ifp->if_bridge; 2934 static const char *stpstates[] = { 2935 "disabled", 2936 "listening", 2937 "learning", 2938 "forwarding", 2939 "blocking", 2940 "discarding" 2941 }; 2942 2943 if (log_stp) 2944 log(LOG_NOTICE, "%s: state changed to %s on %s\n", 2945 sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname); 2946 } 2947 2948 /* 2949 * Send bridge packets through pfil if they are one of the types pfil can deal 2950 * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without 2951 * question.) If *bifp or *ifp are NULL then packet filtering is skipped for 2952 * that interface. 2953 */ 2954 static int 2955 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir) 2956 { 2957 int snap, error, i, hlen; 2958 struct ether_header *eh1, eh2; 2959 struct ip_fw_args args; 2960 struct ip *ip; 2961 struct llc llc1; 2962 u_int16_t ether_type; 2963 2964 snap = 0; 2965 error = -1; /* Default error if not error == 0 */ 2966 2967 #if 0 2968 /* we may return with the IP fields swapped, ensure its not shared */ 2969 KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__)); 2970 #endif 2971 2972 if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0) 2973 return (0); /* filtering is disabled */ 2974 2975 i = min((*mp)->m_pkthdr.len, max_protohdr); 2976 if ((*mp)->m_len < i) { 2977 *mp = m_pullup(*mp, i); 2978 if (*mp == NULL) { 2979 printf("%s: m_pullup failed\n", __func__); 2980 return (-1); 2981 } 2982 } 2983 2984 eh1 = mtod(*mp, struct ether_header *); 2985 ether_type = ntohs(eh1->ether_type); 2986 2987 /* 2988 * Check for SNAP/LLC. 2989 */ 2990 if (ether_type < ETHERMTU) { 2991 struct llc *llc2 = (struct llc *)(eh1 + 1); 2992 2993 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 && 2994 llc2->llc_dsap == LLC_SNAP_LSAP && 2995 llc2->llc_ssap == LLC_SNAP_LSAP && 2996 llc2->llc_control == LLC_UI) { 2997 ether_type = htons(llc2->llc_un.type_snap.ether_type); 2998 snap = 1; 2999 } 3000 } 3001 3002 /* 3003 * If we're trying to filter bridge traffic, don't look at anything 3004 * other than IP and ARP traffic. If the filter doesn't understand 3005 * IPv6, don't allow IPv6 through the bridge either. This is lame 3006 * since if we really wanted, say, an AppleTalk filter, we are hosed, 3007 * but of course we don't have an AppleTalk filter to begin with. 3008 * (Note that since pfil doesn't understand ARP it will pass *ALL* 3009 * ARP traffic.) 
3010 */ 3011 switch (ether_type) { 3012 case ETHERTYPE_ARP: 3013 case ETHERTYPE_REVARP: 3014 if (pfil_ipfw_arp == 0) 3015 return (0); /* Automatically pass */ 3016 break; 3017 3018 case ETHERTYPE_IP: 3019 #ifdef INET6 3020 case ETHERTYPE_IPV6: 3021 #endif /* INET6 */ 3022 break; 3023 default: 3024 /* 3025 * Check to see if the user wants to pass non-ip 3026 * packets, these will not be checked by pfil(9) and 3027 * passed unconditionally so the default is to drop. 3028 */ 3029 if (pfil_onlyip) 3030 goto bad; 3031 } 3032 3033 /* Strip off the Ethernet header and keep a copy. */ 3034 m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2); 3035 m_adj(*mp, ETHER_HDR_LEN); 3036 3037 /* Strip off snap header, if present */ 3038 if (snap) { 3039 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1); 3040 m_adj(*mp, sizeof(struct llc)); 3041 } 3042 3043 /* 3044 * Check the IP header for alignment and errors 3045 */ 3046 if (dir == PFIL_IN) { 3047 switch (ether_type) { 3048 case ETHERTYPE_IP: 3049 error = bridge_ip_checkbasic(mp); 3050 break; 3051 #ifdef INET6 3052 case ETHERTYPE_IPV6: 3053 error = bridge_ip6_checkbasic(mp); 3054 break; 3055 #endif /* INET6 */ 3056 default: 3057 error = 0; 3058 } 3059 if (error) 3060 goto bad; 3061 } 3062 3063 /* XXX this section is also in if_ethersubr.c */ 3064 // XXX PFIL_OUT or DIR_OUT ? 3065 if (V_ip_fw_chk_ptr && pfil_ipfw != 0 && 3066 dir == PFIL_OUT && ifp != NULL) { 3067 struct m_tag *mtag; 3068 3069 error = -1; 3070 /* fetch the start point from existing tags, if any */ 3071 mtag = m_tag_locate(*mp, MTAG_IPFW_RULE, 0, NULL); 3072 if (mtag == NULL) { 3073 args.rule.slot = 0; 3074 } else { 3075 struct ipfw_rule_ref *r; 3076 3077 /* XXX can we free the tag after use ? */ 3078 mtag->m_tag_id = PACKET_TAG_NONE; 3079 r = (struct ipfw_rule_ref *)(mtag + 1); 3080 /* packet already partially processed ? */ 3081 if (r->info & IPFW_ONEPASS) 3082 goto ipfwpass; 3083 args.rule = *r; 3084 } 3085 3086 args.m = *mp; 3087 args.oif = ifp; 3088 args.next_hop = NULL; 3089 args.eh = &eh2; 3090 args.inp = NULL; /* used by ipfw uid/gid/jail rules */ 3091 i = V_ip_fw_chk_ptr(&args); 3092 *mp = args.m; 3093 3094 if (*mp == NULL) 3095 return (error); 3096 3097 if (ip_dn_io_ptr && (i == IP_FW_DUMMYNET)) { 3098 3099 /* put the Ethernet header back on */ 3100 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT); 3101 if (*mp == NULL) 3102 return (error); 3103 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); 3104 3105 /* 3106 * Pass the pkt to dummynet, which consumes it. The 3107 * packet will return to us via bridge_dummynet(). 3108 */ 3109 args.oif = ifp; 3110 ip_dn_io_ptr(mp, DIR_FWD | PROTO_IFB, &args); 3111 return (error); 3112 } 3113 3114 if (i != IP_FW_PASS) /* drop */ 3115 goto bad; 3116 } 3117 3118 ipfwpass: 3119 error = 0; 3120 3121 /* 3122 * Run the packet through pfil 3123 */ 3124 switch (ether_type) { 3125 case ETHERTYPE_IP: 3126 /* 3127 * before calling the firewall, swap fields the same as 3128 * IP does. here we assume the header is contiguous 3129 */ 3130 ip = mtod(*mp, struct ip *); 3131 3132 ip->ip_len = ntohs(ip->ip_len); 3133 ip->ip_off = ntohs(ip->ip_off); 3134 3135 /* 3136 * Run pfil on the member interface and the bridge, both can 3137 * be skipped by clearing pfil_member or pfil_bridge. 
3138 * 3139 * Keep the order: 3140 * in_if -> bridge_if -> out_if 3141 */ 3142 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) 3143 error = pfil_run_hooks(&V_inet_pfil_hook, mp, bifp, 3144 dir, NULL); 3145 3146 if (*mp == NULL || error != 0) /* filter may consume */ 3147 break; 3148 3149 if (pfil_member && ifp != NULL) 3150 error = pfil_run_hooks(&V_inet_pfil_hook, mp, ifp, 3151 dir, NULL); 3152 3153 if (*mp == NULL || error != 0) /* filter may consume */ 3154 break; 3155 3156 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) 3157 error = pfil_run_hooks(&V_inet_pfil_hook, mp, bifp, 3158 dir, NULL); 3159 3160 if (*mp == NULL || error != 0) /* filter may consume */ 3161 break; 3162 3163 /* check if we need to fragment the packet */ 3164 if (pfil_member && ifp != NULL && dir == PFIL_OUT) { 3165 i = (*mp)->m_pkthdr.len; 3166 if (i > ifp->if_mtu) { 3167 error = bridge_fragment(ifp, *mp, &eh2, snap, 3168 &llc1); 3169 return (error); 3170 } 3171 } 3172 3173 /* Recalculate the ip checksum and restore byte ordering */ 3174 ip = mtod(*mp, struct ip *); 3175 hlen = ip->ip_hl << 2; 3176 if (hlen < sizeof(struct ip)) 3177 goto bad; 3178 if (hlen > (*mp)->m_len) { 3179 if ((*mp = m_pullup(*mp, hlen)) == 0) 3180 goto bad; 3181 ip = mtod(*mp, struct ip *); 3182 if (ip == NULL) 3183 goto bad; 3184 } 3185 ip->ip_len = htons(ip->ip_len); 3186 ip->ip_off = htons(ip->ip_off); 3187 ip->ip_sum = 0; 3188 if (hlen == sizeof(struct ip)) 3189 ip->ip_sum = in_cksum_hdr(ip); 3190 else 3191 ip->ip_sum = in_cksum(*mp, hlen); 3192 3193 break; 3194 #ifdef INET6 3195 case ETHERTYPE_IPV6: 3196 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) 3197 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, bifp, 3198 dir, NULL); 3199 3200 if (*mp == NULL || error != 0) /* filter may consume */ 3201 break; 3202 3203 if (pfil_member && ifp != NULL) 3204 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, ifp, 3205 dir, NULL); 3206 3207 if (*mp == NULL || error != 0) /* filter may consume */ 3208 break; 3209 3210 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) 3211 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, bifp, 3212 dir, NULL); 3213 break; 3214 #endif 3215 default: 3216 error = 0; 3217 break; 3218 } 3219 3220 if (*mp == NULL) 3221 return (error); 3222 if (error != 0) 3223 goto bad; 3224 3225 error = -1; 3226 3227 /* 3228 * Finally, put everything back the way it was and return 3229 */ 3230 if (snap) { 3231 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT); 3232 if (*mp == NULL) 3233 return (error); 3234 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc)); 3235 } 3236 3237 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT); 3238 if (*mp == NULL) 3239 return (error); 3240 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); 3241 3242 return (0); 3243 3244 bad: 3245 m_freem(*mp); 3246 *mp = NULL; 3247 return (error); 3248 } 3249 3250 /* 3251 * Perform basic checks on header size since 3252 * pfil assumes ip_input has already processed 3253 * it for it. Cut-and-pasted from ip_input.c. 3254 * Given how simple the IPv6 version is, 3255 * does the IPv4 version really need to be 3256 * this complicated? 3257 * 3258 * XXX Should we update ipstat here, or not? 3259 * XXX Right now we update ipstat but not 3260 * XXX csum_counter. 
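 *
 * One wrinkle worth noting (it mirrors the csum_flags test in the
 * body): when the NIC has already verified the header checksum, the
 * mbuf flags let us skip the software checksum entirely, e.g.
 *
 *	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED)
 *		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID); // 0 == good
 *	else
 *		sum = in_cksum_hdr(ip);		// software fallback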
3261 */
3262 static int
3263 bridge_ip_checkbasic(struct mbuf **mp)
3264 {
3265 struct mbuf *m = *mp;
3266 struct ip *ip;
3267 int len, hlen;
3268 u_short sum;
3269
3270 if (*mp == NULL)
3271 return (-1);
3272
3273 if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3274 if ((m = m_copyup(m, sizeof(struct ip),
3275 (max_linkhdr + 3) & ~3)) == NULL) {
3276 /* XXXJRT new stat, please */
3277 KMOD_IPSTAT_INC(ips_toosmall);
3278 goto bad;
3279 }
3280 } else if (__predict_false(m->m_len < sizeof (struct ip))) {
3281 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3282 KMOD_IPSTAT_INC(ips_toosmall);
3283 goto bad;
3284 }
3285 }
3286 ip = mtod(m, struct ip *);
3287 if (ip == NULL) goto bad;
3288
3289 if (ip->ip_v != IPVERSION) {
3290 KMOD_IPSTAT_INC(ips_badvers);
3291 goto bad;
3292 }
3293 hlen = ip->ip_hl << 2;
3294 if (hlen < sizeof(struct ip)) { /* minimum header length */
3295 KMOD_IPSTAT_INC(ips_badhlen);
3296 goto bad;
3297 }
3298 if (hlen > m->m_len) {
3299 if ((m = m_pullup(m, hlen)) == 0) {
3300 KMOD_IPSTAT_INC(ips_badhlen);
3301 goto bad;
3302 }
3303 ip = mtod(m, struct ip *);
3304 if (ip == NULL) goto bad;
3305 }
3306
3307 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3308 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3309 } else {
3310 if (hlen == sizeof(struct ip)) {
3311 sum = in_cksum_hdr(ip);
3312 } else {
3313 sum = in_cksum(m, hlen);
3314 }
3315 }
3316 if (sum) {
3317 KMOD_IPSTAT_INC(ips_badsum);
3318 goto bad;
3319 }
3320
3321 /* Retrieve the packet length. */
3322 len = ntohs(ip->ip_len);
3323
3324 /*
3325 * Check for additional length bogosity
3326 */
3327 if (len < hlen) {
3328 KMOD_IPSTAT_INC(ips_badlen);
3329 goto bad;
3330 }
3331
3332 /*
3333 * Check that the amount of data in the buffers
3334 * is at least as much as the IP header would have us expect.
3335 * Drop packet if shorter than we expect.
3336 */
3337 if (m->m_pkthdr.len < len) {
3338 KMOD_IPSTAT_INC(ips_tooshort);
3339 goto bad;
3340 }
3341
3342 /* Checks out, proceed */
3343 *mp = m;
3344 return (0);
3345
3346 bad:
3347 *mp = m;
3348 return (-1);
3349 }
3350
3351 #ifdef INET6
3352 /*
3353 * Same as above, but for IPv6.
3354 * Cut-and-pasted from ip6_input.c.
3355 * XXX Should we update ip6stat, or not?
3356 */
3357 static int
3358 bridge_ip6_checkbasic(struct mbuf **mp)
3359 {
3360 struct mbuf *m = *mp;
3361 struct ip6_hdr *ip6;
3362
3363 /*
3364 * If the IPv6 header is not aligned, slurp it up into a new
3365 * mbuf with space for link headers, in the event we forward
3366 * it. Otherwise, if it is aligned, make sure the entire base
3367 * IPv6 header is in the first mbuf of the chain.
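 *
 * (The (max_linkhdr + 3) & ~3 expression passed to m_copyup() below
 * simply rounds the reserved link-header space up to a multiple of
 * four so the IPv6 header that follows stays 32-bit aligned; for
 * example, max_linkhdr == 14 would reserve 16 bytes.)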
3368 */ 3369 if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { 3370 struct ifnet *inifp = m->m_pkthdr.rcvif; 3371 if ((m = m_copyup(m, sizeof(struct ip6_hdr), 3372 (max_linkhdr + 3) & ~3)) == NULL) { 3373 /* XXXJRT new stat, please */ 3374 V_ip6stat.ip6s_toosmall++; 3375 in6_ifstat_inc(inifp, ifs6_in_hdrerr); 3376 goto bad; 3377 } 3378 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) { 3379 struct ifnet *inifp = m->m_pkthdr.rcvif; 3380 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) { 3381 V_ip6stat.ip6s_toosmall++; 3382 in6_ifstat_inc(inifp, ifs6_in_hdrerr); 3383 goto bad; 3384 } 3385 } 3386 3387 ip6 = mtod(m, struct ip6_hdr *); 3388 3389 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) { 3390 V_ip6stat.ip6s_badvers++; 3391 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr); 3392 goto bad; 3393 } 3394 3395 /* Checks out, proceed */ 3396 *mp = m; 3397 return (0); 3398 3399 bad: 3400 *mp = m; 3401 return (-1); 3402 } 3403 #endif /* INET6 */ 3404 3405 /* 3406 * bridge_fragment: 3407 * 3408 * Return a fragmented mbuf chain. 3409 */ 3410 static int 3411 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh, 3412 int snap, struct llc *llc) 3413 { 3414 struct mbuf *m0; 3415 struct ip *ip; 3416 int error = -1; 3417 3418 if (m->m_len < sizeof(struct ip) && 3419 (m = m_pullup(m, sizeof(struct ip))) == NULL) 3420 goto out; 3421 ip = mtod(m, struct ip *); 3422 3423 error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist, 3424 CSUM_DELAY_IP); 3425 if (error) 3426 goto out; 3427 3428 /* walk the chain and re-add the Ethernet header */ 3429 for (m0 = m; m0; m0 = m0->m_nextpkt) { 3430 if (error == 0) { 3431 if (snap) { 3432 M_PREPEND(m0, sizeof(struct llc), M_DONTWAIT); 3433 if (m0 == NULL) { 3434 error = ENOBUFS; 3435 continue; 3436 } 3437 bcopy(llc, mtod(m0, caddr_t), 3438 sizeof(struct llc)); 3439 } 3440 M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT); 3441 if (m0 == NULL) { 3442 error = ENOBUFS; 3443 continue; 3444 } 3445 bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN); 3446 } else 3447 m_freem(m); 3448 } 3449 3450 if (error == 0) 3451 KMOD_IPSTAT_INC(ips_fragmented); 3452 3453 return (error); 3454 3455 out: 3456 if (m != NULL) 3457 m_freem(m); 3458 return (error); 3459 } 3460