1 /* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */ 2 3 /* 4 * Copyright 2001 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 /* 39 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net) 40 * All rights reserved. 41 * 42 * Redistribution and use in source and binary forms, with or without 43 * modification, are permitted provided that the following conditions 44 * are met: 45 * 1. Redistributions of source code must retain the above copyright 46 * notice, this list of conditions and the following disclaimer. 47 * 2. Redistributions in binary form must reproduce the above copyright 48 * notice, this list of conditions and the following disclaimer in the 49 * documentation and/or other materials provided with the distribution. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 53 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 54 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 55 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 56 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 57 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 59 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 60 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 61 * POSSIBILITY OF SUCH DAMAGE. 62 * 63 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp 64 */ 65 66 /* 67 * Network interface bridge support. 68 * 69 * TODO: 70 * 71 * - Currently only supports Ethernet-like interfaces (Ethernet, 72 * 802.11, VLANs on Ethernet, etc.) 
Figure out a nice way 73 * to bridge other types of interfaces (FDDI-FDDI, and maybe 74 * consider heterogenous bridges). 75 */ 76 77 #include <sys/cdefs.h> 78 __FBSDID("$FreeBSD$"); 79 80 #include "opt_inet.h" 81 #include "opt_inet6.h" 82 83 #include <sys/param.h> 84 #include <sys/mbuf.h> 85 #include <sys/malloc.h> 86 #include <sys/protosw.h> 87 #include <sys/systm.h> 88 #include <sys/jail.h> 89 #include <sys/time.h> 90 #include <sys/socket.h> /* for net/if.h */ 91 #include <sys/sockio.h> 92 #include <sys/ctype.h> /* string functions */ 93 #include <sys/kernel.h> 94 #include <sys/random.h> 95 #include <sys/syslog.h> 96 #include <sys/sysctl.h> 97 #include <vm/uma.h> 98 #include <sys/module.h> 99 #include <sys/priv.h> 100 #include <sys/proc.h> 101 #include <sys/lock.h> 102 #include <sys/mutex.h> 103 #include <sys/rwlock.h> 104 105 #include <net/bpf.h> 106 #include <net/if.h> 107 #include <net/if_clone.h> 108 #include <net/if_dl.h> 109 #include <net/if_types.h> 110 #include <net/if_var.h> 111 #include <net/pfil.h> 112 #include <net/vnet.h> 113 114 #include <netinet/in.h> /* for struct arpcom */ 115 #include <netinet/in_systm.h> 116 #include <netinet/in_var.h> 117 #include <netinet/ip.h> 118 #include <netinet/ip_var.h> 119 #ifdef INET6 120 #include <netinet/ip6.h> 121 #include <netinet6/ip6_var.h> 122 #endif 123 #if defined(INET) || defined(INET6) 124 #include <netinet/ip_carp.h> 125 #endif 126 #include <machine/in_cksum.h> 127 #include <netinet/if_ether.h> /* for struct arpcom */ 128 #include <net/bridgestp.h> 129 #include <net/if_bridgevar.h> 130 #include <net/if_llc.h> 131 #include <net/if_vlan_var.h> 132 133 #include <net/route.h> 134 #include <netinet/ip_fw.h> 135 #include <netinet/ipfw/ip_fw_private.h> 136 137 /* 138 * Size of the route hash table. Must be a power of two. 139 */ 140 #ifndef BRIDGE_RTHASH_SIZE 141 #define BRIDGE_RTHASH_SIZE 1024 142 #endif 143 144 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1) 145 146 /* 147 * Default maximum number of addresses to cache. 148 */ 149 #ifndef BRIDGE_RTABLE_MAX 150 #define BRIDGE_RTABLE_MAX 2000 151 #endif 152 153 /* 154 * Timeout (in seconds) for entries learned dynamically. 155 */ 156 #ifndef BRIDGE_RTABLE_TIMEOUT 157 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */ 158 #endif 159 160 /* 161 * Number of seconds between walks of the route list. 162 */ 163 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD 164 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60) 165 #endif 166 167 /* 168 * List of capabilities to possibly mask on the member interface. 169 */ 170 #define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM) 171 172 /* 173 * List of capabilities to strip 174 */ 175 #define BRIDGE_IFCAPS_STRIP IFCAP_LRO 176 177 /* 178 * Bridge interface list entry. 179 */ 180 struct bridge_iflist { 181 LIST_ENTRY(bridge_iflist) bif_next; 182 struct ifnet *bif_ifp; /* member if */ 183 struct bstp_port bif_stp; /* STP state */ 184 uint32_t bif_flags; /* member if flags */ 185 int bif_savedcaps; /* saved capabilities */ 186 uint32_t bif_addrmax; /* max # of addresses */ 187 uint32_t bif_addrcnt; /* cur. # of addresses */ 188 uint32_t bif_addrexceeded;/* # of address violations */ 189 }; 190 191 /* 192 * Bridge route node. 
193 */ 194 struct bridge_rtnode { 195 LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */ 196 LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */ 197 struct bridge_iflist *brt_dst; /* destination if */ 198 unsigned long brt_expire; /* expiration time */ 199 uint8_t brt_flags; /* address flags */ 200 uint8_t brt_addr[ETHER_ADDR_LEN]; 201 uint16_t brt_vlan; /* vlan id */ 202 }; 203 #define brt_ifp brt_dst->bif_ifp 204 205 /* 206 * Software state for each bridge. 207 */ 208 struct bridge_softc { 209 struct ifnet *sc_ifp; /* make this an interface */ 210 LIST_ENTRY(bridge_softc) sc_list; 211 struct mtx sc_mtx; 212 struct cv sc_cv; 213 uint32_t sc_brtmax; /* max # of addresses */ 214 uint32_t sc_brtcnt; /* cur. # of addresses */ 215 uint32_t sc_brttimeout; /* rt timeout in seconds */ 216 struct callout sc_brcallout; /* bridge callout */ 217 uint32_t sc_iflist_ref; /* refcount for sc_iflist */ 218 uint32_t sc_iflist_xcnt; /* refcount for sc_iflist */ 219 LIST_HEAD(, bridge_iflist) sc_iflist; /* member interface list */ 220 LIST_HEAD(, bridge_rtnode) *sc_rthash; /* our forwarding table */ 221 LIST_HEAD(, bridge_rtnode) sc_rtlist; /* list version of above */ 222 uint32_t sc_rthash_key; /* key for hash */ 223 LIST_HEAD(, bridge_iflist) sc_spanlist; /* span ports list */ 224 struct bstp_state sc_stp; /* STP state */ 225 uint32_t sc_brtexceeded; /* # of cache drops */ 226 struct ifnet *sc_ifaddr; /* member mac copied from */ 227 u_char sc_defaddr[6]; /* Default MAC address */ 228 }; 229 230 static struct mtx bridge_list_mtx; 231 eventhandler_tag bridge_detach_cookie = NULL; 232 233 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD; 234 235 uma_zone_t bridge_rtnode_zone; 236 237 static int bridge_clone_create(struct if_clone *, int, caddr_t); 238 static void bridge_clone_destroy(struct ifnet *); 239 240 static int bridge_ioctl(struct ifnet *, u_long, caddr_t); 241 static void bridge_mutecaps(struct bridge_softc *); 242 static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *, 243 int); 244 static void bridge_ifdetach(void *arg __unused, struct ifnet *); 245 static void bridge_init(void *); 246 static void bridge_dummynet(struct mbuf *, struct ifnet *); 247 static void bridge_stop(struct ifnet *, int); 248 static void bridge_start(struct ifnet *); 249 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *); 250 static int bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *, 251 struct rtentry *); 252 static void bridge_enqueue(struct bridge_softc *, struct ifnet *, 253 struct mbuf *); 254 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int); 255 256 static void bridge_forward(struct bridge_softc *, struct bridge_iflist *, 257 struct mbuf *m); 258 259 static void bridge_timer(void *); 260 261 static void bridge_broadcast(struct bridge_softc *, struct ifnet *, 262 struct mbuf *, int); 263 static void bridge_span(struct bridge_softc *, struct mbuf *); 264 265 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *, 266 uint16_t, struct bridge_iflist *, int, uint8_t); 267 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *, 268 uint16_t); 269 static void bridge_rttrim(struct bridge_softc *); 270 static void bridge_rtage(struct bridge_softc *); 271 static void bridge_rtflush(struct bridge_softc *, int); 272 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *, 273 uint16_t); 274 275 static int bridge_rtable_init(struct bridge_softc *); 276 static void bridge_rtable_fini(struct 
bridge_softc *); 277 278 static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *); 279 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *, 280 const uint8_t *, uint16_t); 281 static int bridge_rtnode_insert(struct bridge_softc *, 282 struct bridge_rtnode *); 283 static void bridge_rtnode_destroy(struct bridge_softc *, 284 struct bridge_rtnode *); 285 static void bridge_rtable_expire(struct ifnet *, int); 286 static void bridge_state_change(struct ifnet *, int); 287 288 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *, 289 const char *name); 290 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *, 291 struct ifnet *ifp); 292 static void bridge_delete_member(struct bridge_softc *, 293 struct bridge_iflist *, int); 294 static void bridge_delete_span(struct bridge_softc *, 295 struct bridge_iflist *); 296 297 static int bridge_ioctl_add(struct bridge_softc *, void *); 298 static int bridge_ioctl_del(struct bridge_softc *, void *); 299 static int bridge_ioctl_gifflags(struct bridge_softc *, void *); 300 static int bridge_ioctl_sifflags(struct bridge_softc *, void *); 301 static int bridge_ioctl_scache(struct bridge_softc *, void *); 302 static int bridge_ioctl_gcache(struct bridge_softc *, void *); 303 static int bridge_ioctl_gifs(struct bridge_softc *, void *); 304 static int bridge_ioctl_rts(struct bridge_softc *, void *); 305 static int bridge_ioctl_saddr(struct bridge_softc *, void *); 306 static int bridge_ioctl_sto(struct bridge_softc *, void *); 307 static int bridge_ioctl_gto(struct bridge_softc *, void *); 308 static int bridge_ioctl_daddr(struct bridge_softc *, void *); 309 static int bridge_ioctl_flush(struct bridge_softc *, void *); 310 static int bridge_ioctl_gpri(struct bridge_softc *, void *); 311 static int bridge_ioctl_spri(struct bridge_softc *, void *); 312 static int bridge_ioctl_ght(struct bridge_softc *, void *); 313 static int bridge_ioctl_sht(struct bridge_softc *, void *); 314 static int bridge_ioctl_gfd(struct bridge_softc *, void *); 315 static int bridge_ioctl_sfd(struct bridge_softc *, void *); 316 static int bridge_ioctl_gma(struct bridge_softc *, void *); 317 static int bridge_ioctl_sma(struct bridge_softc *, void *); 318 static int bridge_ioctl_sifprio(struct bridge_softc *, void *); 319 static int bridge_ioctl_sifcost(struct bridge_softc *, void *); 320 static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *); 321 static int bridge_ioctl_addspan(struct bridge_softc *, void *); 322 static int bridge_ioctl_delspan(struct bridge_softc *, void *); 323 static int bridge_ioctl_gbparam(struct bridge_softc *, void *); 324 static int bridge_ioctl_grte(struct bridge_softc *, void *); 325 static int bridge_ioctl_gifsstp(struct bridge_softc *, void *); 326 static int bridge_ioctl_sproto(struct bridge_softc *, void *); 327 static int bridge_ioctl_stxhc(struct bridge_softc *, void *); 328 static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *, 329 int); 330 static int bridge_ip_checkbasic(struct mbuf **mp); 331 #ifdef INET6 332 static int bridge_ip6_checkbasic(struct mbuf **mp); 333 #endif /* INET6 */ 334 static int bridge_fragment(struct ifnet *, struct mbuf *, 335 struct ether_header *, int, struct llc *); 336 337 /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */ 338 #define VLANTAGOF(_m) \ 339 (_m->m_flags & M_VLANTAG) ? 
EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : 1 340 341 static struct bstp_cb_ops bridge_ops = { 342 .bcb_state = bridge_state_change, 343 .bcb_rtage = bridge_rtable_expire 344 }; 345 346 SYSCTL_DECL(_net_link); 347 static SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge"); 348 349 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */ 350 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */ 351 static int pfil_member = 1; /* run pfil hooks on the member interface */ 352 static int pfil_ipfw = 0; /* layer2 filter with ipfw */ 353 static int pfil_ipfw_arp = 0; /* layer2 filter with ipfw */ 354 static int pfil_local_phys = 0; /* run pfil hooks on the physical interface for 355 locally destined packets */ 356 static int log_stp = 0; /* log STP state changes */ 357 static int bridge_inherit_mac = 0; /* share MAC with first bridge member */ 358 TUNABLE_INT("net.link.bridge.pfil_onlyip", &pfil_onlyip); 359 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW, 360 &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled"); 361 TUNABLE_INT("net.link.bridge.ipfw_arp", &pfil_ipfw_arp); 362 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW, 363 &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2"); 364 TUNABLE_INT("net.link.bridge.pfil_bridge", &pfil_bridge); 365 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW, 366 &pfil_bridge, 0, "Packet filter on the bridge interface"); 367 TUNABLE_INT("net.link.bridge.pfil_member", &pfil_member); 368 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW, 369 &pfil_member, 0, "Packet filter on the member interface"); 370 TUNABLE_INT("net.link.bridge.pfil_local_phys", &pfil_local_phys); 371 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys, CTLFLAG_RW, 372 &pfil_local_phys, 0, 373 "Packet filter on the physical interface for locally destined packets"); 374 TUNABLE_INT("net.link.bridge.log_stp", &log_stp); 375 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW, 376 &log_stp, 0, "Log STP state changes"); 377 TUNABLE_INT("net.link.bridge.inherit_mac", &bridge_inherit_mac); 378 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac, CTLFLAG_RW, 379 &bridge_inherit_mac, 0, 380 "Inherit MAC address from the first bridge member"); 381 382 struct bridge_control { 383 int (*bc_func)(struct bridge_softc *, void *); 384 int bc_argsize; 385 int bc_flags; 386 }; 387 388 #define BC_F_COPYIN 0x01 /* copy arguments in */ 389 #define BC_F_COPYOUT 0x02 /* copy arguments out */ 390 #define BC_F_SUSER 0x04 /* do super-user check */ 391 392 const struct bridge_control bridge_control_table[] = { 393 { bridge_ioctl_add, sizeof(struct ifbreq), 394 BC_F_COPYIN|BC_F_SUSER }, 395 { bridge_ioctl_del, sizeof(struct ifbreq), 396 BC_F_COPYIN|BC_F_SUSER }, 397 398 { bridge_ioctl_gifflags, sizeof(struct ifbreq), 399 BC_F_COPYIN|BC_F_COPYOUT }, 400 { bridge_ioctl_sifflags, sizeof(struct ifbreq), 401 BC_F_COPYIN|BC_F_SUSER }, 402 403 { bridge_ioctl_scache, sizeof(struct ifbrparam), 404 BC_F_COPYIN|BC_F_SUSER }, 405 { bridge_ioctl_gcache, sizeof(struct ifbrparam), 406 BC_F_COPYOUT }, 407 408 { bridge_ioctl_gifs, sizeof(struct ifbifconf), 409 BC_F_COPYIN|BC_F_COPYOUT }, 410 { bridge_ioctl_rts, sizeof(struct ifbaconf), 411 BC_F_COPYIN|BC_F_COPYOUT }, 412 413 { bridge_ioctl_saddr, sizeof(struct ifbareq), 414 BC_F_COPYIN|BC_F_SUSER }, 415 416 { bridge_ioctl_sto, sizeof(struct ifbrparam), 417 BC_F_COPYIN|BC_F_SUSER }, 418 { bridge_ioctl_gto, sizeof(struct ifbrparam), 419 BC_F_COPYOUT }, 
420 421 { bridge_ioctl_daddr, sizeof(struct ifbareq), 422 BC_F_COPYIN|BC_F_SUSER }, 423 424 { bridge_ioctl_flush, sizeof(struct ifbreq), 425 BC_F_COPYIN|BC_F_SUSER }, 426 427 { bridge_ioctl_gpri, sizeof(struct ifbrparam), 428 BC_F_COPYOUT }, 429 { bridge_ioctl_spri, sizeof(struct ifbrparam), 430 BC_F_COPYIN|BC_F_SUSER }, 431 432 { bridge_ioctl_ght, sizeof(struct ifbrparam), 433 BC_F_COPYOUT }, 434 { bridge_ioctl_sht, sizeof(struct ifbrparam), 435 BC_F_COPYIN|BC_F_SUSER }, 436 437 { bridge_ioctl_gfd, sizeof(struct ifbrparam), 438 BC_F_COPYOUT }, 439 { bridge_ioctl_sfd, sizeof(struct ifbrparam), 440 BC_F_COPYIN|BC_F_SUSER }, 441 442 { bridge_ioctl_gma, sizeof(struct ifbrparam), 443 BC_F_COPYOUT }, 444 { bridge_ioctl_sma, sizeof(struct ifbrparam), 445 BC_F_COPYIN|BC_F_SUSER }, 446 447 { bridge_ioctl_sifprio, sizeof(struct ifbreq), 448 BC_F_COPYIN|BC_F_SUSER }, 449 450 { bridge_ioctl_sifcost, sizeof(struct ifbreq), 451 BC_F_COPYIN|BC_F_SUSER }, 452 453 { bridge_ioctl_addspan, sizeof(struct ifbreq), 454 BC_F_COPYIN|BC_F_SUSER }, 455 { bridge_ioctl_delspan, sizeof(struct ifbreq), 456 BC_F_COPYIN|BC_F_SUSER }, 457 458 { bridge_ioctl_gbparam, sizeof(struct ifbropreq), 459 BC_F_COPYOUT }, 460 461 { bridge_ioctl_grte, sizeof(struct ifbrparam), 462 BC_F_COPYOUT }, 463 464 { bridge_ioctl_gifsstp, sizeof(struct ifbpstpconf), 465 BC_F_COPYIN|BC_F_COPYOUT }, 466 467 { bridge_ioctl_sproto, sizeof(struct ifbrparam), 468 BC_F_COPYIN|BC_F_SUSER }, 469 470 { bridge_ioctl_stxhc, sizeof(struct ifbrparam), 471 BC_F_COPYIN|BC_F_SUSER }, 472 473 { bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq), 474 BC_F_COPYIN|BC_F_SUSER }, 475 476 }; 477 const int bridge_control_table_size = 478 sizeof(bridge_control_table) / sizeof(bridge_control_table[0]); 479 480 LIST_HEAD(, bridge_softc) bridge_list; 481 482 IFC_SIMPLE_DECLARE(bridge, 0); 483 484 static int 485 bridge_modevent(module_t mod, int type, void *data) 486 { 487 488 switch (type) { 489 case MOD_LOAD: 490 mtx_init(&bridge_list_mtx, "if_bridge list", NULL, MTX_DEF); 491 if_clone_attach(&bridge_cloner); 492 bridge_rtnode_zone = uma_zcreate("bridge_rtnode", 493 sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL, 494 UMA_ALIGN_PTR, 0); 495 LIST_INIT(&bridge_list); 496 bridge_input_p = bridge_input; 497 bridge_output_p = bridge_output; 498 bridge_dn_p = bridge_dummynet; 499 bridge_detach_cookie = EVENTHANDLER_REGISTER( 500 ifnet_departure_event, bridge_ifdetach, NULL, 501 EVENTHANDLER_PRI_ANY); 502 break; 503 case MOD_UNLOAD: 504 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 505 bridge_detach_cookie); 506 if_clone_detach(&bridge_cloner); 507 uma_zdestroy(bridge_rtnode_zone); 508 bridge_input_p = NULL; 509 bridge_output_p = NULL; 510 bridge_dn_p = NULL; 511 mtx_destroy(&bridge_list_mtx); 512 break; 513 default: 514 return (EOPNOTSUPP); 515 } 516 return (0); 517 } 518 519 static moduledata_t bridge_mod = { 520 "if_bridge", 521 bridge_modevent, 522 0 523 }; 524 525 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 526 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1); 527 528 /* 529 * handler for net.link.bridge.pfil_ipfw 530 */ 531 static int 532 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS) 533 { 534 int enable = pfil_ipfw; 535 int error; 536 537 error = sysctl_handle_int(oidp, &enable, 0, req); 538 enable = (enable) ? 1 : 0; 539 540 if (enable != pfil_ipfw) { 541 pfil_ipfw = enable; 542 543 /* 544 * Disable pfil so that ipfw doesn't run twice; if the user 545 * really wants both, then they can re-enable pfil_bridge and/or 546 * pfil_member.
Also allow non-ip packets as ipfw can filter by 547 * layer2 type. 548 */ 549 if (pfil_ipfw) { 550 pfil_onlyip = 0; 551 pfil_bridge = 0; 552 pfil_member = 0; 553 } 554 } 555 556 return (error); 557 } 558 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW, 559 &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW"); 560 561 /* 562 * bridge_clone_create: 563 * 564 * Create a new bridge instance. 565 */ 566 static int 567 bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params) 568 { 569 struct bridge_softc *sc, *sc2; 570 struct ifnet *bifp, *ifp; 571 int fb, retry; 572 unsigned long hostid; 573 574 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO); 575 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 576 if (ifp == NULL) { 577 free(sc, M_DEVBUF); 578 return (ENOSPC); 579 } 580 581 BRIDGE_LOCK_INIT(sc); 582 sc->sc_brtmax = BRIDGE_RTABLE_MAX; 583 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT; 584 585 /* Initialize our routing table. */ 586 bridge_rtable_init(sc); 587 588 callout_init_mtx(&sc->sc_brcallout, &sc->sc_mtx, 0); 589 590 LIST_INIT(&sc->sc_iflist); 591 LIST_INIT(&sc->sc_spanlist); 592 593 ifp->if_softc = sc; 594 if_initname(ifp, ifc->ifc_name, unit); 595 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 596 ifp->if_ioctl = bridge_ioctl; 597 ifp->if_start = bridge_start; 598 ifp->if_init = bridge_init; 599 ifp->if_type = IFT_BRIDGE; 600 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 601 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 602 IFQ_SET_READY(&ifp->if_snd); 603 604 /* 605 * Generate an ethernet address with a locally administered address. 606 * 607 * Since we are using random ethernet addresses for the bridge, it is 608 * possible that we might have address collisions, so make sure that 609 * this hardware address isn't already in use on another bridge. 610 * The first try uses the hostid and falls back to arc4rand(). 611 */ 612 fb = 0; 613 getcredhostid(curthread->td_ucred, &hostid); 614 for (retry = 1; retry != 0;) { 615 if (fb || hostid == 0) { 616 arc4rand(sc->sc_defaddr, ETHER_ADDR_LEN, 1); 617 sc->sc_defaddr[0] &= ~1;/* clear multicast bit */ 618 sc->sc_defaddr[0] |= 2; /* set the LAA bit */ 619 } else { 620 sc->sc_defaddr[0] = 0x2; 621 sc->sc_defaddr[1] = (hostid >> 24) & 0xff; 622 sc->sc_defaddr[2] = (hostid >> 16) & 0xff; 623 sc->sc_defaddr[3] = (hostid >> 8 ) & 0xff; 624 sc->sc_defaddr[4] = hostid & 0xff; 625 sc->sc_defaddr[5] = ifp->if_dunit & 0xff; 626 } 627 628 fb = 1; 629 retry = 0; 630 mtx_lock(&bridge_list_mtx); 631 LIST_FOREACH(sc2, &bridge_list, sc_list) { 632 bifp = sc2->sc_ifp; 633 if (memcmp(sc->sc_defaddr, 634 IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) 635 retry = 1; 636 } 637 mtx_unlock(&bridge_list_mtx); 638 } 639 640 bstp_attach(&sc->sc_stp, &bridge_ops); 641 ether_ifattach(ifp, sc->sc_defaddr); 642 /* Now undo some of the damage... */ 643 ifp->if_baudrate = 0; 644 ifp->if_type = IFT_BRIDGE; 645 646 mtx_lock(&bridge_list_mtx); 647 LIST_INSERT_HEAD(&bridge_list, sc, sc_list); 648 mtx_unlock(&bridge_list_mtx); 649 650 return (0); 651 } 652 653 /* 654 * bridge_clone_destroy: 655 * 656 * Destroy a bridge instance. 
657 */ 658 static void 659 bridge_clone_destroy(struct ifnet *ifp) 660 { 661 struct bridge_softc *sc = ifp->if_softc; 662 struct bridge_iflist *bif; 663 664 BRIDGE_LOCK(sc); 665 666 bridge_stop(ifp, 1); 667 ifp->if_flags &= ~IFF_UP; 668 669 while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL) 670 bridge_delete_member(sc, bif, 0); 671 672 while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL) { 673 bridge_delete_span(sc, bif); 674 } 675 676 BRIDGE_UNLOCK(sc); 677 678 callout_drain(&sc->sc_brcallout); 679 680 mtx_lock(&bridge_list_mtx); 681 LIST_REMOVE(sc, sc_list); 682 mtx_unlock(&bridge_list_mtx); 683 684 bstp_detach(&sc->sc_stp); 685 ether_ifdetach(ifp); 686 if_free(ifp); 687 688 /* Tear down the routing table. */ 689 bridge_rtable_fini(sc); 690 691 BRIDGE_LOCK_DESTROY(sc); 692 free(sc, M_DEVBUF); 693 } 694 695 /* 696 * bridge_ioctl: 697 * 698 * Handle a control request from the operator. 699 */ 700 static int 701 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 702 { 703 struct bridge_softc *sc = ifp->if_softc; 704 struct ifreq *ifr = (struct ifreq *)data; 705 struct bridge_iflist *bif; 706 struct thread *td = curthread; 707 union { 708 struct ifbreq ifbreq; 709 struct ifbifconf ifbifconf; 710 struct ifbareq ifbareq; 711 struct ifbaconf ifbaconf; 712 struct ifbrparam ifbrparam; 713 struct ifbropreq ifbropreq; 714 } args; 715 struct ifdrv *ifd = (struct ifdrv *) data; 716 const struct bridge_control *bc; 717 int error = 0; 718 719 switch (cmd) { 720 721 case SIOCADDMULTI: 722 case SIOCDELMULTI: 723 break; 724 725 case SIOCGDRVSPEC: 726 case SIOCSDRVSPEC: 727 if (ifd->ifd_cmd >= bridge_control_table_size) { 728 error = EINVAL; 729 break; 730 } 731 bc = &bridge_control_table[ifd->ifd_cmd]; 732 733 if (cmd == SIOCGDRVSPEC && 734 (bc->bc_flags & BC_F_COPYOUT) == 0) { 735 error = EINVAL; 736 break; 737 } 738 else if (cmd == SIOCSDRVSPEC && 739 (bc->bc_flags & BC_F_COPYOUT) != 0) { 740 error = EINVAL; 741 break; 742 } 743 744 if (bc->bc_flags & BC_F_SUSER) { 745 error = priv_check(td, PRIV_NET_BRIDGE); 746 if (error) 747 break; 748 } 749 750 if (ifd->ifd_len != bc->bc_argsize || 751 ifd->ifd_len > sizeof(args)) { 752 error = EINVAL; 753 break; 754 } 755 756 bzero(&args, sizeof(args)); 757 if (bc->bc_flags & BC_F_COPYIN) { 758 error = copyin(ifd->ifd_data, &args, ifd->ifd_len); 759 if (error) 760 break; 761 } 762 763 BRIDGE_LOCK(sc); 764 error = (*bc->bc_func)(sc, &args); 765 BRIDGE_UNLOCK(sc); 766 if (error) 767 break; 768 769 if (bc->bc_flags & BC_F_COPYOUT) 770 error = copyout(&args, ifd->ifd_data, ifd->ifd_len); 771 772 break; 773 774 case SIOCSIFFLAGS: 775 if (!(ifp->if_flags & IFF_UP) && 776 (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 777 /* 778 * If interface is marked down and it is running, 779 * then stop and disable it. 780 */ 781 BRIDGE_LOCK(sc); 782 bridge_stop(ifp, 1); 783 BRIDGE_UNLOCK(sc); 784 } else if ((ifp->if_flags & IFF_UP) && 785 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 786 /* 787 * If interface is marked up and it is stopped, then 788 * start it. 
789 */ 790 (*ifp->if_init)(sc); 791 } 792 break; 793 794 case SIOCSIFMTU: 795 if (ifr->ifr_mtu < 576) { 796 error = EINVAL; 797 break; 798 } 799 if (LIST_EMPTY(&sc->sc_iflist)) { 800 sc->sc_ifp->if_mtu = ifr->ifr_mtu; 801 break; 802 } 803 BRIDGE_LOCK(sc); 804 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 805 if (bif->bif_ifp->if_mtu != ifr->ifr_mtu) { 806 log(LOG_NOTICE, "%s: invalid MTU: %lu(%s)" 807 " != %d\n", sc->sc_ifp->if_xname, 808 bif->bif_ifp->if_mtu, 809 bif->bif_ifp->if_xname, ifr->ifr_mtu); 810 error = EINVAL; 811 break; 812 } 813 } 814 if (!error) 815 sc->sc_ifp->if_mtu = ifr->ifr_mtu; 816 BRIDGE_UNLOCK(sc); 817 break; 818 default: 819 /* 820 * drop the lock as ether_ioctl() will call bridge_start() and 821 * cause the lock to be recursed. 822 */ 823 error = ether_ioctl(ifp, cmd, data); 824 break; 825 } 826 827 return (error); 828 } 829 830 /* 831 * bridge_mutecaps: 832 * 833 * Clear or restore unwanted capabilities on the member interface. 834 */ 835 static void 836 bridge_mutecaps(struct bridge_softc *sc) 837 { 838 struct bridge_iflist *bif; 839 int enabled, mask; 840 841 /* Initial bitmask of capabilities to test */ 842 mask = BRIDGE_IFCAPS_MASK; 843 844 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 845 /* Every member must support it or it's disabled */ 846 mask &= bif->bif_savedcaps; 847 } 848 849 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 850 enabled = bif->bif_ifp->if_capenable; 851 enabled &= ~BRIDGE_IFCAPS_STRIP; 852 /* strip off mask bits and enable them again if allowed */ 853 enabled &= ~BRIDGE_IFCAPS_MASK; 854 enabled |= mask; 855 bridge_set_ifcap(sc, bif, enabled); 856 } 857 858 } 859 860 static void 861 bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set) 862 { 863 struct ifnet *ifp = bif->bif_ifp; 864 struct ifreq ifr; 865 int error; 866 867 bzero(&ifr, sizeof(ifr)); 868 ifr.ifr_reqcap = set; 869 870 if (ifp->if_capenable != set) { 871 error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr); 872 if (error) 873 if_printf(sc->sc_ifp, 874 "error setting interface capabilities on %s\n", 875 ifp->if_xname); 876 } 877 } 878 879 /* 880 * bridge_lookup_member: 881 * 882 * Lookup a bridge member interface. 883 */ 884 static struct bridge_iflist * 885 bridge_lookup_member(struct bridge_softc *sc, const char *name) 886 { 887 struct bridge_iflist *bif; 888 struct ifnet *ifp; 889 890 BRIDGE_LOCK_ASSERT(sc); 891 892 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 893 ifp = bif->bif_ifp; 894 if (strcmp(ifp->if_xname, name) == 0) 895 return (bif); 896 } 897 898 return (NULL); 899 } 900 901 /* 902 * bridge_lookup_member_if: 903 * 904 * Lookup a bridge member interface by ifnet*. 905 */ 906 static struct bridge_iflist * 907 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp) 908 { 909 struct bridge_iflist *bif; 910 911 BRIDGE_LOCK_ASSERT(sc); 912 913 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 914 if (bif->bif_ifp == member_ifp) 915 return (bif); 916 } 917 918 return (NULL); 919 } 920 921 /* 922 * bridge_delete_member: 923 * 924 * Delete the specified member interface.
925 */ 926 static void 927 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif, 928 int gone) 929 { 930 struct ifnet *ifs = bif->bif_ifp; 931 struct ifnet *fif = NULL; 932 933 BRIDGE_LOCK_ASSERT(sc); 934 935 if (bif->bif_flags & IFBIF_STP) 936 bstp_disable(&bif->bif_stp); 937 938 ifs->if_bridge = NULL; 939 BRIDGE_XLOCK(sc); 940 LIST_REMOVE(bif, bif_next); 941 BRIDGE_XDROP(sc); 942 943 /* 944 * If removing the interface that gave the bridge its mac address, set 945 * the mac address of the bridge to the address of the next member, or 946 * to its default address if no members are left. 947 */ 948 if (bridge_inherit_mac && sc->sc_ifaddr == ifs) { 949 if (LIST_EMPTY(&sc->sc_iflist)) { 950 bcopy(sc->sc_defaddr, 951 IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); 952 sc->sc_ifaddr = NULL; 953 } else { 954 fif = LIST_FIRST(&sc->sc_iflist)->bif_ifp; 955 bcopy(IF_LLADDR(fif), 956 IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); 957 sc->sc_ifaddr = fif; 958 } 959 EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp); 960 } 961 962 bridge_mutecaps(sc); /* recalculate now that this interface is removed */ 963 bridge_rtdelete(sc, ifs, IFBF_FLUSHALL); 964 KASSERT(bif->bif_addrcnt == 0, 965 ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt)); 966 967 BRIDGE_UNLOCK(sc); 968 if (!gone) { 969 switch (ifs->if_type) { 970 case IFT_ETHER: 971 case IFT_L2VLAN: 972 /* 973 * Take the interface out of promiscuous mode. 974 */ 975 (void) ifpromisc(ifs, 0); 976 break; 977 978 case IFT_GIF: 979 break; 980 981 default: 982 #ifdef DIAGNOSTIC 983 panic("bridge_delete_member: impossible"); 984 #endif 985 break; 986 } 987 /* re-enable any interface capabilities */ 988 bridge_set_ifcap(sc, bif, bif->bif_savedcaps); 989 } 990 bstp_destroy(&bif->bif_stp); /* prepare to free */ 991 BRIDGE_LOCK(sc); 992 free(bif, M_DEVBUF); 993 } 994 995 /* 996 * bridge_delete_span: 997 * 998 * Delete the specified span interface. 999 */ 1000 static void 1001 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif) 1002 { 1003 BRIDGE_LOCK_ASSERT(sc); 1004 1005 KASSERT(bif->bif_ifp->if_bridge == NULL, 1006 ("%s: not a span interface", __func__)); 1007 1008 LIST_REMOVE(bif, bif_next); 1009 free(bif, M_DEVBUF); 1010 } 1011 1012 static int 1013 bridge_ioctl_add(struct bridge_softc *sc, void *arg) 1014 { 1015 struct ifbreq *req = arg; 1016 struct bridge_iflist *bif = NULL; 1017 struct ifnet *ifs; 1018 int error = 0; 1019 1020 ifs = ifunit(req->ifbr_ifsname); 1021 if (ifs == NULL) 1022 return (ENOENT); 1023 if (ifs->if_ioctl == NULL) /* must be supported */ 1024 return (EINVAL); 1025 1026 /* If it's in the span list, it can't be a member.
*/ 1027 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1028 if (ifs == bif->bif_ifp) 1029 return (EBUSY); 1030 1031 if (ifs->if_bridge == sc) 1032 return (EEXIST); 1033 1034 if (ifs->if_bridge != NULL) 1035 return (EBUSY); 1036 1037 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO); 1038 if (bif == NULL) 1039 return (ENOMEM); 1040 1041 bif->bif_ifp = ifs; 1042 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER; 1043 bif->bif_savedcaps = ifs->if_capenable; 1044 1045 switch (ifs->if_type) { 1046 case IFT_ETHER: 1047 case IFT_L2VLAN: 1048 case IFT_GIF: 1049 /* permitted interface types */ 1050 break; 1051 default: 1052 error = EINVAL; 1053 goto out; 1054 } 1055 1056 /* Allow the first Ethernet member to define the MTU */ 1057 if (LIST_EMPTY(&sc->sc_iflist)) 1058 sc->sc_ifp->if_mtu = ifs->if_mtu; 1059 else if (sc->sc_ifp->if_mtu != ifs->if_mtu) { 1060 if_printf(sc->sc_ifp, "invalid MTU: %lu(%s) != %lu\n", 1061 ifs->if_mtu, ifs->if_xname, sc->sc_ifp->if_mtu); 1062 error = EINVAL; 1063 goto out; 1064 } 1065 1066 /* 1067 * Assign the interface's MAC address to the bridge if it's the first 1068 * member and the MAC address of the bridge has not been changed from 1069 * the default randomly generated one. 1070 */ 1071 if (bridge_inherit_mac && LIST_EMPTY(&sc->sc_iflist) && 1072 !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN)) { 1073 bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); 1074 sc->sc_ifaddr = ifs; 1075 EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp); 1076 } 1077 1078 ifs->if_bridge = sc; 1079 bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp); 1080 /* 1081 * XXX: XLOCK HERE!?! 1082 * 1083 * NOTE: insert_***HEAD*** should be safe for the traversals. 1084 */ 1085 LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next); 1086 1087 /* Set interface capabilities to the intersection set of all members */ 1088 bridge_mutecaps(sc); 1089 1090 switch (ifs->if_type) { 1091 case IFT_ETHER: 1092 case IFT_L2VLAN: 1093 /* 1094 * Place the interface into promiscuous mode. 
1095 */ 1096 BRIDGE_UNLOCK(sc); 1097 error = ifpromisc(ifs, 1); 1098 BRIDGE_LOCK(sc); 1099 break; 1100 } 1101 if (error) 1102 bridge_delete_member(sc, bif, 0); 1103 out: 1104 if (error) { 1105 if (bif != NULL) 1106 free(bif, M_DEVBUF); 1107 } 1108 return (error); 1109 } 1110 1111 static int 1112 bridge_ioctl_del(struct bridge_softc *sc, void *arg) 1113 { 1114 struct ifbreq *req = arg; 1115 struct bridge_iflist *bif; 1116 1117 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1118 if (bif == NULL) 1119 return (ENOENT); 1120 1121 bridge_delete_member(sc, bif, 0); 1122 1123 return (0); 1124 } 1125 1126 static int 1127 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg) 1128 { 1129 struct ifbreq *req = arg; 1130 struct bridge_iflist *bif; 1131 struct bstp_port *bp; 1132 1133 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1134 if (bif == NULL) 1135 return (ENOENT); 1136 1137 bp = &bif->bif_stp; 1138 req->ifbr_ifsflags = bif->bif_flags; 1139 req->ifbr_state = bp->bp_state; 1140 req->ifbr_priority = bp->bp_priority; 1141 req->ifbr_path_cost = bp->bp_path_cost; 1142 req->ifbr_portno = bif->bif_ifp->if_index & 0xfff; 1143 req->ifbr_proto = bp->bp_protover; 1144 req->ifbr_role = bp->bp_role; 1145 req->ifbr_stpflags = bp->bp_flags; 1146 req->ifbr_addrcnt = bif->bif_addrcnt; 1147 req->ifbr_addrmax = bif->bif_addrmax; 1148 req->ifbr_addrexceeded = bif->bif_addrexceeded; 1149 1150 /* Copy STP state options as flags */ 1151 if (bp->bp_operedge) 1152 req->ifbr_ifsflags |= IFBIF_BSTP_EDGE; 1153 if (bp->bp_flags & BSTP_PORT_AUTOEDGE) 1154 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE; 1155 if (bp->bp_ptp_link) 1156 req->ifbr_ifsflags |= IFBIF_BSTP_PTP; 1157 if (bp->bp_flags & BSTP_PORT_AUTOPTP) 1158 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP; 1159 if (bp->bp_flags & BSTP_PORT_ADMEDGE) 1160 req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE; 1161 if (bp->bp_flags & BSTP_PORT_ADMCOST) 1162 req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST; 1163 return (0); 1164 } 1165 1166 static int 1167 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg) 1168 { 1169 struct ifbreq *req = arg; 1170 struct bridge_iflist *bif; 1171 struct bstp_port *bp; 1172 int error; 1173 1174 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1175 if (bif == NULL) 1176 return (ENOENT); 1177 bp = &bif->bif_stp; 1178 1179 if (req->ifbr_ifsflags & IFBIF_SPAN) 1180 /* SPAN is readonly */ 1181 return (EINVAL); 1182 1183 if (req->ifbr_ifsflags & IFBIF_STP) { 1184 if ((bif->bif_flags & IFBIF_STP) == 0) { 1185 error = bstp_enable(&bif->bif_stp); 1186 if (error) 1187 return (error); 1188 } 1189 } else { 1190 if ((bif->bif_flags & IFBIF_STP) != 0) 1191 bstp_disable(&bif->bif_stp); 1192 } 1193 1194 /* Pass on STP flags */ 1195 bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0); 1196 bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0); 1197 bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0); 1198 bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 
1 : 0); 1199 1200 /* Save the bits relating to the bridge */ 1201 bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK; 1202 1203 return (0); 1204 } 1205 1206 static int 1207 bridge_ioctl_scache(struct bridge_softc *sc, void *arg) 1208 { 1209 struct ifbrparam *param = arg; 1210 1211 sc->sc_brtmax = param->ifbrp_csize; 1212 bridge_rttrim(sc); 1213 1214 return (0); 1215 } 1216 1217 static int 1218 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg) 1219 { 1220 struct ifbrparam *param = arg; 1221 1222 param->ifbrp_csize = sc->sc_brtmax; 1223 1224 return (0); 1225 } 1226 1227 static int 1228 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg) 1229 { 1230 struct ifbifconf *bifc = arg; 1231 struct bridge_iflist *bif; 1232 struct ifbreq breq; 1233 char *buf, *outbuf; 1234 int count, buflen, len, error = 0; 1235 1236 count = 0; 1237 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) 1238 count++; 1239 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1240 count++; 1241 1242 buflen = sizeof(breq) * count; 1243 if (bifc->ifbic_len == 0) { 1244 bifc->ifbic_len = buflen; 1245 return (0); 1246 } 1247 BRIDGE_UNLOCK(sc); 1248 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1249 BRIDGE_LOCK(sc); 1250 1251 count = 0; 1252 buf = outbuf; 1253 len = min(bifc->ifbic_len, buflen); 1254 bzero(&breq, sizeof(breq)); 1255 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1256 if (len < sizeof(breq)) 1257 break; 1258 1259 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname, 1260 sizeof(breq.ifbr_ifsname)); 1261 /* Fill in the ifbreq structure */ 1262 error = bridge_ioctl_gifflags(sc, &breq); 1263 if (error) 1264 break; 1265 memcpy(buf, &breq, sizeof(breq)); 1266 count++; 1267 buf += sizeof(breq); 1268 len -= sizeof(breq); 1269 } 1270 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) { 1271 if (len < sizeof(breq)) 1272 break; 1273 1274 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname, 1275 sizeof(breq.ifbr_ifsname)); 1276 breq.ifbr_ifsflags = bif->bif_flags; 1277 breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff; 1278 memcpy(buf, &breq, sizeof(breq)); 1279 count++; 1280 buf += sizeof(breq); 1281 len -= sizeof(breq); 1282 } 1283 1284 BRIDGE_UNLOCK(sc); 1285 bifc->ifbic_len = sizeof(breq) * count; 1286 error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); 1287 BRIDGE_LOCK(sc); 1288 free(outbuf, M_TEMP); 1289 return (error); 1290 } 1291 1292 static int 1293 bridge_ioctl_rts(struct bridge_softc *sc, void *arg) 1294 { 1295 struct ifbaconf *bac = arg; 1296 struct bridge_rtnode *brt; 1297 struct ifbareq bareq; 1298 char *buf, *outbuf; 1299 int count, buflen, len, error = 0; 1300 1301 if (bac->ifbac_len == 0) 1302 return (0); 1303 1304 count = 0; 1305 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) 1306 count++; 1307 buflen = sizeof(bareq) * count; 1308 1309 BRIDGE_UNLOCK(sc); 1310 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1311 BRIDGE_LOCK(sc); 1312 1313 count = 0; 1314 buf = outbuf; 1315 len = min(bac->ifbac_len, buflen); 1316 bzero(&bareq, sizeof(bareq)); 1317 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { 1318 if (len < sizeof(bareq)) 1319 goto out; 1320 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname, 1321 sizeof(bareq.ifba_ifsname)); 1322 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr)); 1323 bareq.ifba_vlan = brt->brt_vlan; 1324 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC && 1325 time_uptime < brt->brt_expire) 1326 bareq.ifba_expire = brt->brt_expire - time_uptime; 1327 else 1328 bareq.ifba_expire = 0; 1329 bareq.ifba_flags = brt->brt_flags; 1330 1331 memcpy(buf, &bareq, sizeof(bareq)); 1332 
count++; 1333 buf += sizeof(bareq); 1334 len -= sizeof(bareq); 1335 } 1336 out: 1337 BRIDGE_UNLOCK(sc); 1338 bac->ifbac_len = sizeof(bareq) * count; 1339 error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); 1340 BRIDGE_LOCK(sc); 1341 free(outbuf, M_TEMP); 1342 return (error); 1343 } 1344 1345 static int 1346 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg) 1347 { 1348 struct ifbareq *req = arg; 1349 struct bridge_iflist *bif; 1350 int error; 1351 1352 bif = bridge_lookup_member(sc, req->ifba_ifsname); 1353 if (bif == NULL) 1354 return (ENOENT); 1355 1356 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1, 1357 req->ifba_flags); 1358 1359 return (error); 1360 } 1361 1362 static int 1363 bridge_ioctl_sto(struct bridge_softc *sc, void *arg) 1364 { 1365 struct ifbrparam *param = arg; 1366 1367 sc->sc_brttimeout = param->ifbrp_ctime; 1368 return (0); 1369 } 1370 1371 static int 1372 bridge_ioctl_gto(struct bridge_softc *sc, void *arg) 1373 { 1374 struct ifbrparam *param = arg; 1375 1376 param->ifbrp_ctime = sc->sc_brttimeout; 1377 return (0); 1378 } 1379 1380 static int 1381 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg) 1382 { 1383 struct ifbareq *req = arg; 1384 1385 return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan)); 1386 } 1387 1388 static int 1389 bridge_ioctl_flush(struct bridge_softc *sc, void *arg) 1390 { 1391 struct ifbreq *req = arg; 1392 1393 bridge_rtflush(sc, req->ifbr_ifsflags); 1394 return (0); 1395 } 1396 1397 static int 1398 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg) 1399 { 1400 struct ifbrparam *param = arg; 1401 struct bstp_state *bs = &sc->sc_stp; 1402 1403 param->ifbrp_prio = bs->bs_bridge_priority; 1404 return (0); 1405 } 1406 1407 static int 1408 bridge_ioctl_spri(struct bridge_softc *sc, void *arg) 1409 { 1410 struct ifbrparam *param = arg; 1411 1412 return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio)); 1413 } 1414 1415 static int 1416 bridge_ioctl_ght(struct bridge_softc *sc, void *arg) 1417 { 1418 struct ifbrparam *param = arg; 1419 struct bstp_state *bs = &sc->sc_stp; 1420 1421 param->ifbrp_hellotime = bs->bs_bridge_htime >> 8; 1422 return (0); 1423 } 1424 1425 static int 1426 bridge_ioctl_sht(struct bridge_softc *sc, void *arg) 1427 { 1428 struct ifbrparam *param = arg; 1429 1430 return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime)); 1431 } 1432 1433 static int 1434 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg) 1435 { 1436 struct ifbrparam *param = arg; 1437 struct bstp_state *bs = &sc->sc_stp; 1438 1439 param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8; 1440 return (0); 1441 } 1442 1443 static int 1444 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg) 1445 { 1446 struct ifbrparam *param = arg; 1447 1448 return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay)); 1449 } 1450 1451 static int 1452 bridge_ioctl_gma(struct bridge_softc *sc, void *arg) 1453 { 1454 struct ifbrparam *param = arg; 1455 struct bstp_state *bs = &sc->sc_stp; 1456 1457 param->ifbrp_maxage = bs->bs_bridge_max_age >> 8; 1458 return (0); 1459 } 1460 1461 static int 1462 bridge_ioctl_sma(struct bridge_softc *sc, void *arg) 1463 { 1464 struct ifbrparam *param = arg; 1465 1466 return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage)); 1467 } 1468 1469 static int 1470 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg) 1471 { 1472 struct ifbreq *req = arg; 1473 struct bridge_iflist *bif; 1474 1475 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1476 if (bif == NULL) 1477 return (ENOENT); 1478 1479 return 
(bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority)); 1480 } 1481 1482 static int 1483 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg) 1484 { 1485 struct ifbreq *req = arg; 1486 struct bridge_iflist *bif; 1487 1488 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1489 if (bif == NULL) 1490 return (ENOENT); 1491 1492 return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost)); 1493 } 1494 1495 static int 1496 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg) 1497 { 1498 struct ifbreq *req = arg; 1499 struct bridge_iflist *bif; 1500 1501 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1502 if (bif == NULL) 1503 return (ENOENT); 1504 1505 bif->bif_addrmax = req->ifbr_addrmax; 1506 return (0); 1507 } 1508 1509 static int 1510 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg) 1511 { 1512 struct ifbreq *req = arg; 1513 struct bridge_iflist *bif = NULL; 1514 struct ifnet *ifs; 1515 1516 ifs = ifunit(req->ifbr_ifsname); 1517 if (ifs == NULL) 1518 return (ENOENT); 1519 1520 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1521 if (ifs == bif->bif_ifp) 1522 return (EBUSY); 1523 1524 if (ifs->if_bridge != NULL) 1525 return (EBUSY); 1526 1527 switch (ifs->if_type) { 1528 case IFT_ETHER: 1529 case IFT_GIF: 1530 case IFT_L2VLAN: 1531 break; 1532 default: 1533 return (EINVAL); 1534 } 1535 1536 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO); 1537 if (bif == NULL) 1538 return (ENOMEM); 1539 1540 bif->bif_ifp = ifs; 1541 bif->bif_flags = IFBIF_SPAN; 1542 1543 LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next); 1544 1545 return (0); 1546 } 1547 1548 static int 1549 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg) 1550 { 1551 struct ifbreq *req = arg; 1552 struct bridge_iflist *bif; 1553 struct ifnet *ifs; 1554 1555 ifs = ifunit(req->ifbr_ifsname); 1556 if (ifs == NULL) 1557 return (ENOENT); 1558 1559 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1560 if (ifs == bif->bif_ifp) 1561 break; 1562 1563 if (bif == NULL) 1564 return (ENOENT); 1565 1566 bridge_delete_span(sc, bif); 1567 1568 return (0); 1569 } 1570 1571 static int 1572 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg) 1573 { 1574 struct ifbropreq *req = arg; 1575 struct bstp_state *bs = &sc->sc_stp; 1576 struct bstp_port *root_port; 1577 1578 req->ifbop_maxage = bs->bs_bridge_max_age >> 8; 1579 req->ifbop_hellotime = bs->bs_bridge_htime >> 8; 1580 req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8; 1581 1582 root_port = bs->bs_root_port; 1583 if (root_port == NULL) 1584 req->ifbop_root_port = 0; 1585 else 1586 req->ifbop_root_port = root_port->bp_ifp->if_index; 1587 1588 req->ifbop_holdcount = bs->bs_txholdcount; 1589 req->ifbop_priority = bs->bs_bridge_priority; 1590 req->ifbop_protocol = bs->bs_protover; 1591 req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; 1592 req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; 1593 req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; 1594 req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; 1595 req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; 1596 req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; 1597 1598 return (0); 1599 } 1600 1601 static int 1602 bridge_ioctl_grte(struct bridge_softc *sc, void *arg) 1603 { 1604 struct ifbrparam *param = arg; 1605 1606 param->ifbrp_cexceeded = sc->sc_brtexceeded; 1607 return (0); 1608 } 1609 1610 static int 1611 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg) 1612 { 1613 struct ifbpstpconf *bifstp = arg; 1614 struct bridge_iflist *bif; 1615 struct bstp_port *bp; 1616 
struct ifbpstpreq bpreq; 1617 char *buf, *outbuf; 1618 int count, buflen, len, error = 0; 1619 1620 count = 0; 1621 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1622 if ((bif->bif_flags & IFBIF_STP) != 0) 1623 count++; 1624 } 1625 1626 buflen = sizeof(bpreq) * count; 1627 if (bifstp->ifbpstp_len == 0) { 1628 bifstp->ifbpstp_len = buflen; 1629 return (0); 1630 } 1631 1632 BRIDGE_UNLOCK(sc); 1633 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1634 BRIDGE_LOCK(sc); 1635 1636 count = 0; 1637 buf = outbuf; 1638 len = min(bifstp->ifbpstp_len, buflen); 1639 bzero(&bpreq, sizeof(bpreq)); 1640 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1641 if (len < sizeof(bpreq)) 1642 break; 1643 1644 if ((bif->bif_flags & IFBIF_STP) == 0) 1645 continue; 1646 1647 bp = &bif->bif_stp; 1648 bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff; 1649 bpreq.ifbp_fwd_trans = bp->bp_forward_transitions; 1650 bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost; 1651 bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id; 1652 bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; 1653 bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id; 1654 1655 memcpy(buf, &bpreq, sizeof(bpreq)); 1656 count++; 1657 buf += sizeof(bpreq); 1658 len -= sizeof(bpreq); 1659 } 1660 1661 BRIDGE_UNLOCK(sc); 1662 bifstp->ifbpstp_len = sizeof(bpreq) * count; 1663 error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); 1664 BRIDGE_LOCK(sc); 1665 free(outbuf, M_TEMP); 1666 return (error); 1667 } 1668 1669 static int 1670 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg) 1671 { 1672 struct ifbrparam *param = arg; 1673 1674 return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto)); 1675 } 1676 1677 static int 1678 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg) 1679 { 1680 struct ifbrparam *param = arg; 1681 1682 return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc)); 1683 } 1684 1685 /* 1686 * bridge_ifdetach: 1687 * 1688 * Detach an interface from a bridge. Called when a member 1689 * interface is detaching. 1690 */ 1691 static void 1692 bridge_ifdetach(void *arg __unused, struct ifnet *ifp) 1693 { 1694 struct bridge_softc *sc = ifp->if_bridge; 1695 struct bridge_iflist *bif; 1696 1697 /* Check if the interface is a bridge member */ 1698 if (sc != NULL) { 1699 BRIDGE_LOCK(sc); 1700 1701 bif = bridge_lookup_member_if(sc, ifp); 1702 if (bif != NULL) 1703 bridge_delete_member(sc, bif, 1); 1704 1705 BRIDGE_UNLOCK(sc); 1706 return; 1707 } 1708 1709 /* Check if the interface is a span port */ 1710 mtx_lock(&bridge_list_mtx); 1711 LIST_FOREACH(sc, &bridge_list, sc_list) { 1712 BRIDGE_LOCK(sc); 1713 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1714 if (ifp == bif->bif_ifp) { 1715 bridge_delete_span(sc, bif); 1716 break; 1717 } 1718 1719 BRIDGE_UNLOCK(sc); 1720 } 1721 mtx_unlock(&bridge_list_mtx); 1722 } 1723 1724 /* 1725 * bridge_init: 1726 * 1727 * Initialize a bridge interface. 1728 */ 1729 static void 1730 bridge_init(void *xsc) 1731 { 1732 struct bridge_softc *sc = (struct bridge_softc *)xsc; 1733 struct ifnet *ifp = sc->sc_ifp; 1734 1735 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1736 return; 1737 1738 BRIDGE_LOCK(sc); 1739 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz, 1740 bridge_timer, sc); 1741 1742 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1743 bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */ 1744 1745 BRIDGE_UNLOCK(sc); 1746 } 1747 1748 /* 1749 * bridge_stop: 1750 * 1751 * Stop the bridge interface. 
1752 */ 1753 static void 1754 bridge_stop(struct ifnet *ifp, int disable) 1755 { 1756 struct bridge_softc *sc = ifp->if_softc; 1757 1758 BRIDGE_LOCK_ASSERT(sc); 1759 1760 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1761 return; 1762 1763 callout_stop(&sc->sc_brcallout); 1764 bstp_stop(&sc->sc_stp); 1765 1766 bridge_rtflush(sc, IFBF_FLUSHDYN); 1767 1768 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1769 } 1770 1771 /* 1772 * bridge_enqueue: 1773 * 1774 * Enqueue a packet on a bridge member interface. 1775 * 1776 */ 1777 static void 1778 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m) 1779 { 1780 int len, err = 0; 1781 short mflags; 1782 struct mbuf *m0; 1783 1784 len = m->m_pkthdr.len; 1785 mflags = m->m_flags; 1786 1787 /* We may be sending a fragment so traverse the mbuf */ 1788 for (; m; m = m0) { 1789 m0 = m->m_nextpkt; 1790 m->m_nextpkt = NULL; 1791 1792 /* 1793 * If the underlying interface cannot do VLAN tag insertion itself, 1794 * then attach a packet tag that holds it. 1795 */ 1796 if ((m->m_flags & M_VLANTAG) && 1797 (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) { 1798 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 1799 if (m == NULL) { 1800 if_printf(dst_ifp, 1801 "unable to prepend VLAN header\n"); 1802 dst_ifp->if_oerrors++; 1803 continue; 1804 } 1805 m->m_flags &= ~M_VLANTAG; 1806 } 1807 1808 if (err == 0) 1809 dst_ifp->if_transmit(dst_ifp, m); 1810 } 1811 1812 if (err == 0) { 1813 sc->sc_ifp->if_opackets++; 1814 sc->sc_ifp->if_obytes += len; 1815 if (mflags & M_MCAST) 1816 sc->sc_ifp->if_omcasts++; 1817 } 1818 } 1819 1820 /* 1821 * bridge_dummynet: 1822 * 1823 * Receive a queued packet from dummynet and pass it on to the output 1824 * interface. 1825 * 1826 * The mbuf has the Ethernet header already attached. 1827 */ 1828 static void 1829 bridge_dummynet(struct mbuf *m, struct ifnet *ifp) 1830 { 1831 struct bridge_softc *sc; 1832 1833 sc = ifp->if_bridge; 1834 1835 /* 1836 * The packet didn't originate from a member interface. This should only 1837 * ever happen if a member interface is removed while packets are 1838 * queued for it. 1839 */ 1840 if (sc == NULL) { 1841 m_freem(m); 1842 return; 1843 } 1844 1845 if (PFIL_HOOKED(&V_inet_pfil_hook) 1846 #ifdef INET6 1847 || PFIL_HOOKED(&V_inet6_pfil_hook) 1848 #endif 1849 ) { 1850 if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0) 1851 return; 1852 if (m == NULL) 1853 return; 1854 } 1855 1856 bridge_enqueue(sc, ifp, m); 1857 } 1858 1859 /* 1860 * bridge_output: 1861 * 1862 * Send output from a bridge member interface. This 1863 * performs the bridging function for locally originated 1864 * packets. 1865 * 1866 * The mbuf has the Ethernet header already attached. We must 1867 * enqueue or free the mbuf before returning. 1868 */ 1869 static int 1870 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa, 1871 struct rtentry *rt) 1872 { 1873 struct ether_header *eh; 1874 struct ifnet *dst_if; 1875 struct bridge_softc *sc; 1876 uint16_t vlan; 1877 1878 if (m->m_len < ETHER_HDR_LEN) { 1879 m = m_pullup(m, ETHER_HDR_LEN); 1880 if (m == NULL) 1881 return (0); 1882 } 1883 1884 eh = mtod(m, struct ether_header *); 1885 sc = ifp->if_bridge; 1886 vlan = VLANTAGOF(m); 1887 1888 BRIDGE_LOCK(sc); 1889 1890 /* 1891 * If bridge is down, but the original output interface is up, 1892 * go ahead and send out that interface. Otherwise, the packet 1893 * is dropped below.
1894 */ 1895 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1896 dst_if = ifp; 1897 goto sendunicast; 1898 } 1899 1900 /* 1901 * If the packet is a multicast, or we don't know a better way to 1902 * get there, send to all interfaces. 1903 */ 1904 if (ETHER_IS_MULTICAST(eh->ether_dhost)) 1905 dst_if = NULL; 1906 else 1907 dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan); 1908 if (dst_if == NULL) { 1909 struct bridge_iflist *bif; 1910 struct mbuf *mc; 1911 int error = 0, used = 0; 1912 1913 bridge_span(sc, m); 1914 1915 BRIDGE_LOCK2REF(sc, error); 1916 if (error) { 1917 m_freem(m); 1918 return (0); 1919 } 1920 1921 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1922 dst_if = bif->bif_ifp; 1923 1924 if (dst_if->if_type == IFT_GIF) 1925 continue; 1926 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 1927 continue; 1928 1929 /* 1930 * If this is not the original output interface, 1931 * and the interface is participating in spanning 1932 * tree, make sure the port is in a state that 1933 * allows forwarding. 1934 */ 1935 if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) && 1936 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 1937 continue; 1938 1939 if (LIST_NEXT(bif, bif_next) == NULL) { 1940 used = 1; 1941 mc = m; 1942 } else { 1943 mc = m_copypacket(m, M_DONTWAIT); 1944 if (mc == NULL) { 1945 sc->sc_ifp->if_oerrors++; 1946 continue; 1947 } 1948 } 1949 1950 bridge_enqueue(sc, dst_if, mc); 1951 } 1952 if (used == 0) 1953 m_freem(m); 1954 BRIDGE_UNREF(sc); 1955 return (0); 1956 } 1957 1958 sendunicast: 1959 /* 1960 * XXX Spanning tree consideration here? 1961 */ 1962 1963 bridge_span(sc, m); 1964 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1965 m_freem(m); 1966 BRIDGE_UNLOCK(sc); 1967 return (0); 1968 } 1969 1970 BRIDGE_UNLOCK(sc); 1971 bridge_enqueue(sc, dst_if, m); 1972 return (0); 1973 } 1974 1975 /* 1976 * bridge_start: 1977 * 1978 * Start output on a bridge. 1979 * 1980 */ 1981 static void 1982 bridge_start(struct ifnet *ifp) 1983 { 1984 struct bridge_softc *sc; 1985 struct mbuf *m; 1986 struct ether_header *eh; 1987 struct ifnet *dst_if; 1988 1989 sc = ifp->if_softc; 1990 1991 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1992 for (;;) { 1993 IFQ_DEQUEUE(&ifp->if_snd, m); 1994 if (m == 0) 1995 break; 1996 ETHER_BPF_MTAP(ifp, m); 1997 1998 eh = mtod(m, struct ether_header *); 1999 dst_if = NULL; 2000 2001 BRIDGE_LOCK(sc); 2002 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) { 2003 dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1); 2004 } 2005 2006 if (dst_if == NULL) 2007 bridge_broadcast(sc, ifp, m, 0); 2008 else { 2009 BRIDGE_UNLOCK(sc); 2010 bridge_enqueue(sc, dst_if, m); 2011 } 2012 } 2013 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2014 } 2015 2016 /* 2017 * bridge_forward: 2018 * 2019 * The forwarding function of the bridge. 2020 * 2021 * NOTE: Releases the lock on return. 2022 */ 2023 static void 2024 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif, 2025 struct mbuf *m) 2026 { 2027 struct bridge_iflist *dbif; 2028 struct ifnet *src_if, *dst_if, *ifp; 2029 struct ether_header *eh; 2030 uint16_t vlan; 2031 uint8_t *dst; 2032 int error; 2033 2034 src_if = m->m_pkthdr.rcvif; 2035 ifp = sc->sc_ifp; 2036 2037 ifp->if_ipackets++; 2038 ifp->if_ibytes += m->m_pkthdr.len; 2039 vlan = VLANTAGOF(m); 2040 2041 if ((sbif->bif_flags & IFBIF_STP) && 2042 sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2043 goto drop; 2044 2045 eh = mtod(m, struct ether_header *); 2046 dst = eh->ether_dhost; 2047 2048 /* If the interface is learning, record the address. 
	if (sbif->bif_flags & IFBIF_LEARNING) {
		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
		    sbif, 0, IFBAF_DYNAMIC);
		/*
		 * If the interface has address limits then deny any source
		 * that is not in the cache.
		 */
		if (error && sbif->bif_addrmax)
			goto drop;
	}

	if ((sbif->bif_flags & IFBIF_STP) != 0 &&
	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
		goto drop;

	/*
	 * At this point, the port either doesn't participate
	 * in spanning tree or it is in the forwarding state.
	 */

	/*
	 * If the packet is unicast, destined for someone on
	 * "this" side of the bridge, drop it.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
		dst_if = bridge_rtlookup(sc, dst, vlan);
		if (src_if == dst_if)
			goto drop;
	} else {
		/*
		 * Check if it's a reserved multicast address; any address
		 * listed in 802.1D section 7.12.6 may not be forwarded by
		 * the bridge.
		 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F.
		 */
		if (dst[0] == 0x01 && dst[1] == 0x80 &&
		    dst[2] == 0xc2 && dst[3] == 0x00 &&
		    dst[4] == 0x00 && dst[5] <= 0x0f)
			goto drop;

		/* ...forward it to all interfaces. */
		ifp->if_imcasts++;
		dst_if = NULL;
	}

	/*
	 * If we have a destination interface which is a member of our bridge,
	 * OR this is a unicast packet, push it through the bpf(4) machinery.
	 * For broadcast or multicast packets, don't bother because it will
	 * be reinjected into ether_input.  We do this before we pass the
	 * packets through the pfil(9) framework, as it is possible that
	 * pfil(9) will drop the packet, or possibly modify it, making it
	 * difficult to debug firewall issues on the bridge.
	 */
	if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
		ETHER_BPF_MTAP(ifp, m);

	/* run the packet filter */
	if (PFIL_HOOKED(&V_inet_pfil_hook)
#ifdef INET6
	    || PFIL_HOOKED(&V_inet6_pfil_hook)
#endif
	    ) {
		BRIDGE_UNLOCK(sc);
		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
			return;
		if (m == NULL)
			return;
		BRIDGE_LOCK(sc);
	}

	if (dst_if == NULL) {
		bridge_broadcast(sc, src_if, m, 1);
		return;
	}

	/*
	 * At this point, we're dealing with a unicast frame
	 * going to a different interface.
	 */
	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto drop;

	dbif = bridge_lookup_member_if(sc, dst_if);
	if (dbif == NULL)
		/* Not a member of the bridge (anymore?) */
		goto drop;

	/* Private segments can not talk to each other */
	if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
		goto drop;

	if ((dbif->bif_flags & IFBIF_STP) &&
	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
		goto drop;

	BRIDGE_UNLOCK(sc);

	if (PFIL_HOOKED(&V_inet_pfil_hook)
#ifdef INET6
	    || PFIL_HOOKED(&V_inet6_pfil_hook)
#endif
	    ) {
		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
			return;
		if (m == NULL)
			return;
	}

	bridge_enqueue(sc, dst_if, m);
	return;

drop:
	BRIDGE_UNLOCK(sc);
	m_freem(m);
}
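
/*
 * Illustrative sketch (not part of this driver): the reserved-address test
 * in bridge_forward() above can be read as a single predicate over the
 * 48-bit destination address.  The standalone helper below expresses the
 * same 01-80-C2-00-00-00 .. 01-80-C2-00-00-0F range check; the function
 * name is an assumption made for this example only.
 */
#if 0	/* example only, never compiled */
#include <stdint.h>

static int
ex_is_8021d_reserved(const uint8_t dst[6])
{
	/* 802.1D section 7.12.6: these group addresses are never relayed. */
	return (dst[0] == 0x01 && dst[1] == 0x80 && dst[2] == 0xc2 &&
	    dst[3] == 0x00 && dst[4] == 0x00 && dst[5] <= 0x0f);
}
#endif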

/*
 * bridge_input:
 *
 *	Receive input from a member interface.  Queue the packet for
 *	bridging if it is not for us.
 */
static struct mbuf *
bridge_input(struct ifnet *ifp, struct mbuf *m)
{
	struct bridge_softc *sc = ifp->if_bridge;
	struct bridge_iflist *bif, *bif2;
	struct ifnet *bifp;
	struct ether_header *eh;
	struct mbuf *mc, *mc2;
	uint16_t vlan;
	int error;

	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (m);

	bifp = sc->sc_ifp;
	vlan = VLANTAGOF(m);

	/*
	 * Implement support for bridge monitoring.  If this flag has been
	 * set on this interface, discard the packet once we push it through
	 * the bpf(4) machinery, but before we do, increment the byte and
	 * packet counters associated with this interface.
	 */
	if ((bifp->if_flags & IFF_MONITOR) != 0) {
		m->m_pkthdr.rcvif = bifp;
		ETHER_BPF_MTAP(bifp, m);
		bifp->if_ipackets++;
		bifp->if_ibytes += m->m_pkthdr.len;
		m_freem(m);
		return (NULL);
	}
	BRIDGE_LOCK(sc);
	bif = bridge_lookup_member_if(sc, ifp);
	if (bif == NULL) {
		BRIDGE_UNLOCK(sc);
		return (m);
	}

	eh = mtod(m, struct ether_header *);

	bridge_span(sc, m);

	if (m->m_flags & (M_BCAST|M_MCAST)) {
		/* Tap off 802.1D packets; they do not get forwarded. */
		if (memcmp(eh->ether_dhost, bstp_etheraddr,
		    ETHER_ADDR_LEN) == 0) {
			bstp_input(&bif->bif_stp, ifp, m); /* consumes mbuf */
			BRIDGE_UNLOCK(sc);
			return (NULL);
		}

		if ((bif->bif_flags & IFBIF_STP) &&
		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
			BRIDGE_UNLOCK(sc);
			return (m);
		}

		/*
		 * Make a deep copy of the packet and enqueue the copy
		 * for bridge processing; return the original packet for
		 * local processing.
		 */
		mc = m_dup(m, M_DONTWAIT);
		if (mc == NULL) {
			BRIDGE_UNLOCK(sc);
			return (m);
		}

		/* Perform the bridge forwarding function with the copy. */
		bridge_forward(sc, bif, mc);

		/*
		 * Reinject the mbuf as arriving on the bridge so we have a
		 * chance at claiming multicast packets.  We can not loop back
		 * here from ether_input as a bridge is never a member of a
		 * bridge.
		 */
		KASSERT(bifp->if_bridge == NULL,
		    ("loop created in bridge_input"));
		mc2 = m_dup(m, M_DONTWAIT);
		if (mc2 != NULL) {
			/* Keep the layer3 header aligned */
			int i = min(mc2->m_pkthdr.len, max_protohdr);
			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
		}
		if (mc2 != NULL) {
			mc2->m_pkthdr.rcvif = bifp;
			(*bifp->if_input)(bifp, mc2);
		}

		/* Return the original packet for local processing. */
		return (m);
	}

	if ((bif->bif_flags & IFBIF_STP) &&
	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
		BRIDGE_UNLOCK(sc);
		return (m);
	}

#if (defined(INET) || defined(INET6))
#   define OR_CARP_CHECK_WE_ARE_DST(iface) \
	|| ((iface)->if_carp \
	    && (*carp_forus_p)((iface), eh->ether_dhost))
#   define OR_CARP_CHECK_WE_ARE_SRC(iface) \
	|| ((iface)->if_carp \
	    && (*carp_forus_p)((iface), eh->ether_shost))
#else
#   define OR_CARP_CHECK_WE_ARE_DST(iface)
#   define OR_CARP_CHECK_WE_ARE_SRC(iface)
#endif

#ifdef INET6
#   define OR_PFIL_HOOKED_INET6 \
	|| PFIL_HOOKED(&V_inet6_pfil_hook)
#else
#   define OR_PFIL_HOOKED_INET6
#endif

#define GRAB_OUR_PACKETS(iface) \
	if ((iface)->if_type == IFT_GIF) \
		continue; \
	/* It is destined for us. */ \
	if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, ETHER_ADDR_LEN) == 0 \
	    OR_CARP_CHECK_WE_ARE_DST((iface))				\
	) {								\
		if ((iface)->if_type == IFT_BRIDGE) {			\
			ETHER_BPF_MTAP(iface, m);			\
			iface->if_ipackets++;				\
			/* Filter on the physical interface. */		\
			if (pfil_local_phys &&				\
			    (PFIL_HOOKED(&V_inet_pfil_hook)		\
			     OR_PFIL_HOOKED_INET6)) {			\
				if (bridge_pfil(&m, NULL, ifp,		\
				    PFIL_IN) != 0 || m == NULL) {	\
					BRIDGE_UNLOCK(sc);		\
					return (NULL);			\
				}					\
			}						\
		}							\
		if (bif->bif_flags & IFBIF_LEARNING) {			\
			error = bridge_rtupdate(sc, eh->ether_shost,	\
			    vlan, bif, 0, IFBAF_DYNAMIC);		\
			if (error && bif->bif_addrmax) {		\
				BRIDGE_UNLOCK(sc);			\
				m_freem(m);				\
				return (NULL);				\
			}						\
		}							\
		m->m_pkthdr.rcvif = iface;				\
		BRIDGE_UNLOCK(sc);					\
		return (m);						\
	}								\
									\
	/* We just received a packet that we sent out. */		\
	if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \
	    OR_CARP_CHECK_WE_ARE_SRC((iface))				\
	) {								\
		BRIDGE_UNLOCK(sc);					\
		m_freem(m);						\
		return (NULL);						\
	}

	/*
	 * Unicast.  Make sure it's not for the bridge.
	 */
	do { GRAB_OUR_PACKETS(bifp) } while (0);

	/*
	 * Give a chance for ifp at first priority.  This will help when the
	 * packet comes through an interface such as a VLAN that shares a MAC
	 * address with other interfaces on the same bridge.  This also saves
	 * some CPU cycles in case the destination interface and the input
	 * interface (i.e. ifp) are the same.
	 */
	do { GRAB_OUR_PACKETS(ifp) } while (0);

	/* Now check all the bridge members. */
	LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
		GRAB_OUR_PACKETS(bif2->bif_ifp)
	}

#undef OR_CARP_CHECK_WE_ARE_DST
#undef OR_CARP_CHECK_WE_ARE_SRC
#undef OR_PFIL_HOOKED_INET6
#undef GRAB_OUR_PACKETS

	/* Perform the bridge forwarding function. */
	bridge_forward(sc, bif, m);

	return (NULL);
}
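
/*
 * Illustrative sketch (not part of this driver): stripped of locking, CARP
 * and pfil details, the GRAB_OUR_PACKETS() macro used by bridge_input()
 * above reduces to a three-way decision per interface address.  The
 * userland helper below models that decision; the enum and function names
 * are assumptions made for this example only.
 */
#if 0	/* example only, never compiled */
#include <stdint.h>
#include <string.h>

enum ex_verdict { EX_FORWARD, EX_LOCAL, EX_DROP_OWN };

static enum ex_verdict
ex_classify(const uint8_t dst[6], const uint8_t src[6],
    const uint8_t ifaddr[6])
{
	if (memcmp(ifaddr, dst, 6) == 0)
		return (EX_LOCAL);	/* destined for this interface */
	if (memcmp(ifaddr, src, 6) == 0)
		return (EX_DROP_OWN);	/* our own frame reflected back */
	return (EX_FORWARD);		/* candidate for bridging */
}
#endif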
2373 */ 2374 static void 2375 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if, 2376 struct mbuf *m, int runfilt) 2377 { 2378 struct bridge_iflist *dbif, *sbif; 2379 struct mbuf *mc; 2380 struct ifnet *dst_if; 2381 int error = 0, used = 0, i; 2382 2383 sbif = bridge_lookup_member_if(sc, src_if); 2384 2385 BRIDGE_LOCK2REF(sc, error); 2386 if (error) { 2387 m_freem(m); 2388 return; 2389 } 2390 2391 /* Filter on the bridge interface before broadcasting */ 2392 if (runfilt && (PFIL_HOOKED(&V_inet_pfil_hook) 2393 #ifdef INET6 2394 || PFIL_HOOKED(&V_inet6_pfil_hook) 2395 #endif 2396 )) { 2397 if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0) 2398 goto out; 2399 if (m == NULL) 2400 goto out; 2401 } 2402 2403 LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) { 2404 dst_if = dbif->bif_ifp; 2405 if (dst_if == src_if) 2406 continue; 2407 2408 /* Private segments can not talk to each other */ 2409 if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)) 2410 continue; 2411 2412 if ((dbif->bif_flags & IFBIF_STP) && 2413 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2414 continue; 2415 2416 if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 && 2417 (m->m_flags & (M_BCAST|M_MCAST)) == 0) 2418 continue; 2419 2420 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2421 continue; 2422 2423 if (LIST_NEXT(dbif, bif_next) == NULL) { 2424 mc = m; 2425 used = 1; 2426 } else { 2427 mc = m_dup(m, M_DONTWAIT); 2428 if (mc == NULL) { 2429 sc->sc_ifp->if_oerrors++; 2430 continue; 2431 } 2432 } 2433 2434 /* 2435 * Filter on the output interface. Pass a NULL bridge interface 2436 * pointer so we do not redundantly filter on the bridge for 2437 * each interface we broadcast on. 2438 */ 2439 if (runfilt && (PFIL_HOOKED(&V_inet_pfil_hook) 2440 #ifdef INET6 2441 || PFIL_HOOKED(&V_inet6_pfil_hook) 2442 #endif 2443 )) { 2444 if (used == 0) { 2445 /* Keep the layer3 header aligned */ 2446 i = min(mc->m_pkthdr.len, max_protohdr); 2447 mc = m_copyup(mc, i, ETHER_ALIGN); 2448 if (mc == NULL) { 2449 sc->sc_ifp->if_oerrors++; 2450 continue; 2451 } 2452 } 2453 if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0) 2454 continue; 2455 if (mc == NULL) 2456 continue; 2457 } 2458 2459 bridge_enqueue(sc, dst_if, mc); 2460 } 2461 if (used == 0) 2462 m_freem(m); 2463 2464 out: 2465 BRIDGE_UNREF(sc); 2466 } 2467 2468 /* 2469 * bridge_span: 2470 * 2471 * Duplicate a packet out one or more interfaces that are in span mode, 2472 * the original mbuf is unmodified. 2473 */ 2474 static void 2475 bridge_span(struct bridge_softc *sc, struct mbuf *m) 2476 { 2477 struct bridge_iflist *bif; 2478 struct ifnet *dst_if; 2479 struct mbuf *mc; 2480 2481 if (LIST_EMPTY(&sc->sc_spanlist)) 2482 return; 2483 2484 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) { 2485 dst_if = bif->bif_ifp; 2486 2487 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2488 continue; 2489 2490 mc = m_copypacket(m, M_DONTWAIT); 2491 if (mc == NULL) { 2492 sc->sc_ifp->if_oerrors++; 2493 continue; 2494 } 2495 2496 bridge_enqueue(sc, dst_if, mc); 2497 } 2498 } 2499 2500 /* 2501 * bridge_rtupdate: 2502 * 2503 * Add a bridge routing entry. 2504 */ 2505 static int 2506 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan, 2507 struct bridge_iflist *bif, int setflags, uint8_t flags) 2508 { 2509 struct bridge_rtnode *brt; 2510 int error; 2511 2512 BRIDGE_LOCK_ASSERT(sc); 2513 2514 /* Check the source address is valid and not multicast. 
	if (ETHER_IS_MULTICAST(dst) ||
	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
	     dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0)
		return (EINVAL);

	/* 802.1p frames map to vlan 1 */
	if (vlan == 0)
		vlan = 1;

	/*
	 * A route for this destination might already exist.  If so,
	 * update it, otherwise create a new one.
	 */
	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
		if (sc->sc_brtcnt >= sc->sc_brtmax) {
			sc->sc_brtexceeded++;
			return (ENOSPC);
		}
		/* Check per interface address limits (if enabled) */
		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
			bif->bif_addrexceeded++;
			return (ENOSPC);
		}

		/*
		 * Allocate a new bridge forwarding node, and
		 * initialize the expiration time and Ethernet
		 * address.
		 */
		brt = uma_zalloc(bridge_rtnode_zone, M_NOWAIT | M_ZERO);
		if (brt == NULL)
			return (ENOMEM);

		if (bif->bif_flags & IFBIF_STICKY)
			brt->brt_flags = IFBAF_STICKY;
		else
			brt->brt_flags = IFBAF_DYNAMIC;

		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
		brt->brt_vlan = vlan;

		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
			uma_zfree(bridge_rtnode_zone, brt);
			return (error);
		}
		brt->brt_dst = bif;
		bif->bif_addrcnt++;
	}

	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
	    brt->brt_dst != bif) {
		brt->brt_dst->bif_addrcnt--;
		brt->brt_dst = bif;
		brt->brt_dst->bif_addrcnt++;
	}

	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
		brt->brt_expire = time_uptime + sc->sc_brttimeout;
	if (setflags)
		brt->brt_flags = flags;

	return (0);
}

/*
 * bridge_rtlookup:
 *
 *	Lookup the destination interface for an address.
 */
static struct ifnet *
bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
{
	struct bridge_rtnode *brt;

	BRIDGE_LOCK_ASSERT(sc);

	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
		return (NULL);

	return (brt->brt_ifp);
}

/*
 * bridge_rttrim:
 *
 *	Trim the routing table so that we have a number
 *	of routing entries less than or equal to the
 *	maximum number.
 */
static void
bridge_rttrim(struct bridge_softc *sc)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT(sc);

	/* Make sure we actually need to do this. */
	if (sc->sc_brtcnt <= sc->sc_brtmax)
		return;

	/* Force an aging cycle; this might trim enough addresses. */
	bridge_rtage(sc);
	if (sc->sc_brtcnt <= sc->sc_brtmax)
		return;

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
			bridge_rtnode_destroy(sc, brt);
			if (sc->sc_brtcnt <= sc->sc_brtmax)
				return;
		}
	}
}

/*
 * bridge_timer:
 *
 *	Aging timer for the bridge.
 */
static void
bridge_timer(void *arg)
{
	struct bridge_softc *sc = arg;

	BRIDGE_LOCK_ASSERT(sc);

	bridge_rtage(sc);

	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
		callout_reset(&sc->sc_brcallout,
		    bridge_rtable_prune_period * hz, bridge_timer, sc);
}
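
/*
 * Illustrative sketch (not part of this driver): ignoring locking, VLANs
 * and the hash table, bridge_rtupdate() above implements a bounded
 * learn-or-refresh policy.  The userland model below captures that policy:
 * refresh an existing dynamic entry, otherwise create one only while both
 * the cache and the per-port limits leave room.  All structure and
 * function names here are assumptions made for this example only.
 */
#if 0	/* example only, never compiled */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <time.h>

struct ex_entry {
	uint8_t		addr[6];
	unsigned	port;
	time_t		expire;
	int		valid;
};

#define EX_CACHE_MAX	8
#define EX_TIMEOUT	(20 * 60)	/* seconds, mirrors ARP */

static struct ex_entry ex_cache[EX_CACHE_MAX];

static int
ex_learn(const uint8_t src[6], unsigned port, unsigned port_max,
    unsigned *port_cnt)
{
	struct ex_entry *e, *free_slot = NULL;
	int i;

	for (i = 0; i < EX_CACHE_MAX; i++) {
		e = &ex_cache[i];
		if (e->valid && memcmp(e->addr, src, 6) == 0) {
			e->port = port;			/* station moved? */
			e->expire = time(NULL) + EX_TIMEOUT;
			return (0);
		}
		if (!e->valid && free_slot == NULL)
			free_slot = e;
	}

	/* New address: honour the per-port limit before inserting. */
	if (free_slot == NULL || (port_max && *port_cnt >= port_max))
		return (ENOSPC);

	memcpy(free_slot->addr, src, 6);
	free_slot->port = port;
	free_slot->expire = time(NULL) + EX_TIMEOUT;
	free_slot->valid = 1;
	(*port_cnt)++;
	return (0);
}
#endif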
2652 */ 2653 static void 2654 bridge_rtage(struct bridge_softc *sc) 2655 { 2656 struct bridge_rtnode *brt, *nbrt; 2657 2658 BRIDGE_LOCK_ASSERT(sc); 2659 2660 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { 2661 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { 2662 if (time_uptime >= brt->brt_expire) 2663 bridge_rtnode_destroy(sc, brt); 2664 } 2665 } 2666 } 2667 2668 /* 2669 * bridge_rtflush: 2670 * 2671 * Remove all dynamic addresses from the bridge. 2672 */ 2673 static void 2674 bridge_rtflush(struct bridge_softc *sc, int full) 2675 { 2676 struct bridge_rtnode *brt, *nbrt; 2677 2678 BRIDGE_LOCK_ASSERT(sc); 2679 2680 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { 2681 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) 2682 bridge_rtnode_destroy(sc, brt); 2683 } 2684 } 2685 2686 /* 2687 * bridge_rtdaddr: 2688 * 2689 * Remove an address from the table. 2690 */ 2691 static int 2692 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) 2693 { 2694 struct bridge_rtnode *brt; 2695 int found = 0; 2696 2697 BRIDGE_LOCK_ASSERT(sc); 2698 2699 /* 2700 * If vlan is zero then we want to delete for all vlans so the lookup 2701 * may return more than one. 2702 */ 2703 while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) { 2704 bridge_rtnode_destroy(sc, brt); 2705 found = 1; 2706 } 2707 2708 return (found ? 0 : ENOENT); 2709 } 2710 2711 /* 2712 * bridge_rtdelete: 2713 * 2714 * Delete routes to a speicifc member interface. 2715 */ 2716 static void 2717 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full) 2718 { 2719 struct bridge_rtnode *brt, *nbrt; 2720 2721 BRIDGE_LOCK_ASSERT(sc); 2722 2723 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { 2724 if (brt->brt_ifp == ifp && (full || 2725 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) 2726 bridge_rtnode_destroy(sc, brt); 2727 } 2728 } 2729 2730 /* 2731 * bridge_rtable_init: 2732 * 2733 * Initialize the route table for this bridge. 2734 */ 2735 static int 2736 bridge_rtable_init(struct bridge_softc *sc) 2737 { 2738 int i; 2739 2740 sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE, 2741 M_DEVBUF, M_NOWAIT); 2742 if (sc->sc_rthash == NULL) 2743 return (ENOMEM); 2744 2745 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++) 2746 LIST_INIT(&sc->sc_rthash[i]); 2747 2748 sc->sc_rthash_key = arc4random(); 2749 2750 LIST_INIT(&sc->sc_rtlist); 2751 2752 return (0); 2753 } 2754 2755 /* 2756 * bridge_rtable_fini: 2757 * 2758 * Deconstruct the route table for this bridge. 2759 */ 2760 static void 2761 bridge_rtable_fini(struct bridge_softc *sc) 2762 { 2763 2764 KASSERT(sc->sc_brtcnt == 0, 2765 ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt)); 2766 free(sc->sc_rthash, M_DEVBUF); 2767 } 2768 2769 /* 2770 * The following hash function is adapted from "Hash Functions" by Bob Jenkins 2771 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997). 
2772 */ 2773 #define mix(a, b, c) \ 2774 do { \ 2775 a -= b; a -= c; a ^= (c >> 13); \ 2776 b -= c; b -= a; b ^= (a << 8); \ 2777 c -= a; c -= b; c ^= (b >> 13); \ 2778 a -= b; a -= c; a ^= (c >> 12); \ 2779 b -= c; b -= a; b ^= (a << 16); \ 2780 c -= a; c -= b; c ^= (b >> 5); \ 2781 a -= b; a -= c; a ^= (c >> 3); \ 2782 b -= c; b -= a; b ^= (a << 10); \ 2783 c -= a; c -= b; c ^= (b >> 15); \ 2784 } while (/*CONSTCOND*/0) 2785 2786 static __inline uint32_t 2787 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr) 2788 { 2789 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key; 2790 2791 b += addr[5] << 8; 2792 b += addr[4]; 2793 a += addr[3] << 24; 2794 a += addr[2] << 16; 2795 a += addr[1] << 8; 2796 a += addr[0]; 2797 2798 mix(a, b, c); 2799 2800 return (c & BRIDGE_RTHASH_MASK); 2801 } 2802 2803 #undef mix 2804 2805 static int 2806 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b) 2807 { 2808 int i, d; 2809 2810 for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) { 2811 d = ((int)a[i]) - ((int)b[i]); 2812 } 2813 2814 return (d); 2815 } 2816 2817 /* 2818 * bridge_rtnode_lookup: 2819 * 2820 * Look up a bridge route node for the specified destination. Compare the 2821 * vlan id or if zero then just return the first match. 2822 */ 2823 static struct bridge_rtnode * 2824 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) 2825 { 2826 struct bridge_rtnode *brt; 2827 uint32_t hash; 2828 int dir; 2829 2830 BRIDGE_LOCK_ASSERT(sc); 2831 2832 hash = bridge_rthash(sc, addr); 2833 LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) { 2834 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr); 2835 if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0)) 2836 return (brt); 2837 if (dir > 0) 2838 return (NULL); 2839 } 2840 2841 return (NULL); 2842 } 2843 2844 /* 2845 * bridge_rtnode_insert: 2846 * 2847 * Insert the specified bridge node into the route table. We 2848 * assume the entry is not already in the table. 2849 */ 2850 static int 2851 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt) 2852 { 2853 struct bridge_rtnode *lbrt; 2854 uint32_t hash; 2855 int dir; 2856 2857 BRIDGE_LOCK_ASSERT(sc); 2858 2859 hash = bridge_rthash(sc, brt->brt_addr); 2860 2861 lbrt = LIST_FIRST(&sc->sc_rthash[hash]); 2862 if (lbrt == NULL) { 2863 LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash); 2864 goto out; 2865 } 2866 2867 do { 2868 dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr); 2869 if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan) 2870 return (EEXIST); 2871 if (dir > 0) { 2872 LIST_INSERT_BEFORE(lbrt, brt, brt_hash); 2873 goto out; 2874 } 2875 if (LIST_NEXT(lbrt, brt_hash) == NULL) { 2876 LIST_INSERT_AFTER(lbrt, brt, brt_hash); 2877 goto out; 2878 } 2879 lbrt = LIST_NEXT(lbrt, brt_hash); 2880 } while (lbrt != NULL); 2881 2882 #ifdef DIAGNOSTIC 2883 panic("bridge_rtnode_insert: impossible"); 2884 #endif 2885 2886 out: 2887 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list); 2888 sc->sc_brtcnt++; 2889 2890 return (0); 2891 } 2892 2893 /* 2894 * bridge_rtnode_destroy: 2895 * 2896 * Destroy a bridge rtnode. 2897 */ 2898 static void 2899 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt) 2900 { 2901 BRIDGE_LOCK_ASSERT(sc); 2902 2903 LIST_REMOVE(brt, brt_hash); 2904 2905 LIST_REMOVE(brt, brt_list); 2906 sc->sc_brtcnt--; 2907 brt->brt_dst->bif_addrcnt--; 2908 uma_zfree(bridge_rtnode_zone, brt); 2909 } 2910 2911 /* 2912 * bridge_rtable_expire: 2913 * 2914 * Set the expiry time for all routes on an interface. 
2915 */ 2916 static void 2917 bridge_rtable_expire(struct ifnet *ifp, int age) 2918 { 2919 struct bridge_softc *sc = ifp->if_bridge; 2920 struct bridge_rtnode *brt; 2921 2922 BRIDGE_LOCK(sc); 2923 2924 /* 2925 * If the age is zero then flush, otherwise set all the expiry times to 2926 * age for the interface 2927 */ 2928 if (age == 0) 2929 bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN); 2930 else { 2931 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { 2932 /* Cap the expiry time to 'age' */ 2933 if (brt->brt_ifp == ifp && 2934 brt->brt_expire > time_uptime + age && 2935 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) 2936 brt->brt_expire = time_uptime + age; 2937 } 2938 } 2939 BRIDGE_UNLOCK(sc); 2940 } 2941 2942 /* 2943 * bridge_state_change: 2944 * 2945 * Callback from the bridgestp code when a port changes states. 2946 */ 2947 static void 2948 bridge_state_change(struct ifnet *ifp, int state) 2949 { 2950 struct bridge_softc *sc = ifp->if_bridge; 2951 static const char *stpstates[] = { 2952 "disabled", 2953 "listening", 2954 "learning", 2955 "forwarding", 2956 "blocking", 2957 "discarding" 2958 }; 2959 2960 if (log_stp) 2961 log(LOG_NOTICE, "%s: state changed to %s on %s\n", 2962 sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname); 2963 } 2964 2965 /* 2966 * Send bridge packets through pfil if they are one of the types pfil can deal 2967 * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without 2968 * question.) If *bifp or *ifp are NULL then packet filtering is skipped for 2969 * that interface. 2970 */ 2971 static int 2972 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir) 2973 { 2974 int snap, error, i, hlen; 2975 struct ether_header *eh1, eh2; 2976 struct ip_fw_args args; 2977 struct ip *ip; 2978 struct llc llc1; 2979 u_int16_t ether_type; 2980 2981 snap = 0; 2982 error = -1; /* Default error if not error == 0 */ 2983 2984 #if 0 2985 /* we may return with the IP fields swapped, ensure its not shared */ 2986 KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__)); 2987 #endif 2988 2989 if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0) 2990 return (0); /* filtering is disabled */ 2991 2992 i = min((*mp)->m_pkthdr.len, max_protohdr); 2993 if ((*mp)->m_len < i) { 2994 *mp = m_pullup(*mp, i); 2995 if (*mp == NULL) { 2996 printf("%s: m_pullup failed\n", __func__); 2997 return (-1); 2998 } 2999 } 3000 3001 eh1 = mtod(*mp, struct ether_header *); 3002 ether_type = ntohs(eh1->ether_type); 3003 3004 /* 3005 * Check for SNAP/LLC. 3006 */ 3007 if (ether_type < ETHERMTU) { 3008 struct llc *llc2 = (struct llc *)(eh1 + 1); 3009 3010 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 && 3011 llc2->llc_dsap == LLC_SNAP_LSAP && 3012 llc2->llc_ssap == LLC_SNAP_LSAP && 3013 llc2->llc_control == LLC_UI) { 3014 ether_type = htons(llc2->llc_un.type_snap.ether_type); 3015 snap = 1; 3016 } 3017 } 3018 3019 /* 3020 * If we're trying to filter bridge traffic, don't look at anything 3021 * other than IP and ARP traffic. If the filter doesn't understand 3022 * IPv6, don't allow IPv6 through the bridge either. This is lame 3023 * since if we really wanted, say, an AppleTalk filter, we are hosed, 3024 * but of course we don't have an AppleTalk filter to begin with. 3025 * (Note that since pfil doesn't understand ARP it will pass *ALL* 3026 * ARP traffic.) 
3027 */ 3028 switch (ether_type) { 3029 case ETHERTYPE_ARP: 3030 case ETHERTYPE_REVARP: 3031 if (pfil_ipfw_arp == 0) 3032 return (0); /* Automatically pass */ 3033 break; 3034 3035 case ETHERTYPE_IP: 3036 #ifdef INET6 3037 case ETHERTYPE_IPV6: 3038 #endif /* INET6 */ 3039 break; 3040 default: 3041 /* 3042 * Check to see if the user wants to pass non-ip 3043 * packets, these will not be checked by pfil(9) and 3044 * passed unconditionally so the default is to drop. 3045 */ 3046 if (pfil_onlyip) 3047 goto bad; 3048 } 3049 3050 /* Strip off the Ethernet header and keep a copy. */ 3051 m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2); 3052 m_adj(*mp, ETHER_HDR_LEN); 3053 3054 /* Strip off snap header, if present */ 3055 if (snap) { 3056 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1); 3057 m_adj(*mp, sizeof(struct llc)); 3058 } 3059 3060 /* 3061 * Check the IP header for alignment and errors 3062 */ 3063 if (dir == PFIL_IN) { 3064 switch (ether_type) { 3065 case ETHERTYPE_IP: 3066 error = bridge_ip_checkbasic(mp); 3067 break; 3068 #ifdef INET6 3069 case ETHERTYPE_IPV6: 3070 error = bridge_ip6_checkbasic(mp); 3071 break; 3072 #endif /* INET6 */ 3073 default: 3074 error = 0; 3075 } 3076 if (error) 3077 goto bad; 3078 } 3079 3080 /* XXX this section is also in if_ethersubr.c */ 3081 // XXX PFIL_OUT or DIR_OUT ? 3082 if (V_ip_fw_chk_ptr && pfil_ipfw != 0 && 3083 dir == PFIL_OUT && ifp != NULL) { 3084 struct m_tag *mtag; 3085 3086 error = -1; 3087 /* fetch the start point from existing tags, if any */ 3088 mtag = m_tag_locate(*mp, MTAG_IPFW_RULE, 0, NULL); 3089 if (mtag == NULL) { 3090 args.rule.slot = 0; 3091 } else { 3092 struct ipfw_rule_ref *r; 3093 3094 /* XXX can we free the tag after use ? */ 3095 mtag->m_tag_id = PACKET_TAG_NONE; 3096 r = (struct ipfw_rule_ref *)(mtag + 1); 3097 /* packet already partially processed ? */ 3098 if (r->info & IPFW_ONEPASS) 3099 goto ipfwpass; 3100 args.rule = *r; 3101 } 3102 3103 args.m = *mp; 3104 args.oif = ifp; 3105 args.next_hop = NULL; 3106 args.next_hop6 = NULL; 3107 args.eh = &eh2; 3108 args.inp = NULL; /* used by ipfw uid/gid/jail rules */ 3109 i = V_ip_fw_chk_ptr(&args); 3110 *mp = args.m; 3111 3112 if (*mp == NULL) 3113 return (error); 3114 3115 if (ip_dn_io_ptr && (i == IP_FW_DUMMYNET)) { 3116 3117 /* put the Ethernet header back on */ 3118 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT); 3119 if (*mp == NULL) 3120 return (error); 3121 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); 3122 3123 /* 3124 * Pass the pkt to dummynet, which consumes it. The 3125 * packet will return to us via bridge_dummynet(). 3126 */ 3127 args.oif = ifp; 3128 ip_dn_io_ptr(mp, DIR_FWD | PROTO_IFB, &args); 3129 return (error); 3130 } 3131 3132 if (i != IP_FW_PASS) /* drop */ 3133 goto bad; 3134 } 3135 3136 ipfwpass: 3137 error = 0; 3138 3139 /* 3140 * Run the packet through pfil 3141 */ 3142 switch (ether_type) { 3143 case ETHERTYPE_IP: 3144 /* 3145 * before calling the firewall, swap fields the same as 3146 * IP does. here we assume the header is contiguous 3147 */ 3148 ip = mtod(*mp, struct ip *); 3149 3150 ip->ip_len = ntohs(ip->ip_len); 3151 ip->ip_off = ntohs(ip->ip_off); 3152 3153 /* 3154 * Run pfil on the member interface and the bridge, both can 3155 * be skipped by clearing pfil_member or pfil_bridge. 
3156 * 3157 * Keep the order: 3158 * in_if -> bridge_if -> out_if 3159 */ 3160 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) 3161 error = pfil_run_hooks(&V_inet_pfil_hook, mp, bifp, 3162 dir, NULL); 3163 3164 if (*mp == NULL || error != 0) /* filter may consume */ 3165 break; 3166 3167 if (pfil_member && ifp != NULL) 3168 error = pfil_run_hooks(&V_inet_pfil_hook, mp, ifp, 3169 dir, NULL); 3170 3171 if (*mp == NULL || error != 0) /* filter may consume */ 3172 break; 3173 3174 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) 3175 error = pfil_run_hooks(&V_inet_pfil_hook, mp, bifp, 3176 dir, NULL); 3177 3178 if (*mp == NULL || error != 0) /* filter may consume */ 3179 break; 3180 3181 /* check if we need to fragment the packet */ 3182 if (pfil_member && ifp != NULL && dir == PFIL_OUT) { 3183 i = (*mp)->m_pkthdr.len; 3184 if (i > ifp->if_mtu) { 3185 error = bridge_fragment(ifp, *mp, &eh2, snap, 3186 &llc1); 3187 return (error); 3188 } 3189 } 3190 3191 /* Recalculate the ip checksum and restore byte ordering */ 3192 ip = mtod(*mp, struct ip *); 3193 hlen = ip->ip_hl << 2; 3194 if (hlen < sizeof(struct ip)) 3195 goto bad; 3196 if (hlen > (*mp)->m_len) { 3197 if ((*mp = m_pullup(*mp, hlen)) == 0) 3198 goto bad; 3199 ip = mtod(*mp, struct ip *); 3200 if (ip == NULL) 3201 goto bad; 3202 } 3203 ip->ip_len = htons(ip->ip_len); 3204 ip->ip_off = htons(ip->ip_off); 3205 ip->ip_sum = 0; 3206 if (hlen == sizeof(struct ip)) 3207 ip->ip_sum = in_cksum_hdr(ip); 3208 else 3209 ip->ip_sum = in_cksum(*mp, hlen); 3210 3211 break; 3212 #ifdef INET6 3213 case ETHERTYPE_IPV6: 3214 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) 3215 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, bifp, 3216 dir, NULL); 3217 3218 if (*mp == NULL || error != 0) /* filter may consume */ 3219 break; 3220 3221 if (pfil_member && ifp != NULL) 3222 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, ifp, 3223 dir, NULL); 3224 3225 if (*mp == NULL || error != 0) /* filter may consume */ 3226 break; 3227 3228 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) 3229 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, bifp, 3230 dir, NULL); 3231 break; 3232 #endif 3233 default: 3234 error = 0; 3235 break; 3236 } 3237 3238 if (*mp == NULL) 3239 return (error); 3240 if (error != 0) 3241 goto bad; 3242 3243 error = -1; 3244 3245 /* 3246 * Finally, put everything back the way it was and return 3247 */ 3248 if (snap) { 3249 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT); 3250 if (*mp == NULL) 3251 return (error); 3252 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc)); 3253 } 3254 3255 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT); 3256 if (*mp == NULL) 3257 return (error); 3258 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); 3259 3260 return (0); 3261 3262 bad: 3263 m_freem(*mp); 3264 *mp = NULL; 3265 return (error); 3266 } 3267 3268 /* 3269 * Perform basic checks on header size since 3270 * pfil assumes ip_input has already processed 3271 * it for it. Cut-and-pasted from ip_input.c. 3272 * Given how simple the IPv6 version is, 3273 * does the IPv4 version really need to be 3274 * this complicated? 3275 * 3276 * XXX Should we update ipstat here, or not? 3277 * XXX Right now we update ipstat but not 3278 * XXX csum_counter. 
3279 */ 3280 static int 3281 bridge_ip_checkbasic(struct mbuf **mp) 3282 { 3283 struct mbuf *m = *mp; 3284 struct ip *ip; 3285 int len, hlen; 3286 u_short sum; 3287 3288 if (*mp == NULL) 3289 return (-1); 3290 3291 if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { 3292 if ((m = m_copyup(m, sizeof(struct ip), 3293 (max_linkhdr + 3) & ~3)) == NULL) { 3294 /* XXXJRT new stat, please */ 3295 KMOD_IPSTAT_INC(ips_toosmall); 3296 goto bad; 3297 } 3298 } else if (__predict_false(m->m_len < sizeof (struct ip))) { 3299 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) { 3300 KMOD_IPSTAT_INC(ips_toosmall); 3301 goto bad; 3302 } 3303 } 3304 ip = mtod(m, struct ip *); 3305 if (ip == NULL) goto bad; 3306 3307 if (ip->ip_v != IPVERSION) { 3308 KMOD_IPSTAT_INC(ips_badvers); 3309 goto bad; 3310 } 3311 hlen = ip->ip_hl << 2; 3312 if (hlen < sizeof(struct ip)) { /* minimum header length */ 3313 KMOD_IPSTAT_INC(ips_badhlen); 3314 goto bad; 3315 } 3316 if (hlen > m->m_len) { 3317 if ((m = m_pullup(m, hlen)) == 0) { 3318 KMOD_IPSTAT_INC(ips_badhlen); 3319 goto bad; 3320 } 3321 ip = mtod(m, struct ip *); 3322 if (ip == NULL) goto bad; 3323 } 3324 3325 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) { 3326 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID); 3327 } else { 3328 if (hlen == sizeof(struct ip)) { 3329 sum = in_cksum_hdr(ip); 3330 } else { 3331 sum = in_cksum(m, hlen); 3332 } 3333 } 3334 if (sum) { 3335 KMOD_IPSTAT_INC(ips_badsum); 3336 goto bad; 3337 } 3338 3339 /* Retrieve the packet length. */ 3340 len = ntohs(ip->ip_len); 3341 3342 /* 3343 * Check for additional length bogosity 3344 */ 3345 if (len < hlen) { 3346 KMOD_IPSTAT_INC(ips_badlen); 3347 goto bad; 3348 } 3349 3350 /* 3351 * Check that the amount of data in the buffers 3352 * is as at least much as the IP header would have us expect. 3353 * Drop packet if shorter than we expect. 3354 */ 3355 if (m->m_pkthdr.len < len) { 3356 KMOD_IPSTAT_INC(ips_tooshort); 3357 goto bad; 3358 } 3359 3360 /* Checks out, proceed */ 3361 *mp = m; 3362 return (0); 3363 3364 bad: 3365 *mp = m; 3366 return (-1); 3367 } 3368 3369 #ifdef INET6 3370 /* 3371 * Same as above, but for IPv6. 3372 * Cut-and-pasted from ip6_input.c. 3373 * XXX Should we update ip6stat, or not? 3374 */ 3375 static int 3376 bridge_ip6_checkbasic(struct mbuf **mp) 3377 { 3378 struct mbuf *m = *mp; 3379 struct ip6_hdr *ip6; 3380 3381 /* 3382 * If the IPv6 header is not aligned, slurp it up into a new 3383 * mbuf with space for link headers, in the event we forward 3384 * it. Otherwise, if it is aligned, make sure the entire base 3385 * IPv6 header is in the first mbuf of the chain. 
3386 */ 3387 if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { 3388 struct ifnet *inifp = m->m_pkthdr.rcvif; 3389 if ((m = m_copyup(m, sizeof(struct ip6_hdr), 3390 (max_linkhdr + 3) & ~3)) == NULL) { 3391 /* XXXJRT new stat, please */ 3392 V_ip6stat.ip6s_toosmall++; 3393 in6_ifstat_inc(inifp, ifs6_in_hdrerr); 3394 goto bad; 3395 } 3396 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) { 3397 struct ifnet *inifp = m->m_pkthdr.rcvif; 3398 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) { 3399 V_ip6stat.ip6s_toosmall++; 3400 in6_ifstat_inc(inifp, ifs6_in_hdrerr); 3401 goto bad; 3402 } 3403 } 3404 3405 ip6 = mtod(m, struct ip6_hdr *); 3406 3407 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) { 3408 V_ip6stat.ip6s_badvers++; 3409 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr); 3410 goto bad; 3411 } 3412 3413 /* Checks out, proceed */ 3414 *mp = m; 3415 return (0); 3416 3417 bad: 3418 *mp = m; 3419 return (-1); 3420 } 3421 #endif /* INET6 */ 3422 3423 /* 3424 * bridge_fragment: 3425 * 3426 * Return a fragmented mbuf chain. 3427 */ 3428 static int 3429 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh, 3430 int snap, struct llc *llc) 3431 { 3432 struct mbuf *m0; 3433 struct ip *ip; 3434 int error = -1; 3435 3436 if (m->m_len < sizeof(struct ip) && 3437 (m = m_pullup(m, sizeof(struct ip))) == NULL) 3438 goto out; 3439 ip = mtod(m, struct ip *); 3440 3441 error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist, 3442 CSUM_DELAY_IP); 3443 if (error) 3444 goto out; 3445 3446 /* walk the chain and re-add the Ethernet header */ 3447 for (m0 = m; m0; m0 = m0->m_nextpkt) { 3448 if (error == 0) { 3449 if (snap) { 3450 M_PREPEND(m0, sizeof(struct llc), M_DONTWAIT); 3451 if (m0 == NULL) { 3452 error = ENOBUFS; 3453 continue; 3454 } 3455 bcopy(llc, mtod(m0, caddr_t), 3456 sizeof(struct llc)); 3457 } 3458 M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT); 3459 if (m0 == NULL) { 3460 error = ENOBUFS; 3461 continue; 3462 } 3463 bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN); 3464 } else 3465 m_freem(m); 3466 } 3467 3468 if (error == 0) 3469 KMOD_IPSTAT_INC(ips_fragmented); 3470 3471 return (error); 3472 3473 out: 3474 if (m != NULL) 3475 m_freem(m); 3476 return (error); 3477 } 3478