// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Device handling code
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/netfilter_bridge.h>

#include <linux/uaccess.h>
#include "br_private.h"

#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
			 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)

const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_br_ops);

/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge_mcast_port *pmctx_null = NULL;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
	struct net_bridge_fdb_entry *dst;
	struct net_bridge_mdb_entry *mdst;
	const struct nf_br_ops *nf_ops;
	u8 state = BR_STATE_FORWARDING;
	struct net_bridge_vlan *vlan;
	const unsigned char *dest;
	u16 vid = 0;

	memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
	br_tc_skb_miss_set(skb, false);

	rcu_read_lock();
	nf_ops = rcu_dereference(nf_br_ops);
	if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
		rcu_read_unlock();
		return NETDEV_TX_OK;
	}

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	br_switchdev_frame_unmark(skb);
	BR_INPUT_SKB_CB(skb)->brdev = dev;
	BR_INPUT_SKB_CB(skb)->frag_max_size = 0;

	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);

	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid,
				&state, &vlan))
		goto out;

	if (IS_ENABLED(CONFIG_INET) &&
	    (eth_hdr(skb)->h_proto == htons(ETH_P_ARP) ||
	     eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
	    br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
		br_do_proxy_suppress_arp(skb, br, vid, NULL);
	} else if (IS_ENABLED(CONFIG_IPV6) &&
		   skb->protocol == htons(ETH_P_IPV6) &&
		   br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
				      sizeof(struct nd_msg)) &&
		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
		struct nd_msg *msg, _msg;

		msg = br_is_nd_neigh_msg(skb, &_msg);
		if (msg)
			br_do_suppress_nd(skb, br, vid, NULL, msg);
	}

	dest = eth_hdr(skb)->h_dest;
	if (is_broadcast_ether_addr(dest)) {
		br_flood(br, skb, BR_PKT_BROADCAST, false, true, vid);
	} else if (is_multicast_ether_addr(dest)) {
		if (unlikely(netpoll_tx_running(dev))) {
			br_flood(br, skb, BR_PKT_MULTICAST, false, true, vid);
			goto out;
		}
		if (br_multicast_rcv(&brmctx, &pmctx_null, vlan, skb, vid)) {
			kfree_skb(skb);
			goto out;
		}

		mdst = br_mdb_entry_skb_get(brmctx, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst))
			br_multicast_flood(mdst, skb, brmctx, false, true);
		else
			br_flood(br, skb, BR_PKT_MULTICAST, false, true, vid);
	} else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) {
		br_forward(dst->dst, skb, false, true);
	} else {
		br_flood(br, skb, BR_PKT_UNICAST, false, true, vid);
	}
out:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}

static int br_dev_init(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = br_fdb_hash_init(br);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	err = br_mdb_hash_init(br);
	if (err) {
		free_percpu(dev->tstats);
		br_fdb_hash_fini(br);
		return err;
	}

	err = br_vlan_init(br);
	if (err) {
		free_percpu(dev->tstats);
		br_mdb_hash_fini(br);
		br_fdb_hash_fini(br);
		return err;
	}

	err = br_multicast_init_stats(br);
	if (err) {
		free_percpu(dev->tstats);
		br_vlan_flush(br);
		br_mdb_hash_fini(br);
		br_fdb_hash_fini(br);
	}

	netdev_lockdep_set_classes(dev);
	return err;
}

static void br_dev_uninit(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_multicast_dev_del(br);
	br_multicast_uninit_stats(br);
	br_vlan_flush(br);
	br_mdb_hash_fini(br);
	br_fdb_hash_fini(br);
	free_percpu(dev->tstats);
}

static int br_dev_open(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	netdev_update_features(dev);
	netif_start_queue(dev);
	br_stp_enable_bridge(br);
	br_multicast_open(br);

	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_join_snoopers(br);

	return 0;
}

static void br_dev_set_multicast_list(struct net_device *dev)
{
}

static void br_dev_change_rx_flags(struct net_device *dev, int change)
{
	if (change & IFF_PROMISC)
		br_manage_promisc(netdev_priv(dev));
}

static int br_dev_stop(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_stp_disable_bridge(br);
	br_multicast_stop(br);

	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_leave_snoopers(br);

	netif_stop_queue(dev);

	return 0;
}

static int br_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_bridge *br = netdev_priv(dev);

	dev->mtu = new_mtu;

	/* this flag will be cleared if the MTU was automatically adjusted */
	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* remember the MTU in the rtable for PMTU */
	dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
#endif

	return 0;
}

/* Allow setting mac address to any valid ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
	struct net_bridge *br = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* dev_set_mac_addr() can be called by a master device on bridge's
	 * NETDEV_UNREGISTER, but since it's being destroyed do nothing
	 */
	if (dev->reg_state != NETREG_REGISTERED)
		return -EBUSY;

	spin_lock_bh(&br->lock);
	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
		/* Mac address will be changed in br_stp_change_bridge_id().
		 */
		br_stp_change_bridge_id(br, addr->sa_data);
	}
	spin_unlock_bh(&br->lock);

	return 0;
}

static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strscpy(info->driver, "bridge", sizeof(info->driver));
	strscpy(info->version, BR_VERSION, sizeof(info->version));
	strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strscpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

static int br_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *cmd)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;
	cmd->base.speed = SPEED_UNKNOWN;

	list_for_each_entry(p, &br->port_list, list) {
		struct ethtool_link_ksettings ecmd;
		struct net_device *pdev = p->dev;

		if (!netif_running(pdev) || !netif_oper_up(pdev))
			continue;

		if (__ethtool_get_link_ksettings(pdev, &ecmd))
			continue;

		if (ecmd.base.speed == (__u32)SPEED_UNKNOWN)
			continue;

		if (cmd->base.speed == (__u32)SPEED_UNKNOWN ||
		    cmd->base.speed < ecmd.base.speed)
			cmd->base.speed = ecmd.base.speed;
	}

	return 0;
}

static netdev_features_t br_fix_features(struct net_device *dev,
					 netdev_features_t features)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_features_recompute(br, features);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void br_poll_controller(struct net_device *br_dev)
{
}

static void br_netpoll_cleanup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		br_netpoll_disable(p);
}

static int __br_netpoll_enable(struct net_bridge_port *p)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, p->dev);
	if (err) {
		kfree(np);
		return err;
	}

	p->np = np;
	return err;
}

int br_netpoll_enable(struct net_bridge_port *p)
{
	if (!p->br->dev->npinfo)
		return 0;

	return __br_netpoll_enable(p);
}

static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	int err = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (!p->dev)
			continue;
		err = __br_netpoll_enable(p);
		if (err)
			goto fail;
	}

out:
	return err;

fail:
	br_netpoll_cleanup(dev);
	goto out;
}

void br_netpoll_disable(struct net_bridge_port *p)
{
	struct netpoll *np = p->np;

	if (!np)
		return;

	p->np = NULL;

	__netpoll_free(np);
}

#endif

static int br_add_slave(struct net_device *dev, struct net_device *slave_dev,
			struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_add_if(br, slave_dev, extack);
}

static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_del_if(br, slave_dev);
}

static int br_fill_forward_path(struct net_device_path_ctx *ctx,
				struct net_device_path *path)
{
	struct net_bridge_fdb_entry *f;
	struct net_bridge_port *dst;
	struct net_bridge *br;

	if (netif_is_bridge_port(ctx->dev))
		return -1;

	br = netdev_priv(ctx->dev);

	br_vlan_fill_forward_path_pvid(br, ctx, path);

	f = br_fdb_find_rcu(br, ctx->daddr, path->bridge.vlan_id);
	if (!f || !f->dst)
		return -1;

	dst = READ_ONCE(f->dst);
	if (!dst)
		return -1;

	if (br_vlan_fill_forward_path_mode(br, dst, path))
		return -1;

	path->type = DEV_PATH_BRIDGE;
	path->dev = dst->br->dev;
	ctx->dev = dst->dev;

	switch (path->bridge.vlan_mode) {
	case DEV_PATH_BR_VLAN_TAG:
		if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan))
			return -ENOSPC;
		ctx->vlan[ctx->num_vlans].id = path->bridge.vlan_id;
		ctx->vlan[ctx->num_vlans].proto = path->bridge.vlan_proto;
		ctx->num_vlans++;
		break;
	case DEV_PATH_BR_VLAN_UNTAG_HW:
	case DEV_PATH_BR_VLAN_UNTAG:
		ctx->num_vlans--;
		break;
	case DEV_PATH_BR_VLAN_KEEP:
		break;
	}

	return 0;
}

static const struct ethtool_ops br_ethtool_ops = {
	.get_drvinfo		 = br_getinfo,
	.get_link		 = ethtool_op_get_link,
	.get_link_ksettings	 = br_get_link_ksettings,
};

static const struct net_device_ops br_netdev_ops = {
	.ndo_open		 = br_dev_open,
	.ndo_stop		 = br_dev_stop,
	.ndo_init		 = br_dev_init,
	.ndo_uninit		 = br_dev_uninit,
	.ndo_start_xmit		 = br_dev_xmit,
	.ndo_get_stats64	 = dev_get_tstats64,
	.ndo_set_mac_address	 = br_set_mac_address,
	.ndo_set_rx_mode	 = br_dev_set_multicast_list,
	.ndo_change_rx_flags	 = br_dev_change_rx_flags,
	.ndo_change_mtu		 = br_change_mtu,
	.ndo_siocdevprivate	 = br_dev_siocdevprivate,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	 = br_netpoll_setup,
	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
	.ndo_poll_controller	 = br_poll_controller,
#endif
	.ndo_add_slave		 = br_add_slave,
	.ndo_del_slave		 = br_del_slave,
	.ndo_fix_features	 = br_fix_features,
	.ndo_fdb_add		 = br_fdb_add,
	.ndo_fdb_del		 = br_fdb_delete,
	.ndo_fdb_del_bulk	 = br_fdb_delete_bulk,
	.ndo_fdb_dump		 = br_fdb_dump,
	.ndo_fdb_get		 = br_fdb_get,
	.ndo_mdb_add		 = br_mdb_add,
	.ndo_mdb_del		 = br_mdb_del,
	.ndo_mdb_del_bulk	 = br_mdb_del_bulk,
	.ndo_mdb_dump		 = br_mdb_dump,
	.ndo_mdb_get		 = br_mdb_get,
	.ndo_bridge_getlink	 = br_getlink,
	.ndo_bridge_setlink	 = br_setlink,
	.ndo_bridge_dellink	 = br_dellink,
	.ndo_features_check	 = passthru_features_check,
	.ndo_fill_forward_path	 = br_fill_forward_path,
};

static const struct device_type br_type = {
	.name	= "bridge",
};

void br_dev_setup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &br_netdev_ops;
	dev->needs_free_netdev = true;
	dev->ethtool_ops = &br_ethtool_ops;
	SET_NETDEV_DEVTYPE(dev, &br_type);
	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;

	dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_STAG_TX;
	dev->vlan_features = COMMON_FEATURES;

	br->dev = dev;
	spin_lock_init(&br->lock);
	INIT_LIST_HEAD(&br->port_list);
	INIT_HLIST_HEAD(&br->fdb_list);
	INIT_HLIST_HEAD(&br->frame_type_list);
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
	INIT_HLIST_HEAD(&br->mrp_list);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_CFM)
	INIT_HLIST_HEAD(&br->mep_list);
#endif
	spin_lock_init(&br->hash_lock);

	br->bridge_id.prio[0] = 0x80;
	br->bridge_id.prio[1] = 0x00;

	ether_addr_copy(br->group_addr, eth_stp_addr);

	br->stp_enabled = BR_NO_STP;
	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
	br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;

	br->designated_root = br->bridge_id;
	br->bridge_max_age = br->max_age = 20 * HZ;
	br->bridge_hello_time = br->hello_time = 2 * HZ;
	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
	br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
	dev->max_mtu = ETH_MAX_MTU;

	br_netfilter_rtable_init(br);
	br_stp_timer_init(br);
	br_multicast_init(br);
	INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup);
}