// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Device handling code
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/netfilter_bridge.h>

#include <linux/uaccess.h>
#include <net/netdev_lock.h>

#include "br_private.h"

#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
			 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)

const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_br_ops);

/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	enum skb_drop_reason reason = pskb_may_pull_reason(skb, ETH_HLEN);
	struct net_bridge_mcast_port *pmctx_null = NULL;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
	struct net_bridge_fdb_entry *dst;
	struct net_bridge_mdb_entry *mdst;
	const struct nf_br_ops *nf_ops;
	u8 state = BR_STATE_FORWARDING;
	struct net_bridge_vlan *vlan;
	const unsigned char *dest;
	u16 vid = 0;

	if (unlikely(reason != SKB_NOT_DROPPED_YET)) {
		kfree_skb_reason(skb, reason);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
	br_tc_skb_miss_set(skb, false);

	rcu_read_lock();
	nf_ops = rcu_dereference(nf_br_ops);
	if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
		rcu_read_unlock();
		return NETDEV_TX_OK;
	}

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	br_switchdev_frame_unmark(skb);
	BR_INPUT_SKB_CB(skb)->brdev = dev;
	BR_INPUT_SKB_CB(skb)->frag_max_size = 0;

	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);

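	/* Frames sent by the bridge itself must also pass its own VLAN
	 * configuration; this resolves the VLAN id used for the forwarding
	 * lookups below.
	 */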
	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid,
				&state, &vlan))
		goto out;

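	/* With neighbor suppression enabled, ARP/RARP and IPv6 neighbor
	 * discovery may be handled locally instead of being flooded.
	 */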
	if (IS_ENABLED(CONFIG_INET) &&
	    (eth_hdr(skb)->h_proto == htons(ETH_P_ARP) ||
	     eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
	    br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
		br_do_proxy_suppress_arp(skb, br, vid, NULL);
	} else if (IS_ENABLED(CONFIG_IPV6) &&
		   skb->protocol == htons(ETH_P_IPV6) &&
		   br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
				 sizeof(struct nd_msg)) &&
		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
			struct nd_msg *msg, _msg;

			msg = br_is_nd_neigh_msg(skb, &_msg);
			if (msg)
				br_do_suppress_nd(skb, br, vid, NULL, msg);
	}

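	/* Forwarding decision for locally originated traffic: flood
	 * broadcasts, run multicast snooping for multicast, and use the
	 * FDB for unicast, flooding on a lookup miss.
	 */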
	dest = eth_hdr(skb)->h_dest;
	if (is_broadcast_ether_addr(dest)) {
		br_flood(br, skb, BR_PKT_BROADCAST, false, true, vid);
	} else if (is_multicast_ether_addr(dest)) {
		if (unlikely(netpoll_tx_running(dev))) {
			br_flood(br, skb, BR_PKT_MULTICAST, false, true, vid);
			goto out;
		}
		if (br_multicast_rcv(&brmctx, &pmctx_null, vlan, skb, vid)) {
			kfree_skb(skb);
			goto out;
		}

		mdst = br_mdb_entry_skb_get(brmctx, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst))
			br_multicast_flood(mdst, skb, brmctx, false, true);
		else
			br_flood(br, skb, BR_PKT_MULTICAST, false, true, vid);
	} else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) {
		br_forward(dst->dst, skb, false, true);
	} else {
		br_flood(br, skb, BR_PKT_UNICAST, false, true, vid);
	}
out:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}

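/* ndo_init: set up the bridge's FDB and MDB hash tables, VLAN state and
 * per-cpu multicast statistics, unwinding what was already allocated if a
 * later step fails.
 */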
static int br_dev_init(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	err = br_fdb_hash_init(br);
	if (err)
		return err;

	err = br_mdb_hash_init(br);
	if (err) {
		br_fdb_hash_fini(br);
		return err;
	}

	err = br_vlan_init(br);
	if (err) {
		br_mdb_hash_fini(br);
		br_fdb_hash_fini(br);
		return err;
	}

	err = br_multicast_init_stats(br);
	if (err) {
		br_vlan_flush(br);
		br_mdb_hash_fini(br);
		br_fdb_hash_fini(br);
		return err;
	}

	netdev_lockdep_set_classes(dev);
	return 0;
}

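/* ndo_uninit: release the multicast, VLAN, MDB and FDB state set up in
 * br_dev_init().
 */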
static void br_dev_uninit(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_multicast_dev_del(br);
	br_multicast_uninit_stats(br);
	br_vlan_flush(br);
	br_mdb_hash_fini(br);
	br_fdb_hash_fini(br);
}

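/* ndo_open: refresh features, start the queue, enable STP and multicast,
 * and join the IGMP/MLD snoopers groups if snooping is enabled.
 */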
static int br_dev_open(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	netdev_update_features(dev);
	netif_start_queue(dev);
	br_stp_enable_bridge(br);
	br_multicast_open(br);

	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_join_snoopers(br);

	return 0;
}

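/* ndo_set_rx_mode: nothing to do, the bridge keeps no device-level address
 * filter of its own.
 */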
static void br_dev_set_multicast_list(struct net_device *dev)
{
}

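/* Recompute port promiscuity when the bridge device's promiscuous flag
 * changes.
 */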
static void br_dev_change_rx_flags(struct net_device *dev, int change)
{
	if (change & IFF_PROMISC)
		br_manage_promisc(netdev_priv(dev));
}

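/* ndo_stop: the reverse of br_dev_open(): disable STP, stop multicast,
 * leave the snoopers groups and stop the transmit queue.
 */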
static int br_dev_stop(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_stp_disable_bridge(br);
	br_multicast_stop(br);

	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_leave_snoopers(br);

	netif_stop_queue(dev);

	return 0;
}

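/* ndo_change_mtu: record that the MTU was set by the user so that it is no
 * longer adjusted automatically to follow the ports' MTU.
 */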
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_bridge *br = netdev_priv(dev);

	WRITE_ONCE(dev->mtu, new_mtu);

	/* this flag will be cleared if the MTU was automatically adjusted */
	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* remember the MTU in the rtable for PMTU */
	dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
#endif

	return 0;
}

/* Allow setting mac address to any valid ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
	struct net_bridge *br = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* dev_set_mac_addr() can be called by a master device on bridge's
	 * NETDEV_UNREGISTER, but since it's being destroyed do nothing
	 */
	if (dev->reg_state != NETREG_REGISTERED)
		return -EBUSY;

	spin_lock_bh(&br->lock);
	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
		/* Mac address will be changed in br_stp_change_bridge_id(). */
		br_stp_change_bridge_id(br, addr->sa_data);
	}
	spin_unlock_bh(&br->lock);

	return 0;
}

static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strscpy(info->driver, "bridge", sizeof(info->driver));
	strscpy(info->version, BR_VERSION, sizeof(info->version));
	strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strscpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

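/* ethtool get_link_ksettings: report the highest speed among the ports that
 * are currently up; duplex and port type are left unknown.
 */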
static int br_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *cmd)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;
	cmd->base.speed = SPEED_UNKNOWN;

	list_for_each_entry(p, &br->port_list, list) {
		struct ethtool_link_ksettings ecmd;
		struct net_device *pdev = p->dev;

		if (!netif_running(pdev) || !netif_oper_up(pdev))
			continue;

		if (__ethtool_get_link_ksettings(pdev, &ecmd))
			continue;

		if (ecmd.base.speed == (__u32)SPEED_UNKNOWN)
			continue;

		if (cmd->base.speed == (__u32)SPEED_UNKNOWN ||
		    cmd->base.speed < ecmd.base.speed)
			cmd->base.speed = ecmd.base.speed;
	}

	return 0;
}

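/* ndo_fix_features: restrict the requested features to what the current set
 * of ports can support.
 */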
static netdev_features_t br_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_features_recompute(br, features);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void br_poll_controller(struct net_device *br_dev)
{
}

static void br_netpoll_cleanup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		br_netpoll_disable(p);
}

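/* Allocate a netpoll instance for a port and bind it to the port's device */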
static int __br_netpoll_enable(struct net_bridge_port *p)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, p->dev);
	if (err) {
		kfree(np);
		return err;
	}

	p->np = np;
	return err;
}

int br_netpoll_enable(struct net_bridge_port *p)
{
	if (!p->br->dev->npinfo)
		return 0;

	return __br_netpoll_enable(p);
}

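/* ndo_netpoll_setup: enable netpoll on every current port; on failure,
 * disable it again on all of them.
 */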
static int br_netpoll_setup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	int err = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (!p->dev)
			continue;
		err = __br_netpoll_enable(p);
		if (err)
			goto fail;
	}

out:
	return err;

fail:
	br_netpoll_cleanup(dev);
	goto out;
}

void br_netpoll_disable(struct net_bridge_port *p)
{
	struct netpoll *np = p->np;

	if (!np)
		return;

	p->np = NULL;

	__netpoll_free(np);
}

#endif

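/* ndo_add_slave/ndo_del_slave: add or remove a bridge port through the
 * common bridge port management helpers.
 */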
static int br_add_slave(struct net_device *dev, struct net_device *slave_dev,
			struct netlink_ext_ack *extack)

{
	struct net_bridge *br = netdev_priv(dev);

	return br_add_if(br, slave_dev, extack);
}

static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_del_if(br, slave_dev);
}

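/* ndo_fill_forward_path: resolve the next hop through the bridge for a
 * forward-path lookup: find the destination port for ctx->daddr in the FDB
 * and record how VLANs are tagged, untagged or kept on the way.
 */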
static int br_fill_forward_path(struct net_device_path_ctx *ctx,
				struct net_device_path *path)
{
	struct net_bridge_fdb_entry *f;
	struct net_bridge_port *dst;
	struct net_bridge *br;

	if (netif_is_bridge_port(ctx->dev))
		return -1;

	br = netdev_priv(ctx->dev);

	br_vlan_fill_forward_path_pvid(br, ctx, path);

	f = br_fdb_find_rcu(br, ctx->daddr, path->bridge.vlan_id);
	if (!f)
		return -1;

	dst = READ_ONCE(f->dst);
	if (!dst)
		return -1;

	if (br_vlan_fill_forward_path_mode(br, dst, path))
		return -1;

	path->type = DEV_PATH_BRIDGE;
	path->dev = dst->br->dev;
	ctx->dev = dst->dev;

	switch (path->bridge.vlan_mode) {
	case DEV_PATH_BR_VLAN_TAG:
		if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan))
			return -ENOSPC;
		ctx->vlan[ctx->num_vlans].id = path->bridge.vlan_id;
		ctx->vlan[ctx->num_vlans].proto = path->bridge.vlan_proto;
		ctx->num_vlans++;
		break;
	case DEV_PATH_BR_VLAN_UNTAG_HW:
	case DEV_PATH_BR_VLAN_UNTAG:
		ctx->num_vlans--;
		break;
	case DEV_PATH_BR_VLAN_KEEP:
		break;
	}

	return 0;
}

static const struct ethtool_ops br_ethtool_ops = {
	.get_drvinfo		 = br_getinfo,
	.get_link		 = ethtool_op_get_link,
	.get_link_ksettings	 = br_get_link_ksettings,
};

static const struct net_device_ops br_netdev_ops = {
	.ndo_open		 = br_dev_open,
	.ndo_stop		 = br_dev_stop,
	.ndo_init		 = br_dev_init,
	.ndo_uninit		 = br_dev_uninit,
	.ndo_start_xmit		 = br_dev_xmit,
	.ndo_get_stats64	 = dev_get_tstats64,
	.ndo_set_mac_address	 = br_set_mac_address,
	.ndo_set_rx_mode	 = br_dev_set_multicast_list,
	.ndo_change_rx_flags	 = br_dev_change_rx_flags,
	.ndo_change_mtu		 = br_change_mtu,
	.ndo_siocdevprivate	 = br_dev_siocdevprivate,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	 = br_netpoll_setup,
	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
	.ndo_poll_controller	 = br_poll_controller,
#endif
	.ndo_add_slave		 = br_add_slave,
	.ndo_del_slave		 = br_del_slave,
	.ndo_fix_features        = br_fix_features,
	.ndo_fdb_add		 = br_fdb_add,
	.ndo_fdb_del		 = br_fdb_delete,
	.ndo_fdb_del_bulk	 = br_fdb_delete_bulk,
	.ndo_fdb_dump		 = br_fdb_dump,
	.ndo_fdb_get		 = br_fdb_get,
	.ndo_mdb_add		 = br_mdb_add,
	.ndo_mdb_del		 = br_mdb_del,
	.ndo_mdb_del_bulk	 = br_mdb_del_bulk,
	.ndo_mdb_dump		 = br_mdb_dump,
	.ndo_mdb_get		 = br_mdb_get,
	.ndo_bridge_getlink	 = br_getlink,
	.ndo_bridge_setlink	 = br_setlink,
	.ndo_bridge_dellink	 = br_dellink,
	.ndo_features_check	 = passthru_features_check,
	.ndo_fill_forward_path	 = br_fill_forward_path,
};

static const struct device_type br_type = {
	.name	= "bridge",
};

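/* Initialize the bridge net_device and its net_bridge private data with
 * their defaults; called when a new bridge device is created, before it is
 * registered.
 */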
void br_dev_setup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &br_netdev_ops;
	dev->needs_free_netdev = true;
	dev->ethtool_ops = &br_ethtool_ops;
	SET_NETDEV_DEVTYPE(dev, &br_type);
	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
	dev->lltx = true;
	dev->netns_immutable = true;

	dev->features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_STAG_TX;
	dev->vlan_features = COMMON_FEATURES;
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;

	br->dev = dev;
	spin_lock_init(&br->lock);
	INIT_LIST_HEAD(&br->port_list);
	INIT_HLIST_HEAD(&br->fdb_list);
	INIT_HLIST_HEAD(&br->frame_type_list);
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
	INIT_HLIST_HEAD(&br->mrp_list);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_CFM)
	INIT_HLIST_HEAD(&br->mep_list);
#endif
	spin_lock_init(&br->hash_lock);

	br->bridge_id.prio[0] = 0x80;
	br->bridge_id.prio[1] = 0x00;

	ether_addr_copy(br->group_addr, eth_stp_addr);

	br->stp_enabled = BR_NO_STP;
	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
	br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;

	br->designated_root = br->bridge_id;
	br->bridge_max_age = br->max_age = 20 * HZ;
	br->bridge_hello_time = br->hello_time = 2 * HZ;
	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
	br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
	dev->max_mtu = ETH_MAX_MTU;

	br_netfilter_rtable_init(br);
	br_stp_timer_init(br);
	br_multicast_init(br);
	INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup);
}