// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Device handling code
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/netfilter_bridge.h>

#include <linux/uaccess.h>
#include "br_private.h"

#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
			 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)

const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_br_ops);

/* Transmit path for frames originated by the bridge device itself;
 * always called with BH disabled.
 */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	enum skb_drop_reason reason = pskb_may_pull_reason(skb, ETH_HLEN);
	struct net_bridge_mcast_port *pmctx_null = NULL;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mcast *brmctx = &br->multicast_ctx;
	struct net_bridge_fdb_entry *dst;
	struct net_bridge_mdb_entry *mdst;
	const struct nf_br_ops *nf_ops;
	u8 state = BR_STATE_FORWARDING;
	struct net_bridge_vlan *vlan;
	const unsigned char *dest;
	u16 vid = 0;

	if (unlikely(reason != SKB_NOT_DROPPED_YET)) {
		kfree_skb_reason(skb, reason);
		return NETDEV_TX_OK;
	}

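	/* Start from a clean bridge control block and clear the tc L2-miss
	 * flag before classifying the frame.
	 */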
	memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
	br_tc_skb_miss_set(skb, false);

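	/* Give the br_netfilter hook, if registered, a chance to process the
	 * frame; a nonzero return means it consumed the skb.
	 */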
	rcu_read_lock();
	nf_ops = rcu_dereference(nf_br_ops);
	if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
		rcu_read_unlock();
		return NETDEV_TX_OK;
	}

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	br_switchdev_frame_unmark(skb);
	BR_INPUT_SKB_CB(skb)->brdev = dev;
	BR_INPUT_SKB_CB(skb)->frag_max_size = 0;

	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);

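	/* Enforce the bridge device's own VLAN configuration; when VLAN
	 * filtering is enabled, frames in VLANs not configured on the bridge
	 * are dropped here.
	 */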
	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid,
				&state, &vlan))
		goto out;

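	/* With neighbour suppression enabled, ARP and IPv6 neighbour
	 * solicitations may be answered or suppressed on behalf of known
	 * neighbours instead of being flooded.
	 */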
	if (IS_ENABLED(CONFIG_INET) &&
	    (eth_hdr(skb)->h_proto == htons(ETH_P_ARP) ||
	     eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
	    br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
		br_do_proxy_suppress_arp(skb, br, vid, NULL);
	} else if (IS_ENABLED(CONFIG_IPV6) &&
		   skb->protocol == htons(ETH_P_IPV6) &&
		   br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
				 sizeof(struct nd_msg)) &&
		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
		struct nd_msg *msg, _msg;

		msg = br_is_nd_neigh_msg(skb, &_msg);
		if (msg)
			br_do_suppress_nd(skb, br, vid, NULL, msg);
	}

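	/* Forwarding decision: flood broadcast frames, use the MDB for
	 * multicast when a querier exists, and the FDB for known unicast
	 * destinations; otherwise flood.
	 */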
	dest = eth_hdr(skb)->h_dest;
	if (is_broadcast_ether_addr(dest)) {
		br_flood(br, skb, BR_PKT_BROADCAST, false, true, vid);
	} else if (is_multicast_ether_addr(dest)) {
		if (unlikely(netpoll_tx_running(dev))) {
			br_flood(br, skb, BR_PKT_MULTICAST, false, true, vid);
			goto out;
		}
		if (br_multicast_rcv(&brmctx, &pmctx_null, vlan, skb, vid)) {
			kfree_skb(skb);
			goto out;
		}

		mdst = br_mdb_entry_skb_get(brmctx, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst))
			br_multicast_flood(mdst, skb, brmctx, false, true);
		else
			br_flood(br, skb, BR_PKT_MULTICAST, false, true, vid);
	} else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) {
		br_forward(dst->dst, skb, false, true);
	} else {
		br_flood(br, skb, BR_PKT_UNICAST, false, true, vid);
	}
out:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}

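/* ndo_init: set up the FDB and MDB hash tables, VLAN state and multicast
 * statistics, unwinding in reverse order if any step fails.
 */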
static int br_dev_init(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	err = br_fdb_hash_init(br);
	if (err)
		return err;

	err = br_mdb_hash_init(br);
	if (err) {
		br_fdb_hash_fini(br);
		return err;
	}

	err = br_vlan_init(br);
	if (err) {
		br_mdb_hash_fini(br);
		br_fdb_hash_fini(br);
		return err;
	}

	err = br_multicast_init_stats(br);
	if (err) {
		br_vlan_flush(br);
		br_mdb_hash_fini(br);
		br_fdb_hash_fini(br);
		return err;
	}

	netdev_lockdep_set_classes(dev);
	return 0;
}

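/* ndo_uninit: release the multicast, VLAN, MDB and FDB state created for
 * this bridge.
 */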
static void br_dev_uninit(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_multicast_dev_del(br);
	br_multicast_uninit_stats(br);
	br_vlan_flush(br);
	br_mdb_hash_fini(br);
	br_fdb_hash_fini(br);
}

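/* ndo_open: recompute features, start the queue, enable STP and the
 * multicast machinery, and join the IGMP/MLD all-snoopers groups when
 * multicast snooping is enabled.
 */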
static int br_dev_open(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	netdev_update_features(dev);
	netif_start_queue(dev);
	br_stp_enable_bridge(br);
	br_multicast_open(br);

	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_join_snoopers(br);

	return 0;
}

static void br_dev_set_multicast_list(struct net_device *dev)
{
}

static void br_dev_change_rx_flags(struct net_device *dev, int change)
{
	if (change & IFF_PROMISC)
		br_manage_promisc(netdev_priv(dev));
}

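/* ndo_stop: the reverse of br_dev_open(). */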
static int br_dev_stop(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_stp_disable_bridge(br);
	br_multicast_stop(br);

	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_leave_snoopers(br);

	netif_stop_queue(dev);

	return 0;
}

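/* ndo_change_mtu: remember that the user set the MTU explicitly so the
 * bridge stops auto-adjusting it when ports change.
 */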
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_bridge *br = netdev_priv(dev);

	WRITE_ONCE(dev->mtu, new_mtu);

	/* this flag will be cleared if the MTU was automatically adjusted */
	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* remember the MTU in the rtable for PMTU */
	dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
#endif

	return 0;
}

/* Allow setting the MAC address to any valid Ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
	struct net_bridge *br = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* dev_set_mac_address() can be called by a master device on the
	 * bridge's NETDEV_UNREGISTER; since the bridge is being destroyed,
	 * do nothing.
	 */
	if (dev->reg_state != NETREG_REGISTERED)
		return -EBUSY;

	spin_lock_bh(&br->lock);
	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
		/* MAC address will be changed in br_stp_change_bridge_id(). */
		br_stp_change_bridge_id(br, addr->sa_data);
	}
	spin_unlock_bh(&br->lock);

	return 0;
}

static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strscpy(info->driver, "bridge", sizeof(info->driver));
	strscpy(info->version, BR_VERSION, sizeof(info->version));
	strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strscpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

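/* ethtool: report the highest link speed among ports that are running and
 * operationally up; duplex and port type are not meaningful for the virtual
 * bridge device.
 */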
static int br_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *cmd)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;
	cmd->base.speed = SPEED_UNKNOWN;

	list_for_each_entry(p, &br->port_list, list) {
		struct ethtool_link_ksettings ecmd;
		struct net_device *pdev = p->dev;

		if (!netif_running(pdev) || !netif_oper_up(pdev))
			continue;

		if (__ethtool_get_link_ksettings(pdev, &ecmd))
			continue;

		if (ecmd.base.speed == (__u32)SPEED_UNKNOWN)
			continue;

		if (cmd->base.speed == (__u32)SPEED_UNKNOWN ||
		    cmd->base.speed < ecmd.base.speed)
			cmd->base.speed = ecmd.base.speed;
	}

	return 0;
}

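/* ndo_fix_features: constrain the requested feature set to what all bridge
 * ports can support.
 */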
static netdev_features_t br_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_features_recompute(br, features);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void br_poll_controller(struct net_device *br_dev)
{
}

static void br_netpoll_cleanup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		br_netpoll_disable(p);
}

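/* Allocate a netpoll instance and bind it to the given bridge port. */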
static int __br_netpoll_enable(struct net_bridge_port *p)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, p->dev);
	if (err) {
		kfree(np);
		return err;
	}

	p->np = np;
	return err;
}

int br_netpoll_enable(struct net_bridge_port *p)
{
	if (!p->br->dev->npinfo)
		return 0;

	return __br_netpoll_enable(p);
}

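/* ndo_netpoll_setup: enable netpoll on every current port, rolling back all
 * ports on failure.
 */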
static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	int err = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (!p->dev)
			continue;
		err = __br_netpoll_enable(p);
		if (err)
			goto fail;
	}

out:
	return err;

fail:
	br_netpoll_cleanup(dev);
	goto out;
}

void br_netpoll_disable(struct net_bridge_port *p)
{
	struct netpoll *np = p->np;

	if (!np)
		return;

	p->np = NULL;

	__netpoll_free(np);
}

#endif

static int br_add_slave(struct net_device *dev, struct net_device *slave_dev,
			struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_add_if(br, slave_dev, extack);
}

static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_del_if(br, slave_dev);
}

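/* ndo_fill_forward_path: resolve one bridge hop for forward-path users by
 * looking up the destination MAC in the FDB, recording any VLAN tag/untag
 * action, and continuing path resolution on the egress port device.
 */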
static int br_fill_forward_path(struct net_device_path_ctx *ctx,
				struct net_device_path *path)
{
	struct net_bridge_fdb_entry *f;
	struct net_bridge_port *dst;
	struct net_bridge *br;

	if (netif_is_bridge_port(ctx->dev))
		return -1;

	br = netdev_priv(ctx->dev);

	br_vlan_fill_forward_path_pvid(br, ctx, path);

	f = br_fdb_find_rcu(br, ctx->daddr, path->bridge.vlan_id);
	if (!f)
		return -1;

	dst = READ_ONCE(f->dst);
	if (!dst)
		return -1;

	if (br_vlan_fill_forward_path_mode(br, dst, path))
		return -1;

	path->type = DEV_PATH_BRIDGE;
	path->dev = dst->br->dev;
	ctx->dev = dst->dev;

	switch (path->bridge.vlan_mode) {
	case DEV_PATH_BR_VLAN_TAG:
		if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan))
			return -ENOSPC;
		ctx->vlan[ctx->num_vlans].id = path->bridge.vlan_id;
		ctx->vlan[ctx->num_vlans].proto = path->bridge.vlan_proto;
		ctx->num_vlans++;
		break;
	case DEV_PATH_BR_VLAN_UNTAG_HW:
	case DEV_PATH_BR_VLAN_UNTAG:
		ctx->num_vlans--;
		break;
	case DEV_PATH_BR_VLAN_KEEP:
		break;
	}

	return 0;
}

static const struct ethtool_ops br_ethtool_ops = {
	.get_drvinfo		 = br_getinfo,
	.get_link		 = ethtool_op_get_link,
	.get_link_ksettings	 = br_get_link_ksettings,
};

static const struct net_device_ops br_netdev_ops = {
	.ndo_open		 = br_dev_open,
	.ndo_stop		 = br_dev_stop,
	.ndo_init		 = br_dev_init,
	.ndo_uninit		 = br_dev_uninit,
	.ndo_start_xmit		 = br_dev_xmit,
	.ndo_get_stats64	 = dev_get_tstats64,
	.ndo_set_mac_address	 = br_set_mac_address,
	.ndo_set_rx_mode	 = br_dev_set_multicast_list,
	.ndo_change_rx_flags	 = br_dev_change_rx_flags,
	.ndo_change_mtu		 = br_change_mtu,
	.ndo_siocdevprivate	 = br_dev_siocdevprivate,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	 = br_netpoll_setup,
	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
	.ndo_poll_controller	 = br_poll_controller,
#endif
	.ndo_add_slave		 = br_add_slave,
	.ndo_del_slave		 = br_del_slave,
	.ndo_fix_features        = br_fix_features,
	.ndo_fdb_add		 = br_fdb_add,
	.ndo_fdb_del		 = br_fdb_delete,
	.ndo_fdb_del_bulk	 = br_fdb_delete_bulk,
	.ndo_fdb_dump		 = br_fdb_dump,
	.ndo_fdb_get		 = br_fdb_get,
	.ndo_mdb_add		 = br_mdb_add,
	.ndo_mdb_del		 = br_mdb_del,
	.ndo_mdb_del_bulk	 = br_mdb_del_bulk,
	.ndo_mdb_dump		 = br_mdb_dump,
	.ndo_mdb_get		 = br_mdb_get,
	.ndo_bridge_getlink	 = br_getlink,
	.ndo_bridge_setlink	 = br_setlink,
	.ndo_bridge_dellink	 = br_dellink,
	.ndo_features_check	 = passthru_features_check,
	.ndo_fill_forward_path	 = br_fill_forward_path,
};

static const struct device_type br_type = {
	.name	= "bridge",
};

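/* Setup callback for the bridge rtnl link ops: initialize the net_device and
 * the bridge private data with default features, STP parameters and timers.
 */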
void br_dev_setup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &br_netdev_ops;
	dev->needs_free_netdev = true;
	dev->ethtool_ops = &br_ethtool_ops;
	SET_NETDEV_DEVTYPE(dev, &br_type);
	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;

	dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_STAG_TX;
	dev->vlan_features = COMMON_FEATURES;
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;

	br->dev = dev;
	spin_lock_init(&br->lock);
	INIT_LIST_HEAD(&br->port_list);
	INIT_HLIST_HEAD(&br->fdb_list);
	INIT_HLIST_HEAD(&br->frame_type_list);
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
	INIT_HLIST_HEAD(&br->mrp_list);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_CFM)
	INIT_HLIST_HEAD(&br->mep_list);
#endif
	spin_lock_init(&br->hash_lock);

	br->bridge_id.prio[0] = 0x80;
	br->bridge_id.prio[1] = 0x00;

	ether_addr_copy(br->group_addr, eth_stp_addr);

	br->stp_enabled = BR_NO_STP;
	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
	br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;

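	/* STP defaults: the bridge starts out as its own designated root,
	 * with the 802.1D timer values (in jiffies) and the default ageing
	 * time.
	 */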
	br->designated_root = br->bridge_id;
	br->bridge_max_age = br->max_age = 20 * HZ;
	br->bridge_hello_time = br->hello_time = 2 * HZ;
	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
	br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
	dev->max_mtu = ETH_MAX_MTU;

	br_netfilter_rtable_init(br);
	br_stp_timer_init(br);
	br_multicast_init(br);
	INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup);
}