xref: /linux/net/bridge/br_fdb.c (revision 7ec462100ef9142344ddbf86f2c3008b97acddbe)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Forwarding database
4  *	Linux ethernet bridge
5  *
6  *	Authors:
7  *	Lennert Buytenhek		<buytenh@gnu.org>
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/init.h>
12 #include <linux/rculist.h>
13 #include <linux/spinlock.h>
14 #include <linux/times.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/jhash.h>
18 #include <linux/random.h>
19 #include <linux/slab.h>
20 #include <linux/atomic.h>
21 #include <linux/unaligned.h>
22 #include <linux/if_vlan.h>
23 #include <net/switchdev.h>
24 #include <trace/events/bridge.h>
25 #include "br_private.h"
26 
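/* FDB entries are hashed on struct net_bridge_fdb_key, i.e. the
 * (MAC address, VLAN id) pair, so the same MAC may have one entry
 * per VLAN.
 */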
27 static const struct rhashtable_params br_fdb_rht_params = {
28 	.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
29 	.key_offset = offsetof(struct net_bridge_fdb_entry, key),
30 	.key_len = sizeof(struct net_bridge_fdb_key),
31 	.automatic_shrinking = true,
32 };
33 
34 static struct kmem_cache *br_fdb_cache __read_mostly;
35 
36 int __init br_fdb_init(void)
37 {
38 	br_fdb_cache = KMEM_CACHE(net_bridge_fdb_entry, SLAB_HWCACHE_ALIGN);
39 	if (!br_fdb_cache)
40 		return -ENOMEM;
41 
42 	return 0;
43 }
44 
45 void br_fdb_fini(void)
46 {
47 	kmem_cache_destroy(br_fdb_cache);
48 }
49 
50 int br_fdb_hash_init(struct net_bridge *br)
51 {
52 	return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
53 }
54 
55 void br_fdb_hash_fini(struct net_bridge *br)
56 {
57 	rhashtable_destroy(&br->fdb_hash_tbl);
58 }
59 
60 /* if topology_changing then use forward_delay (default 15 sec)
61  * otherwise keep longer (default 5 minutes)
62  */
63 static inline unsigned long hold_time(const struct net_bridge *br)
64 {
65 	return br->topology_change ? br->forward_delay : br->ageing_time;
66 }
67 
68 static inline int has_expired(const struct net_bridge *br,
69 				  const struct net_bridge_fdb_entry *fdb)
70 {
71 	return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
72 	       !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
73 	       time_before_eq(fdb->updated + hold_time(br), jiffies);
74 }
75 
76 static void fdb_rcu_free(struct rcu_head *head)
77 {
78 	struct net_bridge_fdb_entry *ent
79 		= container_of(head, struct net_bridge_fdb_entry, rcu);
80 	kmem_cache_free(br_fdb_cache, ent);
81 }
82 
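/* Map the internal FDB flags onto the NUD states reported over netlink:
 * local entries show up as NUD_PERMANENT, static ones as NUD_NOARP,
 * expired dynamic entries as NUD_STALE and everything else as
 * NUD_REACHABLE.
 */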
83 static int fdb_to_nud(const struct net_bridge *br,
84 		      const struct net_bridge_fdb_entry *fdb)
85 {
86 	if (test_bit(BR_FDB_LOCAL, &fdb->flags))
87 		return NUD_PERMANENT;
88 	else if (test_bit(BR_FDB_STATIC, &fdb->flags))
89 		return NUD_NOARP;
90 	else if (has_expired(br, fdb))
91 		return NUD_STALE;
92 	else
93 		return NUD_REACHABLE;
94 }
95 
96 static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
97 			 const struct net_bridge_fdb_entry *fdb,
98 			 u32 portid, u32 seq, int type, unsigned int flags)
99 {
100 	const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
101 	unsigned long now = jiffies;
102 	struct nda_cacheinfo ci;
103 	struct nlmsghdr *nlh;
104 	struct ndmsg *ndm;
105 	u32 ext_flags = 0;
106 
107 	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
108 	if (nlh == NULL)
109 		return -EMSGSIZE;
110 
111 	ndm = nlmsg_data(nlh);
112 	ndm->ndm_family	 = AF_BRIDGE;
113 	ndm->ndm_pad1    = 0;
114 	ndm->ndm_pad2    = 0;
115 	ndm->ndm_flags	 = 0;
116 	ndm->ndm_type	 = 0;
117 	ndm->ndm_ifindex = dst ? dst->dev->ifindex : br->dev->ifindex;
118 	ndm->ndm_state   = fdb_to_nud(br, fdb);
119 
120 	if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
121 		ndm->ndm_flags |= NTF_OFFLOADED;
122 	if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
123 		ndm->ndm_flags |= NTF_EXT_LEARNED;
124 	if (test_bit(BR_FDB_STICKY, &fdb->flags))
125 		ndm->ndm_flags |= NTF_STICKY;
126 	if (test_bit(BR_FDB_LOCKED, &fdb->flags))
127 		ext_flags |= NTF_EXT_LOCKED;
128 
129 	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
130 		goto nla_put_failure;
131 	if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
132 		goto nla_put_failure;
133 	if (nla_put_u32(skb, NDA_FLAGS_EXT, ext_flags))
134 		goto nla_put_failure;
135 
136 	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
137 	ci.ndm_confirmed = 0;
138 	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
139 	ci.ndm_refcnt	 = 0;
140 	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
141 		goto nla_put_failure;
142 
143 	if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
144 					&fdb->key.vlan_id))
145 		goto nla_put_failure;
146 
147 	if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) {
148 		struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS);
149 		u8 notify_bits = FDB_NOTIFY_BIT;
150 
151 		if (!nest)
152 			goto nla_put_failure;
153 		if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
154 			notify_bits |= FDB_NOTIFY_INACTIVE_BIT;
155 
156 		if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) {
157 			nla_nest_cancel(skb, nest);
158 			goto nla_put_failure;
159 		}
160 
161 		nla_nest_end(skb, nest);
162 	}
163 
164 	nlmsg_end(skb, nlh);
165 	return 0;
166 
167 nla_put_failure:
168 	nlmsg_cancel(skb, nlh);
169 	return -EMSGSIZE;
170 }
171 
172 static inline size_t fdb_nlmsg_size(void)
173 {
174 	return NLMSG_ALIGN(sizeof(struct ndmsg))
175 		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
176 		+ nla_total_size(sizeof(u32)) /* NDA_MASTER */
177 		+ nla_total_size(sizeof(u32)) /* NDA_FLAGS_EXT */
178 		+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
179 		+ nla_total_size(sizeof(struct nda_cacheinfo))
180 		+ nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
181 		+ nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */
182 }
183 
184 static void fdb_notify(struct net_bridge *br,
185 		       const struct net_bridge_fdb_entry *fdb, int type,
186 		       bool swdev_notify)
187 {
188 	struct net *net = dev_net(br->dev);
189 	struct sk_buff *skb;
190 	int err = -ENOBUFS;
191 
192 	if (swdev_notify)
193 		br_switchdev_fdb_notify(br, fdb, type);
194 
195 	skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
196 	if (skb == NULL)
197 		goto errout;
198 
199 	err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
200 	if (err < 0) {
201 		/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
202 		WARN_ON(err == -EMSGSIZE);
203 		kfree_skb(skb);
204 		goto errout;
205 	}
206 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
207 	return;
208 errout:
209 	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
210 }
211 
212 static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
213 						 const unsigned char *addr,
214 						 __u16 vid)
215 {
216 	struct net_bridge_fdb_key key;
217 
218 	WARN_ON_ONCE(!rcu_read_lock_held());
219 
220 	key.vlan_id = vid;
221 	memcpy(key.addr.addr, addr, sizeof(key.addr.addr));
222 
223 	return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
224 }
225 
226 /* requires bridge hash_lock */
227 static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
228 						const unsigned char *addr,
229 						__u16 vid)
230 {
231 	struct net_bridge_fdb_entry *fdb;
232 
233 	lockdep_assert_held_once(&br->hash_lock);
234 
235 	rcu_read_lock();
236 	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
237 	rcu_read_unlock();
238 
239 	return fdb;
240 }
241 
242 struct net_device *br_fdb_find_port(const struct net_device *br_dev,
243 				    const unsigned char *addr,
244 				    __u16 vid)
245 {
246 	struct net_bridge_fdb_entry *f;
247 	struct net_device *dev = NULL;
248 	struct net_bridge *br;
249 
250 	ASSERT_RTNL();
251 
252 	if (!netif_is_bridge_master(br_dev))
253 		return NULL;
254 
255 	br = netdev_priv(br_dev);
256 	rcu_read_lock();
257 	f = br_fdb_find_rcu(br, addr, vid);
258 	if (f && f->dst)
259 		dev = f->dst->dev;
260 	rcu_read_unlock();
261 
262 	return dev;
263 }
264 EXPORT_SYMBOL_GPL(br_fdb_find_port);
265 
266 struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
267 					     const unsigned char *addr,
268 					     __u16 vid)
269 {
270 	return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
271 }
272 
273 /* When a static FDB entry is added, the mac address from the entry is
274  * added to the bridge private HW address list and all required ports
275  * are then updated with the new information.
276  * Called under RTNL.
277  */
278 static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
279 {
280 	int err;
281 	struct net_bridge_port *p;
282 
283 	ASSERT_RTNL();
284 
285 	list_for_each_entry(p, &br->port_list, list) {
286 		if (!br_promisc_port(p)) {
287 			err = dev_uc_add(p->dev, addr);
288 			if (err)
289 				goto undo;
290 		}
291 	}
292 
293 	return;
294 undo:
295 	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
296 		if (!br_promisc_port(p))
297 			dev_uc_del(p->dev, addr);
298 	}
299 }
300 
301 /* When a static FDB entry is deleted, the HW address from that entry is
302  * also removed from the bridge private HW address list, and all the
303  * ports are updated with the needed information.
304  * Called under RTNL.
305  */
306 static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
307 {
308 	struct net_bridge_port *p;
309 
310 	ASSERT_RTNL();
311 
312 	list_for_each_entry(p, &br->port_list, list) {
313 		if (!br_promisc_port(p))
314 			dev_uc_del(p->dev, addr);
315 	}
316 }
317 
318 static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
319 		       bool swdev_notify)
320 {
321 	trace_fdb_delete(br, f);
322 
323 	if (test_bit(BR_FDB_STATIC, &f->flags))
324 		fdb_del_hw_addr(br, f->key.addr.addr);
325 
326 	hlist_del_init_rcu(&f->fdb_node);
327 	rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
328 			       br_fdb_rht_params);
329 	if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &f->flags))
330 		atomic_dec(&br->fdb_n_learned);
331 	fdb_notify(br, f, RTM_DELNEIGH, swdev_notify);
332 	call_rcu(&f->rcu, fdb_rcu_free);
333 }
334 
335 /* Delete a local entry if no other port had the same address.
336  *
337  * This function should only be called on entries with BR_FDB_LOCAL set,
338  * so even with BR_FDB_ADDED_BY_USER cleared we never need to increase
339  * the accounting for dynamically learned entries again.
340  */
341 static void fdb_delete_local(struct net_bridge *br,
342 			     const struct net_bridge_port *p,
343 			     struct net_bridge_fdb_entry *f)
344 {
345 	const unsigned char *addr = f->key.addr.addr;
346 	struct net_bridge_vlan_group *vg;
347 	const struct net_bridge_vlan *v;
348 	struct net_bridge_port *op;
349 	u16 vid = f->key.vlan_id;
350 
351 	/* Maybe another port has same hw addr? */
352 	list_for_each_entry(op, &br->port_list, list) {
353 		vg = nbp_vlan_group(op);
354 		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
355 		    (!vid || br_vlan_find(vg, vid))) {
356 			f->dst = op;
357 			clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
358 			return;
359 		}
360 	}
361 
362 	vg = br_vlan_group(br);
363 	v = br_vlan_find(vg, vid);
364 	/* Maybe bridge device has same hw addr? */
365 	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
366 	    (!vid || (v && br_vlan_should_use(v)))) {
367 		f->dst = NULL;
368 		clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
369 		return;
370 	}
371 
372 	fdb_delete(br, f, true);
373 }
374 
375 void br_fdb_find_delete_local(struct net_bridge *br,
376 			      const struct net_bridge_port *p,
377 			      const unsigned char *addr, u16 vid)
378 {
379 	struct net_bridge_fdb_entry *f;
380 
381 	spin_lock_bh(&br->hash_lock);
382 	f = br_fdb_find(br, addr, vid);
383 	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
384 	    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
385 		fdb_delete_local(br, p, f);
386 	spin_unlock_bh(&br->hash_lock);
387 }
388 
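/* Allocate and insert a new FDB entry; the caller must hold hash_lock.
 * Returns NULL if the learned-entry limit (fdb_max_learned) has been
 * reached, if allocation fails, or if an entry with the same key is
 * already present in the rhashtable.
 */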
389 static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
390 					       struct net_bridge_port *source,
391 					       const unsigned char *addr,
392 					       __u16 vid,
393 					       unsigned long flags)
394 {
395 	bool learned = !test_bit(BR_FDB_ADDED_BY_USER, &flags) &&
396 		       !test_bit(BR_FDB_LOCAL, &flags);
397 	u32 max_learned = READ_ONCE(br->fdb_max_learned);
398 	struct net_bridge_fdb_entry *fdb;
399 	int err;
400 
401 	if (likely(learned)) {
402 		int n_learned = atomic_read(&br->fdb_n_learned);
403 
404 		if (unlikely(max_learned && n_learned >= max_learned))
405 			return NULL;
406 		__set_bit(BR_FDB_DYNAMIC_LEARNED, &flags);
407 	}
408 
409 	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
410 	if (!fdb)
411 		return NULL;
412 
413 	memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
414 	WRITE_ONCE(fdb->dst, source);
415 	fdb->key.vlan_id = vid;
416 	fdb->flags = flags;
417 	fdb->updated = fdb->used = jiffies;
418 	err = rhashtable_lookup_insert_fast(&br->fdb_hash_tbl, &fdb->rhnode,
419 					    br_fdb_rht_params);
420 	if (err) {
421 		kmem_cache_free(br_fdb_cache, fdb);
422 		return NULL;
423 	}
424 
425 	if (likely(learned))
426 		atomic_inc(&br->fdb_n_learned);
427 
428 	hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);
429 
430 	return fdb;
431 }
432 
433 static int fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
434 			 const unsigned char *addr, u16 vid)
435 {
436 	struct net_bridge_fdb_entry *fdb;
437 
438 	if (!is_valid_ether_addr(addr))
439 		return -EINVAL;
440 
441 	fdb = br_fdb_find(br, addr, vid);
442 	if (fdb) {
443 		/* it is okay to have multiple ports with same
444 		 * address, just use the first one.
445 		 */
446 		if (test_bit(BR_FDB_LOCAL, &fdb->flags))
447 			return 0;
448 		br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
449 			source ? source->dev->name : br->dev->name, addr, vid);
450 		fdb_delete(br, fdb, true);
451 	}
452 
453 	fdb = fdb_create(br, source, addr, vid,
454 			 BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
455 	if (!fdb)
456 		return -ENOMEM;
457 
458 	fdb_add_hw_addr(br, addr);
459 	fdb_notify(br, fdb, RTM_NEWNEIGH, true);
460 	return 0;
461 }
462 
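/* Called when a bridge port's MAC address changes: replace the port's
 * old local entries with local entries for the new address on the
 * default VLAN and on every VLAN configured on the port.
 */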
463 void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
464 {
465 	struct net_bridge_vlan_group *vg;
466 	struct net_bridge_fdb_entry *f;
467 	struct net_bridge *br = p->br;
468 	struct net_bridge_vlan *v;
469 
470 	spin_lock_bh(&br->hash_lock);
471 	vg = nbp_vlan_group(p);
472 	hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
473 		if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
474 		    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
475 			/* delete old one */
476 			fdb_delete_local(br, p, f);
477 
478 			/* if this port has no vlan information
479 			 * configured, we can safely be done at
480 			 * this point.
481 			 */
482 			if (!vg || !vg->num_vlans)
483 				goto insert;
484 		}
485 	}
486 
487 insert:
488 	/* insert new address, may fail if the address is invalid or a duplicate */
489 	fdb_add_local(br, p, newaddr, 0);
490 
491 	if (!vg || !vg->num_vlans)
492 		goto done;
493 
494 	/* Now add entries for every VLAN configured on the port.
495 	 * This function runs under RTNL so the bitmap will not change
496 	 * from under us.
497 	 */
498 	list_for_each_entry(v, &vg->vlan_list, vlist)
499 		fdb_add_local(br, p, newaddr, v->vid);
500 
501 done:
502 	spin_unlock_bh(&br->hash_lock);
503 }
504 
505 void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
506 {
507 	struct net_bridge_vlan_group *vg;
508 	struct net_bridge_fdb_entry *f;
509 	struct net_bridge_vlan *v;
510 
511 	spin_lock_bh(&br->hash_lock);
512 
513 	/* If old entry was unassociated with any port, then delete it. */
514 	f = br_fdb_find(br, br->dev->dev_addr, 0);
515 	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
516 	    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
517 		fdb_delete_local(br, NULL, f);
518 
519 	fdb_add_local(br, NULL, newaddr, 0);
520 	vg = br_vlan_group(br);
521 	if (!vg || !vg->num_vlans)
522 		goto out;
523 	/* Now remove and add entries for every VLAN configured on the
524 	 * bridge.  This function runs under RTNL so the bitmap will not
525 	 * change from under us.
526 	 */
527 	list_for_each_entry(v, &vg->vlan_list, vlist) {
528 		if (!br_vlan_should_use(v))
529 			continue;
530 		f = br_fdb_find(br, br->dev->dev_addr, v->vid);
531 		if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
532 		    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
533 			fdb_delete_local(br, NULL, f);
534 		fdb_add_local(br, NULL, newaddr, v->vid);
535 	}
536 out:
537 	spin_unlock_bh(&br->hash_lock);
538 }
539 
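/* Periodic garbage collection, run from the br->gc_work delayed work:
 * expire dynamic entries whose hold time has elapsed, emit inactivity
 * notifications for entries with BR_FDB_NOTIFY set, and reschedule
 * itself based on the next entry due to expire.
 */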
540 void br_fdb_cleanup(struct work_struct *work)
541 {
542 	struct net_bridge *br = container_of(work, struct net_bridge,
543 					     gc_work.work);
544 	struct net_bridge_fdb_entry *f = NULL;
545 	unsigned long delay = hold_time(br);
546 	unsigned long work_delay = delay;
547 	unsigned long now = jiffies;
548 
549 	/* this part is tricky: in order to avoid blocking learning (and
550 	 * consequently forwarding) we rely on RCU to delete objects with
551 	 * delayed freeing, allowing us to continue traversing
552 	 */
553 	rcu_read_lock();
554 	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
555 		unsigned long this_timer = f->updated + delay;
556 
557 		if (test_bit(BR_FDB_STATIC, &f->flags) ||
558 		    test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) {
559 			if (test_bit(BR_FDB_NOTIFY, &f->flags)) {
560 				if (time_after(this_timer, now))
561 					work_delay = min(work_delay,
562 							 this_timer - now);
563 				else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE,
564 							   &f->flags))
565 					fdb_notify(br, f, RTM_NEWNEIGH, false);
566 			}
567 			continue;
568 		}
569 
570 		if (time_after(this_timer, now)) {
571 			work_delay = min(work_delay, this_timer - now);
572 		} else {
573 			spin_lock_bh(&br->hash_lock);
574 			if (!hlist_unhashed(&f->fdb_node))
575 				fdb_delete(br, f, true);
576 			spin_unlock_bh(&br->hash_lock);
577 		}
578 	}
579 	rcu_read_unlock();
580 
581 	/* Cleanup minimum 10 milliseconds apart */
582 	work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
583 	mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
584 }
585 
586 static bool __fdb_flush_matches(const struct net_bridge *br,
587 				const struct net_bridge_fdb_entry *f,
588 				const struct net_bridge_fdb_flush_desc *desc)
589 {
590 	const struct net_bridge_port *dst = READ_ONCE(f->dst);
591 	int port_ifidx = dst ? dst->dev->ifindex : br->dev->ifindex;
592 
593 	if (desc->vlan_id && desc->vlan_id != f->key.vlan_id)
594 		return false;
595 	if (desc->port_ifindex && desc->port_ifindex != port_ifidx)
596 		return false;
597 	if (desc->flags_mask && (f->flags & desc->flags_mask) != desc->flags)
598 		return false;
599 
600 	return true;
601 }
602 
603 /* Flush forwarding database entries matching the description */
604 void br_fdb_flush(struct net_bridge *br,
605 		  const struct net_bridge_fdb_flush_desc *desc)
606 {
607 	struct net_bridge_fdb_entry *f;
608 
609 	rcu_read_lock();
610 	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
611 		if (!__fdb_flush_matches(br, f, desc))
612 			continue;
613 
614 		spin_lock_bh(&br->hash_lock);
615 		if (!hlist_unhashed(&f->fdb_node))
616 			fdb_delete(br, f, true);
617 		spin_unlock_bh(&br->hash_lock);
618 	}
619 	rcu_read_unlock();
620 }
621 
622 static unsigned long __ndm_state_to_fdb_flags(u16 ndm_state)
623 {
624 	unsigned long flags = 0;
625 
626 	if (ndm_state & NUD_PERMANENT)
627 		__set_bit(BR_FDB_LOCAL, &flags);
628 	if (ndm_state & NUD_NOARP)
629 		__set_bit(BR_FDB_STATIC, &flags);
630 
631 	return flags;
632 }
633 
634 static unsigned long __ndm_flags_to_fdb_flags(u8 ndm_flags)
635 {
636 	unsigned long flags = 0;
637 
638 	if (ndm_flags & NTF_USE)
639 		__set_bit(BR_FDB_ADDED_BY_USER, &flags);
640 	if (ndm_flags & NTF_EXT_LEARNED)
641 		__set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &flags);
642 	if (ndm_flags & NTF_OFFLOADED)
643 		__set_bit(BR_FDB_OFFLOADED, &flags);
644 	if (ndm_flags & NTF_STICKY)
645 		__set_bit(BR_FDB_STICKY, &flags);
646 
647 	return flags;
648 }
649 
650 static int __fdb_flush_validate_ifindex(const struct net_bridge *br,
651 					int ifindex,
652 					struct netlink_ext_ack *extack)
653 {
654 	const struct net_device *dev;
655 
656 	dev = __dev_get_by_index(dev_net(br->dev), ifindex);
657 	if (!dev) {
658 		NL_SET_ERR_MSG_MOD(extack, "Unknown flush device ifindex");
659 		return -ENODEV;
660 	}
661 	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
662 		NL_SET_ERR_MSG_MOD(extack, "Flush device is not a bridge or bridge port");
663 		return -EINVAL;
664 	}
665 	if (netif_is_bridge_master(dev) && dev != br->dev) {
666 		NL_SET_ERR_MSG_MOD(extack,
667 				   "Flush bridge device does not match target bridge device");
668 		return -EINVAL;
669 	}
670 	if (netif_is_bridge_port(dev)) {
671 		struct net_bridge_port *p = br_port_get_rtnl(dev);
672 
673 		if (p->br != br) {
674 			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
675 			return -EINVAL;
676 		}
677 	}
678 
679 	return 0;
680 }
681 
682 static const struct nla_policy br_fdb_del_bulk_policy[NDA_MAX + 1] = {
683 	[NDA_VLAN]	= NLA_POLICY_RANGE(NLA_U16, 1, VLAN_N_VID - 2),
684 	[NDA_IFINDEX]	= NLA_POLICY_MIN(NLA_S32, 1),
685 	[NDA_NDM_STATE_MASK]	= { .type = NLA_U16 },
686 	[NDA_NDM_FLAGS_MASK]	= { .type = NLA_U8 },
687 };
688 
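/* Bulk delete handler (RTM_DELNEIGH with NLM_F_BULK): builds a flush
 * descriptor from the ndmsg and attributes and flushes all matching
 * entries. Illustrative user-space trigger (newer iproute2, exact
 * syntax may vary): "bridge fdb flush dev br0 vlan 10".
 */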
689 int br_fdb_delete_bulk(struct nlmsghdr *nlh, struct net_device *dev,
690 		       struct netlink_ext_ack *extack)
691 {
692 	struct net_bridge_fdb_flush_desc desc = {};
693 	struct ndmsg *ndm = nlmsg_data(nlh);
694 	struct net_bridge_port *p = NULL;
695 	struct nlattr *tb[NDA_MAX + 1];
696 	struct net_bridge *br;
697 	u8 ndm_flags;
698 	int err;
699 
700 	ndm_flags = ndm->ndm_flags & ~FDB_FLUSH_IGNORED_NDM_FLAGS;
701 
702 	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX,
703 			  br_fdb_del_bulk_policy, extack);
704 	if (err)
705 		return err;
706 
707 	if (netif_is_bridge_master(dev)) {
708 		br = netdev_priv(dev);
709 	} else {
710 		p = br_port_get_rtnl(dev);
711 		if (!p) {
712 			NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge port");
713 			return -EINVAL;
714 		}
715 		br = p->br;
716 	}
717 
718 	if (tb[NDA_VLAN])
719 		desc.vlan_id = nla_get_u16(tb[NDA_VLAN]);
720 
721 	if (ndm_flags & ~FDB_FLUSH_ALLOWED_NDM_FLAGS) {
722 		NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm flag bits set");
723 		return -EINVAL;
724 	}
725 	if (ndm->ndm_state & ~FDB_FLUSH_ALLOWED_NDM_STATES) {
726 		NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm state bits set");
727 		return -EINVAL;
728 	}
729 
730 	desc.flags |= __ndm_state_to_fdb_flags(ndm->ndm_state);
731 	desc.flags |= __ndm_flags_to_fdb_flags(ndm_flags);
732 	if (tb[NDA_NDM_STATE_MASK]) {
733 		u16 ndm_state_mask = nla_get_u16(tb[NDA_NDM_STATE_MASK]);
734 
735 		desc.flags_mask |= __ndm_state_to_fdb_flags(ndm_state_mask);
736 	}
737 	if (tb[NDA_NDM_FLAGS_MASK]) {
738 		u8 ndm_flags_mask = nla_get_u8(tb[NDA_NDM_FLAGS_MASK]);
739 
740 		desc.flags_mask |= __ndm_flags_to_fdb_flags(ndm_flags_mask);
741 	}
742 	if (tb[NDA_IFINDEX]) {
743 		int ifidx = nla_get_s32(tb[NDA_IFINDEX]);
744 
745 		err = __fdb_flush_validate_ifindex(br, ifidx, extack);
746 		if (err)
747 			return err;
748 		desc.port_ifindex = ifidx;
749 	} else if (p) {
750 		/* flush was invoked with port device and NTF_MASTER */
751 		desc.port_ifindex = p->dev->ifindex;
752 	}
753 
754 	br_debug(br, "flushing port ifindex: %d vlan id: %u flags: 0x%lx flags mask: 0x%lx\n",
755 		 desc.port_ifindex, desc.vlan_id, desc.flags, desc.flags_mask);
756 
757 	br_fdb_flush(br, &desc);
758 
759 	return 0;
760 }
761 
762 /* Flush all entries referring to a specific port.
763  * if do_all is set also flush static entries
764  * if vid is set delete all entries that match the vlan_id
765  */
766 void br_fdb_delete_by_port(struct net_bridge *br,
767 			   const struct net_bridge_port *p,
768 			   u16 vid,
769 			   int do_all)
770 {
771 	struct net_bridge_fdb_entry *f;
772 	struct hlist_node *tmp;
773 
774 	spin_lock_bh(&br->hash_lock);
775 	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
776 		if (f->dst != p)
777 			continue;
778 
779 		if (!do_all)
780 			if (test_bit(BR_FDB_STATIC, &f->flags) ||
781 			    (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags) &&
782 			     !test_bit(BR_FDB_OFFLOADED, &f->flags)) ||
783 			    (vid && f->key.vlan_id != vid))
784 				continue;
785 
786 		if (test_bit(BR_FDB_LOCAL, &f->flags))
787 			fdb_delete_local(br, p, f);
788 		else
789 			fdb_delete(br, f, true);
790 	}
791 	spin_unlock_bh(&br->hash_lock);
792 }
793 
794 #if IS_ENABLED(CONFIG_ATM_LANE)
795 /* Interface used by ATM LANE hook to test
796  * if an addr is on some other bridge port */
797 int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
798 {
799 	struct net_bridge_fdb_entry *fdb;
800 	struct net_bridge_port *port;
801 	int ret;
802 
803 	rcu_read_lock();
804 	port = br_port_get_rcu(dev);
805 	if (!port)
806 		ret = 0;
807 	else {
808 		const struct net_bridge_port *dst = NULL;
809 
810 		fdb = br_fdb_find_rcu(port->br, addr, 0);
811 		if (fdb)
812 			dst = READ_ONCE(fdb->dst);
813 
814 		ret = dst && dst->dev != dev &&
815 		      dst->state == BR_STATE_FORWARDING;
816 	}
817 	rcu_read_unlock();
818 
819 	return ret;
820 }
821 #endif /* CONFIG_ATM_LANE */
822 
823 /*
824  * Fill buffer with forwarding table records in
825  * the API format.
826  */
827 int br_fdb_fillbuf(struct net_bridge *br, void *buf,
828 		   unsigned long maxnum, unsigned long skip)
829 {
830 	struct net_bridge_fdb_entry *f;
831 	struct __fdb_entry *fe = buf;
832 	int num = 0;
833 
834 	memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
835 
836 	rcu_read_lock();
837 	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
838 		if (num >= maxnum)
839 			break;
840 
841 		if (has_expired(br, f))
842 			continue;
843 
844 		/* ignore pseudo entry for local MAC address */
845 		if (!f->dst)
846 			continue;
847 
848 		if (skip) {
849 			--skip;
850 			continue;
851 		}
852 
853 		/* convert from internal format to API */
854 		memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);
855 
856 		/* due to ABI compat need to split into hi/lo */
857 		fe->port_no = f->dst->port_no;
858 		fe->port_hi = f->dst->port_no >> 8;
859 
860 		fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
861 		if (!test_bit(BR_FDB_STATIC, &f->flags))
862 			fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
863 		++fe;
864 		++num;
865 	}
866 	rcu_read_unlock();
867 
868 	return num;
869 }
870 
871 /* Add entry for local address of interface */
872 int br_fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
873 		     const unsigned char *addr, u16 vid)
874 {
875 	int ret;
876 
877 	spin_lock_bh(&br->hash_lock);
878 	ret = fdb_add_local(br, source, addr, vid);
879 	spin_unlock_bh(&br->hash_lock);
880 	return ret;
881 }
882 
883 /* returns true if the fdb was modified */
884 static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb)
885 {
886 	return !!(test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags) &&
887 		  test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags));
888 }
889 
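/* Learning path, called from the RX fast path under RCU: refresh the
 * entry for (addr, vid), handle a station move to a new source port,
 * and only take hash_lock when a new entry has to be created.
 */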
890 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
891 		   const unsigned char *addr, u16 vid, unsigned long flags)
892 {
893 	struct net_bridge_fdb_entry *fdb;
894 
895 	/* some users want to always flood. */
896 	if (hold_time(br) == 0)
897 		return;
898 
899 	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
900 	if (likely(fdb)) {
901 		/* attempt to update an entry for a local interface */
902 		if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
903 			if (net_ratelimit())
904 				br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
905 					source->dev->name, addr, vid);
906 		} else {
907 			unsigned long now = jiffies;
908 			bool fdb_modified = false;
909 
910 			if (now != fdb->updated) {
911 				fdb->updated = now;
912 				fdb_modified = __fdb_mark_active(fdb);
913 			}
914 
915 			/* fastpath: update of existing entry */
916 			if (unlikely(source != READ_ONCE(fdb->dst) &&
917 				     !test_bit(BR_FDB_STICKY, &fdb->flags))) {
918 				br_switchdev_fdb_notify(br, fdb, RTM_DELNEIGH);
919 				WRITE_ONCE(fdb->dst, source);
920 				fdb_modified = true;
921 				/* Take over HW learned entry */
922 				if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
923 						      &fdb->flags)))
924 					clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
925 						  &fdb->flags);
926 				/* Clear locked flag when roaming to an
927 				 * unlocked port.
928 				 */
929 				if (unlikely(test_bit(BR_FDB_LOCKED, &fdb->flags)))
930 					clear_bit(BR_FDB_LOCKED, &fdb->flags);
931 			}
932 
933 			if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags))) {
934 				set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
935 				if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED,
936 						       &fdb->flags))
937 					atomic_dec(&br->fdb_n_learned);
938 			}
939 			if (unlikely(fdb_modified)) {
940 				trace_br_fdb_update(br, source, addr, vid, flags);
941 				fdb_notify(br, fdb, RTM_NEWNEIGH, true);
942 			}
943 		}
944 	} else {
945 		spin_lock(&br->hash_lock);
946 		fdb = fdb_create(br, source, addr, vid, flags);
947 		if (fdb) {
948 			trace_br_fdb_update(br, source, addr, vid, flags);
949 			fdb_notify(br, fdb, RTM_NEWNEIGH, true);
950 		}
951 		/* else we lost the race and someone else inserted
952 		 * it first; don't bother updating
953 		 */
954 		spin_unlock(&br->hash_lock);
955 	}
956 }
957 
958 /* Dump information about entries, in response to GETNEIGH */
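/* e.g. an (illustrative) "bridge fdb show br br0" from iproute2 ends up
 * here via the bridge device's ndo_fdb_dump.
 */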
959 int br_fdb_dump(struct sk_buff *skb,
960 		struct netlink_callback *cb,
961 		struct net_device *dev,
962 		struct net_device *filter_dev,
963 		int *idx)
964 {
965 	struct net_bridge *br = netdev_priv(dev);
966 	struct net_bridge_fdb_entry *f;
967 	int err = 0;
968 
969 	if (!netif_is_bridge_master(dev))
970 		return err;
971 
972 	if (!filter_dev) {
973 		err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
974 		if (err < 0)
975 			return err;
976 	}
977 
978 	rcu_read_lock();
979 	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
980 		if (*idx < cb->args[2])
981 			goto skip;
982 		if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
983 			if (filter_dev != dev)
984 				goto skip;
985 			/* !f->dst is a special case for bridge
986 			 * It means the MAC belongs to the bridge
987 			 * Therefore need a little more filtering
988 			 * we only want to dump the !f->dst case
989 			 */
990 			if (f->dst)
991 				goto skip;
992 		}
993 		if (!filter_dev && f->dst)
994 			goto skip;
995 
996 		err = fdb_fill_info(skb, br, f,
997 				    NETLINK_CB(cb->skb).portid,
998 				    cb->nlh->nlmsg_seq,
999 				    RTM_NEWNEIGH,
1000 				    NLM_F_MULTI);
1001 		if (err < 0)
1002 			break;
1003 skip:
1004 		*idx += 1;
1005 	}
1006 	rcu_read_unlock();
1007 
1008 	return err;
1009 }
1010 
1011 int br_fdb_get(struct sk_buff *skb,
1012 	       struct nlattr *tb[],
1013 	       struct net_device *dev,
1014 	       const unsigned char *addr,
1015 	       u16 vid, u32 portid, u32 seq,
1016 	       struct netlink_ext_ack *extack)
1017 {
1018 	struct net_bridge *br = netdev_priv(dev);
1019 	struct net_bridge_fdb_entry *f;
1020 	int err = 0;
1021 
1022 	rcu_read_lock();
1023 	f = br_fdb_find_rcu(br, addr, vid);
1024 	if (!f) {
1025 		NL_SET_ERR_MSG(extack, "Fdb entry not found");
1026 		err = -ENOENT;
1027 		goto errout;
1028 	}
1029 
1030 	err = fdb_fill_info(skb, br, f, portid, seq,
1031 			    RTM_NEWNEIGH, 0);
1032 errout:
1033 	rcu_read_unlock();
1034 	return err;
1035 }
1036 
1037 /* returns true if the fdb is modified */
1038 static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify)
1039 {
1040 	bool modified = false;
1041 
1042 	/* allow to mark an entry as inactive, usually done on creation */
1043 	/* allow marking an entry as inactive, usually done on creation */
1044 	    !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
1045 		modified = true;
1046 
1047 	if ((notify & FDB_NOTIFY_BIT) &&
1048 	    !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) {
1049 		/* enabled activity tracking */
1050 		modified = true;
1051 	} else if (!(notify & FDB_NOTIFY_BIT) &&
1052 		   test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) {
1053 		/* disabled activity tracking, clear notify state */
1054 		clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags);
1055 		modified = true;
1056 	}
1057 
1058 	return modified;
1059 }
1060 
1061 /* Update (create or replace) forwarding database entry */
1062 static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
1063 			 const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid,
1064 			 struct nlattr *nfea_tb[])
1065 {
1066 	bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY);
1067 	bool refresh = !nfea_tb[NFEA_DONT_REFRESH];
1068 	struct net_bridge_fdb_entry *fdb;
1069 	u16 state = ndm->ndm_state;
1070 	bool modified = false;
1071 	u8 notify = 0;
1072 
1073 	/* If the port cannot learn allow only local and static entries */
1074 	if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
1075 	    !(source->state == BR_STATE_LEARNING ||
1076 	      source->state == BR_STATE_FORWARDING))
1077 		return -EPERM;
1078 
1079 	if (!source && !(state & NUD_PERMANENT)) {
1080 		pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
1081 			br->dev->name);
1082 		return -EINVAL;
1083 	}
1084 
1085 	if (is_sticky && (state & NUD_PERMANENT))
1086 		return -EINVAL;
1087 
1088 	if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) {
1089 		notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]);
1090 		if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) ||
1091 		    (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT)
1092 			return -EINVAL;
1093 	}
1094 
1095 	fdb = br_fdb_find(br, addr, vid);
1096 	if (fdb == NULL) {
1097 		if (!(flags & NLM_F_CREATE))
1098 			return -ENOENT;
1099 
1100 		fdb = fdb_create(br, source, addr, vid,
1101 				 BIT(BR_FDB_ADDED_BY_USER));
1102 		if (!fdb)
1103 			return -ENOMEM;
1104 
1105 		modified = true;
1106 	} else {
1107 		if (flags & NLM_F_EXCL)
1108 			return -EEXIST;
1109 
1110 		if (READ_ONCE(fdb->dst) != source) {
1111 			WRITE_ONCE(fdb->dst, source);
1112 			modified = true;
1113 		}
1114 
1115 		set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
1116 		if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &fdb->flags))
1117 			atomic_dec(&br->fdb_n_learned);
1118 	}
1119 
1120 	if (fdb_to_nud(br, fdb) != state) {
1121 		if (state & NUD_PERMANENT) {
1122 			set_bit(BR_FDB_LOCAL, &fdb->flags);
1123 			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
1124 				fdb_add_hw_addr(br, addr);
1125 		} else if (state & NUD_NOARP) {
1126 			clear_bit(BR_FDB_LOCAL, &fdb->flags);
1127 			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
1128 				fdb_add_hw_addr(br, addr);
1129 		} else {
1130 			clear_bit(BR_FDB_LOCAL, &fdb->flags);
1131 			if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
1132 				fdb_del_hw_addr(br, addr);
1133 		}
1134 
1135 		modified = true;
1136 	}
1137 
1138 	if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
1139 		change_bit(BR_FDB_STICKY, &fdb->flags);
1140 		modified = true;
1141 	}
1142 
1143 	if (test_and_clear_bit(BR_FDB_LOCKED, &fdb->flags))
1144 		modified = true;
1145 
1146 	if (fdb_handle_notify(fdb, notify))
1147 		modified = true;
1148 
1149 	fdb->used = jiffies;
1150 	if (modified) {
1151 		if (refresh)
1152 			fdb->updated = jiffies;
1153 		fdb_notify(br, fdb, RTM_NEWNEIGH, true);
1154 	}
1155 
1156 	return 0;
1157 }
1158 
1159 static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
1160 			struct net_bridge_port *p, const unsigned char *addr,
1161 			u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
1162 			struct netlink_ext_ack *extack)
1163 {
1164 	int err = 0;
1165 
1166 	if (ndm->ndm_flags & NTF_USE) {
1167 		if (!p) {
1168 			pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
1169 				br->dev->name);
1170 			return -EINVAL;
1171 		}
1172 		if (!nbp_state_should_learn(p))
1173 			return 0;
1174 
1175 		local_bh_disable();
1176 		rcu_read_lock();
1177 		br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
1178 		rcu_read_unlock();
1179 		local_bh_enable();
1180 	} else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
1181 		if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
1182 			NL_SET_ERR_MSG_MOD(extack,
1183 					   "FDB entry towards bridge must be permanent");
1184 			return -EINVAL;
1185 		}
1186 		err = br_fdb_external_learn_add(br, p, addr, vid, false, true);
1187 	} else {
1188 		spin_lock_bh(&br->hash_lock);
1189 		err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
1190 		spin_unlock_bh(&br->hash_lock);
1191 	}
1192 
1193 	return err;
1194 }
1195 
1196 static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = {
1197 	[NFEA_ACTIVITY_NOTIFY]	= { .type = NLA_U8 },
1198 	[NFEA_DONT_REFRESH]	= { .type = NLA_FLAG },
1199 };
1200 
1201 /* Add new permanent fdb entry with RTM_NEWNEIGH */
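/* Illustrative user-space trigger (iproute2, syntax may vary):
 *   bridge fdb add 00:11:22:33:44:55 dev swp1 master static vlan 10
 * which reaches this handler through the bridge's ndo_fdb_add.
 */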
1202 int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
1203 	       struct net_device *dev,
1204 	       const unsigned char *addr, u16 vid, u16 nlh_flags,
1205 	       struct netlink_ext_ack *extack)
1206 {
1207 	struct nlattr *nfea_tb[NFEA_MAX + 1], *attr;
1208 	struct net_bridge_vlan_group *vg;
1209 	struct net_bridge_port *p = NULL;
1210 	struct net_bridge_vlan *v;
1211 	struct net_bridge *br = NULL;
1212 	u32 ext_flags = 0;
1213 	int err = 0;
1214 
1215 	trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags);
1216 
1217 	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
1218 		pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
1219 		return -EINVAL;
1220 	}
1221 
1222 	if (is_zero_ether_addr(addr)) {
1223 		pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
1224 		return -EINVAL;
1225 	}
1226 
1227 	if (netif_is_bridge_master(dev)) {
1228 		br = netdev_priv(dev);
1229 		vg = br_vlan_group(br);
1230 	} else {
1231 		p = br_port_get_rtnl(dev);
1232 		if (!p) {
1233 			pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
1234 				dev->name);
1235 			return -EINVAL;
1236 		}
1237 		br = p->br;
1238 		vg = nbp_vlan_group(p);
1239 	}
1240 
1241 	if (tb[NDA_FLAGS_EXT])
1242 		ext_flags = nla_get_u32(tb[NDA_FLAGS_EXT]);
1243 
1244 	if (ext_flags & NTF_EXT_LOCKED) {
1245 		NL_SET_ERR_MSG_MOD(extack, "Cannot add FDB entry with \"locked\" flag set");
1246 		return -EINVAL;
1247 	}
1248 
1249 	if (tb[NDA_FDB_EXT_ATTRS]) {
1250 		attr = tb[NDA_FDB_EXT_ATTRS];
1251 		err = nla_parse_nested(nfea_tb, NFEA_MAX, attr,
1252 				       br_nda_fdb_pol, extack);
1253 		if (err)
1254 			return err;
1255 	} else {
1256 		memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1));
1257 	}
1258 
1259 	if (vid) {
1260 		v = br_vlan_find(vg, vid);
1261 		if (!v || !br_vlan_should_use(v)) {
1262 			pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
1263 			return -EINVAL;
1264 		}
1265 
1266 		/* VID was specified, so use it. */
1267 		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
1268 				   extack);
1269 	} else {
1270 		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
1271 				   extack);
1272 		if (err || !vg || !vg->num_vlans)
1273 			goto out;
1274 
1275 		/* We have vlans configured on this port and user didn't
1276 		 * specify a VLAN.  To be nice, add/update entry for every
1277 		 * vlan on this port.
1278 		 */
1279 		list_for_each_entry(v, &vg->vlan_list, vlist) {
1280 			if (!br_vlan_should_use(v))
1281 				continue;
1282 			err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
1283 					   nfea_tb, extack);
1284 			if (err)
1285 				goto out;
1286 		}
1287 	}
1288 
1289 out:
1290 	return err;
1291 }
1292 
1293 static int fdb_delete_by_addr_and_port(struct net_bridge *br,
1294 				       const struct net_bridge_port *p,
1295 				       const u8 *addr, u16 vlan)
1296 {
1297 	struct net_bridge_fdb_entry *fdb;
1298 
1299 	fdb = br_fdb_find(br, addr, vlan);
1300 	if (!fdb || READ_ONCE(fdb->dst) != p)
1301 		return -ENOENT;
1302 
1303 	fdb_delete(br, fdb, true);
1304 
1305 	return 0;
1306 }
1307 
1308 static int __br_fdb_delete(struct net_bridge *br,
1309 			   const struct net_bridge_port *p,
1310 			   const unsigned char *addr, u16 vid)
1311 {
1312 	int err;
1313 
1314 	spin_lock_bh(&br->hash_lock);
1315 	err = fdb_delete_by_addr_and_port(br, p, addr, vid);
1316 	spin_unlock_bh(&br->hash_lock);
1317 
1318 	return err;
1319 }
1320 
1321 /* Remove neighbor entry with RTM_DELNEIGH */
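/* Illustrative user-space trigger (iproute2, syntax may vary):
 *   bridge fdb del 00:11:22:33:44:55 dev swp1 master vlan 10
 */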
1322 int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
1323 		  struct net_device *dev,
1324 		  const unsigned char *addr, u16 vid,
1325 		  struct netlink_ext_ack *extack)
1326 {
1327 	struct net_bridge_vlan_group *vg;
1328 	struct net_bridge_port *p = NULL;
1329 	struct net_bridge_vlan *v;
1330 	struct net_bridge *br;
1331 	int err;
1332 
1333 	if (netif_is_bridge_master(dev)) {
1334 		br = netdev_priv(dev);
1335 		vg = br_vlan_group(br);
1336 	} else {
1337 		p = br_port_get_rtnl(dev);
1338 		if (!p) {
1339 			pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
1340 				dev->name);
1341 			return -EINVAL;
1342 		}
1343 		vg = nbp_vlan_group(p);
1344 		br = p->br;
1345 	}
1346 
1347 	if (vid) {
1348 		v = br_vlan_find(vg, vid);
1349 		if (!v) {
1350 			pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
1351 			return -EINVAL;
1352 		}
1353 
1354 		err = __br_fdb_delete(br, p, addr, vid);
1355 	} else {
1356 		err = -ENOENT;
1357 		err &= __br_fdb_delete(br, p, addr, 0);
1358 		if (!vg || !vg->num_vlans)
1359 			return err;
1360 
1361 		list_for_each_entry(v, &vg->vlan_list, vlist) {
1362 			if (!br_vlan_should_use(v))
1363 				continue;
1364 			err &= __br_fdb_delete(br, p, addr, v->vid);
1365 		}
1366 	}
1367 
1368 	return err;
1369 }
1370 
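/* Sync all static FDB addresses into @p's unicast address list, with
 * rollback on failure; used when the port does not run in promiscuous
 * mode.
 */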
1371 int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
1372 {
1373 	struct net_bridge_fdb_entry *f, *tmp;
1374 	int err = 0;
1375 
1376 	ASSERT_RTNL();
1377 
1378 	/* the key here is that static entries change only under rtnl */
1379 	rcu_read_lock();
1380 	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
1381 		/* We only care for static entries */
1382 		if (!test_bit(BR_FDB_STATIC, &f->flags))
1383 			continue;
1384 		err = dev_uc_add(p->dev, f->key.addr.addr);
1385 		if (err)
1386 			goto rollback;
1387 	}
1388 done:
1389 	rcu_read_unlock();
1390 
1391 	return err;
1392 
1393 rollback:
1394 	hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
1395 		/* We only care for static entries */
1396 		if (!test_bit(BR_FDB_STATIC, &tmp->flags))
1397 			continue;
1398 		if (tmp == f)
1399 			break;
1400 		dev_uc_del(p->dev, tmp->key.addr.addr);
1401 	}
1402 
1403 	goto done;
1404 }
1405 
1406 void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
1407 {
1408 	struct net_bridge_fdb_entry *f;
1409 
1410 	ASSERT_RTNL();
1411 
1412 	rcu_read_lock();
1413 	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
1414 		/* We only care for static entries */
1415 		if (!test_bit(BR_FDB_STATIC, &f->flags))
1416 			continue;
1417 
1418 		dev_uc_del(p->dev, f->key.addr.addr);
1419 	}
1420 	rcu_read_unlock();
1421 }
1422 
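/* Create or refresh an entry learned externally (e.g. by a switchdev
 * driver or from user space with NTF_EXT_LEARNED); @locked entries are
 * only accepted on ports with BR_PORT_MAB enabled.
 */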
1423 int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
1424 			      const unsigned char *addr, u16 vid, bool locked,
1425 			      bool swdev_notify)
1426 {
1427 	struct net_bridge_fdb_entry *fdb;
1428 	bool modified = false;
1429 	int err = 0;
1430 
1431 	trace_br_fdb_external_learn_add(br, p, addr, vid);
1432 
1433 	if (locked && (!p || !(p->flags & BR_PORT_MAB)))
1434 		return -EINVAL;
1435 
1436 	spin_lock_bh(&br->hash_lock);
1437 
1438 	fdb = br_fdb_find(br, addr, vid);
1439 	if (!fdb) {
1440 		unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);
1441 
1442 		if (swdev_notify)
1443 			flags |= BIT(BR_FDB_ADDED_BY_USER);
1444 
1445 		if (!p)
1446 			flags |= BIT(BR_FDB_LOCAL);
1447 
1448 		if (locked)
1449 			flags |= BIT(BR_FDB_LOCKED);
1450 
1451 		fdb = fdb_create(br, p, addr, vid, flags);
1452 		if (!fdb) {
1453 			err = -ENOMEM;
1454 			goto err_unlock;
1455 		}
1456 		fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
1457 	} else {
1458 		if (locked &&
1459 		    (!test_bit(BR_FDB_LOCKED, &fdb->flags) ||
1460 		     READ_ONCE(fdb->dst) != p)) {
1461 			err = -EINVAL;
1462 			goto err_unlock;
1463 		}
1464 
1465 		fdb->updated = jiffies;
1466 
1467 		if (READ_ONCE(fdb->dst) != p) {
1468 			WRITE_ONCE(fdb->dst, p);
1469 			modified = true;
1470 		}
1471 
1472 		if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
1473 			/* Refresh entry */
1474 			fdb->used = jiffies;
1475 		} else {
1476 			modified = true;
1477 		}
1478 
1479 		if (locked != test_bit(BR_FDB_LOCKED, &fdb->flags)) {
1480 			change_bit(BR_FDB_LOCKED, &fdb->flags);
1481 			modified = true;
1482 		}
1483 
1484 		if (swdev_notify)
1485 			set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
1486 
1487 		if (!p)
1488 			set_bit(BR_FDB_LOCAL, &fdb->flags);
1489 
1490 		if ((swdev_notify || !p) &&
1491 		    test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &fdb->flags))
1492 			atomic_dec(&br->fdb_n_learned);
1493 
1494 		if (modified)
1495 			fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
1496 	}
1497 
1498 err_unlock:
1499 	spin_unlock_bh(&br->hash_lock);
1500 
1501 	return err;
1502 }
1503 
1504 int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
1505 			      const unsigned char *addr, u16 vid,
1506 			      bool swdev_notify)
1507 {
1508 	struct net_bridge_fdb_entry *fdb;
1509 	int err = 0;
1510 
1511 	spin_lock_bh(&br->hash_lock);
1512 
1513 	fdb = br_fdb_find(br, addr, vid);
1514 	if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
1515 		fdb_delete(br, fdb, swdev_notify);
1516 	else
1517 		err = -ENOENT;
1518 
1519 	spin_unlock_bh(&br->hash_lock);
1520 
1521 	return err;
1522 }
1523 
1524 void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
1525 			  const unsigned char *addr, u16 vid, bool offloaded)
1526 {
1527 	struct net_bridge_fdb_entry *fdb;
1528 
1529 	spin_lock_bh(&br->hash_lock);
1530 
1531 	fdb = br_fdb_find(br, addr, vid);
1532 	if (fdb && offloaded != test_bit(BR_FDB_OFFLOADED, &fdb->flags))
1533 		change_bit(BR_FDB_OFFLOADED, &fdb->flags);
1534 
1535 	spin_unlock_bh(&br->hash_lock);
1536 }
1537 
1538 void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
1539 {
1540 	struct net_bridge_fdb_entry *f;
1541 	struct net_bridge_port *p;
1542 
1543 	ASSERT_RTNL();
1544 
1545 	p = br_port_get_rtnl(dev);
1546 	if (!p)
1547 		return;
1548 
1549 	spin_lock_bh(&p->br->hash_lock);
1550 	hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
1551 		if (f->dst == p && f->key.vlan_id == vid)
1552 			clear_bit(BR_FDB_OFFLOADED, &f->flags);
1553 	}
1554 	spin_unlock_bh(&p->br->hash_lock);
1555 }
1556 EXPORT_SYMBOL_GPL(br_fdb_clear_offload);
1557