1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Forwarding database
4 * Linux ethernet bridge
5 *
6 * Authors:
7 * Lennert Buytenhek <buytenh@gnu.org>
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/init.h>
12 #include <linux/rculist.h>
13 #include <linux/spinlock.h>
14 #include <linux/times.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/jhash.h>
18 #include <linux/random.h>
19 #include <linux/slab.h>
20 #include <linux/atomic.h>
21 #include <linux/unaligned.h>
22 #include <linux/if_vlan.h>
23 #include <net/switchdev.h>
24 #include <trace/events/bridge.h>
25 #include "br_private.h"
26
/* rhashtable configuration: entries are hashed by the embedded
 * struct net_bridge_fdb_key, i.e. the (MAC address, vlan id) pair.
 */
static const struct rhashtable_params br_fdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_fdb_entry, key),
	.key_len = sizeof(struct net_bridge_fdb_key),
	.automatic_shrinking = true,
};
33
34 static struct kmem_cache *br_fdb_cache __read_mostly;
35
br_fdb_init(void)36 int __init br_fdb_init(void)
37 {
38 br_fdb_cache = KMEM_CACHE(net_bridge_fdb_entry, SLAB_HWCACHE_ALIGN);
39 if (!br_fdb_cache)
40 return -ENOMEM;
41
42 return 0;
43 }
44
/* Destroy the global FDB entry slab cache (module teardown). */
void br_fdb_fini(void)
{
	kmem_cache_destroy(br_fdb_cache);
}
49
/* Initialize the per-bridge FDB hash table.
 * Returns 0 on success or a negative errno from rhashtable_init().
 */
int br_fdb_hash_init(struct net_bridge *br)
{
	return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
}
54
/* Tear down the per-bridge FDB hash table. */
void br_fdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->fdb_hash_tbl);
}
59
60 /* if topology_changing then use forward_delay (default 15 sec)
61 * otherwise keep longer (default 5 minutes)
62 */
hold_time(const struct net_bridge * br)63 static inline unsigned long hold_time(const struct net_bridge *br)
64 {
65 return br->topology_change ? br->forward_delay : br->ageing_time;
66 }
67
has_expired(const struct net_bridge * br,const struct net_bridge_fdb_entry * fdb)68 static inline int has_expired(const struct net_bridge *br,
69 const struct net_bridge_fdb_entry *fdb)
70 {
71 return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
72 !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
73 time_before_eq(READ_ONCE(fdb->updated) + hold_time(br), jiffies);
74 }
75
fdb_to_nud(const struct net_bridge * br,const struct net_bridge_fdb_entry * fdb)76 static int fdb_to_nud(const struct net_bridge *br,
77 const struct net_bridge_fdb_entry *fdb)
78 {
79 if (test_bit(BR_FDB_LOCAL, &fdb->flags))
80 return NUD_PERMANENT;
81 else if (test_bit(BR_FDB_STATIC, &fdb->flags))
82 return NUD_NOARP;
83 else if (has_expired(br, fdb))
84 return NUD_STALE;
85 else
86 return NUD_REACHABLE;
87 }
88
/* Fill one netlink neighbour (RTM_*NEIGH) message describing @fdb.
 * Returns 0 on success or -EMSGSIZE when @skb runs out of tailroom;
 * on failure the partially-built message is cancelled.
 */
static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
			 const struct net_bridge_fdb_entry *fdb,
			 u32 portid, u32 seq, int type, unsigned int flags)
{
	const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	u32 ext_flags = 0;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = 0;
	ndm->ndm_type = 0;
	/* NULL dst means the address belongs to the bridge device itself */
	ndm->ndm_ifindex = dst ? dst->dev->ifindex : br->dev->ifindex;
	ndm->ndm_state = fdb_to_nud(br, fdb);

	/* translate internal BR_FDB_* bits into the NTF_* wire flags */
	if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
		ndm->ndm_flags |= NTF_OFFLOADED;
	if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
		ndm->ndm_flags |= NTF_EXT_LEARNED;
	if (test_bit(BR_FDB_STICKY, &fdb->flags))
		ndm->ndm_flags |= NTF_STICKY;
	if (test_bit(BR_FDB_LOCKED, &fdb->flags))
		ext_flags |= NTF_EXT_LOCKED;

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
		goto nla_put_failure;
	if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
		goto nla_put_failure;
	if (nla_put_u32(skb, NDA_FLAGS_EXT, ext_flags))
		goto nla_put_failure;

	/* report used/updated ages in clock_t units */
	ci.ndm_used = jiffies_to_clock_t(now - READ_ONCE(fdb->used));
	ci.ndm_confirmed = 0;
	ci.ndm_updated = jiffies_to_clock_t(now - READ_ONCE(fdb->updated));
	ci.ndm_refcnt = 0;
	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	/* vlan 0 entries carry no NDA_VLAN attribute */
	if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
					&fdb->key.vlan_id))
		goto nla_put_failure;

	/* optional activity-notification sub-attributes */
	if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) {
		struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS);
		u8 notify_bits = FDB_NOTIFY_BIT;

		if (!nest)
			goto nla_put_failure;
		if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
			notify_bits |= FDB_NOTIFY_INACTIVE_BIT;

		if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) {
			nla_nest_cancel(skb, nest);
			goto nla_put_failure;
		}

		nla_nest_end(skb, nest);
	}

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
164
/* Worst-case payload size of one FDB netlink message; must stay in
 * sync with the attributes emitted by fdb_fill_info().
 */
static inline size_t fdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(u32)) /* NDA_MASTER */
		+ nla_total_size(sizeof(u32)) /* NDA_FLAGS_EXT */
		+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
		+ nla_total_size(sizeof(struct nda_cacheinfo))
		+ nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
		+ nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */
}
176
/* Notify listeners about an FDB change: switchdev drivers (when
 * @swdev_notify is set) and the RTNLGRP_NEIGH netlink group. @type is
 * RTM_NEWNEIGH or RTM_DELNEIGH. On failure the error is recorded on
 * the rtnl socket instead of being returned.
 */
static void fdb_notify(struct net_bridge *br,
		       const struct net_bridge_fdb_entry *fdb, int type,
		       bool swdev_notify)
{
	struct net *net = dev_net(br->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (swdev_notify)
		br_switchdev_fdb_notify(br, fdb, type);

	/* GFP_ATOMIC: may be called from the forwarding fast path */
	skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
204
/* Look up the entry for (@addr, @vid) in @tbl. Must be called from
 * RCU read-side context; the returned entry is only RCU-protected.
 * Returns NULL when no entry matches.
 */
static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
						 const unsigned char *addr,
						 __u16 vid)
{
	struct net_bridge_fdb_key key;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* build the lookup key exactly as stored in the entries */
	key.vlan_id = vid;
	memcpy(key.addr.addr, addr, sizeof(key.addr.addr));

	return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
}
218
/* requires bridge hash_lock */
static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
						const unsigned char *addr,
						__u16 vid)
{
	struct net_bridge_fdb_entry *fdb;

	lockdep_assert_held_once(&br->hash_lock);

	/* the RCU section only satisfies fdb_find_rcu(); hash_lock is
	 * what keeps the returned entry from being freed
	 */
	rcu_read_lock();
	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
	rcu_read_unlock();

	return fdb;
}
234
/* Return the port net_device behind (@addr, @vid) on bridge @br_dev,
 * or NULL if @br_dev is not a bridge, the address is unknown, or the
 * entry has no port (i.e. belongs to the bridge itself). Caller must
 * hold RTNL.
 */
struct net_device *br_fdb_find_port(const struct net_device *br_dev,
				    const unsigned char *addr,
				    __u16 vid)
{
	const struct net_bridge_port *dst;
	struct net_bridge_fdb_entry *f;
	struct net_device *dev = NULL;
	struct net_bridge *br;

	ASSERT_RTNL();

	if (!netif_is_bridge_master(br_dev))
		return NULL;

	br = netdev_priv(br_dev);
	rcu_read_lock();
	f = br_fdb_find_rcu(br, addr, vid);
	if (f) {
		dst = READ_ONCE(f->dst);
		if (dst)
			dev = dst->dev;
	}
	rcu_read_unlock();

	return dev;
}
EXPORT_SYMBOL_GPL(br_fdb_find_port);
262
/* RCU lookup of (@addr, @vid) on @br; thin wrapper over fdb_find_rcu()
 * for callers outside this file. Caller must be in an RCU read-side
 * critical section.
 */
struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
					     const unsigned char *addr,
					     __u16 vid)
{
	return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
}
269
/* When a static FDB entry is added, the mac address from the entry is
 * added to the bridge private HW address list and all required ports
 * are then updated with the new information.
 * Called under RTNL.
 */
static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
	int err;
	struct net_bridge_port *p;

	ASSERT_RTNL();

	/* promiscuous ports see all traffic anyway, so only sync the
	 * address to non-promiscuous ports
	 */
	list_for_each_entry(p, &br->port_list, list) {
		if (!br_promisc_port(p)) {
			err = dev_uc_add(p->dev, addr);
			if (err)
				goto undo;
		}
	}

	return;
undo:
	/* roll back the ports updated before the failure, in reverse */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!br_promisc_port(p))
			dev_uc_del(p->dev, addr);
	}
}
297
/* When a static FDB entry is deleted, the HW address from that entry is
 * also removed from the bridge private HW address list and updates all
 * the ports with needed information.
 * Called under RTNL.
 */
static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
	struct net_bridge_port *p;

	ASSERT_RTNL();

	/* mirror of fdb_add_hw_addr(): only non-promisc ports had the
	 * address added
	 */
	list_for_each_entry(p, &br->port_list, list) {
		if (!br_promisc_port(p))
			dev_uc_del(p->dev, addr);
	}
}
314
/* Unlink @f from the hash table and list, notify listeners and free it
 * via RCU. Called with hash_lock held (see callers).
 */
static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
		       bool swdev_notify)
{
	trace_fdb_delete(br, f);

	/* static entries had their MAC synced to non-promisc ports */
	if (test_bit(BR_FDB_STATIC, &f->flags))
		fdb_del_hw_addr(br, f->key.addr.addr);

	hlist_del_init_rcu(&f->fdb_node);
	rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
			       br_fdb_rht_params);
	/* keep the learned-entry accounting in sync */
	if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &f->flags))
		atomic_dec(&br->fdb_n_learned);
	fdb_notify(br, f, RTM_DELNEIGH, swdev_notify);
	/* RCU readers may still hold references to @f */
	kfree_rcu(f, rcu);
}
331
/* Delete a local entry if no other port had the same address.
 *
 * This function should only be called on entries with BR_FDB_LOCAL set,
 * so even with BR_FDB_ADDED_BY_USER cleared we never need to increase
 * the accounting for dynamically learned entries again.
 */
static void fdb_delete_local(struct net_bridge *br,
			     const struct net_bridge_port *p,
			     struct net_bridge_fdb_entry *f)
{
	const unsigned char *addr = f->key.addr.addr;
	struct net_bridge_vlan_group *vg;
	const struct net_bridge_vlan *v;
	struct net_bridge_port *op;
	u16 vid = f->key.vlan_id;

	/* Maybe another port has same hw addr? */
	list_for_each_entry(op, &br->port_list, list) {
		vg = nbp_vlan_group(op);
		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
		    (!vid || br_vlan_find(vg, vid))) {
			/* re-point the entry to that port instead of
			 * deleting it
			 */
			WRITE_ONCE(f->dst, op);
			clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
			return;
		}
	}

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	/* Maybe bridge device has same hw addr? */
	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
	    (!vid || (v && br_vlan_should_use(v)))) {
		/* NULL dst marks the MAC as the bridge's own */
		WRITE_ONCE(f->dst, NULL);
		clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
		return;
	}

	fdb_delete(br, f, true);
}
371
/* Drop (or re-point, see fdb_delete_local()) the local entry for
 * (@addr, @vid) on port @p, unless it was added by the user.
 */
void br_fdb_find_delete_local(struct net_bridge *br,
			      const struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid)
{
	struct net_bridge_fdb_entry *f;

	spin_lock_bh(&br->hash_lock);
	f = br_fdb_find(br, addr, vid);
	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
	    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
		fdb_delete_local(br, p, f);
	spin_unlock_bh(&br->hash_lock);
}
385
/* Allocate and insert a new FDB entry for (@addr, @vid) pointing at
 * @source. Called with hash_lock held. Returns NULL on allocation
 * failure, when the learned-entry limit is hit, or when an entry with
 * the same key already exists.
 */
static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
					       struct net_bridge_port *source,
					       const unsigned char *addr,
					       __u16 vid,
					       unsigned long flags)
{
	/* only entries that are neither user-added nor local count
	 * against the fdb_max_learned limit
	 */
	bool learned = !test_bit(BR_FDB_ADDED_BY_USER, &flags) &&
		       !test_bit(BR_FDB_LOCAL, &flags);
	u32 max_learned = READ_ONCE(br->fdb_max_learned);
	struct net_bridge_fdb_entry *fdb;
	int err;

	if (likely(learned)) {
		int n_learned = atomic_read(&br->fdb_n_learned);

		/* max_learned == 0 means no limit */
		if (unlikely(max_learned && n_learned >= max_learned))
			return NULL;
		__set_bit(BR_FDB_DYNAMIC_LEARNED, &flags);
	}

	/* GFP_ATOMIC: may run from the forwarding fast path */
	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
	if (!fdb)
		return NULL;

	memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
	WRITE_ONCE(fdb->dst, source);
	fdb->key.vlan_id = vid;
	fdb->flags = flags;
	fdb->updated = fdb->used = jiffies;
	err = rhashtable_lookup_insert_fast(&br->fdb_hash_tbl, &fdb->rhnode,
					    br_fdb_rht_params);
	if (err) {
		/* lost the race with a concurrent insert of the same key */
		kmem_cache_free(br_fdb_cache, fdb);
		return NULL;
	}

	if (likely(learned))
		atomic_inc(&br->fdb_n_learned);

	hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);

	return fdb;
}
429
/* Install a local (BR_FDB_LOCAL | BR_FDB_STATIC) entry for
 * (@addr, @vid), replacing a conflicting learned entry if one exists.
 * Called with hash_lock held. Returns 0 on success or a negative errno.
 */
static int fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
			 const unsigned char *addr, u16 vid)
{
	struct net_bridge_fdb_entry *fdb;

	if (!is_valid_ether_addr(addr))
		return -EINVAL;

	fdb = br_fdb_find(br, addr, vid);
	if (fdb) {
		/* it is okay to have multiple ports with same
		 * address, just use the first one.
		 */
		if (test_bit(BR_FDB_LOCAL, &fdb->flags))
			return 0;
		br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
			source ? source->dev->name : br->dev->name, addr, vid);
		fdb_delete(br, fdb, true);
	}

	fdb = fdb_create(br, source, addr, vid,
			 BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
	if (!fdb)
		return -ENOMEM;

	fdb_add_hw_addr(br, addr);
	fdb_notify(br, fdb, RTM_NEWNEIGH, true);
	return 0;
}
459
/* Port device changed its MAC address: replace the port's old local
 * entries with entries for @newaddr on all relevant vlans.
 * Runs under RTNL (fdb_add_hw_addr() via fdb_add_local() asserts it).
 */
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_fdb_entry *f;
	struct net_bridge *br = p->br;
	struct net_bridge_vlan *v;
	bool local_vlan_0;

	/* with this option set, local entries are kept on vlan 0 only */
	local_vlan_0 = br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0);

	spin_lock_bh(&br->hash_lock);
	vg = nbp_vlan_group(p);
	hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
		if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
		    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
			/* delete old one */
			fdb_delete_local(br, p, f);

			/* if this port has no vlan information configured, or
			 * local entries are only kept on VLAN 0, we can safely
			 * be done at this point.
			 */
			if (!vg || !vg->num_vlans || local_vlan_0)
				goto insert;
		}
	}

insert:
	/* insert new address,  may fail if invalid address or dup. */
	fdb_add_local(br, p, newaddr, 0);

	if (!vg || !vg->num_vlans || local_vlan_0)
		goto done;

	/* Now add entries for every VLAN configured on the port.
	 * This function runs under RTNL so the bitmap will not change
	 * from under us.
	 */
	list_for_each_entry(v, &vg->vlan_list, vlist)
		fdb_add_local(br, p, newaddr, v->vid);

done:
	spin_unlock_bh(&br->hash_lock);
}
504
/* Bridge device changed its MAC address: replace the bridge's own
 * (portless) local entries with entries for @newaddr, on vlan 0 and,
 * unless BROPT_FDB_LOCAL_VLAN_0 is set, on every usable bridge vlan.
 */
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_fdb_entry *f;
	struct net_bridge_vlan *v;
	bool local_vlan_0;

	local_vlan_0 = br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0);

	spin_lock_bh(&br->hash_lock);

	/* If old entry was unassociated with any port, then delete it. */
	f = br_fdb_find(br, br->dev->dev_addr, 0);
	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
	    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
		fdb_delete_local(br, NULL, f);

	/* NULL source marks the entry as the bridge's own */
	fdb_add_local(br, NULL, newaddr, 0);
	vg = br_vlan_group(br);
	if (!vg || !vg->num_vlans || local_vlan_0)
		goto out;
	/* Now remove and add entries for every VLAN configured on the
	 * bridge. This function runs under RTNL so the bitmap will not
	 * change from under us.
	 */
	list_for_each_entry(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;
		f = br_fdb_find(br, br->dev->dev_addr, v->vid);
		if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
		    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
			fdb_delete_local(br, NULL, f);
		fdb_add_local(br, NULL, newaddr, v->vid);
	}
out:
	spin_unlock_bh(&br->hash_lock);
}
542
/* Delayed work: age out expired dynamic entries, emit inactivity
 * notifications for static/external entries that request them, and
 * re-arm the work for the soonest upcoming expiry.
 */
void br_fdb_cleanup(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     gc_work.work);
	struct net_bridge_fdb_entry *f = NULL;
	unsigned long delay = hold_time(br);
	unsigned long work_delay = delay;	/* next wakeup, shrunk below */
	unsigned long now = jiffies;

	/* this part is tricky, in order to avoid blocking learning and
	 * consequently forwarding, we rely on rcu to delete objects with
	 * delayed freeing allowing us to continue traversing
	 */
	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		unsigned long this_timer = READ_ONCE(f->updated) + delay;

		if (test_bit(BR_FDB_STATIC, &f->flags) ||
		    test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) {
			/* these never age out, but may need a one-shot
			 * "became inactive" notification
			 */
			if (test_bit(BR_FDB_NOTIFY, &f->flags)) {
				if (time_after(this_timer, now))
					work_delay = min(work_delay,
							 this_timer - now);
				else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE,
							   &f->flags))
					fdb_notify(br, f, RTM_NEWNEIGH, false);
			}
			continue;
		}

		if (time_after(this_timer, now)) {
			work_delay = min(work_delay, this_timer - now);
		} else {
			spin_lock_bh(&br->hash_lock);
			/* recheck under hash_lock: a concurrent delete may
			 * have already unhashed the entry
			 */
			if (!hlist_unhashed(&f->fdb_node))
				fdb_delete(br, f, true);
			spin_unlock_bh(&br->hash_lock);
		}
	}
	rcu_read_unlock();

	/* Cleanup minimum 10 milliseconds apart */
	work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
	mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
}
588
br_fdb_delete_locals_per_vlan_port(struct net_bridge * br,struct net_bridge_port * p)589 static void br_fdb_delete_locals_per_vlan_port(struct net_bridge *br,
590 struct net_bridge_port *p)
591 {
592 struct net_bridge_vlan_group *vg;
593 struct net_bridge_vlan *v;
594 struct net_device *dev;
595
596 if (p) {
597 vg = nbp_vlan_group(p);
598 dev = p->dev;
599 } else {
600 vg = br_vlan_group(br);
601 dev = br->dev;
602 }
603
604 if (!vg)
605 return;
606
607 list_for_each_entry(v, &vg->vlan_list, vlist)
608 br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
609 }
610
/* Remove per-vlan local entries for every port and for the bridge
 * device itself. Called under RTNL.
 */
static void br_fdb_delete_locals_per_vlan(struct net_bridge *br)
{
	struct net_bridge_port *p;

	ASSERT_RTNL();

	list_for_each_entry(p, &br->port_list, list)
		br_fdb_delete_locals_per_vlan_port(br, p);

	/* NULL port selects the bridge device itself */
	br_fdb_delete_locals_per_vlan_port(br, NULL);
}
622
br_fdb_insert_locals_per_vlan_port(struct net_bridge * br,struct net_bridge_port * p,struct netlink_ext_ack * extack)623 static int br_fdb_insert_locals_per_vlan_port(struct net_bridge *br,
624 struct net_bridge_port *p,
625 struct netlink_ext_ack *extack)
626 {
627 struct net_bridge_vlan_group *vg;
628 struct net_bridge_vlan *v;
629 struct net_device *dev;
630 int err;
631
632 if (p) {
633 vg = nbp_vlan_group(p);
634 dev = p->dev;
635 } else {
636 vg = br_vlan_group(br);
637 dev = br->dev;
638 }
639
640 if (!vg)
641 return 0;
642
643 list_for_each_entry(v, &vg->vlan_list, vlist) {
644 if (!br_vlan_should_use(v))
645 continue;
646
647 err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
648 if (err)
649 return err;
650 }
651
652 return 0;
653 }
654
/* Install per-vlan local entries for every port and the bridge device.
 * On any failure, all per-vlan local entries are rolled back. Called
 * under RTNL. Returns 0 or the first insertion error.
 */
static int br_fdb_insert_locals_per_vlan(struct net_bridge *br,
					 struct netlink_ext_ack *extack)
{
	struct net_bridge_port *p;
	int err;

	ASSERT_RTNL();

	list_for_each_entry(p, &br->port_list, list) {
		err = br_fdb_insert_locals_per_vlan_port(br, p, extack);
		if (err)
			goto rollback;
	}

	/* NULL port selects the bridge device itself */
	err = br_fdb_insert_locals_per_vlan_port(br, NULL, extack);
	if (err)
		goto rollback;

	return 0;

rollback:
	NL_SET_ERR_MSG_MOD(extack, "fdb_local_vlan_0 toggle: FDB entry insertion failed");
	br_fdb_delete_locals_per_vlan(br);
	return err;
}
680
/* Toggle "local entries on vlan 0 only": enabling drops the per-vlan
 * local entries; disabling (re)creates them. Returns 0 or a negative
 * errno from the insertion path.
 */
int br_fdb_toggle_local_vlan_0(struct net_bridge *br, bool on,
			       struct netlink_ext_ack *extack)
{
	if (on) {
		br_fdb_delete_locals_per_vlan(br);
		return 0;
	}

	return br_fdb_insert_locals_per_vlan(br, extack);
}
690
__fdb_flush_matches(const struct net_bridge * br,const struct net_bridge_fdb_entry * f,const struct net_bridge_fdb_flush_desc * desc)691 static bool __fdb_flush_matches(const struct net_bridge *br,
692 const struct net_bridge_fdb_entry *f,
693 const struct net_bridge_fdb_flush_desc *desc)
694 {
695 const struct net_bridge_port *dst = READ_ONCE(f->dst);
696 int port_ifidx = dst ? dst->dev->ifindex : br->dev->ifindex;
697
698 if (desc->vlan_id && desc->vlan_id != f->key.vlan_id)
699 return false;
700 if (desc->port_ifindex && desc->port_ifindex != port_ifidx)
701 return false;
702 if (desc->flags_mask && (f->flags & desc->flags_mask) != desc->flags)
703 return false;
704
705 return true;
706 }
707
/* Flush forwarding database entries matching the description */
void br_fdb_flush(struct net_bridge *br,
		  const struct net_bridge_fdb_flush_desc *desc)
{
	struct net_bridge_fdb_entry *f;

	/* traverse under RCU to avoid blocking learning; take hash_lock
	 * only around each actual deletion
	 */
	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		if (!__fdb_flush_matches(br, f, desc))
			continue;

		spin_lock_bh(&br->hash_lock);
		/* recheck under hash_lock: may have been deleted already */
		if (!hlist_unhashed(&f->fdb_node))
			fdb_delete(br, f, true);
		spin_unlock_bh(&br->hash_lock);
	}
	rcu_read_unlock();
}
726
/* Translate netlink neighbour NUD_* state bits into BR_FDB_* flags. */
static unsigned long __ndm_state_to_fdb_flags(u16 ndm_state)
{
	unsigned long fdb_flags = 0;

	if (ndm_state & NUD_NOARP)
		__set_bit(BR_FDB_STATIC, &fdb_flags);
	if (ndm_state & NUD_PERMANENT)
		__set_bit(BR_FDB_LOCAL, &fdb_flags);

	return fdb_flags;
}
738
/* Translate netlink neighbour NTF_* flag bits into BR_FDB_* flags. */
static unsigned long __ndm_flags_to_fdb_flags(u8 ndm_flags)
{
	unsigned long fdb_flags = 0;

	if (ndm_flags & NTF_STICKY)
		__set_bit(BR_FDB_STICKY, &fdb_flags);
	if (ndm_flags & NTF_OFFLOADED)
		__set_bit(BR_FDB_OFFLOADED, &fdb_flags);
	if (ndm_flags & NTF_EXT_LEARNED)
		__set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb_flags);
	if (ndm_flags & NTF_USE)
		__set_bit(BR_FDB_ADDED_BY_USER, &fdb_flags);

	return fdb_flags;
}
754
/* Validate the NDA_IFINDEX attribute of a bulk flush: the ifindex must
 * refer to this bridge itself or to one of its ports. Returns 0 on
 * success or a negative errno with @extack set.
 */
static int __fdb_flush_validate_ifindex(const struct net_bridge *br,
					int ifindex,
					struct netlink_ext_ack *extack)
{
	const struct net_device *dev;

	dev = __dev_get_by_index(dev_net(br->dev), ifindex);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown flush device ifindex");
		return -ENODEV;
	}
	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Flush device is not a bridge or bridge port");
		return -EINVAL;
	}
	if (netif_is_bridge_master(dev) && dev != br->dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Flush bridge device does not match target bridge device");
		return -EINVAL;
	}
	if (netif_is_bridge_port(dev)) {
		struct net_bridge_port *p = br_port_get_rtnl(dev);

		/* the port must belong to this bridge */
		if (p->br != br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
	}

	return 0;
}
786
/* netlink attribute policy for RTM_DELNEIGH bulk delete requests */
static const struct nla_policy br_fdb_del_bulk_policy[NDA_MAX + 1] = {
	[NDA_VLAN] = NLA_POLICY_RANGE(NLA_U16, 1, VLAN_N_VID - 2),
	[NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
	[NDA_NDM_STATE_MASK] = { .type = NLA_U16 },
	[NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 },
};
793
/* Handle RTM_DELNEIGH with NLM_F_BULK: build a flush descriptor from
 * the ndm header and attributes, then flush all matching FDB entries.
 * @dev may be the bridge itself or one of its ports. Returns 0 or a
 * negative errno with @extack set.
 */
int br_fdb_delete_bulk(struct nlmsghdr *nlh, struct net_device *dev,
		       struct netlink_ext_ack *extack)
{
	struct net_bridge_fdb_flush_desc desc = {};
	struct ndmsg *ndm = nlmsg_data(nlh);
	struct net_bridge_port *p = NULL;
	struct nlattr *tb[NDA_MAX + 1];
	struct net_bridge *br;
	u8 ndm_flags;
	int err;

	/* drop flag bits that are accepted but don't affect matching */
	ndm_flags = ndm->ndm_flags & ~FDB_FLUSH_IGNORED_NDM_FLAGS;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX,
			  br_fdb_del_bulk_policy, extack);
	if (err)
		return err;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge port");
			return -EINVAL;
		}
		br = p->br;
	}

	if (tb[NDA_VLAN])
		desc.vlan_id = nla_get_u16(tb[NDA_VLAN]);

	if (ndm_flags & ~FDB_FLUSH_ALLOWED_NDM_FLAGS) {
		NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm flag bits set");
		return -EINVAL;
	}
	if (ndm->ndm_state & ~FDB_FLUSH_ALLOWED_NDM_STATES) {
		NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm state bits set");
		return -EINVAL;
	}

	/* translate ndm state/flags (and their masks) into BR_FDB_* bits */
	desc.flags |= __ndm_state_to_fdb_flags(ndm->ndm_state);
	desc.flags |= __ndm_flags_to_fdb_flags(ndm_flags);
	if (tb[NDA_NDM_STATE_MASK]) {
		u16 ndm_state_mask = nla_get_u16(tb[NDA_NDM_STATE_MASK]);

		desc.flags_mask |= __ndm_state_to_fdb_flags(ndm_state_mask);
	}
	if (tb[NDA_NDM_FLAGS_MASK]) {
		u8 ndm_flags_mask = nla_get_u8(tb[NDA_NDM_FLAGS_MASK]);

		desc.flags_mask |= __ndm_flags_to_fdb_flags(ndm_flags_mask);
	}
	if (tb[NDA_IFINDEX]) {
		int ifidx = nla_get_s32(tb[NDA_IFINDEX]);

		err = __fdb_flush_validate_ifindex(br, ifidx, extack);
		if (err)
			return err;
		desc.port_ifindex = ifidx;
	} else if (p) {
		/* flush was invoked with port device and NTF_MASTER */
		desc.port_ifindex = p->dev->ifindex;
	}

	br_debug(br, "flushing port ifindex: %d vlan id: %u flags: 0x%lx flags mask: 0x%lx\n",
		 desc.port_ifindex, desc.vlan_id, desc.flags, desc.flags_mask);

	br_fdb_flush(br, &desc);

	return 0;
}
866
867 /* Flush all entries referring to a specific port.
868 * if do_all is set also flush static entries
869 * if vid is set delete all entries that match the vlan_id
870 */
br_fdb_delete_by_port(struct net_bridge * br,const struct net_bridge_port * p,u16 vid,int do_all)871 void br_fdb_delete_by_port(struct net_bridge *br,
872 const struct net_bridge_port *p,
873 u16 vid,
874 int do_all)
875 {
876 struct net_bridge_fdb_entry *f;
877 struct hlist_node *tmp;
878
879 spin_lock_bh(&br->hash_lock);
880 hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
881 if (f->dst != p)
882 continue;
883
884 if (!do_all)
885 if (test_bit(BR_FDB_STATIC, &f->flags) ||
886 (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags) &&
887 !test_bit(BR_FDB_OFFLOADED, &f->flags)) ||
888 (vid && f->key.vlan_id != vid))
889 continue;
890
891 if (test_bit(BR_FDB_LOCAL, &f->flags))
892 fdb_delete_local(br, p, f);
893 else
894 fdb_delete(br, f, true);
895 }
896 spin_unlock_bh(&br->hash_lock);
897 }
898
/*
 * Fill buffer with forwarding table records in
 * the API format. Returns the number of records written, at most
 * @maxnum, skipping the first @skip eligible entries.
 */
int br_fdb_fillbuf(struct net_bridge *br, void *buf,
		   unsigned long maxnum, unsigned long skip)
{
	const struct net_bridge_port *dst;
	struct net_bridge_fdb_entry *f;
	struct __fdb_entry *fe = buf;
	unsigned long delta;
	int num = 0;

	memset(buf, 0, maxnum*sizeof(struct __fdb_entry));

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		/* stop once the caller's buffer is full */
		if (num >= maxnum)
			break;

		if (has_expired(br, f))
			continue;

		/* ignore pseudo entry for local MAC address */
		dst = READ_ONCE(f->dst);
		if (!dst)
			continue;

		/* honour the caller's pagination offset */
		if (skip) {
			--skip;
			continue;
		}

		/* convert from internal format to API */
		memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);

		/* due to ABI compat need to split into hi/lo */
		fe->port_no = dst->port_no;
		fe->port_hi = dst->port_no >> 8;

		fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
		if (!test_bit(BR_FDB_STATIC, &f->flags)) {
			delta = jiffies - READ_ONCE(f->updated);
			fe->ageing_timer_value =
				jiffies_delta_to_clock_t(delta);
		}
		++fe;
		++num;
	}
	rcu_read_unlock();

	return num;
}
952
/* Add entry for local address of interface */
int br_fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
		     const unsigned char *addr, u16 vid)
{
	int ret;

	/* fdb_add_local() requires hash_lock to be held */
	spin_lock_bh(&br->hash_lock);
	ret = fdb_add_local(br, source, addr, vid);
	spin_unlock_bh(&br->hash_lock);
	return ret;
}
964
965 /* returns true if the fdb was modified */
__fdb_mark_active(struct net_bridge_fdb_entry * fdb)966 static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb)
967 {
968 return !!(test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags) &&
969 test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags));
970 }
971
/* Learning path: refresh (or create) the entry for (@addr, @vid) seen
 * ingressing on @source. Runs in RCU read-side context (fdb_find_rcu()
 * asserts it); only takes hash_lock when a new entry must be created.
 */
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
		   const unsigned char *addr, u16 vid, unsigned long flags)
{
	struct net_bridge_fdb_entry *fdb;

	/* some users want to always flood. */
	if (hold_time(br) == 0)
		return;

	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
	if (likely(fdb)) {
		/* attempt to update an entry for a local interface */
		if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
			if (net_ratelimit())
				br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
					source->dev->name, addr, vid);
		} else {
			unsigned long now = jiffies;
			bool fdb_modified = false;

			/* only touch ->updated when it actually changes */
			if (now != READ_ONCE(fdb->updated)) {
				WRITE_ONCE(fdb->updated, now);
				fdb_modified = __fdb_mark_active(fdb);
			}

			/* fastpath: update of existing entry */
			if (unlikely(source != READ_ONCE(fdb->dst) &&
				     !test_bit(BR_FDB_STICKY, &fdb->flags))) {
				/* the station moved to another port */
				br_switchdev_fdb_notify(br, fdb, RTM_DELNEIGH);
				WRITE_ONCE(fdb->dst, source);
				fdb_modified = true;
				/* Take over HW learned entry */
				if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
						      &fdb->flags)))
					clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
						  &fdb->flags);
				/* Clear locked flag when roaming to an
				 * unlocked port.
				 */
				if (unlikely(test_bit(BR_FDB_LOCKED, &fdb->flags)))
					clear_bit(BR_FDB_LOCKED, &fdb->flags);
			}

			if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags))) {
				set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
				/* user-owned entries no longer count against
				 * the learned-entry limit
				 */
				if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED,
						       &fdb->flags))
					atomic_dec(&br->fdb_n_learned);
			}
			if (unlikely(fdb_modified)) {
				trace_br_fdb_update(br, source, addr, vid, flags);
				fdb_notify(br, fdb, RTM_NEWNEIGH, true);
			}
		}
	} else {
		spin_lock(&br->hash_lock);
		fdb = fdb_create(br, source, addr, vid, flags);
		if (fdb) {
			trace_br_fdb_update(br, source, addr, vid, flags);
			fdb_notify(br, fdb, RTM_NEWNEIGH, true);
		}
		/* else we lose race and someone else inserts
		 * it first, don't bother updating
		 */
		spin_unlock(&br->hash_lock);
	}
}
1039
/* Dump information about entries, in response to GETNEIGH.
 *
 * @filter_dev selects which entries to report:
 *   - NULL: bridge-owned entries only (plus the default dev dump);
 *   - the bridge itself: entries with no destination port;
 *   - a bridge port: entries pointing at that port.
 * *@idx is the resume cursor across netlink dump invocations; entries
 * below ctx->fdb_idx were already sent in a previous pass.
 */
int br_fdb_dump(struct sk_buff *skb,
		struct netlink_callback *cb,
		struct net_device *dev,
		struct net_device *filter_dev,
		int *idx)
{
	struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_fdb_entry *f;
	int err = 0;

	if (!netif_is_bridge_master(dev))
		return err;

	if (!filter_dev) {
		/* Also emit the device's own unicast/multicast lists. */
		err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
		if (err < 0)
			return err;
	}

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		const struct net_bridge_port *dst = READ_ONCE(f->dst);

		if (*idx < ctx->fdb_idx)
			goto skip;
		if (filter_dev && (!dst || dst->dev != filter_dev)) {
			if (filter_dev != dev)
				goto skip;
			/* !f->dst is a special case for bridge
			 * It means the MAC belongs to the bridge
			 * Therefore need a little more filtering
			 * we only want to dump the !f->dst case
			 */
			if (dst)
				goto skip;
		}
		if (!filter_dev && dst)
			goto skip;

		err = fdb_fill_info(skb, br, f,
				    NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq,
				    RTM_NEWNEIGH,
				    NLM_F_MULTI);
		if (err < 0)
			break;
skip:
		/* Count skipped entries too so the resume cursor stays
		 * consistent across dump passes.
		 */
		*idx += 1;
	}
	rcu_read_unlock();

	return err;
}
1095
/* Look up a single FDB entry for a GETNEIGH request and fill @skb
 * with its netlink representation.  Returns -ENOENT (with an extack
 * message) when no entry exists for @addr/@vid.
 */
int br_fdb_get(struct sk_buff *skb,
	       struct nlattr *tb[],
	       struct net_device *dev,
	       const unsigned char *addr,
	       u16 vid, u32 portid, u32 seq,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_fdb_entry *f;
	int err;

	rcu_read_lock();
	f = br_fdb_find_rcu(br, addr, vid);
	if (f) {
		err = fdb_fill_info(skb, br, f, portid, seq,
				    RTM_NEWNEIGH, 0);
	} else {
		NL_SET_ERR_MSG(extack, "Fdb entry not found");
		err = -ENOENT;
	}
	rcu_read_unlock();

	return err;
}
1121
/* Apply the user-requested activity-notification state @notify
 * (FDB_NOTIFY_BIT / FDB_NOTIFY_INACTIVE_BIT) to @fdb.
 * Returns true if the fdb is modified.
 */
static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify)
{
	bool modified = false;

	/* allow to mark an entry as inactive, usually done on creation */
	if ((notify & FDB_NOTIFY_INACTIVE_BIT) &&
	    !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
		modified = true;

	if ((notify & FDB_NOTIFY_BIT) &&
	    !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) {
		/* enabled activity tracking */
		modified = true;
	} else if (!(notify & FDB_NOTIFY_BIT) &&
		   test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) {
		/* disabled activity tracking, clear notify state */
		clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags);
		modified = true;
	}

	return modified;
}
1145
/* Update (create or replace) forwarding database entry.
 *
 * Called from __br_fdb_add() with br->hash_lock held.  @ndm/@flags are
 * the RTM_NEWNEIGH request fields; @nfea_tb are the parsed
 * NDA_FDB_EXT_ATTRS nested attributes.  Returns 0 on success or a
 * negative errno.
 */
static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
			 const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid,
			 struct nlattr *nfea_tb[])
{
	bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY);
	bool refresh = !nfea_tb[NFEA_DONT_REFRESH];
	struct net_bridge_fdb_entry *fdb;
	u16 state = ndm->ndm_state;
	bool modified = false;
	u8 notify = 0;

	/* If the port cannot learn allow only local and static entries */
	if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
	    !(source->state == BR_STATE_LEARNING ||
	      source->state == BR_STATE_FORWARDING))
		return -EPERM;

	/* Entries pointing at the bridge itself must be permanent. */
	if (!source && !(state & NUD_PERMANENT)) {
		pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
			br->dev->name);
		return -EINVAL;
	}

	/* Sticky only makes sense for non-permanent entries. */
	if (is_sticky && (state & NUD_PERMANENT))
		return -EINVAL;

	if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) {
		notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]);
		/* Reject unknown bits, and "inactive" without "notify"
		 * (inactive is meaningless with tracking disabled).
		 */
		if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) ||
		    (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT)
			return -EINVAL;
	}

	fdb = br_fdb_find(br, addr, vid);
	if (fdb == NULL) {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		fdb = fdb_create(br, source, addr, vid,
				 BIT(BR_FDB_ADDED_BY_USER));
		if (!fdb)
			return -ENOMEM;

		modified = true;
	} else {
		if (flags & NLM_F_EXCL)
			return -EEXIST;

		if (READ_ONCE(fdb->dst) != source) {
			WRITE_ONCE(fdb->dst, source);
			modified = true;
		}

		/* User now owns this entry: it no longer counts against
		 * the dynamically-learned limit.
		 */
		set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
		if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &fdb->flags))
			atomic_dec(&br->fdb_n_learned);
	}

	/* Map the requested NUD state onto LOCAL/STATIC flags, keeping
	 * the hardware address list in sync with STATIC transitions.
	 */
	if (fdb_to_nud(br, fdb) != state) {
		if (state & NUD_PERMANENT) {
			/* permanent: local + static */
			set_bit(BR_FDB_LOCAL, &fdb->flags);
			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_add_hw_addr(br, addr);
		} else if (state & NUD_NOARP) {
			/* noarp: static only */
			clear_bit(BR_FDB_LOCAL, &fdb->flags);
			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_add_hw_addr(br, addr);
		} else {
			/* reachable: ordinary dynamic entry */
			clear_bit(BR_FDB_LOCAL, &fdb->flags);
			if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_del_hw_addr(br, addr);
		}

		modified = true;
	}

	if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
		change_bit(BR_FDB_STICKY, &fdb->flags);
		modified = true;
	}

	/* An explicit user add always unlocks a MAB-locked entry. */
	if (test_and_clear_bit(BR_FDB_LOCKED, &fdb->flags))
		modified = true;

	if (fdb_handle_notify(fdb, notify))
		modified = true;

	WRITE_ONCE(fdb->used, jiffies);
	if (modified) {
		/* NFEA_DONT_REFRESH lets userspace replace attributes
		 * without resetting the ageing timer.
		 */
		if (refresh)
			WRITE_ONCE(fdb->updated, jiffies);
		fdb_notify(br, fdb, RTM_NEWNEIGH, true);
	}

	return 0;
}
1243
/* Dispatch one RTM_NEWNEIGH request for a single VLAN.
 *
 * Three mutually exclusive paths, each with its own locking:
 *   NTF_USE         - refresh via the fast-path learning code (RCU + BH
 *                     disabled to mimic softirq context);
 *   NTF_EXT_LEARNED - externally learned entry (takes hash_lock itself);
 *   otherwise       - regular add/replace under hash_lock.
 * Sets *@notified when a netlink notification was generated.
 */
static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
			struct net_bridge_port *p, const unsigned char *addr,
			u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
			bool *notified, struct netlink_ext_ack *extack)
{
	int err = 0;

	if (ndm->ndm_flags & NTF_USE) {
		if (!p) {
			pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
				br->dev->name);
			return -EINVAL;
		}
		if (!nbp_state_should_learn(p))
			return 0;

		local_bh_disable();
		rcu_read_lock();
		br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
		rcu_read_unlock();
		local_bh_enable();
	} else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
		if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "FDB entry towards bridge must be permanent");
			return -EINVAL;
		}
		err = br_fdb_external_learn_add(br, p, addr, vid, false, true);
	} else {
		spin_lock_bh(&br->hash_lock);
		err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
		spin_unlock_bh(&br->hash_lock);
	}

	if (!err)
		*notified = true;
	return err;
}
1282
/* Netlink policy for the NDA_FDB_EXT_ATTRS nested attribute set. */
static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = {
	[NFEA_ACTIVITY_NOTIFY]	= { .type = NLA_U8 },
	[NFEA_DONT_REFRESH]	= { .type = NLA_FLAG },
};
1287
/* Add new permanent fdb entry with RTM_NEWNEIGH.
 *
 * ndo_fdb_add handler for the bridge and its ports.  Validates the
 * request, then adds/updates the entry either for the given VLAN, or
 * (when no VLAN was specified) for the VLAN-less entry plus every
 * VLAN configured on the port/bridge.
 */
int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
	       struct net_device *dev,
	       const unsigned char *addr, u16 vid, u16 nlh_flags,
	       bool *notified, struct netlink_ext_ack *extack)
{
	struct nlattr *nfea_tb[NFEA_MAX + 1], *attr;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan *v;
	struct net_bridge *br = NULL;
	u32 ext_flags = 0;
	int err = 0;

	trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags);

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
		pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
		return -EINVAL;
	}

	if (is_zero_ether_addr(addr)) {
		pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
		return -EINVAL;
	}

	/* Resolve the target: the bridge master itself, or one of its
	 * ports, each with its own VLAN group.
	 */
	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
				dev->name);
			return -EINVAL;
		}
		br = p->br;
		vg = nbp_vlan_group(p);
	}

	if (tb[NDA_FLAGS_EXT])
		ext_flags = nla_get_u32(tb[NDA_FLAGS_EXT]);

	/* "locked" entries are created by MAB, never by userspace. */
	if (ext_flags & NTF_EXT_LOCKED) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add FDB entry with \"locked\" flag set");
		return -EINVAL;
	}

	if (tb[NDA_FDB_EXT_ATTRS]) {
		attr = tb[NDA_FDB_EXT_ATTRS];
		err = nla_parse_nested(nfea_tb, NFEA_MAX, attr,
				       br_nda_fdb_pol, extack);
		if (err)
			return err;
	} else {
		memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1));
	}

	if (vid) {
		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v)) {
			pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
			return -EINVAL;
		}

		/* VID was specified, so use it. */
		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
				   notified, extack);
	} else {
		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
				   notified, extack);
		if (err || !vg || !vg->num_vlans)
			goto out;

		/* We have vlans configured on this port and user didn't
		 * specify a VLAN.  To be nice, add/update entry for every
		 * vlan on this port.
		 */
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			if (!br_vlan_should_use(v))
				continue;
			err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
					   nfea_tb, notified, extack);
			if (err)
				goto out;
		}
	}

out:
	return err;
}
1379
/* Delete the FDB entry for @addr/@vlan, but only if it points at @p.
 * Caller holds br->hash_lock.  Sets *@notified when a notification was
 * sent; returns -ENOENT when no matching entry exists.
 */
static int fdb_delete_by_addr_and_port(struct net_bridge *br,
				       const struct net_bridge_port *p,
				       const u8 *addr, u16 vlan, bool *notified)
{
	struct net_bridge_fdb_entry *fdb = br_fdb_find(br, addr, vlan);

	if (!fdb)
		return -ENOENT;
	if (READ_ONCE(fdb->dst) != p)
		return -ENOENT;

	fdb_delete(br, fdb, true);
	*notified = true;

	return 0;
}
1395
/* Locked wrapper around fdb_delete_by_addr_and_port(). */
static int __br_fdb_delete(struct net_bridge *br,
			   const struct net_bridge_port *p,
			   const unsigned char *addr, u16 vid, bool *notified)
{
	int ret;

	spin_lock_bh(&br->hash_lock);
	ret = fdb_delete_by_addr_and_port(br, p, addr, vid, notified);
	spin_unlock_bh(&br->hash_lock);

	return ret;
}
1408
/* Remove neighbor entry with RTM_DELNEIGH.
 *
 * When no VLAN is given, the entry is removed for VLAN 0 and for every
 * configured VLAN.  The "err &= ..." accumulation works because each
 * call returns either 0 or -ENOENT: once any deletion succeeds, err
 * becomes (and stays) 0; if none succeed it remains -ENOENT.
 */
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
		  struct net_device *dev,
		  const unsigned char *addr, u16 vid, bool *notified,
		  struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br;
	int err;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
				dev->name);
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
		br = p->br;
	}

	if (vid) {
		err = __br_fdb_delete(br, p, addr, vid, notified);
	} else {
		struct net_bridge_vlan *v;

		err = -ENOENT;
		err &= __br_fdb_delete(br, p, addr, 0, notified);
		if (!vg || !vg->num_vlans)
			return err;

		list_for_each_entry(v, &vg->vlan_list, vlist) {
			if (!br_vlan_should_use(v))
				continue;
			err &= __br_fdb_delete(br, p, addr, v->vid, notified);
		}
	}

	return err;
}
1453
/* Push all static FDB entries onto port @p's unicast address list
 * (used when a port stops being promiscuous).  On failure, roll back
 * every address added so far and return the error.
 */
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
{
	struct net_bridge_fdb_entry *f, *tmp;
	int err = 0;

	ASSERT_RTNL();

	/* the key here is that static entries change only under rtnl */
	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		/* We only care for static entries */
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			continue;
		err = dev_uc_add(p->dev, f->key.addr.addr);
		if (err)
			goto rollback;
	}
done:
	rcu_read_unlock();

	return err;

rollback:
	/* Undo the additions made before the failure: walk the list
	 * again and delete addresses until we reach the entry (f) that
	 * failed.  Safe because static entries only change under rtnl.
	 */
	hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
		/* We only care for static entries */
		if (!test_bit(BR_FDB_STATIC, &tmp->flags))
			continue;
		if (tmp == f)
			break;
		dev_uc_del(p->dev, tmp->key.addr.addr);
	}

	goto done;
}
1488
br_fdb_unsync_static(struct net_bridge * br,struct net_bridge_port * p)1489 void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
1490 {
1491 struct net_bridge_fdb_entry *f;
1492
1493 ASSERT_RTNL();
1494
1495 rcu_read_lock();
1496 hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
1497 /* We only care for static entries */
1498 if (!test_bit(BR_FDB_STATIC, &f->flags))
1499 continue;
1500
1501 dev_uc_del(p->dev, f->key.addr.addr);
1502 }
1503 rcu_read_unlock();
1504 }
1505
/* br_fdb_external_learn_add - add/refresh an externally learned entry
 * @br: bridge
 * @p: destination port, or NULL for an entry towards the bridge itself
 * @addr: MAC address
 * @vid: VLAN id
 * @locked: create/validate the entry as MAB-locked
 * @swdev_notify: true when the request came from userspace (propagate
 *	the notification back to switchdev); false when it came from a
 *	switchdev driver itself
 *
 * Returns 0 on success or a negative errno.
 */
int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid, bool locked,
			      bool swdev_notify)
{
	struct net_bridge_fdb_entry *fdb;
	bool modified = false;
	int err = 0;

	trace_br_fdb_external_learn_add(br, p, addr, vid);

	/* Locked entries only make sense on ports doing MAB. */
	if (locked && (!p || !(p->flags & BR_PORT_MAB)))
		return -EINVAL;

	spin_lock_bh(&br->hash_lock);

	fdb = br_fdb_find(br, addr, vid);
	if (!fdb) {
		unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);

		if (swdev_notify)
			flags |= BIT(BR_FDB_ADDED_BY_USER);

		if (!p)
			flags |= BIT(BR_FDB_LOCAL);

		if (locked)
			flags |= BIT(BR_FDB_LOCKED);

		fdb = fdb_create(br, p, addr, vid, flags);
		if (!fdb) {
			err = -ENOMEM;
			goto err_unlock;
		}
		fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
	} else {
		/* A locked add may only refresh an entry that is already
		 * locked on the same port; otherwise it is invalid.
		 */
		if (locked &&
		    (!test_bit(BR_FDB_LOCKED, &fdb->flags) ||
		     READ_ONCE(fdb->dst) != p)) {
			err = -EINVAL;
			goto err_unlock;
		}

		WRITE_ONCE(fdb->updated, jiffies);

		if (READ_ONCE(fdb->dst) != p) {
			WRITE_ONCE(fdb->dst, p);
			modified = true;
		}

		if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
			/* Refresh entry */
			WRITE_ONCE(fdb->used, jiffies);
		} else {
			/* Took over a locally learned entry. */
			modified = true;
		}

		if (locked != test_bit(BR_FDB_LOCKED, &fdb->flags)) {
			change_bit(BR_FDB_LOCKED, &fdb->flags);
			modified = true;
		}

		if (swdev_notify)
			set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);

		if (!p)
			set_bit(BR_FDB_LOCAL, &fdb->flags);

		/* Entry is no longer purely dynamically learned, so it
		 * stops counting against the learned-entry limit.
		 */
		if ((swdev_notify || !p) &&
		    test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &fdb->flags))
			atomic_dec(&br->fdb_n_learned);

		if (modified)
			fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
	}

err_unlock:
	spin_unlock_bh(&br->hash_lock);

	return err;
}
1586
/* Delete an externally learned entry for @addr/@vid.  Only entries
 * carrying BR_FDB_ADDED_BY_EXT_LEARN are removed; anything else (or a
 * missing entry) yields -ENOENT.
 */
int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid,
			      bool swdev_notify)
{
	struct net_bridge_fdb_entry *fdb;
	int err = -ENOENT;

	spin_lock_bh(&br->hash_lock);

	fdb = br_fdb_find(br, addr, vid);
	if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
		fdb_delete(br, fdb, swdev_notify);
		err = 0;
	}

	spin_unlock_bh(&br->hash_lock);

	return err;
}
1606
/* Set or clear the "offloaded" mark on the entry for @addr/@vid to
 * match @offloaded.  No-op when the entry is missing or already in the
 * requested state.
 */
void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
			  const unsigned char *addr, u16 vid, bool offloaded)
{
	struct net_bridge_fdb_entry *fdb;

	spin_lock_bh(&br->hash_lock);

	fdb = br_fdb_find(br, addr, vid);
	if (fdb && test_bit(BR_FDB_OFFLOADED, &fdb->flags) != offloaded)
		change_bit(BR_FDB_OFFLOADED, &fdb->flags);

	spin_unlock_bh(&br->hash_lock);
}
1620
/* Clear the "offloaded" mark on every FDB entry that points at the
 * bridge port behind @dev on VLAN @vid.  Silently ignores devices
 * that are not bridge ports.
 */
void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
{
	struct net_bridge_port *p;
	struct net_bridge_fdb_entry *f;

	ASSERT_RTNL();

	p = br_port_get_rtnl(dev);
	if (!p)
		return;

	spin_lock_bh(&p->br->hash_lock);
	hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
		if (f->dst != p || f->key.vlan_id != vid)
			continue;
		clear_bit(BR_FDB_OFFLOADED, &f->flags);
	}
	spin_unlock_bh(&p->br->hash_lock);
}
EXPORT_SYMBOL_GPL(br_fdb_clear_offload);
1640