1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Forwarding database
4 * Linux ethernet bridge
5 *
6 * Authors:
7 * Lennert Buytenhek <buytenh@gnu.org>
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/init.h>
12 #include <linux/rculist.h>
13 #include <linux/spinlock.h>
14 #include <linux/times.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/jhash.h>
18 #include <linux/random.h>
19 #include <linux/slab.h>
20 #include <linux/atomic.h>
21 #include <linux/unaligned.h>
22 #include <linux/if_vlan.h>
23 #include <net/switchdev.h>
24 #include <trace/events/bridge.h>
25 #include "br_private.h"
26
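/* FDB entries are kept in an rhashtable keyed by {MAC address, VLAN id}
 * (struct net_bridge_fdb_key), with automatic shrinking enabled.
 */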
27 static const struct rhashtable_params br_fdb_rht_params = {
28 .head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
29 .key_offset = offsetof(struct net_bridge_fdb_entry, key),
30 .key_len = sizeof(struct net_bridge_fdb_key),
31 .automatic_shrinking = true,
32 };
33
34 static struct kmem_cache *br_fdb_cache __read_mostly;
35
int __init br_fdb_init(void)
37 {
38 br_fdb_cache = KMEM_CACHE(net_bridge_fdb_entry, SLAB_HWCACHE_ALIGN);
39 if (!br_fdb_cache)
40 return -ENOMEM;
41
42 return 0;
43 }
44
void br_fdb_fini(void)
46 {
47 kmem_cache_destroy(br_fdb_cache);
48 }
49
int br_fdb_hash_init(struct net_bridge *br)
51 {
52 return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
53 }
54
void br_fdb_hash_fini(struct net_bridge *br)
56 {
57 rhashtable_destroy(&br->fdb_hash_tbl);
58 }
59
/* if a topology change is in progress, use forward_delay (default 15 sec),
 * otherwise keep entries for ageing_time (default 5 minutes)
 */
static inline unsigned long hold_time(const struct net_bridge *br)
64 {
65 return br->topology_change ? br->forward_delay : br->ageing_time;
66 }
67
static inline int has_expired(const struct net_bridge *br,
69 const struct net_bridge_fdb_entry *fdb)
70 {
71 return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
72 !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
73 time_before_eq(READ_ONCE(fdb->updated) + hold_time(br), jiffies);
74 }
75
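/* Map an FDB entry to the NUD_* neighbour state reported over netlink:
 * local entries are permanent, static entries need no ARP, expired
 * dynamic entries are stale, everything else is reachable.
 */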
static int fdb_to_nud(const struct net_bridge *br,
77 const struct net_bridge_fdb_entry *fdb)
78 {
79 if (test_bit(BR_FDB_LOCAL, &fdb->flags))
80 return NUD_PERMANENT;
81 else if (test_bit(BR_FDB_STATIC, &fdb->flags))
82 return NUD_NOARP;
83 else if (has_expired(br, fdb))
84 return NUD_STALE;
85 else
86 return NUD_REACHABLE;
87 }
88
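/* Fill one RTM_NEWNEIGH/RTM_DELNEIGH message for an FDB entry, including
 * NDA_LLADDR, NDA_MASTER, NDA_FLAGS_EXT, NDA_CACHEINFO and, when
 * applicable, NDA_VLAN and the NDA_FDB_EXT_ATTRS activity-notify nest.
 */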
static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
90 const struct net_bridge_fdb_entry *fdb,
91 u32 portid, u32 seq, int type, unsigned int flags)
92 {
93 const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
94 unsigned long now = jiffies;
95 struct nda_cacheinfo ci;
96 struct nlmsghdr *nlh;
97 struct ndmsg *ndm;
98 u32 ext_flags = 0;
99
100 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
101 if (nlh == NULL)
102 return -EMSGSIZE;
103
104 ndm = nlmsg_data(nlh);
105 ndm->ndm_family = AF_BRIDGE;
106 ndm->ndm_pad1 = 0;
107 ndm->ndm_pad2 = 0;
108 ndm->ndm_flags = 0;
109 ndm->ndm_type = 0;
110 ndm->ndm_ifindex = dst ? dst->dev->ifindex : br->dev->ifindex;
111 ndm->ndm_state = fdb_to_nud(br, fdb);
112
113 if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
114 ndm->ndm_flags |= NTF_OFFLOADED;
115 if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
116 ndm->ndm_flags |= NTF_EXT_LEARNED;
117 if (test_bit(BR_FDB_STICKY, &fdb->flags))
118 ndm->ndm_flags |= NTF_STICKY;
119 if (test_bit(BR_FDB_LOCKED, &fdb->flags))
120 ext_flags |= NTF_EXT_LOCKED;
121
122 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
123 goto nla_put_failure;
124 if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
125 goto nla_put_failure;
126 if (nla_put_u32(skb, NDA_FLAGS_EXT, ext_flags))
127 goto nla_put_failure;
128
129 ci.ndm_used = jiffies_to_clock_t(now - READ_ONCE(fdb->used));
130 ci.ndm_confirmed = 0;
131 ci.ndm_updated = jiffies_to_clock_t(now - READ_ONCE(fdb->updated));
132 ci.ndm_refcnt = 0;
133 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
134 goto nla_put_failure;
135
136 if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
137 &fdb->key.vlan_id))
138 goto nla_put_failure;
139
140 if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) {
141 struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS);
142 u8 notify_bits = FDB_NOTIFY_BIT;
143
144 if (!nest)
145 goto nla_put_failure;
146 if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
147 notify_bits |= FDB_NOTIFY_INACTIVE_BIT;
148
149 if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) {
150 nla_nest_cancel(skb, nest);
151 goto nla_put_failure;
152 }
153
154 nla_nest_end(skb, nest);
155 }
156
157 nlmsg_end(skb, nlh);
158 return 0;
159
160 nla_put_failure:
161 nlmsg_cancel(skb, nlh);
162 return -EMSGSIZE;
163 }
164
static inline size_t fdb_nlmsg_size(void)
166 {
167 return NLMSG_ALIGN(sizeof(struct ndmsg))
168 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
169 + nla_total_size(sizeof(u32)) /* NDA_MASTER */
170 + nla_total_size(sizeof(u32)) /* NDA_FLAGS_EXT */
171 + nla_total_size(sizeof(u16)) /* NDA_VLAN */
172 + nla_total_size(sizeof(struct nda_cacheinfo))
173 + nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
174 + nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */
175 }
176
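/* Notify switchdev (if requested) and rtnetlink (RTNLGRP_NEIGH) listeners
 * about a change to an FDB entry.
 */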
static void fdb_notify(struct net_bridge *br,
178 const struct net_bridge_fdb_entry *fdb, int type,
179 bool swdev_notify)
180 {
181 struct net *net = dev_net(br->dev);
182 struct sk_buff *skb;
183 int err = -ENOBUFS;
184
185 if (swdev_notify)
186 br_switchdev_fdb_notify(br, fdb, type);
187
188 skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
189 if (skb == NULL)
190 goto errout;
191
192 err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
193 if (err < 0) {
194 /* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
195 WARN_ON(err == -EMSGSIZE);
196 kfree_skb(skb);
197 goto errout;
198 }
199 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
200 return;
201 errout:
202 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
203 }
204
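/* Look up an entry by {MAC, VLAN}; the caller must hold the RCU read lock. */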
static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
206 const unsigned char *addr,
207 __u16 vid)
208 {
209 struct net_bridge_fdb_key key;
210
211 WARN_ON_ONCE(!rcu_read_lock_held());
212
213 key.vlan_id = vid;
214 memcpy(key.addr.addr, addr, sizeof(key.addr.addr));
215
216 return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
217 }
218
219 /* requires bridge hash_lock */
static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
221 const unsigned char *addr,
222 __u16 vid)
223 {
224 struct net_bridge_fdb_entry *fdb;
225
226 lockdep_assert_held_once(&br->hash_lock);
227
228 rcu_read_lock();
229 fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
230 rcu_read_unlock();
231
232 return fdb;
233 }
234
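/* Return the net_device of the port that currently holds @addr/@vid, or
 * NULL if the address is unknown or belongs to the bridge itself.
 * Usage sketch (assumes the caller holds RTNL; names are illustrative):
 *
 *	struct net_device *port_dev;
 *
 *	port_dev = br_fdb_find_port(br_dev, mac, vid);
 *	if (port_dev)
 *		use(port_dev);	// hypothetical consumer, for illustration
 */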
struct net_device *br_fdb_find_port(const struct net_device *br_dev,
236 const unsigned char *addr,
237 __u16 vid)
238 {
239 struct net_bridge_fdb_entry *f;
240 struct net_device *dev = NULL;
241 struct net_bridge *br;
242
243 ASSERT_RTNL();
244
245 if (!netif_is_bridge_master(br_dev))
246 return NULL;
247
248 br = netdev_priv(br_dev);
249 rcu_read_lock();
250 f = br_fdb_find_rcu(br, addr, vid);
251 if (f && f->dst)
252 dev = f->dst->dev;
253 rcu_read_unlock();
254
255 return dev;
256 }
257 EXPORT_SYMBOL_GPL(br_fdb_find_port);
258
struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
260 const unsigned char *addr,
261 __u16 vid)
262 {
263 return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
264 }
265
/* When a static FDB entry is added, the MAC address from the entry is
 * added to the bridge private HW address list and all required ports
 * are then updated with the new information.
 * Called under RTNL.
 */
static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
272 {
273 int err;
274 struct net_bridge_port *p;
275
276 ASSERT_RTNL();
277
278 list_for_each_entry(p, &br->port_list, list) {
279 if (!br_promisc_port(p)) {
280 err = dev_uc_add(p->dev, addr);
281 if (err)
282 goto undo;
283 }
284 }
285
286 return;
287 undo:
288 list_for_each_entry_continue_reverse(p, &br->port_list, list) {
289 if (!br_promisc_port(p))
290 dev_uc_del(p->dev, addr);
291 }
292 }
293
/* When a static FDB entry is deleted, the HW address from that entry is
 * also removed from the bridge private HW address list and all relevant
 * ports are updated accordingly.
 * Called under RTNL.
 */
static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
300 {
301 struct net_bridge_port *p;
302
303 ASSERT_RTNL();
304
305 list_for_each_entry(p, &br->port_list, list) {
306 if (!br_promisc_port(p))
307 dev_uc_del(p->dev, addr);
308 }
309 }
310
static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
312 bool swdev_notify)
313 {
314 trace_fdb_delete(br, f);
315
316 if (test_bit(BR_FDB_STATIC, &f->flags))
317 fdb_del_hw_addr(br, f->key.addr.addr);
318
319 hlist_del_init_rcu(&f->fdb_node);
320 rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
321 br_fdb_rht_params);
322 if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &f->flags))
323 atomic_dec(&br->fdb_n_learned);
324 fdb_notify(br, f, RTM_DELNEIGH, swdev_notify);
325 kfree_rcu(f, rcu);
326 }
327
328 /* Delete a local entry if no other port had the same address.
329 *
330 * This function should only be called on entries with BR_FDB_LOCAL set,
331 * so even with BR_FDB_ADDED_BY_USER cleared we never need to increase
332 * the accounting for dynamically learned entries again.
333 */
static void fdb_delete_local(struct net_bridge *br,
335 const struct net_bridge_port *p,
336 struct net_bridge_fdb_entry *f)
337 {
338 const unsigned char *addr = f->key.addr.addr;
339 struct net_bridge_vlan_group *vg;
340 const struct net_bridge_vlan *v;
341 struct net_bridge_port *op;
342 u16 vid = f->key.vlan_id;
343
344 /* Maybe another port has same hw addr? */
345 list_for_each_entry(op, &br->port_list, list) {
346 vg = nbp_vlan_group(op);
347 if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
348 (!vid || br_vlan_find(vg, vid))) {
349 f->dst = op;
350 clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
351 return;
352 }
353 }
354
355 vg = br_vlan_group(br);
356 v = br_vlan_find(vg, vid);
357 /* Maybe bridge device has same hw addr? */
358 if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
359 (!vid || (v && br_vlan_should_use(v)))) {
360 f->dst = NULL;
361 clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
362 return;
363 }
364
365 fdb_delete(br, f, true);
366 }
367
void br_fdb_find_delete_local(struct net_bridge *br,
369 const struct net_bridge_port *p,
370 const unsigned char *addr, u16 vid)
371 {
372 struct net_bridge_fdb_entry *f;
373
374 spin_lock_bh(&br->hash_lock);
375 f = br_fdb_find(br, addr, vid);
376 if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
377 !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
378 fdb_delete_local(br, p, f);
379 spin_unlock_bh(&br->hash_lock);
380 }
381
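/* Allocate and insert a new FDB entry. Entries that are neither user-added
 * nor local count as dynamically learned and are refused once the
 * fdb_max_learned limit (if set) has been reached. Returns NULL on failure.
 */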
static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
383 struct net_bridge_port *source,
384 const unsigned char *addr,
385 __u16 vid,
386 unsigned long flags)
387 {
388 bool learned = !test_bit(BR_FDB_ADDED_BY_USER, &flags) &&
389 !test_bit(BR_FDB_LOCAL, &flags);
390 u32 max_learned = READ_ONCE(br->fdb_max_learned);
391 struct net_bridge_fdb_entry *fdb;
392 int err;
393
394 if (likely(learned)) {
395 int n_learned = atomic_read(&br->fdb_n_learned);
396
397 if (unlikely(max_learned && n_learned >= max_learned))
398 return NULL;
399 __set_bit(BR_FDB_DYNAMIC_LEARNED, &flags);
400 }
401
402 fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
403 if (!fdb)
404 return NULL;
405
406 memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
407 WRITE_ONCE(fdb->dst, source);
408 fdb->key.vlan_id = vid;
409 fdb->flags = flags;
410 fdb->updated = fdb->used = jiffies;
411 err = rhashtable_lookup_insert_fast(&br->fdb_hash_tbl, &fdb->rhnode,
412 br_fdb_rht_params);
413 if (err) {
414 kmem_cache_free(br_fdb_cache, fdb);
415 return NULL;
416 }
417
418 if (likely(learned))
419 atomic_inc(&br->fdb_n_learned);
420
421 hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);
422
423 return fdb;
424 }
425
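/* Add a local (permanent) entry for a port or bridge address, replacing a
 * clashing learned entry if one exists. Requires br->hash_lock.
 */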
static int fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
427 const unsigned char *addr, u16 vid)
428 {
429 struct net_bridge_fdb_entry *fdb;
430
431 if (!is_valid_ether_addr(addr))
432 return -EINVAL;
433
434 fdb = br_fdb_find(br, addr, vid);
435 if (fdb) {
436 /* it is okay to have multiple ports with same
437 * address, just use the first one.
438 */
439 if (test_bit(BR_FDB_LOCAL, &fdb->flags))
440 return 0;
441 br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
442 source ? source->dev->name : br->dev->name, addr, vid);
443 fdb_delete(br, fdb, true);
444 }
445
446 fdb = fdb_create(br, source, addr, vid,
447 BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
448 if (!fdb)
449 return -ENOMEM;
450
451 fdb_add_hw_addr(br, addr);
452 fdb_notify(br, fdb, RTM_NEWNEIGH, true);
453 return 0;
454 }
455
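/* A port's MAC address has changed: drop the auto-generated local entries
 * that pointed at the port and add local entries for newaddr on VLAN 0
 * and, unless local entries live on VLAN 0 only, on each port VLAN.
 */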
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
457 {
458 struct net_bridge_vlan_group *vg;
459 struct net_bridge_fdb_entry *f;
460 struct net_bridge *br = p->br;
461 struct net_bridge_vlan *v;
462 bool local_vlan_0;
463
464 local_vlan_0 = br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0);
465
466 spin_lock_bh(&br->hash_lock);
467 vg = nbp_vlan_group(p);
468 hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
469 if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
470 !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
471 /* delete old one */
472 fdb_delete_local(br, p, f);
473
474 /* if this port has no vlan information configured, or
475 * local entries are only kept on VLAN 0, we can safely
476 * be done at this point.
477 */
478 if (!vg || !vg->num_vlans || local_vlan_0)
479 goto insert;
480 }
481 }
482
483 insert:
484 /* insert new address, may fail if invalid address or dup. */
485 fdb_add_local(br, p, newaddr, 0);
486
487 if (!vg || !vg->num_vlans || local_vlan_0)
488 goto done;
489
490 /* Now add entries for every VLAN configured on the port.
491 * This function runs under RTNL so the bitmap will not change
492 * from under us.
493 */
494 list_for_each_entry(v, &vg->vlan_list, vlist)
495 fdb_add_local(br, p, newaddr, v->vid);
496
497 done:
498 spin_unlock_bh(&br->hash_lock);
499 }
500
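/* The bridge device's own MAC address has changed: replace the local
 * entries for the old address with entries for newaddr on VLAN 0 and,
 * unless local entries live on VLAN 0 only, on every bridge VLAN.
 */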
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
502 {
503 struct net_bridge_vlan_group *vg;
504 struct net_bridge_fdb_entry *f;
505 struct net_bridge_vlan *v;
506 bool local_vlan_0;
507
508 local_vlan_0 = br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0);
509
510 spin_lock_bh(&br->hash_lock);
511
512 /* If old entry was unassociated with any port, then delete it. */
513 f = br_fdb_find(br, br->dev->dev_addr, 0);
514 if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
515 !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
516 fdb_delete_local(br, NULL, f);
517
518 fdb_add_local(br, NULL, newaddr, 0);
519 vg = br_vlan_group(br);
520 if (!vg || !vg->num_vlans || local_vlan_0)
521 goto out;
522 /* Now remove and add entries for every VLAN configured on the
523 * bridge. This function runs under RTNL so the bitmap will not
524 * change from under us.
525 */
526 list_for_each_entry(v, &vg->vlan_list, vlist) {
527 if (!br_vlan_should_use(v))
528 continue;
529 f = br_fdb_find(br, br->dev->dev_addr, v->vid);
530 if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
531 !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
532 fdb_delete_local(br, NULL, f);
533 fdb_add_local(br, NULL, newaddr, v->vid);
534 }
535 out:
536 spin_unlock_bh(&br->hash_lock);
537 }
538
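/* Garbage collection work: expire dynamic entries whose hold time has
 * elapsed, emit inactivity notifications for tracked entries and re-arm
 * the delayed work for the next soonest expiry.
 */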
void br_fdb_cleanup(struct work_struct *work)
540 {
541 struct net_bridge *br = container_of(work, struct net_bridge,
542 gc_work.work);
543 struct net_bridge_fdb_entry *f = NULL;
544 unsigned long delay = hold_time(br);
545 unsigned long work_delay = delay;
546 unsigned long now = jiffies;
547
/* This part is tricky: in order to avoid blocking learning and,
 * consequently, forwarding, we rely on RCU to delete objects with
 * delayed freeing, which allows us to keep traversing the list.
 */
552 rcu_read_lock();
553 hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
554 unsigned long this_timer = READ_ONCE(f->updated) + delay;
555
556 if (test_bit(BR_FDB_STATIC, &f->flags) ||
557 test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) {
558 if (test_bit(BR_FDB_NOTIFY, &f->flags)) {
559 if (time_after(this_timer, now))
560 work_delay = min(work_delay,
561 this_timer - now);
562 else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE,
563 &f->flags))
564 fdb_notify(br, f, RTM_NEWNEIGH, false);
565 }
566 continue;
567 }
568
569 if (time_after(this_timer, now)) {
570 work_delay = min(work_delay, this_timer - now);
571 } else {
572 spin_lock_bh(&br->hash_lock);
573 if (!hlist_unhashed(&f->fdb_node))
574 fdb_delete(br, f, true);
575 spin_unlock_bh(&br->hash_lock);
576 }
577 }
578 rcu_read_unlock();
579
580 /* Cleanup minimum 10 milliseconds apart */
581 work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
582 mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
583 }
584
static void br_fdb_delete_locals_per_vlan_port(struct net_bridge *br,
586 struct net_bridge_port *p)
587 {
588 struct net_bridge_vlan_group *vg;
589 struct net_bridge_vlan *v;
590 struct net_device *dev;
591
592 if (p) {
593 vg = nbp_vlan_group(p);
594 dev = p->dev;
595 } else {
596 vg = br_vlan_group(br);
597 dev = br->dev;
598 }
599
600 list_for_each_entry(v, &vg->vlan_list, vlist)
601 br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
602 }
603
static void br_fdb_delete_locals_per_vlan(struct net_bridge *br)
605 {
606 struct net_bridge_port *p;
607
608 ASSERT_RTNL();
609
610 list_for_each_entry(p, &br->port_list, list)
611 br_fdb_delete_locals_per_vlan_port(br, p);
612
613 br_fdb_delete_locals_per_vlan_port(br, NULL);
614 }
615
static int br_fdb_insert_locals_per_vlan_port(struct net_bridge *br,
617 struct net_bridge_port *p,
618 struct netlink_ext_ack *extack)
619 {
620 struct net_bridge_vlan_group *vg;
621 struct net_bridge_vlan *v;
622 struct net_device *dev;
623 int err;
624
625 if (p) {
626 vg = nbp_vlan_group(p);
627 dev = p->dev;
628 } else {
629 vg = br_vlan_group(br);
630 dev = br->dev;
631 }
632
633 list_for_each_entry(v, &vg->vlan_list, vlist) {
634 if (!br_vlan_should_use(v))
635 continue;
636
637 err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
638 if (err)
639 return err;
640 }
641
642 return 0;
643 }
644
static int br_fdb_insert_locals_per_vlan(struct net_bridge *br,
646 struct netlink_ext_ack *extack)
647 {
648 struct net_bridge_port *p;
649 int err;
650
651 ASSERT_RTNL();
652
653 list_for_each_entry(p, &br->port_list, list) {
654 err = br_fdb_insert_locals_per_vlan_port(br, p, extack);
655 if (err)
656 goto rollback;
657 }
658
659 err = br_fdb_insert_locals_per_vlan_port(br, NULL, extack);
660 if (err)
661 goto rollback;
662
663 return 0;
664
665 rollback:
666 NL_SET_ERR_MSG_MOD(extack, "fdb_local_vlan_0 toggle: FDB entry insertion failed");
667 br_fdb_delete_locals_per_vlan(br);
668 return err;
669 }
670
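/* Toggle the fdb_local_vlan_0 option: when it is turned off, populate
 * per-VLAN local entries; when it is turned on, delete them so local
 * entries are kept on VLAN 0 only.
 */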
int br_fdb_toggle_local_vlan_0(struct net_bridge *br, bool on,
672 struct netlink_ext_ack *extack)
673 {
674 if (!on)
675 return br_fdb_insert_locals_per_vlan(br, extack);
676
677 br_fdb_delete_locals_per_vlan(br);
678 return 0;
679 }
680
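/* Return true if the entry matches the flush descriptor; descriptor
 * fields that are left unset match any entry.
 */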
static bool __fdb_flush_matches(const struct net_bridge *br,
682 const struct net_bridge_fdb_entry *f,
683 const struct net_bridge_fdb_flush_desc *desc)
684 {
685 const struct net_bridge_port *dst = READ_ONCE(f->dst);
686 int port_ifidx = dst ? dst->dev->ifindex : br->dev->ifindex;
687
688 if (desc->vlan_id && desc->vlan_id != f->key.vlan_id)
689 return false;
690 if (desc->port_ifindex && desc->port_ifindex != port_ifidx)
691 return false;
692 if (desc->flags_mask && (f->flags & desc->flags_mask) != desc->flags)
693 return false;
694
695 return true;
696 }
697
698 /* Flush forwarding database entries matching the description */
void br_fdb_flush(struct net_bridge *br,
700 const struct net_bridge_fdb_flush_desc *desc)
701 {
702 struct net_bridge_fdb_entry *f;
703
704 rcu_read_lock();
705 hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
706 if (!__fdb_flush_matches(br, f, desc))
707 continue;
708
709 spin_lock_bh(&br->hash_lock);
710 if (!hlist_unhashed(&f->fdb_node))
711 fdb_delete(br, f, true);
712 spin_unlock_bh(&br->hash_lock);
713 }
714 rcu_read_unlock();
715 }
716
static unsigned long __ndm_state_to_fdb_flags(u16 ndm_state)
718 {
719 unsigned long flags = 0;
720
721 if (ndm_state & NUD_PERMANENT)
722 __set_bit(BR_FDB_LOCAL, &flags);
723 if (ndm_state & NUD_NOARP)
724 __set_bit(BR_FDB_STATIC, &flags);
725
726 return flags;
727 }
728
static unsigned long __ndm_flags_to_fdb_flags(u8 ndm_flags)
730 {
731 unsigned long flags = 0;
732
733 if (ndm_flags & NTF_USE)
734 __set_bit(BR_FDB_ADDED_BY_USER, &flags);
735 if (ndm_flags & NTF_EXT_LEARNED)
736 __set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &flags);
737 if (ndm_flags & NTF_OFFLOADED)
738 __set_bit(BR_FDB_OFFLOADED, &flags);
739 if (ndm_flags & NTF_STICKY)
740 __set_bit(BR_FDB_STICKY, &flags);
741
742 return flags;
743 }
744
static int __fdb_flush_validate_ifindex(const struct net_bridge *br,
746 int ifindex,
747 struct netlink_ext_ack *extack)
748 {
749 const struct net_device *dev;
750
751 dev = __dev_get_by_index(dev_net(br->dev), ifindex);
752 if (!dev) {
753 NL_SET_ERR_MSG_MOD(extack, "Unknown flush device ifindex");
754 return -ENODEV;
755 }
756 if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
757 NL_SET_ERR_MSG_MOD(extack, "Flush device is not a bridge or bridge port");
758 return -EINVAL;
759 }
760 if (netif_is_bridge_master(dev) && dev != br->dev) {
761 NL_SET_ERR_MSG_MOD(extack,
762 "Flush bridge device does not match target bridge device");
763 return -EINVAL;
764 }
765 if (netif_is_bridge_port(dev)) {
766 struct net_bridge_port *p = br_port_get_rtnl(dev);
767
768 if (p->br != br) {
769 NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
770 return -EINVAL;
771 }
772 }
773
774 return 0;
775 }
776
777 static const struct nla_policy br_fdb_del_bulk_policy[NDA_MAX + 1] = {
778 [NDA_VLAN] = NLA_POLICY_RANGE(NLA_U16, 1, VLAN_N_VID - 2),
779 [NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
780 [NDA_NDM_STATE_MASK] = { .type = NLA_U16 },
781 [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 },
782 };
783
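/* Bulk FDB delete: parse the RTM_DELNEIGH request into a flush descriptor
 * (VLAN, port ifindex, state/flag masks) and flush all matching entries.
 */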
int br_fdb_delete_bulk(struct nlmsghdr *nlh, struct net_device *dev,
785 struct netlink_ext_ack *extack)
786 {
787 struct net_bridge_fdb_flush_desc desc = {};
788 struct ndmsg *ndm = nlmsg_data(nlh);
789 struct net_bridge_port *p = NULL;
790 struct nlattr *tb[NDA_MAX + 1];
791 struct net_bridge *br;
792 u8 ndm_flags;
793 int err;
794
795 ndm_flags = ndm->ndm_flags & ~FDB_FLUSH_IGNORED_NDM_FLAGS;
796
797 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX,
798 br_fdb_del_bulk_policy, extack);
799 if (err)
800 return err;
801
802 if (netif_is_bridge_master(dev)) {
803 br = netdev_priv(dev);
804 } else {
805 p = br_port_get_rtnl(dev);
806 if (!p) {
807 NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge port");
808 return -EINVAL;
809 }
810 br = p->br;
811 }
812
813 if (tb[NDA_VLAN])
814 desc.vlan_id = nla_get_u16(tb[NDA_VLAN]);
815
816 if (ndm_flags & ~FDB_FLUSH_ALLOWED_NDM_FLAGS) {
817 NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm flag bits set");
818 return -EINVAL;
819 }
820 if (ndm->ndm_state & ~FDB_FLUSH_ALLOWED_NDM_STATES) {
821 NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm state bits set");
822 return -EINVAL;
823 }
824
825 desc.flags |= __ndm_state_to_fdb_flags(ndm->ndm_state);
826 desc.flags |= __ndm_flags_to_fdb_flags(ndm_flags);
827 if (tb[NDA_NDM_STATE_MASK]) {
828 u16 ndm_state_mask = nla_get_u16(tb[NDA_NDM_STATE_MASK]);
829
830 desc.flags_mask |= __ndm_state_to_fdb_flags(ndm_state_mask);
831 }
832 if (tb[NDA_NDM_FLAGS_MASK]) {
833 u8 ndm_flags_mask = nla_get_u8(tb[NDA_NDM_FLAGS_MASK]);
834
835 desc.flags_mask |= __ndm_flags_to_fdb_flags(ndm_flags_mask);
836 }
837 if (tb[NDA_IFINDEX]) {
838 int ifidx = nla_get_s32(tb[NDA_IFINDEX]);
839
840 err = __fdb_flush_validate_ifindex(br, ifidx, extack);
841 if (err)
842 return err;
843 desc.port_ifindex = ifidx;
844 } else if (p) {
845 /* flush was invoked with port device and NTF_MASTER */
846 desc.port_ifindex = p->dev->ifindex;
847 }
848
849 br_debug(br, "flushing port ifindex: %d vlan id: %u flags: 0x%lx flags mask: 0x%lx\n",
850 desc.port_ifindex, desc.vlan_id, desc.flags, desc.flags_mask);
851
852 br_fdb_flush(br, &desc);
853
854 return 0;
855 }
856
/* Flush all entries referring to a specific port.
 * If do_all is set, also flush static entries.
 * If vid is set, only delete entries that match the vlan_id.
 */
void br_fdb_delete_by_port(struct net_bridge *br,
862 const struct net_bridge_port *p,
863 u16 vid,
864 int do_all)
865 {
866 struct net_bridge_fdb_entry *f;
867 struct hlist_node *tmp;
868
869 spin_lock_bh(&br->hash_lock);
870 hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
871 if (f->dst != p)
872 continue;
873
874 if (!do_all)
875 if (test_bit(BR_FDB_STATIC, &f->flags) ||
876 (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags) &&
877 !test_bit(BR_FDB_OFFLOADED, &f->flags)) ||
878 (vid && f->key.vlan_id != vid))
879 continue;
880
881 if (test_bit(BR_FDB_LOCAL, &f->flags))
882 fdb_delete_local(br, p, f);
883 else
884 fdb_delete(br, f, true);
885 }
886 spin_unlock_bh(&br->hash_lock);
887 }
888
889 #if IS_ENABLED(CONFIG_ATM_LANE)
890 /* Interface used by ATM LANE hook to test
891 * if an addr is on some other bridge port */
int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
893 {
894 struct net_bridge_fdb_entry *fdb;
895 struct net_bridge_port *port;
896 int ret;
897
898 rcu_read_lock();
899 port = br_port_get_rcu(dev);
900 if (!port)
901 ret = 0;
902 else {
903 const struct net_bridge_port *dst = NULL;
904
905 fdb = br_fdb_find_rcu(port->br, addr, 0);
906 if (fdb)
907 dst = READ_ONCE(fdb->dst);
908
909 ret = dst && dst->dev != dev &&
910 dst->state == BR_STATE_FORWARDING;
911 }
912 rcu_read_unlock();
913
914 return ret;
915 }
916 #endif /* CONFIG_ATM_LANE */
917
918 /*
919 * Fill buffer with forwarding table records in
920 * the API format.
921 */
int br_fdb_fillbuf(struct net_bridge *br, void *buf,
923 unsigned long maxnum, unsigned long skip)
924 {
925 struct net_bridge_fdb_entry *f;
926 struct __fdb_entry *fe = buf;
927 unsigned long delta;
928 int num = 0;
929
930 memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
931
932 rcu_read_lock();
933 hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
934 if (num >= maxnum)
935 break;
936
937 if (has_expired(br, f))
938 continue;
939
940 /* ignore pseudo entry for local MAC address */
941 if (!f->dst)
942 continue;
943
944 if (skip) {
945 --skip;
946 continue;
947 }
948
949 /* convert from internal format to API */
950 memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);
951
952 /* due to ABI compat need to split into hi/lo */
953 fe->port_no = f->dst->port_no;
954 fe->port_hi = f->dst->port_no >> 8;
955
956 fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
957 if (!test_bit(BR_FDB_STATIC, &f->flags)) {
958 delta = jiffies - READ_ONCE(f->updated);
959 fe->ageing_timer_value =
960 jiffies_delta_to_clock_t(delta);
961 }
962 ++fe;
963 ++num;
964 }
965 rcu_read_unlock();
966
967 return num;
968 }
969
970 /* Add entry for local address of interface */
int br_fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
972 const unsigned char *addr, u16 vid)
973 {
974 int ret;
975
976 spin_lock_bh(&br->hash_lock);
977 ret = fdb_add_local(br, source, addr, vid);
978 spin_unlock_bh(&br->hash_lock);
979 return ret;
980 }
981
982 /* returns true if the fdb was modified */
static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb)
984 {
985 return !!(test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags) &&
986 test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags));
987 }
988
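/* Learning path: refresh or create an entry for a frame received from
 * @source. Expected to run under RCU with BHs disabled (see the NTF_USE
 * path in __br_fdb_add); only the slow create branch takes hash_lock.
 */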
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
990 const unsigned char *addr, u16 vid, unsigned long flags)
991 {
992 struct net_bridge_fdb_entry *fdb;
993
994 /* some users want to always flood. */
995 if (hold_time(br) == 0)
996 return;
997
998 fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
999 if (likely(fdb)) {
1000 /* attempt to update an entry for a local interface */
1001 if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
1002 if (net_ratelimit())
1003 br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
1004 source->dev->name, addr, vid);
1005 } else {
1006 unsigned long now = jiffies;
1007 bool fdb_modified = false;
1008
1009 if (now != READ_ONCE(fdb->updated)) {
1010 WRITE_ONCE(fdb->updated, now);
1011 fdb_modified = __fdb_mark_active(fdb);
1012 }
1013
1014 /* fastpath: update of existing entry */
1015 if (unlikely(source != READ_ONCE(fdb->dst) &&
1016 !test_bit(BR_FDB_STICKY, &fdb->flags))) {
1017 br_switchdev_fdb_notify(br, fdb, RTM_DELNEIGH);
1018 WRITE_ONCE(fdb->dst, source);
1019 fdb_modified = true;
1020 /* Take over HW learned entry */
1021 if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
1022 &fdb->flags)))
1023 clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
1024 &fdb->flags);
1025 /* Clear locked flag when roaming to an
1026 * unlocked port.
1027 */
1028 if (unlikely(test_bit(BR_FDB_LOCKED, &fdb->flags)))
1029 clear_bit(BR_FDB_LOCKED, &fdb->flags);
1030 }
1031
1032 if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags))) {
1033 set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
1034 if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED,
1035 &fdb->flags))
1036 atomic_dec(&br->fdb_n_learned);
1037 }
1038 if (unlikely(fdb_modified)) {
1039 trace_br_fdb_update(br, source, addr, vid, flags);
1040 fdb_notify(br, fdb, RTM_NEWNEIGH, true);
1041 }
1042 }
1043 } else {
1044 spin_lock(&br->hash_lock);
1045 fdb = fdb_create(br, source, addr, vid, flags);
1046 if (fdb) {
1047 trace_br_fdb_update(br, source, addr, vid, flags);
1048 fdb_notify(br, fdb, RTM_NEWNEIGH, true);
1049 }
/* else we lost the race and someone else inserted
 * it first; don't bother updating
 */
1053 spin_unlock(&br->hash_lock);
1054 }
1055 }
1056
1057 /* Dump information about entries, in response to GETNEIGH */
int br_fdb_dump(struct sk_buff *skb,
1059 struct netlink_callback *cb,
1060 struct net_device *dev,
1061 struct net_device *filter_dev,
1062 int *idx)
1063 {
1064 struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
1065 struct net_bridge *br = netdev_priv(dev);
1066 struct net_bridge_fdb_entry *f;
1067 int err = 0;
1068
1069 if (!netif_is_bridge_master(dev))
1070 return err;
1071
1072 if (!filter_dev) {
1073 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
1074 if (err < 0)
1075 return err;
1076 }
1077
1078 rcu_read_lock();
1079 hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
1080 if (*idx < ctx->fdb_idx)
1081 goto skip;
1082 if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
1083 if (filter_dev != dev)
1084 goto skip;
/* !f->dst is a special case for the bridge itself:
 * it means the MAC belongs to the bridge device.
 * Therefore we need a little more filtering and
 * only want to dump the !f->dst case.
 */
1090 if (f->dst)
1091 goto skip;
1092 }
1093 if (!filter_dev && f->dst)
1094 goto skip;
1095
1096 err = fdb_fill_info(skb, br, f,
1097 NETLINK_CB(cb->skb).portid,
1098 cb->nlh->nlmsg_seq,
1099 RTM_NEWNEIGH,
1100 NLM_F_MULTI);
1101 if (err < 0)
1102 break;
1103 skip:
1104 *idx += 1;
1105 }
1106 rcu_read_unlock();
1107
1108 return err;
1109 }
1110
int br_fdb_get(struct sk_buff *skb,
1112 struct nlattr *tb[],
1113 struct net_device *dev,
1114 const unsigned char *addr,
1115 u16 vid, u32 portid, u32 seq,
1116 struct netlink_ext_ack *extack)
1117 {
1118 struct net_bridge *br = netdev_priv(dev);
1119 struct net_bridge_fdb_entry *f;
1120 int err = 0;
1121
1122 rcu_read_lock();
1123 f = br_fdb_find_rcu(br, addr, vid);
1124 if (!f) {
1125 NL_SET_ERR_MSG(extack, "Fdb entry not found");
1126 err = -ENOENT;
1127 goto errout;
1128 }
1129
1130 err = fdb_fill_info(skb, br, f, portid, seq,
1131 RTM_NEWNEIGH, 0);
1132 errout:
1133 rcu_read_unlock();
1134 return err;
1135 }
1136
1137 /* returns true if the fdb is modified */
static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify)
1139 {
1140 bool modified = false;
1141
/* allow marking an entry as inactive, usually done on creation */
1143 if ((notify & FDB_NOTIFY_INACTIVE_BIT) &&
1144 !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
1145 modified = true;
1146
1147 if ((notify & FDB_NOTIFY_BIT) &&
1148 !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) {
1149 /* enabled activity tracking */
1150 modified = true;
1151 } else if (!(notify & FDB_NOTIFY_BIT) &&
1152 test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) {
1153 /* disabled activity tracking, clear notify state */
1154 clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags);
1155 modified = true;
1156 }
1157
1158 return modified;
1159 }
1160
1161 /* Update (create or replace) forwarding database entry */
static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
1163 const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid,
1164 struct nlattr *nfea_tb[])
1165 {
1166 bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY);
1167 bool refresh = !nfea_tb[NFEA_DONT_REFRESH];
1168 struct net_bridge_fdb_entry *fdb;
1169 u16 state = ndm->ndm_state;
1170 bool modified = false;
1171 u8 notify = 0;
1172
1173 /* If the port cannot learn allow only local and static entries */
1174 if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
1175 !(source->state == BR_STATE_LEARNING ||
1176 source->state == BR_STATE_FORWARDING))
1177 return -EPERM;
1178
1179 if (!source && !(state & NUD_PERMANENT)) {
1180 pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
1181 br->dev->name);
1182 return -EINVAL;
1183 }
1184
1185 if (is_sticky && (state & NUD_PERMANENT))
1186 return -EINVAL;
1187
1188 if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) {
1189 notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]);
1190 if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) ||
1191 (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT)
1192 return -EINVAL;
1193 }
1194
1195 fdb = br_fdb_find(br, addr, vid);
1196 if (fdb == NULL) {
1197 if (!(flags & NLM_F_CREATE))
1198 return -ENOENT;
1199
1200 fdb = fdb_create(br, source, addr, vid,
1201 BIT(BR_FDB_ADDED_BY_USER));
1202 if (!fdb)
1203 return -ENOMEM;
1204
1205 modified = true;
1206 } else {
1207 if (flags & NLM_F_EXCL)
1208 return -EEXIST;
1209
1210 if (READ_ONCE(fdb->dst) != source) {
1211 WRITE_ONCE(fdb->dst, source);
1212 modified = true;
1213 }
1214
1215 set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
1216 if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &fdb->flags))
1217 atomic_dec(&br->fdb_n_learned);
1218 }
1219
1220 if (fdb_to_nud(br, fdb) != state) {
1221 if (state & NUD_PERMANENT) {
1222 set_bit(BR_FDB_LOCAL, &fdb->flags);
1223 if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
1224 fdb_add_hw_addr(br, addr);
1225 } else if (state & NUD_NOARP) {
1226 clear_bit(BR_FDB_LOCAL, &fdb->flags);
1227 if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
1228 fdb_add_hw_addr(br, addr);
1229 } else {
1230 clear_bit(BR_FDB_LOCAL, &fdb->flags);
1231 if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
1232 fdb_del_hw_addr(br, addr);
1233 }
1234
1235 modified = true;
1236 }
1237
1238 if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
1239 change_bit(BR_FDB_STICKY, &fdb->flags);
1240 modified = true;
1241 }
1242
1243 if (test_and_clear_bit(BR_FDB_LOCKED, &fdb->flags))
1244 modified = true;
1245
1246 if (fdb_handle_notify(fdb, notify))
1247 modified = true;
1248
1249 WRITE_ONCE(fdb->used, jiffies);
1250 if (modified) {
1251 if (refresh)
1252 WRITE_ONCE(fdb->updated, jiffies);
1253 fdb_notify(br, fdb, RTM_NEWNEIGH, true);
1254 }
1255
1256 return 0;
1257 }
1258
static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
1260 struct net_bridge_port *p, const unsigned char *addr,
1261 u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
1262 bool *notified, struct netlink_ext_ack *extack)
1263 {
1264 int err = 0;
1265
1266 if (ndm->ndm_flags & NTF_USE) {
1267 if (!p) {
1268 pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
1269 br->dev->name);
1270 return -EINVAL;
1271 }
1272 if (!nbp_state_should_learn(p))
1273 return 0;
1274
1275 local_bh_disable();
1276 rcu_read_lock();
1277 br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
1278 rcu_read_unlock();
1279 local_bh_enable();
1280 } else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
1281 if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
1282 NL_SET_ERR_MSG_MOD(extack,
1283 "FDB entry towards bridge must be permanent");
1284 return -EINVAL;
1285 }
1286 err = br_fdb_external_learn_add(br, p, addr, vid, false, true);
1287 } else {
1288 spin_lock_bh(&br->hash_lock);
1289 err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
1290 spin_unlock_bh(&br->hash_lock);
1291 }
1292
1293 if (!err)
1294 *notified = true;
1295 return err;
1296 }
1297
1298 static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = {
1299 [NFEA_ACTIVITY_NOTIFY] = { .type = NLA_U8 },
1300 [NFEA_DONT_REFRESH] = { .type = NLA_FLAG },
1301 };
1302
1303 /* Add new permanent fdb entry with RTM_NEWNEIGH */
int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
1305 struct net_device *dev,
1306 const unsigned char *addr, u16 vid, u16 nlh_flags,
1307 bool *notified, struct netlink_ext_ack *extack)
1308 {
1309 struct nlattr *nfea_tb[NFEA_MAX + 1], *attr;
1310 struct net_bridge_vlan_group *vg;
1311 struct net_bridge_port *p = NULL;
1312 struct net_bridge_vlan *v;
1313 struct net_bridge *br = NULL;
1314 u32 ext_flags = 0;
1315 int err = 0;
1316
1317 trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags);
1318
1319 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
1320 pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
1321 return -EINVAL;
1322 }
1323
1324 if (is_zero_ether_addr(addr)) {
1325 pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
1326 return -EINVAL;
1327 }
1328
1329 if (netif_is_bridge_master(dev)) {
1330 br = netdev_priv(dev);
1331 vg = br_vlan_group(br);
1332 } else {
1333 p = br_port_get_rtnl(dev);
1334 if (!p) {
1335 pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
1336 dev->name);
1337 return -EINVAL;
1338 }
1339 br = p->br;
1340 vg = nbp_vlan_group(p);
1341 }
1342
1343 if (tb[NDA_FLAGS_EXT])
1344 ext_flags = nla_get_u32(tb[NDA_FLAGS_EXT]);
1345
1346 if (ext_flags & NTF_EXT_LOCKED) {
1347 NL_SET_ERR_MSG_MOD(extack, "Cannot add FDB entry with \"locked\" flag set");
1348 return -EINVAL;
1349 }
1350
1351 if (tb[NDA_FDB_EXT_ATTRS]) {
1352 attr = tb[NDA_FDB_EXT_ATTRS];
1353 err = nla_parse_nested(nfea_tb, NFEA_MAX, attr,
1354 br_nda_fdb_pol, extack);
1355 if (err)
1356 return err;
1357 } else {
1358 memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1));
1359 }
1360
1361 if (vid) {
1362 v = br_vlan_find(vg, vid);
1363 if (!v || !br_vlan_should_use(v)) {
1364 pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
1365 return -EINVAL;
1366 }
1367
1368 /* VID was specified, so use it. */
1369 err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
1370 notified, extack);
1371 } else {
1372 err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
1373 notified, extack);
1374 if (err || !vg || !vg->num_vlans)
1375 goto out;
1376
1377 /* We have vlans configured on this port and user didn't
1378 * specify a VLAN. To be nice, add/update entry for every
1379 * vlan on this port.
1380 */
1381 list_for_each_entry(v, &vg->vlan_list, vlist) {
1382 if (!br_vlan_should_use(v))
1383 continue;
1384 err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
1385 nfea_tb, notified, extack);
1386 if (err)
1387 goto out;
1388 }
1389 }
1390
1391 out:
1392 return err;
1393 }
1394
static int fdb_delete_by_addr_and_port(struct net_bridge *br,
1396 const struct net_bridge_port *p,
1397 const u8 *addr, u16 vlan, bool *notified)
1398 {
1399 struct net_bridge_fdb_entry *fdb;
1400
1401 fdb = br_fdb_find(br, addr, vlan);
1402 if (!fdb || READ_ONCE(fdb->dst) != p)
1403 return -ENOENT;
1404
1405 fdb_delete(br, fdb, true);
1406 *notified = true;
1407
1408 return 0;
1409 }
1410
static int __br_fdb_delete(struct net_bridge *br,
1412 const struct net_bridge_port *p,
1413 const unsigned char *addr, u16 vid, bool *notified)
1414 {
1415 int err;
1416
1417 spin_lock_bh(&br->hash_lock);
1418 err = fdb_delete_by_addr_and_port(br, p, addr, vid, notified);
1419 spin_unlock_bh(&br->hash_lock);
1420
1421 return err;
1422 }
1423
1424 /* Remove neighbor entry with RTM_DELNEIGH */
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
1426 struct net_device *dev,
1427 const unsigned char *addr, u16 vid, bool *notified,
1428 struct netlink_ext_ack *extack)
1429 {
1430 struct net_bridge_vlan_group *vg;
1431 struct net_bridge_port *p = NULL;
1432 struct net_bridge *br;
1433 int err;
1434
1435 if (netif_is_bridge_master(dev)) {
1436 br = netdev_priv(dev);
1437 vg = br_vlan_group(br);
1438 } else {
1439 p = br_port_get_rtnl(dev);
1440 if (!p) {
1441 pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
1442 dev->name);
1443 return -EINVAL;
1444 }
1445 vg = nbp_vlan_group(p);
1446 br = p->br;
1447 }
1448
1449 if (vid) {
1450 err = __br_fdb_delete(br, p, addr, vid, notified);
1451 } else {
1452 struct net_bridge_vlan *v;
1453
1454 err = -ENOENT;
1455 err &= __br_fdb_delete(br, p, addr, 0, notified);
1456 if (!vg || !vg->num_vlans)
1457 return err;
1458
1459 list_for_each_entry(v, &vg->vlan_list, vlist) {
1460 if (!br_vlan_should_use(v))
1461 continue;
1462 err &= __br_fdb_delete(br, p, addr, v->vid, notified);
1463 }
1464 }
1465
1466 return err;
1467 }
1468
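/* Sync all static FDB addresses into a port's unicast address list,
 * rolling back the already-added addresses on failure. Called under RTNL.
 */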
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
1470 {
1471 struct net_bridge_fdb_entry *f, *tmp;
1472 int err = 0;
1473
1474 ASSERT_RTNL();
1475
1476 /* the key here is that static entries change only under rtnl */
1477 rcu_read_lock();
1478 hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
1479 /* We only care for static entries */
1480 if (!test_bit(BR_FDB_STATIC, &f->flags))
1481 continue;
1482 err = dev_uc_add(p->dev, f->key.addr.addr);
1483 if (err)
1484 goto rollback;
1485 }
1486 done:
1487 rcu_read_unlock();
1488
1489 return err;
1490
1491 rollback:
1492 hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
1493 /* We only care for static entries */
1494 if (!test_bit(BR_FDB_STATIC, &tmp->flags))
1495 continue;
1496 if (tmp == f)
1497 break;
1498 dev_uc_del(p->dev, tmp->key.addr.addr);
1499 }
1500
1501 goto done;
1502 }
1503
void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
1505 {
1506 struct net_bridge_fdb_entry *f;
1507
1508 ASSERT_RTNL();
1509
1510 rcu_read_lock();
1511 hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
1512 /* We only care for static entries */
1513 if (!test_bit(BR_FDB_STATIC, &f->flags))
1514 continue;
1515
1516 dev_uc_del(p->dev, f->key.addr.addr);
1517 }
1518 rcu_read_unlock();
1519 }
1520
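/* Add or refresh an entry learned externally, e.g. by a switchdev driver
 * or from user space. swdev_notify controls whether the change is echoed
 * back to switchdev; locked entries require a MAB-enabled port.
 */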
int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
1522 const unsigned char *addr, u16 vid, bool locked,
1523 bool swdev_notify)
1524 {
1525 struct net_bridge_fdb_entry *fdb;
1526 bool modified = false;
1527 int err = 0;
1528
1529 trace_br_fdb_external_learn_add(br, p, addr, vid);
1530
1531 if (locked && (!p || !(p->flags & BR_PORT_MAB)))
1532 return -EINVAL;
1533
1534 spin_lock_bh(&br->hash_lock);
1535
1536 fdb = br_fdb_find(br, addr, vid);
1537 if (!fdb) {
1538 unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);
1539
1540 if (swdev_notify)
1541 flags |= BIT(BR_FDB_ADDED_BY_USER);
1542
1543 if (!p)
1544 flags |= BIT(BR_FDB_LOCAL);
1545
1546 if (locked)
1547 flags |= BIT(BR_FDB_LOCKED);
1548
1549 fdb = fdb_create(br, p, addr, vid, flags);
1550 if (!fdb) {
1551 err = -ENOMEM;
1552 goto err_unlock;
1553 }
1554 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
1555 } else {
1556 if (locked &&
1557 (!test_bit(BR_FDB_LOCKED, &fdb->flags) ||
1558 READ_ONCE(fdb->dst) != p)) {
1559 err = -EINVAL;
1560 goto err_unlock;
1561 }
1562
1563 WRITE_ONCE(fdb->updated, jiffies);
1564
1565 if (READ_ONCE(fdb->dst) != p) {
1566 WRITE_ONCE(fdb->dst, p);
1567 modified = true;
1568 }
1569
1570 if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
1571 /* Refresh entry */
1572 WRITE_ONCE(fdb->used, jiffies);
1573 } else {
1574 modified = true;
1575 }
1576
1577 if (locked != test_bit(BR_FDB_LOCKED, &fdb->flags)) {
1578 change_bit(BR_FDB_LOCKED, &fdb->flags);
1579 modified = true;
1580 }
1581
1582 if (swdev_notify)
1583 set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
1584
1585 if (!p)
1586 set_bit(BR_FDB_LOCAL, &fdb->flags);
1587
1588 if ((swdev_notify || !p) &&
1589 test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &fdb->flags))
1590 atomic_dec(&br->fdb_n_learned);
1591
1592 if (modified)
1593 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
1594 }
1595
1596 err_unlock:
1597 spin_unlock_bh(&br->hash_lock);
1598
1599 return err;
1600 }
1601
int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
1603 const unsigned char *addr, u16 vid,
1604 bool swdev_notify)
1605 {
1606 struct net_bridge_fdb_entry *fdb;
1607 int err = 0;
1608
1609 spin_lock_bh(&br->hash_lock);
1610
1611 fdb = br_fdb_find(br, addr, vid);
1612 if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
1613 fdb_delete(br, fdb, swdev_notify);
1614 else
1615 err = -ENOENT;
1616
1617 spin_unlock_bh(&br->hash_lock);
1618
1619 return err;
1620 }
1621
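/* Set or clear the offloaded flag on an existing entry, typically in
 * response to a driver (un)installing the entry in hardware.
 */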
void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
1623 const unsigned char *addr, u16 vid, bool offloaded)
1624 {
1625 struct net_bridge_fdb_entry *fdb;
1626
1627 spin_lock_bh(&br->hash_lock);
1628
1629 fdb = br_fdb_find(br, addr, vid);
1630 if (fdb && offloaded != test_bit(BR_FDB_OFFLOADED, &fdb->flags))
1631 change_bit(BR_FDB_OFFLOADED, &fdb->flags);
1632
1633 spin_unlock_bh(&br->hash_lock);
1634 }
1635
void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
1637 {
1638 struct net_bridge_fdb_entry *f;
1639 struct net_bridge_port *p;
1640
1641 ASSERT_RTNL();
1642
1643 p = br_port_get_rtnl(dev);
1644 if (!p)
1645 return;
1646
1647 spin_lock_bh(&p->br->hash_lock);
1648 hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
1649 if (f->dst == p && f->key.vlan_id == vid)
1650 clear_bit(BR_FDB_OFFLOADED, &f->flags);
1651 }
1652 spin_unlock_bh(&p->br->hash_lock);
1653 }
1654 EXPORT_SYMBOL_GPL(br_fdb_clear_offload);
1655