// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"
#include "br_private_tunnel.h"

static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);

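/* rhashtable comparison callback: a VLAN entry matches when its VID equals
 * the lookup key.
 */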
static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
{
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}

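/* Set @v as the pvid of the group. The write barrier orders the pvid update
 * against the lockless readers of vg->pvid (see br_get_pvid()).
 */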
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg,
			    const struct net_bridge_vlan *v)
{
	if (vg->pvid == v->vid)
		return;

	smp_wmb();
	br_vlan_set_pvid_state(vg, v->state);
	vg->pvid = v->vid;
}

static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return;

	smp_wmb();
	vg->pvid = 0;
}

/* Update the BRIDGE_VLAN_INFO_PVID and BRIDGE_VLAN_INFO_UNTAGGED flags of @v.
 * If @commit is false, only return whether the BRIDGE_VLAN_INFO_PVID and
 * BRIDGE_VLAN_INFO_UNTAGGED bits of @flags would produce any change on @v.
 */
static bool __vlan_flags_update(struct net_bridge_vlan *v, u16 flags,
				bool commit)
{
	struct net_bridge_vlan_group *vg;
	bool change;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	/* check if anything would be changed on commit */
	change = !!(flags & BRIDGE_VLAN_INFO_PVID) == !!(vg->pvid != v->vid) ||
		 ((flags ^ v->flags) & BRIDGE_VLAN_INFO_UNTAGGED);

	if (!commit)
		goto out;

	if (flags & BRIDGE_VLAN_INFO_PVID)
		__vlan_add_pvid(vg, v);
	else
		__vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;

out:
	return change;
}

static bool __vlan_flags_would_change(struct net_bridge_vlan *v, u16 flags)
{
	return __vlan_flags_update(v, flags, false);
}

static void __vlan_flags_commit(struct net_bridge_vlan *v, u16 flags)
{
	__vlan_flags_update(v, flags, true);
}

static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  struct net_bridge_vlan *v, u16 flags,
			  struct netlink_ext_ack *extack)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q add.
	 */
	err = br_switchdev_port_vlan_add(dev, v->vid, flags, false, extack);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, v->vid);
	v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
	return err;
}

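/* Keep vg->vlan_list sorted by VID: walk backwards from the tail to find
 * the insertion point for the new entry.
 */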
static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid >= vent->vid)
			break;
	}
	list_add_rcu(&v->vlist, hpos);
}

static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}

static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  const struct net_bridge_vlan *v)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q del.
	 */
	err = br_switchdev_port_vlan_del(dev, v->vid);
	if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
		vlan_vid_del(dev, br->vlan_proto, v->vid);
	return err == -EOPNOTSUPP ? 0 : err;
}

/* Returns the master vlan; if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *
br_vlan_get_master(struct net_bridge *br, u16 vid,
		   struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		bool changed;

		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0, &changed, extack))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
		refcount_set(&masterv->refcnt, 1);
		return masterv;
	}
	refcount_inc(&masterv->refcnt);

	return masterv;
}

static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

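/* Drop a reference to the master vlan; on the last put, unlink it from the
 * bridge's vlan group and free it after an RCU grace period.
 */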
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (refcount_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		br_multicast_toggle_one_vlan(masterv, false);
		br_multicast_ctx_deinit(&masterv->br_mcast_ctx);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}

static void nbp_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(br_vlan_is_master(v));
	/* if we had per-port stats configured then free them here */
	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
		free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

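/* Set the initial forwarding state of a new vlan: delegate to MST when it
 * is enabled on the bridge, otherwise start out forwarding in MSTI 0.
 */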
static void br_vlan_init_state(struct net_bridge_vlan *v)
{
	struct net_bridge *br;

	if (br_vlan_is_master(v))
		br = v->br;
	else
		br = v->port->br;

	if (br_opt_get(br, BROPT_MST_ENABLED)) {
		br_mst_vlan_init_state(v);
		return;
	}

	v->state = BR_STATE_FORWARDING;
	v->msti = 0;
}

/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
		      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		vg = br_vlan_group(br);
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		vg = nbp_vlan_group(p);
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v, flags, extack);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			bool changed;

			err = br_vlan_add(br, v->vid,
					  flags | BRIDGE_VLAN_INFO_BRENTRY,
					  &changed, extack);
			if (err)
				goto out_filt;

			if (changed)
				br_vlan_notify(br, NULL, v->vid, 0,
					       RTM_NEWVLAN);
		}

		masterv = br_vlan_get_master(br, v->vid, extack);
		if (!masterv) {
			err = -ENOMEM;
			goto out_filt;
		}
		v->brvlan = masterv;
		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
			v->stats =
			     netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
			if (!v->stats) {
				err = -ENOMEM;
				goto out_filt;
			}
			v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
		} else {
			v->stats = masterv->stats;
		}
		br_multicast_port_ctx_init(p, v, &v->port_mcast_ctx);
	} else {
		if (br_vlan_should_use(v)) {
			err = br_switchdev_port_vlan_add(dev, v->vid, flags,
							 false, extack);
			if (err && err != -EOPNOTSUPP)
				goto out;
		}
		br_multicast_ctx_init(br, v, &v->br_mcast_ctx);
		v->priv_flags |= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	}

	/* Add the dev mac and count the vlan only if it's usable */
	if (br_vlan_should_use(v)) {
		if (!br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0)) {
			err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
			if (err) {
				br_err(br, "failed to insert local address into bridge forwarding table\n");
				goto out_filt;
			}
		}
		vg->num_vlans++;
	}

	/* set the state before publishing */
	br_vlan_init_state(v);

	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_flags_commit(v, flags);
	br_multicast_toggle_one_vlan(v, true);

	if (p)
		nbp_vlan_set_vlan_dev_state(p, v->vid);
out:
	return err;

out_fdb_insert:
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v);
		if (masterv) {
			if (v->stats && masterv->stats != v->stats)
				free_percpu(v->stats);
			v->stats = NULL;

			br_vlan_put_master(masterv);
			v->brvlan = NULL;
		}
	} else {
		br_switchdev_port_vlan_del(dev, v->vid);
	}

	goto out;
}

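/* Tear down a vlan entry: retract it as pvid, remove it from the device
 * filter and the vlan group; non-master entries are also unlinked and freed
 * via RCU, and the reference on the master vlan is dropped.
 */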
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v);
		if (err)
			goto out;
	} else {
		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
		if (err && err != -EOPNOTSUPP)
			goto out;
		err = 0;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		nbp_vlan_set_vlan_dev_state(p, v->vid);
		br_multicast_toggle_one_vlan(v, false);
		br_multicast_port_ctx_deinit(&v->port_mcast_ctx);
		call_rcu(&v->rcu, nbp_vlan_rcu_free);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}

static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}

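/* Delete all vlans from the group, coalescing consecutive VIDs so a single
 * RTM_DELVLAN notification covers each contiguous range.
 */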
static void __vlan_flush(const struct net_bridge *br,
			 const struct net_bridge_port *p,
			 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;
	u16 v_start = 0, v_end = 0;
	int err;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
		/* take care of disjoint ranges */
		if (!v_start) {
			v_start = vlan->vid;
		} else if (vlan->vid - v_end != 1) {
			/* found range end, notify and start next one */
			br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
			v_start = vlan->vid;
		}
		v_end = vlan->vid;

		err = __vlan_del(vlan);
		if (err) {
			br_err(br,
			       "port %u(%s) failed to delete vlan %d: %pe\n",
			       (unsigned int) p->port_no, p->dev->name,
			       vlan->vid, ERR_PTR(err));
		}
	}

	/* notify about the last/whole vlan range */
	if (v_start)
		br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
}

struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id. If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point. The
	 * only exception is when the bridge is set in promisc mode and the
	 * packet is destined for the bridge device. In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		u64_stats_add(&stats->tx_bytes, skb->len);
		u64_stats_inc(&stats->tx_packets);
		u64_stats_update_end(&stats->syncp);
	}

	/* If the skb will be sent using forwarding offload, the assumption is
	 * that the switchdev will inject the packet into hardware together
	 * with the bridge VLAN, so that it can be forwarded according to that
	 * VLAN. The switchdev should deal with popping the VLAN header in
	 * hardware on each egress port as appropriate. So only strip the VLAN
	 * header if forwarding offload is not being used.
	 */
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED &&
	    !br_switchdev_frame_uses_tx_fwd_offload(skb))
		__vlan_hwaccel_clear_tag(skb);

	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}

/* Called under RCU */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid,
			      u8 *state,
			      struct net_bridge_vlan **vlan)
{
	struct pcpu_sw_netstats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port. That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port. Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, we know that skb->vlan_tci VID
			 * field was 0.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if snooping and stats are disabled we can avoid the lookup */
		if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
		    !br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
			if (*state == BR_STATE_FORWARDING) {
				*state = br_vlan_get_pvid_state(vg);
				if (!br_vlan_state_allowed(*state, true))
					goto drop;
			}
			return true;
		}
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (*state == BR_STATE_FORWARDING) {
		*state = br_vlan_get_state(v);
		if (!br_vlan_state_allowed(*state, true))
			goto drop;
	}

	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		u64_stats_add(&stats->rx_bytes, skb->len);
		u64_stats_inc(&stats->rx_packets);
		u64_stats_update_end(&stats->syncp);
	}

	*vlan = v;

	return true;

drop:
	kfree_skb(skb);
	return false;
}

bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid, u8 *state,
			struct net_bridge_vlan **vlan)
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	*vlan = NULL;
	if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(br, vg, skb, vid, state, vlan);
}

/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
		       const struct sk_buff *skb)
{
	const struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		return true;

	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v) &&
	    br_vlan_state_allowed(br_vlan_get_state(v), false))
		return true;

	return false;
}

/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;
	struct net_bridge_vlan *v;

	/* If filtering was disabled at input, let it pass. */
	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return true;

	vg = nbp_vlan_group_rcu(p);
	if (!vg || !vg->num_vlans)
		return false;

	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid ||
		    !br_vlan_state_allowed(br_vlan_get_pvid_state(vg), true))
			return false;

		return true;
	}

	v = br_vlan_find(vg, *vid);
	if (v && br_vlan_state_allowed(br_vlan_get_state(v), true))
		return true;

	return false;
}

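/* Handle br_vlan_add() for a vlan which already has a master entry: promote
 * it to a real bridge entry and/or update its flags, notifying switchdev of
 * any effective change.
 */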
static int br_vlan_add_existing(struct net_bridge *br,
				struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *vlan,
				u16 flags, bool *changed,
				struct netlink_ext_ack *extack)
{
	bool becomes_brentry = false;
	bool would_change = false;
	int err;

	if (!br_vlan_is_brentry(vlan)) {
		/* Trying to change flags of non-existent bridge vlan */
		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
			return -EINVAL;

		becomes_brentry = true;
	} else {
		would_change = __vlan_flags_would_change(vlan, flags);
	}

	/* Master VLANs that aren't brentries weren't notified before,
	 * time to notify them now.
	 */
	if (becomes_brentry || would_change) {
		err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags,
						 would_change, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	if (becomes_brentry) {
		/* It was only kept for port vlans, now make it real */
		err = br_fdb_add_local(br, NULL, br->dev->dev_addr, vlan->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto err_fdb_insert;
		}

		refcount_inc(&vlan->refcnt);
		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans++;
		*changed = true;
		br_multicast_toggle_one_vlan(vlan, true);
	}

	__vlan_flags_commit(vlan, flags);
	if (would_change)
		*changed = true;

	return 0;

err_fdb_insert:
	br_switchdev_port_vlan_del(br->dev, vlan->vid);
	return err;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * @changed is set to true only if the vlan was created or updated.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
		struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan)
		return br_vlan_add_existing(br, vg, vlan, flags, changed,
					    extack);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		refcount_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags, extack);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	} else {
		*changed = true;
	}

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	vlan_tunnel_info_del(vg, v);

	return __vlan_del(v);
}

void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(br, NULL, vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_net();
	__vlan_group_free(vg);
}

struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (!vg)
		return NULL;

	return br_vlan_lookup(&vg->vlan_hash, vid);
}

/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
		return;

	spin_lock_bh(&br->lock);
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}

/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}

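/* Toggle vlan filtering on the bridge, propagating the new setting to
 * switchdev and rolling the option back on hard failure; disabling
 * filtering also turns off multicast vlan snooping.
 */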
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val,
			  struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
		return 0;

	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);

	err = switchdev_port_attr_set(br->dev, &attr, extack);
	if (err && err != -EOPNOTSUPP) {
		br_opt_toggle(br, BROPT_VLAN_ENABLED, !val);
		return err;
	}

	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);
	if (!val && br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		br_info(br, "vlan filtering disabled, automatically disabling multicast vlan snooping\n");
		br_multicast_toggle_vlan_snooping(br, false, NULL);
	}

	return 0;
}

bool br_vlan_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_opt_get(br, BROPT_VLAN_ENABLED);
}
EXPORT_SYMBOL_GPL(br_vlan_enabled);

int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
{
	struct net_bridge *br = netdev_priv(dev);

	*p_proto = ntohs(br->vlan_proto);

	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_proto);

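/* Switch the bridge between vlan protocols (802.1Q/802.1ad): program the
 * new protocol into switchdev and the port device filters first, then drop
 * the old-protocol filter entries; on failure everything is unwound.
 */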
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto,
			struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_protocol = ntohs(proto),
	};
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto = br->vlan_proto;

	if (br->vlan_proto == proto)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
				continue;
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
				continue;
			vlan_vid_del(p->dev, oldproto, vlan->vid);
		}
	}

	return 0;

err_filt:
	attr.u.vlan_protocol = ntohs(oldproto);
	switchdev_port_attr_set(br->dev, &attr, NULL);

	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist) {
		if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
			continue;
		vlan_vid_del(p->dev, proto, vlan->vid);
	}

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
				continue;
			vlan_vid_del(p->dev, proto, vlan->vid);
		}
	}

	return err;
}

int br_vlan_set_proto(struct net_bridge *br, unsigned long val,
		      struct netlink_ext_ack *extack)
{
	if (!eth_type_vlan(htons(val)))
		return -EPROTONOSUPPORT;

	return __br_vlan_set_proto(br, htons(val), extack);
}

int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
{
	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *p;

	/* only allow changing the option if there are no port vlans
	 * configured
	 */
	list_for_each_entry(p, &br->port_list, list) {
		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);

		if (vg->num_vlans)
			return -EBUSY;
	}

	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

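/* Check whether @vid is the pvid of @vg and is still a usable untagged
 * entry, i.e. the default_pvid configuration is still in place.
 */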
static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	struct net_bridge_vlan *v;

	if (vid != vg->pvid)
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
}

static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid)) {
		if (!br_vlan_delete(br, pvid))
			br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
		    !nbp_vlan_delete(p, pvid))
			br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	br->default_pvid = 0;
}

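/* Move the default pvid to @pvid on the bridge and on every port that still
 * uses the default configuration, tracking which devices changed in a
 * bitmap so that a mid-way failure can be fully rolled back.
 */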
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
			       struct netlink_ext_ack *extack)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	unsigned long *changed;
	bool vlchange;
	u16 old_pvid;
	int err = 0;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY,
				  &vlchange, extack);
		if (err)
			goto out;

		if (br_vlan_delete(br, old_pvid))
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
		__set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &vlchange, extack);
		if (err)
			goto err_port;
		if (nbp_vlan_delete(p, old_pvid))
			br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
		__set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	bitmap_free(changed);
	return err;

err_port:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid) {
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED,
				     &vlchange, NULL);
			br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
		}
		nbp_vlan_delete(p, pvid);
		br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	if (test_bit(0, changed)) {
		if (old_pvid) {
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY,
				    &vlchange, NULL);
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
		}
		br_vlan_delete(br, pvid);
		br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}
	goto out;
}

int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val,
			     struct netlink_ext_ack *extack)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (pvid == br->default_pvid)
		goto out;

	/* Only allow default pvid change when filtering is disabled */
	if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto out;
	}
	err = __br_vlan_set_default_pvid(br, pvid, extack);
out:
	return err;
}

int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	rcu_assign_pointer(br->vlgrp, vg);

out:
	return ret;

err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}

int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr, extack);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		bool changed;

		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &changed, extack);
		if (ret)
			goto err_vlan_add;
		br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
	}
out:
	return ret;

err_vlan_add:
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);

	goto out;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * @changed is set to true only if the vlan was created or updated.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
		 bool *changed, struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		bool would_change = __vlan_flags_would_change(vlan, flags);

		if (would_change) {
			/* Pass the flags to the hardware bridge */
			ret = br_switchdev_port_vlan_add(port->dev, vid, flags,
							 true, extack);
			if (ret && ret != -EOPNOTSUPP)
				return ret;
		}

		__vlan_flags_commit(vlan, flags);
		*changed = would_change;

		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags, extack);
	if (ret)
		kfree(vlan);
	else
		*changed = true;

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(nbp_vlan_group(port), vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}

void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(port->br, port, vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_net();
	__vlan_group_free(vg);
}

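/* Sum the per-CPU counters of @v into @stats, using the u64_stats seqcount
 * to get a consistent snapshot from each CPU.
 */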
void br_vlan_get_stats(const struct net_bridge_vlan *v,
		       struct pcpu_sw_netstats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		struct pcpu_sw_netstats *cpu_stats;
		unsigned int start;

		cpu_stats = per_cpu_ptr(v->stats, i);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			rxpackets = u64_stats_read(&cpu_stats->rx_packets);
			rxbytes = u64_stats_read(&cpu_stats->rx_bytes);
			txbytes = u64_stats_read(&cpu_stats->tx_bytes);
			txpackets = u64_stats_read(&cpu_stats->tx_packets);
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		u64_stats_add(&stats->rx_packets, rxpackets);
		u64_stats_add(&stats->rx_bytes, rxbytes);
		u64_stats_add(&stats->tx_bytes, txbytes);
		u64_stats_add(&stats->tx_packets, txpackets);
	}
}

int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid);

int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	p = br_port_get_check_rcu(dev);
	if (p)
		vg = nbp_vlan_group_rcu(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group_rcu(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);

void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
				    struct net_device_path_ctx *ctx,
				    struct net_device_path *path)
{
	struct net_bridge_vlan_group *vg;
	int idx = ctx->num_vlans - 1;
	u16 vid;

	path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;

	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return;

	vg = br_vlan_group_rcu(br);

	if (idx >= 0 &&
	    ctx->vlan[idx].proto == br->vlan_proto) {
		vid = ctx->vlan[idx].id;
	} else {
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_TAG;
		vid = br_get_pvid(vg);
	}

	path->bridge.vlan_id = vid;
	path->bridge.vlan_proto = br->vlan_proto;
}

int br_vlan_fill_forward_path_mode(struct net_bridge *br,
				   struct net_bridge_port *dst,
				   struct net_device_path *path)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return 0;

	vg = nbp_vlan_group_rcu(dst);
	v = br_vlan_find(vg, path->bridge.vlan_id);
	if (!v || !br_vlan_should_use(v))
		return -EINVAL;

	if (!(v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return 0;

	if (path->bridge.vlan_mode == DEV_PATH_BR_VLAN_TAG)
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
	else if (v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG_HW;
	else
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG;

	return 0;
}

int br_vlan_get_info(const struct net_device *dev, u16 vid,
		     struct bridge_vlan_info *p_vinfo)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	v = br_vlan_find(vg, vid);
	if (!v)
		return -ENOENT;

	p_vinfo->vid = vid;
	p_vinfo->flags = v->flags;
	if (vid == br_get_pvid(vg))
		p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info);

int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
			 struct bridge_vlan_info *p_vinfo)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge_port *p;

	p = br_port_get_check_rcu(dev);
	if (p)
		vg = nbp_vlan_group_rcu(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group_rcu(netdev_priv(dev));
	else
		return -EINVAL;

	v = br_vlan_find(vg, vid);
	if (!v)
		return -ENOENT;

	p_vinfo->vid = vid;
	p_vinfo->flags = v->flags;
	if (vid == br_get_pvid(vg))
		p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info_rcu);

static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
		!!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
}

static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
			       __always_unused struct netdev_nested_priv *priv)
{
	return br_vlan_is_bind_vlan_dev(dev);
}

static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
{
	int found;

	rcu_read_lock();
	found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
					      NULL);
	rcu_read_unlock();

	return !!found;
}

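/* Walk data and helpers to find the bridge-binding vlan upper device of
 * @dev with vlan id @vid, if one exists.
 */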
struct br_vlan_bind_walk_data {
	u16 vid;
	struct net_device *result;
};

static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
					  struct netdev_nested_priv *priv)
{
	struct br_vlan_bind_walk_data *data = priv->data;
	int found = 0;

	if (br_vlan_is_bind_vlan_dev(dev) &&
	    vlan_dev_priv(dev)->vlan_id == data->vid) {
		data->result = dev;
		found = 1;
	}

	return found;
}

static struct net_device *
br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
{
	struct br_vlan_bind_walk_data data = {
		.vid = vid,
	};
	struct netdev_nested_priv priv = {
		.data = (void *)&data,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
				      &priv);
	rcu_read_unlock();

	return data.result;
}

static bool br_vlan_is_dev_up(const struct net_device *dev)
{
	return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
}

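/* Set the carrier of a bridge-binding vlan device: on only when the bridge
 * itself has carrier and at least one bridge port that is a member of the
 * vlan is up.
 */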
static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
				       struct net_device *vlan_dev)
{
	u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	bool has_carrier = false;

	if (!netif_carrier_ok(br->dev)) {
		netif_carrier_off(vlan_dev);
		return;
	}

	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
			has_carrier = true;
			break;
		}
	}

	if (has_carrier)
		netif_carrier_on(vlan_dev);
	else
		netif_carrier_off(vlan_dev);
}

static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
{
	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
	struct net_bridge_vlan *vlan;
	struct net_device *vlan_dev;

	list_for_each_entry(vlan, &vg->vlan_list, vlist) {
		vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
							   vlan->vid);
		if (vlan_dev) {
			if (br_vlan_is_dev_up(p->dev)) {
				if (netif_carrier_ok(p->br->dev))
					netif_carrier_on(vlan_dev);
			} else {
				br_vlan_set_vlan_dev_state(p->br, vlan_dev);
			}
		}
	}
}

static void br_vlan_toggle_bridge_binding(struct net_device *br_dev,
					  bool enable)
{
	struct net_bridge *br = netdev_priv(br_dev);

	if (enable)
		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
	else
		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
			      br_vlan_has_upper_bind_vlan_dev(br_dev));
}

static void br_vlan_upper_change(struct net_device *dev,
				 struct net_device *upper_dev,
				 bool linking)
{
	struct net_bridge *br = netdev_priv(dev);

	if (!br_vlan_is_bind_vlan_dev(upper_dev))
		return;

	br_vlan_toggle_bridge_binding(dev, linking);
	if (linking)
		br_vlan_set_vlan_dev_state(br, upper_dev);
}

struct br_vlan_link_state_walk_data {
	struct net_bridge *br;
};

static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
					struct netdev_nested_priv *priv)
{
	struct br_vlan_link_state_walk_data *data = priv->data;

	if (br_vlan_is_bind_vlan_dev(vlan_dev))
		br_vlan_set_vlan_dev_state(data->br, vlan_dev);

	return 0;
}

static void br_vlan_link_state_change(struct net_device *dev,
				      struct net_bridge *br)
{
	struct br_vlan_link_state_walk_data data = {
		.br = br
	};
	struct netdev_nested_priv priv = {
		.data = (void *)&data,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
				      &priv);
	rcu_read_unlock();
}

/* Must be protected by RTNL. */
static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
{
	struct net_device *vlan_dev;

	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
		return;

	vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
	if (vlan_dev)
		br_vlan_set_vlan_dev_state(p->br, vlan_dev);
}

/* Must be protected by RTNL. */
int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_bridge *br = netdev_priv(dev);
	int vlcmd = 0, ret = 0;
	bool changed = false;

	switch (event) {
	case NETDEV_REGISTER:
		ret = br_vlan_add(br, br->default_pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
		vlcmd = RTM_NEWVLAN;
		break;
	case NETDEV_UNREGISTER:
		changed = !br_vlan_delete(br, br->default_pvid);
		vlcmd = RTM_DELVLAN;
		break;
	case NETDEV_CHANGEUPPER:
		info = ptr;
		br_vlan_upper_change(dev, info->upper_dev, info->linking);
		break;

	case NETDEV_CHANGE:
	case NETDEV_UP:
		if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
			break;
		br_vlan_link_state_change(dev, br);
		break;
	}
	if (changed)
		br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);

	return ret;
}

void br_vlan_vlan_upper_event(struct net_device *br_dev,
			      struct net_device *vlan_dev,
			      unsigned long event)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlan_dev);
	struct net_bridge *br = netdev_priv(br_dev);
	bool bridge_binding;

	switch (event) {
	case NETDEV_CHANGE:
	case NETDEV_UP:
		break;
	default:
		return;
	}

	bridge_binding = vlan->flags & VLAN_FLAG_BRIDGE_BINDING;
	br_vlan_toggle_bridge_binding(br_dev, bridge_binding);
	if (bridge_binding)
		br_vlan_set_vlan_dev_state(br, vlan_dev);
	else if (netif_carrier_ok(br_dev))
		netif_carrier_on(vlan_dev);
}

/* Must be protected by RTNL. */
void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
{
	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
		return;

	switch (event) {
	case NETDEV_CHANGE:
	case NETDEV_DOWN:
	case NETDEV_UP:
		br_vlan_set_all_vlan_dev_state(p);
		break;
	}
}

static bool br_vlan_stats_fill(struct sk_buff *skb,
			       const struct net_bridge_vlan *v)
{
	struct pcpu_sw_netstats stats;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
	if (!nest)
		return false;

	br_vlan_get_stats(v, &stats);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES,
			      u64_stats_read(&stats.rx_bytes),
			      BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
			      u64_stats_read(&stats.rx_packets),
			      BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES,
			      u64_stats_read(&stats.tx_bytes),
			      BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
			      u64_stats_read(&stats.tx_packets),
			      BRIDGE_VLANDB_STATS_PAD))
		goto out_err;

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

/* v_opts is used to dump the options which must be equal in the whole range */
static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
			      const struct net_bridge_vlan *v_opts,
			      const struct net_bridge_port *p,
			      u16 flags,
			      bool dump_stats)
{
	struct bridge_vlan_info info;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
	if (!nest)
		return false;

	memset(&info, 0, sizeof(info));
	info.vid = vid;
	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	if (flags & BRIDGE_VLAN_INFO_PVID)
		info.flags |= BRIDGE_VLAN_INFO_PVID;

	if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
		goto out_err;

	if (vid_range && vid < vid_range &&
	    !(flags & BRIDGE_VLAN_INFO_PVID) &&
	    nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
		goto out_err;

	if (v_opts) {
		if (!br_vlan_opts_fill(skb, v_opts, p))
			goto out_err;

		if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
			goto out_err;
	}

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

static size_t rtnl_vlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
		+ nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
		+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
		+ nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */
		+ br_vlan_opts_nl_size(); /* bridge vlan options */
}

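/* Send an RTM_NEWVLAN/RTM_DELVLAN notification to RTNLGRP_BRVLAN listeners
 * for the vlan range [@vid, @vid_range] on port @p, or on the bridge
 * itself when @p is NULL.
 */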
void br_vlan_notify(const struct net_bridge *br,
		    const struct net_bridge_port *p,
		    u16 vid, u16 vid_range,
		    int cmd)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v = NULL;
	struct br_vlan_msg *bvm;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 flags = 0;
	int ifindex;

	/* right now notifications are done only with rtnl held */
	ASSERT_RTNL();

	if (p) {
		ifindex = p->dev->ifindex;
		vg = nbp_vlan_group(p);
		net = dev_net(p->dev);
	} else {
		ifindex = br->dev->ifindex;
		vg = br_vlan_group(br);
		net = dev_net(br->dev);
	}

	skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		goto out_err;

	err = -EMSGSIZE;
	nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
	if (!nlh)
		goto out_err;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = AF_BRIDGE;
	bvm->ifindex = ifindex;

	switch (cmd) {
	case RTM_NEWVLAN:
		/* need to find the vlan due to flags/options */
		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v))
			goto out_kfree;

		flags = v->flags;
		if (br_get_pvid(vg) == v->vid)
			flags |= BRIDGE_VLAN_INFO_PVID;
		break;
	case RTM_DELVLAN:
		break;
	default:
		goto out_kfree;
	}

	if (!br_vlan_fill_vids(skb, vid, vid_range, v, p, flags, false))
		goto out_err;

	nlmsg_end(skb, nlh);
	rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
	return;

out_err:
	rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
out_kfree:
	kfree_skb(skb);
}

/* check if v_curr can enter a range ending in range_end */
bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
			     const struct net_bridge_vlan *range_end)
{
	return v_curr->vid - range_end->vid == 1 &&
	       range_end->flags == v_curr->flags &&
	       br_vlan_opts_eq_range(v_curr, range_end);
}

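/* Dump the vlan db of one bridge or port device into @skb. Consecutive
 * vlans with equal flags and options are compressed into ranges via
 * br_vlan_can_enter_range() above: e.g. (hypothetical config) vlans 10..14
 * with identical flags and options are emitted as one entry with vid 10
 * and BRIDGE_VLANDB_ENTRY_RANGE 14. Partial dumps resume from cb->args[1].
 */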
static int br_vlan_dump_dev(const struct net_device *dev,
			    struct sk_buff *skb,
			    struct netlink_callback *cb,
			    u32 dump_flags)
{
	struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
	bool dump_global = !!(dump_flags & BRIDGE_VLANDB_DUMPF_GLOBAL);
	bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
	struct net_bridge_vlan_group *vg;
	int idx = 0, s_idx = cb->args[1];
	struct nlmsghdr *nlh = NULL;
	struct net_bridge_port *p;
	struct br_vlan_msg *bvm;
	struct net_bridge *br;
	int err = 0;
	u16 pvid;

	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
		return -EINVAL;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
		p = NULL;
	} else {
		/* global options are dumped only for bridge devices */
		if (dump_global)
			return 0;

		p = br_port_get_rcu(dev);
		if (WARN_ON(!p))
			return -EINVAL;
		vg = nbp_vlan_group_rcu(p);
		br = p->br;
	}

	if (!vg)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = PF_BRIDGE;
	bvm->ifindex = dev->ifindex;
	pvid = br_get_pvid(vg);

	/* idx must stay at range's beginning until it is filled in */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!dump_global && !br_vlan_should_use(v))
			continue;
		if (idx < s_idx) {
			idx++;
			continue;
		}

		if (!range_start) {
			range_start = v;
			range_end = v;
			continue;
		}

		if (dump_global) {
			if (br_vlan_global_opts_can_enter_range(v, range_end))
				goto update_end;
			if (!br_vlan_global_opts_fill(skb, range_start->vid,
						      range_end->vid,
						      range_start)) {
				err = -EMSGSIZE;
				break;
			}
			/* advance number of filled vlans */
			idx += range_end->vid - range_start->vid + 1;

			range_start = v;
		} else if (dump_stats || v->vid == pvid ||
			   !br_vlan_can_enter_range(v, range_end)) {
			u16 vlan_flags = br_vlan_flags(range_start, pvid);

			if (!br_vlan_fill_vids(skb, range_start->vid,
					       range_end->vid, range_start,
					       p, vlan_flags, dump_stats)) {
				err = -EMSGSIZE;
				break;
			}
			/* advance number of filled vlans */
			idx += range_end->vid - range_start->vid + 1;

			range_start = v;
		}
update_end:
		range_end = v;
	}

	/* err will be 0 and range_start will be set in 3 cases here:
	 * - first vlan (range_start == range_end)
	 * - last vlan (range_start == range_end, not in range)
	 * - last vlan range (range_start != range_end, in range)
	 */
	if (!err && range_start) {
		if (dump_global &&
		    !br_vlan_global_opts_fill(skb, range_start->vid,
					      range_end->vid, range_start))
			err = -EMSGSIZE;
		else if (!dump_global &&
			 !br_vlan_fill_vids(skb, range_start->vid,
					    range_end->vid, range_start,
					    p, br_vlan_flags(range_start, pvid),
					    dump_stats))
			err = -EMSGSIZE;
	}

	cb->args[1] = err ? idx : 0;

	nlmsg_end(skb, nlh);

	return err;
}

static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
	[BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
};

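/* RTM_GETVLAN dump handler: dumps a single device when bvm->ifindex is
 * set, otherwise walks all net devices, keeping the resume point in
 * cb->args[0].
 */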
static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
	int idx = 0, err = 0, s_idx = cb->args[0];
	struct net *net = sock_net(skb->sk);
	struct br_vlan_msg *bvm;
	struct net_device *dev;
	u32 dump_flags = 0;

	err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
			  br_vlan_db_dump_pol, cb->extack);
	if (err < 0)
		return err;

	bvm = nlmsg_data(cb->nlh);
	if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
		dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);

	rcu_read_lock();
	if (bvm->ifindex) {
		dev = dev_get_by_index_rcu(net, bvm->ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out_err;
		}
		err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
		/* if the dump completed without an error we return 0 here */
		if (err != -EMSGSIZE)
			goto out_err;
	} else {
		for_each_netdev_rcu(net, dev) {
			if (idx < s_idx)
				goto skip;

			err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
			if (err == -EMSGSIZE)
				break;
skip:
			idx++;
		}
	}
	cb->args[0] = idx;
	rcu_read_unlock();

	return skb->len;

out_err:
	rcu_read_unlock();

	return err;
}

static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
	[BRIDGE_VLANDB_ENTRY_INFO] =
		NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
	[BRIDGE_VLANDB_ENTRY_RANGE] = { .type = NLA_U16 },
	[BRIDGE_VLANDB_ENTRY_STATE] = { .type = NLA_U8 },
	[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
	[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER] = { .type = NLA_U8 },
	[BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS] = { .type = NLA_REJECT },
	[BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS] = { .type = NLA_U32 },
	[BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS] = NLA_POLICY_MAX(NLA_U8, 1),
};

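/* Handle one BRIDGE_VLANDB_ENTRY attribute: add/delete the vlan (or vlan
 * range) it describes, then apply any per-vlan options it carries.
 */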
static int br_vlan_rtm_process_one(struct net_device *dev,
				   const struct nlattr *attr,
				   int cmd, struct netlink_ext_ack *extack)
{
	struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
	struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
	bool changed = false, skip_processing = false;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0, cmdmap = 0;
	struct net_bridge *br;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (WARN_ON(!p))
			return -ENODEV;
		br = p->br;
		vg = nbp_vlan_group(p);
	}

	if (WARN_ON(!vg))
		return -ENODEV;

	err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
			       br_vlan_db_policy, extack);
	if (err)
		return err;

	if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
		return -EINVAL;
	}
	memset(&vrange_end, 0, sizeof(vrange_end));

	vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
	if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
			    BRIDGE_VLAN_INFO_RANGE_END)) {
		NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
		return -EINVAL;
	}
	if (!br_vlan_valid_id(vinfo->vid, extack))
		return -EINVAL;

	if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
		vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
		/* validate user-provided flags without RANGE_BEGIN */
		vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
		vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;

		/* vinfo_last is the range start, vinfo the range end */
		vinfo_last = vinfo;
		vinfo = &vrange_end;

		if (!br_vlan_valid_id(vinfo->vid, extack) ||
		    !br_vlan_valid_range(vinfo, vinfo_last, extack))
			return -EINVAL;
	}

	switch (cmd) {
	case RTM_NEWVLAN:
		cmdmap = RTM_SETLINK;
		skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
		break;
	case RTM_DELVLAN:
		cmdmap = RTM_DELLINK;
		break;
	}

	if (!skip_processing) {
		struct bridge_vlan_info *tmp_last = vinfo_last;

		/* br_process_vlan_info may overwrite vinfo_last */
		err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
					   &changed, extack);

		/* notify first if anything changed */
		if (changed)
			br_ifinfo_notify(cmdmap, br, p);

		if (err)
			return err;
	}

	/* deal with options */
	if (cmd == RTM_NEWVLAN) {
		struct net_bridge_vlan *range_start, *range_end;

		if (vinfo_last) {
			range_start = br_vlan_find(vg, vinfo_last->vid);
			range_end = br_vlan_find(vg, vinfo->vid);
		} else {
			range_start = br_vlan_find(vg, vinfo->vid);
			range_end = range_start;
		}

		err = br_vlan_process_options(br, p, range_start, range_end,
					      tb, extack);
	}

	return err;
}

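/* doit handler shared by RTM_NEWVLAN and RTM_DELVLAN: walks every
 * BRIDGE_VLANDB_ENTRY and BRIDGE_VLANDB_GLOBAL_OPTIONS attribute in the
 * message. As a rough illustration (exact iproute2 mapping not verified
 * here), something like "bridge vlan set dev swp1 vid 100 state forwarding"
 * would arrive as RTM_NEWVLAN carrying one BRIDGE_VLANDB_ENTRY.
 */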
static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_vlan_msg *bvm;
	struct net_device *dev;
	struct nlattr *attr;
	int err, vlans = 0;
	int rem;

	/* this should validate the header and check for remaining bytes */
	err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
			  extack);
	if (err < 0)
		return err;

	bvm = nlmsg_data(nlh);
	dev = __dev_get_by_index(net, bvm->ifindex);
	if (!dev)
		return -ENODEV;

	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
		return -EINVAL;
	}

	nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
		switch (nla_type(attr)) {
		case BRIDGE_VLANDB_ENTRY:
			err = br_vlan_rtm_process_one(dev, attr,
						      nlh->nlmsg_type,
						      extack);
			break;
		case BRIDGE_VLANDB_GLOBAL_OPTIONS:
			err = br_vlan_rtm_process_global_options(dev, attr,
								 nlh->nlmsg_type,
								 extack);
			break;
		default:
			continue;
		}

		vlans++;
		if (err)
			break;
	}
	if (!vlans) {
		NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
		err = -EINVAL;
	}

	return err;
}

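/* RTM_NEWVLAN and RTM_DELVLAN share a doit (the nlmsg type tells them
 * apart); RTM_GETVLAN is dump-only.
 */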
static const struct rtnl_msg_handler br_vlan_rtnl_msg_handlers[] = {
	{THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN, br_vlan_rtm_process, NULL, 0},
	{THIS_MODULE, PF_BRIDGE, RTM_DELVLAN, br_vlan_rtm_process, NULL, 0},
	{THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL, br_vlan_rtm_dump, 0},
};

int br_vlan_rtnl_init(void)
{
	return rtnl_register_many(br_vlan_rtnl_msg_handlers);
}

void br_vlan_rtnl_uninit(void)
{
	rtnl_unregister_many(br_vlan_rtnl_msg_handlers);
}
