// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"
#include "br_private_tunnel.h"

static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);

static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
{
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}

static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg,
			    const struct net_bridge_vlan *v)
{
	if (vg->pvid == v->vid)
		return false;

	smp_wmb();
	br_vlan_set_pvid_state(vg, v->state);
	vg->pvid = v->vid;

	return true;
}

static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return false;

	smp_wmb();
	vg->pvid = 0;

	return true;
}

/* return true if anything changed, false otherwise */
static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan_group *vg;
	u16 old_flags = v->flags;
	bool ret;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	if (flags & BRIDGE_VLAN_INFO_PVID)
		ret = __vlan_add_pvid(vg, v);
	else
		ret = __vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;

	return ret || !!(old_flags ^ v->flags);
}

static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  struct net_bridge_vlan *v, u16 flags,
			  struct netlink_ext_ack *extack)
{
	int err;

	/* Try the switchdev op first. In case it is not supported, fall back
	 * to the 8021q add.
	 */
	err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, v->vid);
	v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
	return err;
}

static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid >= vent->vid)
			break;
	}
	list_add_rcu(&v->vlist, hpos);
}

static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}

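/*
 * Note: __vlan_add_list() keeps vg->vlan_list sorted by vid; __vlan_flush()
 * and the vlan netlink dump code rely on this ordering to coalesce
 * consecutive vids into ranges.
 */
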
static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  const struct net_bridge_vlan *v)
{
	int err;

	/* Try the switchdev op first. In case it is not supported, fall back
	 * to the 8021q del.
	 */
	err = br_switchdev_port_vlan_del(dev, v->vid);
	if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
		vlan_vid_del(dev, br->vlan_proto, v->vid);
	return err == -EOPNOTSUPP ? 0 : err;
}

/* Returns the master vlan; if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *
br_vlan_get_master(struct net_bridge *br, u16 vid,
		   struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		bool changed;

		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0, &changed, extack))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
		refcount_set(&masterv->refcnt, 1);
		return masterv;
	}
	refcount_inc(&masterv->refcnt);

	return masterv;
}

static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (refcount_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		br_multicast_toggle_one_vlan(masterv, false);
		br_multicast_ctx_deinit(&masterv->br_mcast_ctx);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}

static void nbp_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(br_vlan_is_master(v));
	/* if we had per-port stats configured then free them here */
	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
		free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
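/*
 * Rough userspace mapping of the cases above (illustrative iproute2
 * commands, not part of this file):
 *   case 1: bridge vlan add dev <port> vid 10
 *   case 2: bridge vlan add dev <bridge> vid 10 self
 * Cases 3 and 4 happen internally, when a port vlan is added while the
 * bridge-level master entry is still missing (see br_vlan_get_master()).
 */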
static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
		      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		vg = br_vlan_group(br);
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		vg = nbp_vlan_group(p);
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v, flags, extack);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			bool changed;

			err = br_vlan_add(br, v->vid,
					  flags | BRIDGE_VLAN_INFO_BRENTRY,
					  &changed, extack);
			if (err)
				goto out_filt;

			if (changed)
				br_vlan_notify(br, NULL, v->vid, 0,
					       RTM_NEWVLAN);
		}

		masterv = br_vlan_get_master(br, v->vid, extack);
		if (!masterv) {
			err = -ENOMEM;
			goto out_filt;
		}
		v->brvlan = masterv;
		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
			v->stats =
			     netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
			if (!v->stats) {
				err = -ENOMEM;
				goto out_filt;
			}
			v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
		} else {
			v->stats = masterv->stats;
		}
		br_multicast_port_ctx_init(p, v, &v->port_mcast_ctx);
	} else {
		err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
		if (err && err != -EOPNOTSUPP)
			goto out;
		br_multicast_ctx_init(br, v, &v->br_mcast_ctx);
		v->priv_flags |= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	}

	/* Add the dev mac and count the vlan only if it's usable */
	if (br_vlan_should_use(v)) {
		err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto out_filt;
		}
		vg->num_vlans++;
	}

	/* set the state before publishing */
	v->state = BR_STATE_FORWARDING;

	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_add_flags(v, flags);
	br_multicast_toggle_one_vlan(v, true);

	if (p)
		nbp_vlan_set_vlan_dev_state(p, v->vid);
out:
	return err;

out_fdb_insert:
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v);
		if (masterv) {
			if (v->stats && masterv->stats != v->stats)
				free_percpu(v->stats);
			v->stats = NULL;

			br_vlan_put_master(masterv);
			v->brvlan = NULL;
		}
	} else {
		br_switchdev_port_vlan_del(dev, v->vid);
	}

	goto out;
}

static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v);
		if (err)
			goto out;
	} else {
		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
		if (err && err != -EOPNOTSUPP)
			goto out;
		err = 0;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		nbp_vlan_set_vlan_dev_state(p, v->vid);
		br_multicast_toggle_one_vlan(v, false);
		br_multicast_port_ctx_deinit(&v->port_mcast_ctx);
		call_rcu(&v->rcu, nbp_vlan_rcu_free);
	}

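	/* drop the port vlan's reference on its master entry; for a bridge
	 * (master) vlan this drops the initial reference taken at creation
	 */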
	br_vlan_put_master(masterv);
out:
	return err;
}

static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}

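/*
 * Example of the range coalescing done below: with vids 1, 2, 3, 7 and 8
 * configured, userspace receives two RTM_DELVLAN notifications, one for
 * range 1-3 and one for range 7-8.
 */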
static void __vlan_flush(const struct net_bridge *br,
			 const struct net_bridge_port *p,
			 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;
	u16 v_start = 0, v_end = 0;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
		/* take care of disjoint ranges */
		if (!v_start) {
			v_start = vlan->vid;
		} else if (vlan->vid - v_end != 1) {
			/* found range end, notify and start next one */
			br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
			v_start = vlan->vid;
		}
		v_end = vlan->vid;

		__vlan_del(vlan);
	}

	/* notify about the last/whole vlan range */
	if (v_start)
		br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
}

struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id. If the vlan id has the untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* The vlan entry must be configured at this point. The
	 * only exception is when the bridge is set in promisc mode and the
	 * packet is destined for the bridge device. In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	/* If the skb will be sent using forwarding offload, the assumption is
	 * that the switchdev will inject the packet into hardware together
	 * with the bridge VLAN, so that it can be forwarded according to that
	 * VLAN. The switchdev should deal with popping the VLAN header in
	 * hardware on each egress port as appropriate. So only strip the VLAN
	 * header if forwarding offload is not being used.
	 */
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED &&
	    !br_switchdev_frame_uses_tx_fwd_offload(skb))
		__vlan_hwaccel_clear_tag(skb);

	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}

/* Called under RCU */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid,
			      u8 *state,
			      struct net_bridge_vlan **vlan)
{
	struct pcpu_sw_netstats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port. That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port. Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, we know that skb->vlan_tci VID
			 * field was 0.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if snooping and stats are disabled we can avoid the lookup */
		if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
		    !br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
			if (*state == BR_STATE_FORWARDING) {
				*state = br_vlan_get_pvid_state(vg);
				return br_vlan_state_allowed(*state, true);
			} else {
				return true;
			}
		}
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (*state == BR_STATE_FORWARDING) {
		*state = br_vlan_get_state(v);
		if (!br_vlan_state_allowed(*state, true))
			goto drop;
	}

	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	*vlan = v;

	return true;

drop:
	kfree_skb(skb);
	return false;
}

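/*
 * Ingress policy in short: untagged and priority-tagged (vid 0) frames are
 * assigned the pvid, or dropped when no pvid is configured; tagged frames
 * must match a configured vlan whose state allows ingress.
 */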
bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid, u8 *state,
			struct net_bridge_vlan **vlan)
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	*vlan = NULL;
	if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(br, vg, skb, vid, state, vlan);
}

/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
		       const struct sk_buff *skb)
{
	const struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		return true;

	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v) &&
	    br_vlan_state_allowed(br_vlan_get_state(v), false))
		return true;

	return false;
}

/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;
	struct net_bridge_vlan *v;

	/* If filtering was disabled at input, let it pass. */
	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return true;

	vg = nbp_vlan_group_rcu(p);
	if (!vg || !vg->num_vlans)
		return false;

	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid ||
		    !br_vlan_state_allowed(br_vlan_get_pvid_state(vg), true))
			return false;

		return true;
	}

	v = br_vlan_find(vg, *vid);
	if (v && br_vlan_state_allowed(br_vlan_get_state(v), true))
		return true;

	return false;
}

static int br_vlan_add_existing(struct net_bridge *br,
				struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *vlan,
				u16 flags, bool *changed,
				struct netlink_ext_ack *extack)
{
	int err;

	err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	if (!br_vlan_is_brentry(vlan)) {
		/* Trying to change flags of non-existent bridge vlan */
		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
			err = -EINVAL;
			goto err_flags;
		}
		/* It was only kept for port vlans, now make it real */
		err = br_fdb_add_local(br, NULL, br->dev->dev_addr, vlan->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto err_fdb_insert;
		}

		refcount_inc(&vlan->refcnt);
		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans++;
		*changed = true;
		br_multicast_toggle_one_vlan(vlan, true);
	}

	if (__vlan_add_flags(vlan, flags))
		*changed = true;

	return 0;

err_fdb_insert:
err_flags:
	br_switchdev_port_vlan_del(br->dev, vlan->vid);
	return err;
}

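/*
 * Hypothetical usage sketch (it mirrors the NETDEV_REGISTER path in
 * br_vlan_bridge_event() further below):
 *
 *	bool changed;
 *	int err = br_vlan_add(br, 1, BRIDGE_VLAN_INFO_PVID |
 *				     BRIDGE_VLAN_INFO_UNTAGGED |
 *				     BRIDGE_VLAN_INFO_BRENTRY,
 *			      &changed, NULL);
 */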
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
		struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan)
		return br_vlan_add_existing(br, vg, vlan, flags, changed,
					    extack);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		refcount_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags, extack);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	} else {
		*changed = true;
	}

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	vlan_tunnel_info_del(vg, v);

	return __vlan_del(v);
}

void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(br, NULL, vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (!vg)
		return NULL;

	return br_vlan_lookup(&vg->vlan_hash, vid);
}

/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
		return;

	spin_lock_bh(&br->lock);
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}

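/*
 * group_addr[5] selects between 01:80:C2:00:00:00 (Bridge Group Address)
 * and 01:80:C2:00:00:08 (Provider Bridge Group Address);
 * br_recalculate_fwd_mask() below clears the matching bit so such frames
 * are never forwarded by the bridge.
 */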
/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}

int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val,
			  struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
		return 0;

	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);

	err = switchdev_port_attr_set(br->dev, &attr, extack);
	if (err && err != -EOPNOTSUPP) {
		br_opt_toggle(br, BROPT_VLAN_ENABLED, !val);
		return err;
	}

	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);
	if (!val && br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		br_info(br, "vlan filtering disabled, automatically disabling multicast vlan snooping\n");
		br_multicast_toggle_vlan_snooping(br, false, NULL);
	}

	return 0;
}

bool br_vlan_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_opt_get(br, BROPT_VLAN_ENABLED);
}
EXPORT_SYMBOL_GPL(br_vlan_enabled);

int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
{
	struct net_bridge *br = netdev_priv(dev);

	*p_proto = ntohs(br->vlan_proto);

	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_proto);

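/*
 * Switching the protocol (802.1Q <-> 802.1ad) first registers every port
 * vlan with the new proto and only then drops the old-proto filters, so
 * there is no window in which tagged traffic would be rejected; on error
 * everything added so far is rolled back.
 */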
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto,
			struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_protocol = ntohs(proto),
	};
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto = br->vlan_proto;

	if (br->vlan_proto == proto)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	attr.u.vlan_protocol = ntohs(oldproto);
	switchdev_port_attr_set(br->dev, &attr, NULL);

	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}

int br_vlan_set_proto(struct net_bridge *br, unsigned long val,
		      struct netlink_ext_ack *extack)
{
	if (!eth_type_vlan(htons(val)))
		return -EPROTONOSUPPORT;

	return __br_vlan_set_proto(br, htons(val), extack);
}

int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
{
	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *p;

	/* allow changing the option only if there are no port vlans
	 * configured
	 */
	list_for_each_entry(p, &br->port_list, list) {
		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);

		if (vg->num_vlans)
			return -EBUSY;
	}

	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	struct net_bridge_vlan *v;

	if (vid != vg->pvid)
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
}

static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid)) {
		if (!br_vlan_delete(br, pvid))
			br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
		    !nbp_vlan_delete(p, pvid))
			br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	br->default_pvid = 0;
}

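/*
 * The new default_pvid is applied to the bridge itself and to every port
 * still carrying the old default; ports whose pvid was changed by the user
 * are left untouched (see the vlan_default_pvid() checks below).
 */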
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
			       struct netlink_ext_ack *extack)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	unsigned long *changed;
	bool vlchange;
	u16 old_pvid;
	int err = 0;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY,
				  &vlchange, extack);
		if (err)
			goto out;

		if (br_vlan_delete(br, old_pvid))
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
		__set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &vlchange, extack);
		if (err)
			goto err_port;
		if (nbp_vlan_delete(p, old_pvid))
			br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
		__set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	bitmap_free(changed);
	return err;

err_port:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid) {
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED,
				     &vlchange, NULL);
			br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
		}
		nbp_vlan_delete(p, pvid);
		br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	if (test_bit(0, changed)) {
		if (old_pvid) {
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY,
				    &vlchange, NULL);
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
		}
		br_vlan_delete(br, pvid);
		br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}
	goto out;
}

int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val,
			     struct netlink_ext_ack *extack)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (pvid == br->default_pvid)
		goto out;

	/* Only allow default pvid change when filtering is disabled */
	if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto out;
	}
	err = __br_vlan_set_default_pvid(br, pvid, extack);
out:
	return err;
}

int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	rcu_assign_pointer(br->vlgrp, vg);

out:
	return ret;

err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}

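/*
 * A new bridge starts out with proto 802.1Q and default_pvid 1; the actual
 * vlan 1 entry is created later, on NETDEV_REGISTER (see
 * br_vlan_bridge_event()).
 */
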
int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr, extack);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		bool changed;

		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &changed, extack);
		if (ret)
			goto err_vlan_add;
		br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
	}
out:
	return ret;

err_vlan_add:
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);

	goto out;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
		 bool *changed, struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		/* Pass the flags to the hardware bridge */
		ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
		if (ret && ret != -EOPNOTSUPP)
			return ret;
		*changed = __vlan_add_flags(vlan, flags);

		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags, extack);
	if (ret)
		kfree(vlan);
	else
		*changed = true;

	return ret;
}

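/*
 * Unlike br_vlan_add(), the entries created by nbp_vlan_add() are port
 * vlans: __vlan_add() links them to (and takes a reference on) the
 * bridge-level master vlan with the same vid.
 */
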
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(nbp_vlan_group(port), vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}

void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(port->br, port, vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

void br_vlan_get_stats(const struct net_bridge_vlan *v,
		       struct pcpu_sw_netstats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		struct pcpu_sw_netstats *cpu_stats;
		unsigned int start;

		cpu_stats = per_cpu_ptr(v->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rxpackets = cpu_stats->rx_packets;
			rxbytes = cpu_stats->rx_bytes;
			txbytes = cpu_stats->tx_bytes;
			txpackets = cpu_stats->tx_packets;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rxpackets;
		stats->rx_bytes += rxbytes;
		stats->tx_bytes += txbytes;
		stats->tx_packets += txpackets;
	}
}

int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid);

int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	p = br_port_get_check_rcu(dev);
	if (p)
		vg = nbp_vlan_group_rcu(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group_rcu(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);

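/*
 * Helpers for dev_fill_forward_path(): they describe to the caller (e.g.
 * the netfilter flowtable offload code) how the bridge would tag or untag
 * a packet along the resolved path.
 */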
void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
				    struct net_device_path_ctx *ctx,
				    struct net_device_path *path)
{
	struct net_bridge_vlan_group *vg;
	int idx = ctx->num_vlans - 1;
	u16 vid;

	path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;

	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return;

	vg = br_vlan_group(br);

	if (idx >= 0 &&
	    ctx->vlan[idx].proto == br->vlan_proto) {
		vid = ctx->vlan[idx].id;
	} else {
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_TAG;
		vid = br_get_pvid(vg);
	}

	path->bridge.vlan_id = vid;
	path->bridge.vlan_proto = br->vlan_proto;
}

int br_vlan_fill_forward_path_mode(struct net_bridge *br,
				   struct net_bridge_port *dst,
				   struct net_device_path *path)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return 0;

	vg = nbp_vlan_group_rcu(dst);
	v = br_vlan_find(vg, path->bridge.vlan_id);
	if (!v || !br_vlan_should_use(v))
		return -EINVAL;

	if (!(v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return 0;

	if (path->bridge.vlan_mode == DEV_PATH_BR_VLAN_TAG)
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
	else if (v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG_HW;
	else
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG;

	return 0;
}

int br_vlan_get_info(const struct net_device *dev, u16 vid,
		     struct bridge_vlan_info *p_vinfo)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	v = br_vlan_find(vg, vid);
	if (!v)
		return -ENOENT;

	p_vinfo->vid = vid;
	p_vinfo->flags = v->flags;
	if (vid == br_get_pvid(vg))
		p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info);

int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
			 struct bridge_vlan_info *p_vinfo)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge_port *p;

	p = br_port_get_check_rcu(dev);
	if (p)
		vg = nbp_vlan_group_rcu(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group_rcu(netdev_priv(dev));
	else
		return -EINVAL;

	v = br_vlan_find(vg, vid);
	if (!v)
		return -ENOENT;

	p_vinfo->vid = vid;
	p_vinfo->flags = v->flags;
	if (vid == br_get_pvid(vg))
		p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info_rcu);

static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
		!!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
}

static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
			       __always_unused struct netdev_nested_priv *priv)
{
	return br_vlan_is_bind_vlan_dev(dev);
}

static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
{
	int found;

	rcu_read_lock();
	found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
					      NULL);
	rcu_read_unlock();

	return !!found;
}

struct br_vlan_bind_walk_data {
	u16 vid;
	struct net_device *result;
};

static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
					  struct netdev_nested_priv *priv)
{
	struct br_vlan_bind_walk_data *data = priv->data;
	int found = 0;

	if (br_vlan_is_bind_vlan_dev(dev) &&
	    vlan_dev_priv(dev)->vlan_id == data->vid) {
		data->result = dev;
		found = 1;
	}

	return found;
}

static struct net_device *
br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
{
	struct br_vlan_bind_walk_data data = {
		.vid = vid,
	};
	struct netdev_nested_priv priv = {
		.data = (void *)&data,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
				      &priv);
	rcu_read_unlock();

	return data.result;
}

static bool br_vlan_is_dev_up(const struct net_device *dev)
{
	return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
}

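/*
 * VLAN_FLAG_BRIDGE_BINDING support: the carrier of an 8021q upper bound to
 * the bridge follows the bridge carrier and the state of the ports that
 * actually carry its vid.
 */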
static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
				       struct net_device *vlan_dev)
{
	u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	bool has_carrier = false;

	if (!netif_carrier_ok(br->dev)) {
		netif_carrier_off(vlan_dev);
		return;
	}

	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
			has_carrier = true;
			break;
		}
	}

	if (has_carrier)
		netif_carrier_on(vlan_dev);
	else
		netif_carrier_off(vlan_dev);
}

static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
{
	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
	struct net_bridge_vlan *vlan;
	struct net_device *vlan_dev;

	list_for_each_entry(vlan, &vg->vlan_list, vlist) {
		vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
							   vlan->vid);
		if (vlan_dev) {
			if (br_vlan_is_dev_up(p->dev)) {
				if (netif_carrier_ok(p->br->dev))
					netif_carrier_on(vlan_dev);
			} else {
				br_vlan_set_vlan_dev_state(p->br, vlan_dev);
			}
		}
	}
}

static void br_vlan_upper_change(struct net_device *dev,
				 struct net_device *upper_dev,
				 bool linking)
{
	struct net_bridge *br = netdev_priv(dev);

	if (!br_vlan_is_bind_vlan_dev(upper_dev))
		return;

	if (linking) {
		br_vlan_set_vlan_dev_state(br, upper_dev);
		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
	} else {
		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
			      br_vlan_has_upper_bind_vlan_dev(dev));
	}
}

struct br_vlan_link_state_walk_data {
	struct net_bridge *br;
};

static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
					struct netdev_nested_priv *priv)
{
	struct br_vlan_link_state_walk_data *data = priv->data;

	if (br_vlan_is_bind_vlan_dev(vlan_dev))
		br_vlan_set_vlan_dev_state(data->br, vlan_dev);

	return 0;
}

static void br_vlan_link_state_change(struct net_device *dev,
				      struct net_bridge *br)
{
	struct br_vlan_link_state_walk_data data = {
		.br = br
	};
	struct netdev_nested_priv priv = {
		.data = (void *)&data,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
				      &priv);
	rcu_read_unlock();
}

/* Must be protected by RTNL. */
static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
{
	struct net_device *vlan_dev;

	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
		return;

	vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
	if (vlan_dev)
		br_vlan_set_vlan_dev_state(p->br, vlan_dev);
}

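/*
 * Notifier glue for the bridge device itself: NETDEV_REGISTER/UNREGISTER
 * manage the default pvid entry on the bridge, NETDEV_CHANGEUPPER tracks
 * 8021q uppers for bridge binding, and carrier changes are propagated to
 * bound vlan devices.
 */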
/* Must be protected by RTNL. */
int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_bridge *br = netdev_priv(dev);
	int vlcmd = 0, ret = 0;
	bool changed = false;

	switch (event) {
	case NETDEV_REGISTER:
		ret = br_vlan_add(br, br->default_pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
		vlcmd = RTM_NEWVLAN;
		break;
	case NETDEV_UNREGISTER:
		changed = !br_vlan_delete(br, br->default_pvid);
		vlcmd = RTM_DELVLAN;
		break;
	case NETDEV_CHANGEUPPER:
		info = ptr;
		br_vlan_upper_change(dev, info->upper_dev, info->linking);
		break;

	case NETDEV_CHANGE:
	case NETDEV_UP:
		if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
			break;
		br_vlan_link_state_change(dev, br);
		break;
	}
	if (changed)
		br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);

	return ret;
}

/* Must be protected by RTNL. */
void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
{
	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
		return;

	switch (event) {
	case NETDEV_CHANGE:
	case NETDEV_DOWN:
	case NETDEV_UP:
		br_vlan_set_all_vlan_dev_state(p);
		break;
	}
}

static bool br_vlan_stats_fill(struct sk_buff *skb,
			       const struct net_bridge_vlan *v)
{
	struct pcpu_sw_netstats stats;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
	if (!nest)
		return false;

	br_vlan_get_stats(v, &stats);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES, stats.rx_bytes,
			      BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
			      stats.rx_packets, BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES, stats.tx_bytes,
			      BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
			      stats.tx_packets, BRIDGE_VLANDB_STATS_PAD))
		goto out_err;

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

/* v_opts is used to dump the options which must be equal in the whole range */
static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
			      const struct net_bridge_vlan *v_opts,
			      u16 flags,
			      bool dump_stats)
{
	struct bridge_vlan_info info;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
	if (!nest)
		return false;

	memset(&info, 0, sizeof(info));
	info.vid = vid;
	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	if (flags & BRIDGE_VLAN_INFO_PVID)
		info.flags |= BRIDGE_VLAN_INFO_PVID;

	if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
		goto out_err;

	if (vid_range && vid < vid_range &&
	    !(flags & BRIDGE_VLAN_INFO_PVID) &&
	    nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
		goto out_err;

	if (v_opts) {
		if (!br_vlan_opts_fill(skb, v_opts))
			goto out_err;

		if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
			goto out_err;
	}

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

static size_t rtnl_vlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
		+ nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
		+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
		+ nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */
		+ br_vlan_opts_nl_size(); /* bridge vlan options */
}

void br_vlan_notify(const struct net_bridge *br,
		    const struct net_bridge_port *p,
		    u16 vid, u16 vid_range,
		    int cmd)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v = NULL;
	struct br_vlan_msg *bvm;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 flags = 0;
	int ifindex;

	/* right now notifications are done only with rtnl held */
	ASSERT_RTNL();

	if (p) {
		ifindex = p->dev->ifindex;
		vg = nbp_vlan_group(p);
		net = dev_net(p->dev);
	} else {
		ifindex = br->dev->ifindex;
		vg = br_vlan_group(br);
		net = dev_net(br->dev);
	}

	skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		goto out_err;

	err = -EMSGSIZE;
	nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
	if (!nlh)
		goto out_err;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = AF_BRIDGE;
	bvm->ifindex = ifindex;

	switch (cmd) {
	case RTM_NEWVLAN:
		/* need to find the vlan due to flags/options */
		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v))
			goto out_kfree;

		flags = v->flags;
		if (br_get_pvid(vg) == v->vid)
			flags |= BRIDGE_VLAN_INFO_PVID;
		break;
	case RTM_DELVLAN:
		break;
	default:
		goto out_kfree;
	}

	if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags, false))
		goto out_err;

	nlmsg_end(skb, nlh);
	rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
	return;

out_err:
	rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
out_kfree:
	kfree_skb(skb);
}

/* check if v_curr can enter a range ending in range_end */
bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
			     const struct net_bridge_vlan *range_end)
{
	return v_curr->vid - range_end->vid == 1 &&
	       range_end->flags == v_curr->flags &&
	       br_vlan_opts_eq_range(v_curr, range_end);
}

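/*
 * Example of the dump-side range compression: vids 10-19 with identical
 * flags and options go out as a single BRIDGE_VLANDB_ENTRY whose
 * BRIDGE_VLANDB_ENTRY_RANGE attribute is 19.
 */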
static int br_vlan_dump_dev(const struct net_device *dev,
			    struct sk_buff *skb,
			    struct netlink_callback *cb,
			    u32 dump_flags)
{
	struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
	bool dump_global = !!(dump_flags & BRIDGE_VLANDB_DUMPF_GLOBAL);
	bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
	struct net_bridge_vlan_group *vg;
	int idx = 0, s_idx = cb->args[1];
	struct nlmsghdr *nlh = NULL;
	struct net_bridge_port *p;
	struct br_vlan_msg *bvm;
	struct net_bridge *br;
	int err = 0;
	u16 pvid;

	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
		return -EINVAL;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
		p = NULL;
	} else {
		/* global options are dumped only for bridge devices */
		if (dump_global)
			return 0;

		p = br_port_get_rcu(dev);
		if (WARN_ON(!p))
			return -EINVAL;
		vg = nbp_vlan_group_rcu(p);
		br = p->br;
	}

	if (!vg)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = PF_BRIDGE;
	bvm->ifindex = dev->ifindex;
	pvid = br_get_pvid(vg);

	/* idx must stay at range's beginning until it is filled in */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!dump_global && !br_vlan_should_use(v))
			continue;
		if (idx < s_idx) {
			idx++;
			continue;
		}

		if (!range_start) {
			range_start = v;
			range_end = v;
			continue;
		}

		if (dump_global) {
			if (br_vlan_global_opts_can_enter_range(v, range_end))
				goto update_end;
			if (!br_vlan_global_opts_fill(skb, range_start->vid,
						      range_end->vid,
						      range_start)) {
				err = -EMSGSIZE;
				break;
			}
			/* advance number of filled vlans */
			idx += range_end->vid - range_start->vid + 1;

			range_start = v;
		} else if (dump_stats || v->vid == pvid ||
			   !br_vlan_can_enter_range(v, range_end)) {
			u16 vlan_flags = br_vlan_flags(range_start, pvid);

			if (!br_vlan_fill_vids(skb, range_start->vid,
					       range_end->vid, range_start,
					       vlan_flags, dump_stats)) {
				err = -EMSGSIZE;
				break;
			}
			/* advance number of filled vlans */
			idx += range_end->vid - range_start->vid + 1;

			range_start = v;
		}
update_end:
		range_end = v;
	}

	/* err will be 0 and range_start will be set in 3 cases here:
	 * - first vlan (range_start == range_end)
	 * - last vlan (range_start == range_end, not in range)
	 * - last vlan range (range_start != range_end, in range)
	 */
	if (!err && range_start) {
		if (dump_global &&
		    !br_vlan_global_opts_fill(skb, range_start->vid,
					      range_end->vid, range_start))
			err = -EMSGSIZE;
		else if (!dump_global &&
			 !br_vlan_fill_vids(skb, range_start->vid,
					    range_end->vid, range_start,
					    br_vlan_flags(range_start, pvid),
					    dump_stats))
			err = -EMSGSIZE;
	}

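	/* on -EMSGSIZE remember where to resume this device's dump,
	 * otherwise reset the index for the next device
	 */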
	cb->args[1] = err ? idx : 0;

	nlmsg_end(skb, nlh);

	return err;
}

static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
	[BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
};

static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
	int idx = 0, err = 0, s_idx = cb->args[0];
	struct net *net = sock_net(skb->sk);
	struct br_vlan_msg *bvm;
	struct net_device *dev;
	u32 dump_flags = 0;

	err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
			  br_vlan_db_dump_pol, cb->extack);
	if (err < 0)
		return err;

	bvm = nlmsg_data(cb->nlh);
	if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
		dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);

	rcu_read_lock();
	if (bvm->ifindex) {
		dev = dev_get_by_index_rcu(net, bvm->ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out_err;
		}
		err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
		if (err && err != -EMSGSIZE)
			goto out_err;
	} else {
		for_each_netdev_rcu(net, dev) {
			if (idx < s_idx)
				goto skip;

			err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
			if (err == -EMSGSIZE)
				break;
skip:
			idx++;
		}
	}
	cb->args[0] = idx;
	rcu_read_unlock();

	return skb->len;

out_err:
	rcu_read_unlock();

	return err;
}

static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
	[BRIDGE_VLANDB_ENTRY_INFO]	=
		NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
	[BRIDGE_VLANDB_ENTRY_RANGE]	= { .type = NLA_U16 },
	[BRIDGE_VLANDB_ENTRY_STATE]	= { .type = NLA_U8 },
	[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
	[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER] = { .type = NLA_U8 },
};

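/*
 * Illustrative userspace trigger (assumed iproute2 syntax):
 *   bridge vlan set dev <port> vid 10 state forwarding
 * arrives as RTM_NEWVLAN carrying one BRIDGE_VLANDB_ENTRY attribute,
 * which is processed per-entry below.
 */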
static int br_vlan_rtm_process_one(struct net_device *dev,
				   const struct nlattr *attr,
				   int cmd, struct netlink_ext_ack *extack)
{
	struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
	struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
	bool changed = false, skip_processing = false;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0, cmdmap = 0;
	struct net_bridge *br;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (WARN_ON(!p))
			return -ENODEV;
		br = p->br;
		vg = nbp_vlan_group(p);
	}

	if (WARN_ON(!vg))
		return -ENODEV;

	err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
			       br_vlan_db_policy, extack);
	if (err)
		return err;

	if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
		return -EINVAL;
	}
	memset(&vrange_end, 0, sizeof(vrange_end));

	vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
	if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
			    BRIDGE_VLAN_INFO_RANGE_END)) {
		NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
		return -EINVAL;
	}
	if (!br_vlan_valid_id(vinfo->vid, extack))
		return -EINVAL;

	if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
		vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
		/* validate user-provided flags without RANGE_BEGIN */
		vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
		vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;

		/* vinfo_last is the range start, vinfo the range end */
		vinfo_last = vinfo;
		vinfo = &vrange_end;

		if (!br_vlan_valid_id(vinfo->vid, extack) ||
		    !br_vlan_valid_range(vinfo, vinfo_last, extack))
			return -EINVAL;
	}

	switch (cmd) {
	case RTM_NEWVLAN:
		cmdmap = RTM_SETLINK;
		skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
		break;
	case RTM_DELVLAN:
		cmdmap = RTM_DELLINK;
		break;
	}

	if (!skip_processing) {
		struct bridge_vlan_info *tmp_last = vinfo_last;

		/* br_process_vlan_info may overwrite vinfo_last */
		err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
					   &changed, extack);

		/* notify first if anything changed */
		if (changed)
			br_ifinfo_notify(cmdmap, br, p);

		if (err)
			return err;
	}

	/* deal with options */
	if (cmd == RTM_NEWVLAN) {
		struct net_bridge_vlan *range_start, *range_end;

		if (vinfo_last) {
			range_start = br_vlan_find(vg, vinfo_last->vid);
			range_end = br_vlan_find(vg, vinfo->vid);
		} else {
			range_start = br_vlan_find(vg, vinfo->vid);
			range_end = range_start;
		}

		err = br_vlan_process_options(br, p, range_start, range_end,
					      tb, extack);
	}

	return err;
}

static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_vlan_msg *bvm;
	struct net_device *dev;
	struct nlattr *attr;
	int err, vlans = 0;
	int rem;

	/* this should validate the header and check for remaining bytes */
	err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
			  extack);
	if (err < 0)
		return err;

	bvm = nlmsg_data(nlh);
	dev = __dev_get_by_index(net, bvm->ifindex);
	if (!dev)
		return -ENODEV;

	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
		return -EINVAL;
	}

	nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
		switch (nla_type(attr)) {
		case BRIDGE_VLANDB_ENTRY:
			err = br_vlan_rtm_process_one(dev, attr,
						      nlh->nlmsg_type,
						      extack);
			break;
		case BRIDGE_VLANDB_GLOBAL_OPTIONS:
			err = br_vlan_rtm_process_global_options(dev, attr,
								 nlh->nlmsg_type,
								 extack);
			break;
		default:
			continue;
		}

		vlans++;
		if (err)
			break;
	}
	if (!vlans) {
		NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
		err = -EINVAL;
	}

	return err;
}

void br_vlan_rtnl_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
			     br_vlan_rtm_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
			     br_vlan_rtm_process, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
			     br_vlan_rtm_process, NULL, 0);
}

void br_vlan_rtnl_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
	rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
	rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
}