#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest, *port_nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (!p)
			continue;
		port_nest = nla_nest_start(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				br_timer_value(&p->multicast_router_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router)) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
}

static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	if (ip->proto == htons(ETH_P_IP))
		ip->u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip->u.ip6 = entry->addr.u.ip6;
#endif
}
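
/* Dump the MDB for one bridge device: walk every hash bucket under
 * RCU and open an MDBA_MDB_ENTRY nest per group, with one
 * MDBA_MDB_ENTRY_INFO attribute per member port.  cb->args[1] holds
 * the resume index for partial dumps.
 */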
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_htable *mdb;
	struct nlattr *nest, *nest2;
	int i, err = 0;
	int idx = 0, s_idx = cb->args[1];

	if (br->multicast_disabled)
		return 0;

	mdb = rcu_dereference(br->mdb);
	if (!mdb)
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	for (i = 0; i < mdb->max; i++) {
		struct net_bridge_mdb_entry *mp;
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;
		struct net_bridge_port *port;

		hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
			if (idx < s_idx)
				goto skip;

			nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
			if (nest2 == NULL) {
				err = -EMSGSIZE;
				goto out;
			}

			for (pp = &mp->ports;
			     (p = rcu_dereference(*pp)) != NULL;
			     pp = &p->next) {
				struct nlattr *nest_ent;
				struct br_mdb_entry e;

				port = p->port;
				if (!port)
					continue;

				memset(&e, 0, sizeof(e));
				e.ifindex = port->dev->ifindex;
				e.vid = p->addr.vid;
				__mdb_entry_fill_flags(&e, p->flags);
				if (p->addr.proto == htons(ETH_P_IP))
					e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
				if (p->addr.proto == htons(ETH_P_IPV6))
					e.addr.u.ip6 = p->addr.u.ip6;
#endif
				e.addr.proto = p->addr.proto;
				nest_ent = nla_nest_start(skb,
							  MDBA_MDB_ENTRY_INFO);
				if (!nest_ent) {
					nla_nest_cancel(skb, nest2);
					err = -EMSGSIZE;
					goto out;
				}
				if (nla_put_nohdr(skb, sizeof(e), &e) ||
				    nla_put_u32(skb,
						MDBA_MDB_EATTR_TIMER,
						br_timer_value(&p->timer))) {
					nla_nest_cancel(skb, nest_ent);
					nla_nest_cancel(skb, nest2);
					err = -EMSGSIZE;
					goto out;
				}
				nla_nest_end(skb, nest_ent);
			}
			nla_nest_end(skb, nest2);
skip:
			idx++;
		}
	}

out:
	cb->args[1] = idx;
	nla_nest_end(skb, nest);
	return err;
}

static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	s_idx = cb->args[0];

	rcu_read_lock();

	/* In theory this could be wrapped to 0... */
	cb->seq = net->dev_base_seq + br_mdb_rehash_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct br_mdb_entry *entry, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_mdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(struct br_mdb_entry));
}
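
/* Context handed to the deferred switchdev add: when the hardware
 * operation completes without error, br_mdb_complete() looks the
 * group up again under the multicast lock and marks the matching
 * port group as offloaded.
 */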
struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
			    struct br_mdb_entry *entry, int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};
	struct net_device *port_dev;
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	port_dev = __dev_get_by_index(net, entry->ifindex);
	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = port_dev;
	if (port_dev && type == RTM_NEWMDB) {
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (complete_info) {
			complete_info->port = p;
			__mdb_entry_to_br_ip(entry, &complete_info->ip);
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(port_dev, &mdb.obj))
				kfree(complete_info);
		}
	} else if (port_dev && type == RTM_DELMDB) {
		switchdev_port_obj_del(port_dev, &mdb.obj);
	}

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
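
/* Translate a bridge-internal br_ip group into the UAPI br_mdb_entry
 * layout and emit the RTM_NEWMDB/RTM_DELMDB notification, which also
 * drives the switchdev offload in __br_mdb_notify().
 */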
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
		   struct br_ip *group, int type, u8 flags)
{
	struct br_mdb_entry entry;

	memset(&entry, 0, sizeof(entry));
	entry.ifindex = port->dev->ifindex;
	entry.addr.proto = group->proto;
	entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	entry.addr.u.ip6 = group->u.ip6;
#endif
	entry.vid = group->vid;
	__mdb_entry_fill_flags(&entry, flags);
	__br_mdb_notify(dev, port, &entry, type);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}

void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
	if (entry->ifindex == 0)
		return false;

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4))
			return false;
		if (ipv4_is_local_multicast(entry->addr.u.ip4))
			return false;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
			return false;
#endif
	} else
		return false;
	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
		return false;
	if (entry->vid >= VLAN_VID_MASK)
		return false;

	return true;
}
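
/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request: the ancillary
 * header must name a bridge device and MDBA_SET_ENTRY must carry a
 * well-formed br_mdb_entry (checked by is_valid_mdb_entry()).
 */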
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL,
			  NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY] ||
	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
		return -EINVAL;
	}

	*pentry = entry;
	return 0;
}

static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, unsigned char state)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_htable *mdb;
	unsigned long now = jiffies;
	int err;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp) {
		mp = br_multicast_new_group(br, port, group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, state, NULL);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);
	if (state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

	return 0;
}
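
/* Add the parsed entry under the bridge multicast lock.  The target
 * port must belong to this bridge and must not be disabled; per-VLAN
 * fan-out and notification are handled by the br_mdb_add() caller.
 */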
static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct br_mdb_entry *entry)
{
	struct br_ip ip;
	struct net_device *dev;
	struct net_bridge_port *p;
	int ret;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	dev = __dev_get_by_index(net, entry->ifindex);
	if (!dev)
		return -ENODEV;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, &ip, entry->state);
	spin_unlock_bh(&br->multicast_lock);
	return ret;
}

static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, entry);
			if (err)
				break;
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
		}
	} else {
		err = __br_mdb_add(net, br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
	}

	return err;
}

static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &ip);
	if (!mp)
		goto unlock;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		__mdb_entry_fill_flags(entry, p->flags);
		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);
		err = 0;

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry);
			if (!err)
				__br_mdb_notify(dev, p, entry, RTM_DELMDB);
		}
	} else {
		err = __br_mdb_del(br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_DELMDB);
	}

	return err;
}

void br_mdb_init(void)
{
	rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}