#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest, *port_nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (!p)
			continue;
		port_nest = nla_nest_start(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				br_timer_value(&p->multicast_router_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router)) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
}

static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	if (ip->proto == htons(ETH_P_IP))
		ip->u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip->u.ip6 = entry->addr.u.ip6;
#endif
}

static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_htable *mdb;
	struct nlattr *nest, *nest2;
	int i, err = 0;
	int idx = 0, s_idx = cb->args[1];

	if (br->multicast_disabled)
		return 0;

	mdb = rcu_dereference(br->mdb);
	if (!mdb)
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	for (i = 0; i < mdb->max; i++) {
		struct net_bridge_mdb_entry *mp;
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;
		struct net_bridge_port *port;

		hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
			if (idx < s_idx)
				goto skip;

			nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
			if (nest2 == NULL) {
				err = -EMSGSIZE;
				goto out;
			}

			for (pp = &mp->ports;
			     (p = rcu_dereference(*pp)) != NULL;
			     pp = &p->next) {
				struct nlattr *nest_ent;
				struct br_mdb_entry e;

				port = p->port;
				if (!port)
					continue;

				memset(&e, 0, sizeof(e));
				e.ifindex = port->dev->ifindex;
				e.vid = p->addr.vid;
				__mdb_entry_fill_flags(&e, p->flags);
				if (p->addr.proto == htons(ETH_P_IP))
					e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
				if (p->addr.proto == htons(ETH_P_IPV6))
					e.addr.u.ip6 = p->addr.u.ip6;
#endif
				e.addr.proto = p->addr.proto;
				nest_ent = nla_nest_start(skb,
							  MDBA_MDB_ENTRY_INFO);
				if (!nest_ent) {
					nla_nest_cancel(skb, nest2);
					err = -EMSGSIZE;
					goto out;
				}
				if (nla_put_nohdr(skb, sizeof(e), &e) ||
				    nla_put_u32(skb,
						MDBA_MDB_EATTR_TIMER,
						br_timer_value(&p->timer))) {
					nla_nest_cancel(skb, nest_ent);
					nla_nest_cancel(skb, nest2);
					err = -EMSGSIZE;
					goto out;
				}
				nla_nest_end(skb, nest_ent);
			}
			nla_nest_end(skb, nest2);
skip:
			idx++;
		}
	}

out:
	cb->args[1] = idx;
	nla_nest_end(skb, nest);
	return err;
}
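
/* Rough sketch of the attribute layout built by the two fill helpers
 * above and emitted per bridge by br_mdb_dump() below (nesting only,
 * not the exact wire format):
 *
 *   RTM_GETMDB reply, one NLM_F_MULTI message per bridge:
 *     struct br_port_msg          (bridge ifindex)
 *     MDBA_MDB (nested)
 *       MDBA_MDB_ENTRY (nested, one per group entry)
 *         MDBA_MDB_ENTRY_INFO     (struct br_mdb_entry, followed by
 *                                  MDBA_MDB_EATTR_TIMER)
 *     MDBA_ROUTER (nested)
 *       MDBA_ROUTER_PORT          (port ifindex, followed by
 *                                  MDBA_ROUTER_PATTR_TIMER and
 *                                  MDBA_ROUTER_PATTR_TYPE)
 */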

static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	s_idx = cb->args[0];

	rcu_read_lock();

	/* In theory this could be wrapped to 0... */
	cb->seq = net->dev_base_seq + br_mdb_rehash_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct br_mdb_entry *entry, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_mdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(struct br_mdb_entry));
}

struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}
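
/* Offload flow, as a sketch: for RTM_NEWMDB, __br_mdb_notify() below
 * queues a deferred SWITCHDEV_OBJ_ID_PORT_MDB add against the port
 * device; once the driver completes it, br_mdb_complete() above runs
 * and, on success, sets MDB_PG_FLAGS_OFFLOAD on the matching port
 * group so later dumps report MDB_FLAGS_OFFLOAD for the entry.
 */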

static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
			    struct br_mdb_entry *entry, int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};
	struct net_device *port_dev;
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	port_dev = __dev_get_by_index(net, entry->ifindex);
	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = port_dev;
	if (port_dev && type == RTM_NEWMDB) {
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (complete_info) {
			complete_info->port = p;
			__mdb_entry_to_br_ip(entry, &complete_info->ip);
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			switchdev_port_obj_add(port_dev, &mdb.obj);
		}
	} else if (port_dev && type == RTM_DELMDB) {
		switchdev_port_obj_del(port_dev, &mdb.obj);
	}

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
		   struct br_ip *group, int type, u8 flags)
{
	struct br_mdb_entry entry;

	memset(&entry, 0, sizeof(entry));
	entry.ifindex = port->dev->ifindex;
	entry.addr.proto = group->proto;
	entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	entry.addr.u.ip6 = group->u.ip6;
#endif
	entry.vid = group->vid;
	__mdb_entry_fill_flags(&entry, flags);
	__br_mdb_notify(dev, port, &entry, type);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}

void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
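
/* Sanity checks applied to userspace-supplied entries below, with a
 * few illustrative addresses: 239.1.1.1 passes, while non-multicast
 * IPv4 addresses, 224.0.0.x (link-local IPv4 multicast) and ff02::1
 * (IPv6 all-nodes) are rejected. Entries must also carry a nonzero
 * port ifindex, a vid below VLAN_VID_MASK, and a state of either
 * MDB_PERMANENT or MDB_TEMPORARY.
 */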

static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
	if (entry->ifindex == 0)
		return false;

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4))
			return false;
		if (ipv4_is_local_multicast(entry->addr.u.ip4))
			return false;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
			return false;
#endif
	} else
		return false;
	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
		return false;
	if (entry->vid >= VLAN_VID_MASK)
		return false;

	return true;
}

static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY] ||
	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
		return -EINVAL;
	}

	*pentry = entry;
	return 0;
}

static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, unsigned char state)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_htable *mdb;
	unsigned long now = jiffies;
	int err;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp) {
		mp = br_multicast_new_group(br, port, group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, state);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);
	if (state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

	return 0;
}
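
/* Locking sketch: br_mdb_add_group() above must be called with
 * br->multicast_lock held, which the callers below take; RTNL is
 * already held by the rtnetlink handlers, making the
 * __dev_get_by_index()/br_port_get_rtnl() lookups safe. MDB_TEMPORARY
 * entries are armed with the membership timer and so age out;
 * MDB_PERMANENT entries stay until explicitly deleted.
 */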

static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct br_mdb_entry *entry)
{
	struct br_ip ip;
	struct net_device *dev;
	struct net_bridge_port *p;
	int ret;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	dev = __dev_get_by_index(net, entry->ifindex);
	if (!dev)
		return -ENODEV;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, &ip, entry->state);
	spin_unlock_bh(&br->multicast_lock);
	return ret;
}

static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, entry);
			if (err)
				break;
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
		}
	} else {
		err = __br_mdb_add(net, br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
	}

	return err;
}

static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &ip);
	if (!mp)
		goto unlock;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		__mdb_entry_fill_flags(entry, p->flags);
		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);
		err = 0;

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry);
			if (!err)
				__br_mdb_notify(dev, p, entry, RTM_DELMDB);
		}
	} else {
		err = __br_mdb_del(br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_DELMDB);
	}

	return err;
}
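
/* The handlers registered below map onto the iproute2 "bridge mdb"
 * subcommands; a usage sketch (option spelling may vary by iproute2
 * version):
 *
 *   bridge mdb add dev br0 port eth0 grp 239.1.1.1 permanent vid 1
 *   bridge mdb del dev br0 port eth0 grp 239.1.1.1 vid 1
 *   bridge mdb show                       (RTM_GETMDB dump)
 *
 * add/del send RTM_NEWMDB/RTM_DELMDB requests carrying a
 * struct br_mdb_entry in the MDBA_SET_ENTRY attribute.
 */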

void br_mdb_init(void)
{
	rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, NULL);
	rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL);
	rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}