// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest, *port_nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (!p)
			continue;
		port_nest = nla_nest_start(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				br_timer_value(&p->multicast_router_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router)) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
}

static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	if (ip->proto == htons(ETH_P_IP))
		ip->u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip->u.ip6 = entry->addr.u.ip6;
#endif
}
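/* Dump the bridge's MDB: one MDBA_MDB_ENTRY nest per group, containing
 * one MDBA_MDB_ENTRY_INFO attribute per port group. cb->args[1] records
 * how far a previous (partial) dump got so iteration can resume there.
 */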
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;
		struct net_bridge_port *port;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			struct nlattr *nest_ent;
			struct br_mdb_entry e;

			port = p->port;
			if (!port)
				continue;

			memset(&e, 0, sizeof(e));
			e.ifindex = port->dev->ifindex;
			e.vid = p->addr.vid;
			__mdb_entry_fill_flags(&e, p->flags);
			if (p->addr.proto == htons(ETH_P_IP))
				e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
			if (p->addr.proto == htons(ETH_P_IPV6))
				e.addr.u.ip6 = p->addr.u.ip6;
#endif
			e.addr.proto = p->addr.proto;
			nest_ent = nla_nest_start(skb, MDBA_MDB_ENTRY_INFO);
			if (!nest_ent) {
				nla_nest_cancel(skb, nest2);
				err = -EMSGSIZE;
				goto out;
			}
			if (nla_put_nohdr(skb, sizeof(e), &e) ||
			    nla_put_u32(skb, MDBA_MDB_EATTR_TIMER,
					br_timer_value(&p->timer))) {
				nla_nest_cancel(skb, nest_ent);
				nla_nest_cancel(skb, nest2);
				err = -EMSGSIZE;
				goto out;
			}
			nla_nest_end(skb, nest_ent);
		}
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	nla_nest_end(skb, nest);
	return err;
}

static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}

static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct br_mdb_entry *entry, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_mdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(struct br_mdb_entry));
}
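/* Completion context for the deferred switchdev add below: once the
 * hardware has acknowledged the entry, br_mdb_complete() marks the
 * matching port group as offloaded so dumps report MDB_FLAGS_OFFLOAD.
 */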
struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct br_mdb_entry *entry, int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};

	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = dev;
	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_mdb_switchdev_host(struct net_device *dev,
				  struct br_mdb_entry *entry, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_mdb_switchdev_host_port(dev, lower_dev, entry, type);
}

static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
			    struct br_mdb_entry *entry, int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};
	struct net_device *port_dev;
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	port_dev = __dev_get_by_index(net, entry->ifindex);
	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = port_dev;
	if (p && port_dev && type == RTM_NEWMDB) {
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (complete_info) {
			complete_info->port = p;
			__mdb_entry_to_br_ip(entry, &complete_info->ip);
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(port_dev, &mdb.obj, NULL))
				kfree(complete_info);
		}
	} else if (p && port_dev && type == RTM_DELMDB) {
		switchdev_port_obj_del(port_dev, &mdb.obj);
	}

	if (!p)
		br_mdb_switchdev_host(dev, entry, type);

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
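/* Convert a bridge-internal br_ip group to the UAPI br_mdb_entry layout
 * and send an RTM_NEWMDB/RTM_DELMDB notification to RTNLGRP_MDB
 * listeners; __br_mdb_notify() above also programs switchdev hardware.
 */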
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
		   struct br_ip *group, int type, u8 flags)
{
	struct br_mdb_entry entry;

	memset(&entry, 0, sizeof(entry));
	if (port)
		entry.ifindex = port->dev->ifindex;
	else
		entry.ifindex = dev->ifindex;
	entry.addr.proto = group->proto;
	entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	entry.addr.u.ip6 = group->u.ip6;
#endif
	entry.vid = group->vid;
	__mdb_entry_fill_flags(&entry, flags);
	__br_mdb_notify(dev, port, &entry, type);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}

void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
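/* Sanity-check a userspace-supplied entry: it must name a port, carry a
 * routable multicast group (no IPv4 link-local groups, no IPv6 all-nodes
 * address), use a permanent or temporary state, and a valid VLAN id.
 */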
static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
	if (entry->ifindex == 0)
		return false;

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4))
			return false;
		if (ipv4_is_local_multicast(entry->addr.u.ip4))
			return false;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
			return false;
#endif
	} else
		return false;
	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
		return false;
	if (entry->vid >= VLAN_VID_MASK)
		return false;

	return true;
}

static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL,
			  NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY] ||
	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
		return -EINVAL;
	}

	*pentry = entry;
	return 0;
}
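/* Link a new port group for @group into the MDB entry's port list. The
 * list is kept ordered by port pointer; temporary entries get the
 * membership-interval timer armed so they expire like learned ones.
 */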
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, unsigned char state)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (!mp) {
		mp = br_multicast_new_group(br, group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, state, NULL);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);
	if (state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

	return 0;
}

static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct br_mdb_entry *entry)
{
	struct br_ip ip;
	struct net_device *dev;
	struct net_bridge_port *p;
	int ret;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	dev = __dev_get_by_index(net, entry->ifindex);
	if (!dev)
		return -ENODEV;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, &ip, entry->state);
	spin_unlock_bh(&br->multicast_lock);
	return ret;
}

static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, entry);
			if (err)
				break;
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
		}
	} else {
		err = __br_mdb_add(net, br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
	}

	return err;
}

static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		__mdb_entry_fill_flags(entry, p->flags);
		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		kfree_rcu(p, rcu);
		err = 0;

		if (!mp->ports && !mp->host_joined &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry);
			if (!err)
				__br_mdb_notify(dev, p, entry, RTM_DELMDB);
		}
	} else {
		err = __br_mdb_del(br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_DELMDB);
	}

	return err;
}

void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}
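/* For illustration only: the iproute2 commands below exercise the
 * handlers registered in br_mdb_init() (device names br0/eth0 are
 * hypothetical placeholders, not part of this file):
 *
 *   bridge mdb add dev br0 port eth0 grp 239.1.1.1 permanent   -> RTM_NEWMDB
 *   bridge mdb del dev br0 port eth0 grp 239.1.1.1             -> RTM_DELMDB
 *   bridge mdb show                                            -> RTM_GETMDB
 */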