// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest, *port_nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (!p)
			continue;
		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				br_timer_value(&p->multicast_router_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router)) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
}

static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	if (ip->proto == htons(ETH_P_IP))
		ip->u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip->u.ip6 = entry->addr.u.ip6;
#endif
}

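/* Dump all MDB entries of one bridge into an MDBA_MDB nest. Iteration
 * resumes from cb->args[1], so a dump cut short by a full skb can be
 * continued in the next netlink callback.
 */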
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;
		struct net_bridge_port *port;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			struct nlattr *nest_ent;
			struct br_mdb_entry e;

			port = p->port;
			if (!port)
				continue;

			memset(&e, 0, sizeof(e));
			e.ifindex = port->dev->ifindex;
			e.vid = p->addr.vid;
			__mdb_entry_fill_flags(&e, p->flags);
			if (p->addr.proto == htons(ETH_P_IP))
				e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
			if (p->addr.proto == htons(ETH_P_IPV6))
				e.addr.u.ip6 = p->addr.u.ip6;
#endif
			e.addr.proto = p->addr.proto;
			nest_ent = nla_nest_start_noflag(skb,
							 MDBA_MDB_ENTRY_INFO);
			if (!nest_ent) {
				nla_nest_cancel(skb, nest2);
				err = -EMSGSIZE;
				goto out;
			}
			if (nla_put_nohdr(skb, sizeof(e), &e) ||
			    nla_put_u32(skb,
					MDBA_MDB_EATTR_TIMER,
					br_timer_value(&p->timer))) {
				nla_nest_cancel(skb, nest_ent);
				nla_nest_cancel(skb, nest2);
				err = -EMSGSIZE;
				goto out;
			}
			nla_nest_end(skb, nest_ent);
		}
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	nla_nest_end(skb, nest);
	return err;
}

static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}

static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct br_mdb_entry *entry, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_mdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(struct br_mdb_entry));
}

struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

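/* Completion callback for deferred switchdev PORT_MDB additions: once the
 * driver has processed the object, mark the matching port group as
 * offloaded under the multicast lock, then free the completion info.
 */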
static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct br_mdb_entry *entry, int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};

	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = dev;
	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_mdb_switchdev_host(struct net_device *dev,
				  struct br_mdb_entry *entry, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_mdb_switchdev_host_port(dev, lower_dev, entry, type);
}

static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
			    struct br_mdb_entry *entry, int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};
	struct net_device *port_dev;
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	port_dev = __dev_get_by_index(net, entry->ifindex);
	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = port_dev;
	if (p && port_dev && type == RTM_NEWMDB) {
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (complete_info) {
			complete_info->port = p;
			__mdb_entry_to_br_ip(entry, &complete_info->ip);
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(port_dev, &mdb.obj, NULL))
				kfree(complete_info);
		}
	} else if (p && port_dev && type == RTM_DELMDB) {
		switchdev_port_obj_del(port_dev, &mdb.obj);
	}

	if (!p)
		br_mdb_switchdev_host(dev, entry, type);

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

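/* Build a br_mdb_entry from the bridge-internal br_ip representation and
 * notify both switchdev drivers and RTNLGRP_MDB listeners. A NULL port
 * denotes a host-joined group on the bridge device itself.
 */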
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
		   struct br_ip *group, int type, u8 flags)
{
	struct br_mdb_entry entry;

	memset(&entry, 0, sizeof(entry));
	if (port)
		entry.ifindex = port->dev->ifindex;
	else
		entry.ifindex = dev->ifindex;
	entry.addr.proto = group->proto;
	entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	entry.addr.u.ip6 = group->u.ip6;
#endif
	entry.vid = group->vid;
	__mdb_entry_fill_flags(&entry, flags);
	__br_mdb_notify(dev, port, &entry, type);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}

void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
	if (entry->ifindex == 0)
		return false;

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4))
			return false;
		if (ipv4_is_local_multicast(entry->addr.u.ip4))
			return false;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
			return false;
#endif
	} else
		return false;
	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
		return false;
	if (entry->vid >= VLAN_VID_MASK)
		return false;

	return true;
}

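/* Common RTM_NEWMDB/RTM_DELMDB request parsing: resolve the bridge device
 * from the ancillary header and validate the MDBA_SET_ENTRY attribute.
 * On success, *pdev and *pentry point into the request message.
 */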
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, NULL, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY] ||
	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
		return -EINVAL;
	}

	*pentry = entry;
	return 0;
}

static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, unsigned char state)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (!mp) {
		mp = br_multicast_new_group(br, group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, state, NULL);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);
	if (state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

	return 0;
}

static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct br_mdb_entry *entry)
{
	struct br_ip ip;
	struct net_device *dev;
	struct net_bridge_port *p;
	int ret;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	dev = __dev_get_by_index(net, entry->ifindex);
	if (!dev)
		return -ENODEV;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, &ip, entry->state);
	spin_unlock_bh(&br->multicast_lock);
	return ret;
}

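/* RTM_NEWMDB handler: look up the target port and install the entry,
 * notifying switchdev and netlink listeners on success.
 */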
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, entry);
			if (err)
				break;
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
		}
	} else {
		err = __br_mdb_add(net, br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
	}

	return err;
}

static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		__mdb_entry_fill_flags(entry, p->flags);
		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		kfree_rcu(p, rcu);
		err = 0;

		if (!mp->ports && !mp->host_joined &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry);
			if (!err)
				__br_mdb_notify(dev, p, entry, RTM_DELMDB);
		}
	} else {
		err = __br_mdb_del(br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_DELMDB);
	}

	return err;
}

void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}