// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest, *port_nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (!p)
			continue;
		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				br_timer_value(&p->multicast_router_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router)) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
}

static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	if (ip->proto == htons(ETH_P_IP))
		ip->u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip->u.ip6 = entry->addr.u.ip6;
#endif
}
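
/* A sketch of the attribute layout of an RTM_GETMDB dump reply, as built by
 * br_mdb_fill_info() and br_rports_fill_info() below (reading of this code,
 * not authoritative ABI documentation); one message per bridge device:
 *
 *	[ struct br_port_msg ]
 *	[ MDBA_MDB ]				(nest)
 *	    [ MDBA_MDB_ENTRY ]			(nest, one per mdb group)
 *		[ MDBA_MDB_ENTRY_INFO ]		(nest, one per port group)
 *		    struct br_mdb_entry	(put with nla_put_nohdr())
 *		    [ MDBA_MDB_EATTR_TIMER ]	(u32, group membership timer)
 *	[ MDBA_ROUTER ]				(nest)
 *	    [ MDBA_ROUTER_PORT ]		(nest, one per router port)
 *		u32 ifindex			(put with nla_put_nohdr())
 *		[ MDBA_ROUTER_PATTR_TIMER ]	(u32)
 *		[ MDBA_ROUTER_PATTR_TYPE ]	(u8)
 */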
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;
		struct net_bridge_port *port;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			struct nlattr *nest_ent;
			struct br_mdb_entry e;

			port = p->port;
			if (!port)
				continue;

			memset(&e, 0, sizeof(e));
			e.ifindex = port->dev->ifindex;
			e.vid = p->addr.vid;
			__mdb_entry_fill_flags(&e, p->flags);
			if (p->addr.proto == htons(ETH_P_IP))
				e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
			if (p->addr.proto == htons(ETH_P_IPV6))
				e.addr.u.ip6 = p->addr.u.ip6;
#endif
			e.addr.proto = p->addr.proto;
			nest_ent = nla_nest_start_noflag(skb,
							 MDBA_MDB_ENTRY_INFO);
			if (!nest_ent) {
				nla_nest_cancel(skb, nest2);
				err = -EMSGSIZE;
				goto out;
			}
			if (nla_put_nohdr(skb, sizeof(e), &e) ||
			    nla_put_u32(skb,
					MDBA_MDB_EATTR_TIMER,
					br_timer_value(&p->timer))) {
				nla_nest_cancel(skb, nest_ent);
				nla_nest_cancel(skb, nest2);
				err = -EMSGSIZE;
				goto out;
			}
			nla_nest_end(skb, nest_ent);
		}
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	nla_nest_end(skb, nest);
	return err;
}

static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}

static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}
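
/* Everything below is the RTM_NEWMDB/RTM_DELMDB side: building and sending
 * notifications on RTNLGRP_MDB, offloading entries via switchdev, and
 * handling userspace add/del requests. For reference, the usual userspace
 * counterpart is the iproute2 'bridge' tool, e.g. to watch notifications:
 *
 *	$ bridge monitor mdb
 *
 * and to exercise the RTM_GETMDB dump path above:
 *
 *	$ bridge -d -s mdb show
 */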
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct br_mdb_entry *entry, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_mdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(struct br_mdb_entry));
}

struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct br_mdb_entry *entry, int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};

	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = dev;
	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_mdb_switchdev_host(struct net_device *dev,
				  struct br_mdb_entry *entry, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_mdb_switchdev_host_port(dev, lower_dev, entry, type);
}
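
/* __br_mdb_notify() below does two things for each add/del: it programs the
 * entry into switchdev-capable hardware (deferred; br_mdb_complete() later
 * sets MDB_PG_FLAGS_OFFLOAD on the port group if the hardware accepted it),
 * and it broadcasts the change on RTNLGRP_MDB. Host-joined groups (no bridge
 * port, p == NULL) are instead propagated to all lower devices via
 * br_mdb_switchdev_host().
 */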
static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
			    struct br_mdb_entry *entry, int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};
	struct net_device *port_dev;
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	port_dev = __dev_get_by_index(net, entry->ifindex);
	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = port_dev;
	if (p && port_dev && type == RTM_NEWMDB) {
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (complete_info) {
			complete_info->port = p;
			__mdb_entry_to_br_ip(entry, &complete_info->ip);
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(port_dev, &mdb.obj, NULL))
				kfree(complete_info);
		}
	} else if (p && port_dev && type == RTM_DELMDB) {
		switchdev_port_obj_del(port_dev, &mdb.obj);
	}

	if (!p)
		br_mdb_switchdev_host(dev, entry, type);

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
		   struct br_ip *group, int type, u8 flags)
{
	struct br_mdb_entry entry;

	memset(&entry, 0, sizeof(entry));
	if (port)
		entry.ifindex = port->dev->ifindex;
	else
		entry.ifindex = dev->ifindex;
	entry.addr.proto = group->proto;
	entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	entry.addr.u.ip6 = group->u.ip6;
#endif
	entry.vid = group->vid;
	__mdb_entry_fill_flags(&entry, flags);
	__br_mdb_notify(dev, port, &entry, type);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}

void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
	if (entry->ifindex == 0)
		return false;

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4))
			return false;
		if (ipv4_is_local_multicast(entry->addr.u.ip4))
			return false;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
			return false;
#endif
	} else
		return false;
	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
		return false;
	if (entry->vid >= VLAN_VID_MASK)
		return false;

	return true;
}
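
/* Request format for RTM_NEWMDB/RTM_DELMDB, as parsed by br_mdb_parse()
 * below: a struct br_port_msg header whose ifindex names the bridge,
 * followed by an MDBA_SET_ENTRY attribute carrying a struct br_mdb_entry
 * whose ifindex names the member port. The usual userspace producers are
 * the iproute2 commands, e.g. (device names are examples only):
 *
 *	$ bridge mdb add dev br0 port eth1 grp 239.1.1.1 permanent vid 10
 *	$ bridge mdb del dev br0 port eth1 grp 239.1.1.1 vid 10
 */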
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, NULL, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY] ||
	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
		return -EINVAL;
	}

	*pentry = entry;
	return 0;
}
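
/* Insert a new port group for @group on @port. The port list of an mdb
 * entry is kept ordered by port pointer value, and a duplicate (same port)
 * yields -EEXIST. MDB_TEMPORARY entries are armed with the membership
 * interval timer so they age out like learned entries; MDB_PERMANENT ones
 * are not. Caller must hold br->multicast_lock.
 */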
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, unsigned char state)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (!mp) {
		mp = br_multicast_new_group(br, group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, state, NULL);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);
	if (state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

	return 0;
}

static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct br_mdb_entry *entry)
{
	struct br_ip ip;
	struct net_device *dev;
	struct net_bridge_port *p;
	int ret;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	dev = __dev_get_by_index(net, entry->ifindex);
	if (!dev)
		return -ENODEV;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, &ip, entry->state);
	spin_unlock_bh(&br->multicast_lock);
	return ret;
}

static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, entry);
			if (err)
				break;
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
		}
	} else {
		err = __br_mdb_add(net, br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
	}

	return err;
}

static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		__mdb_entry_fill_flags(entry, p->flags);
		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		kfree_rcu(p, rcu);
		err = 0;

		if (!mp->ports && !mp->host_joined &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry);
			if (!err)
				__br_mdb_notify(dev, p, entry, RTM_DELMDB);
		}
	} else {
		err = __br_mdb_del(br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_DELMDB);
	}

	return err;
}

void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}