/*
 *	Bridge netlink control interface
 *
 *	Authors:
 *	Stephen Hemminger		<shemminger@osdl.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>

#include "br_private.h"
#include "br_private_stp.h"
#include "br_private_tunnel.h"

static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				u32 filter_mask)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int num_vlans = 0;

	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	pvid = br_get_pvid(vg);
	/* Count number of vlan infos */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		/* only a context, bridge vlan not activated */
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			if ((vid_range_end - vid_range_start) > 0)
				num_vlans += 2;
			else
				num_vlans += 1;
		}
initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		if ((vid_range_end - vid_range_start) > 0)
			num_vlans += 2;
		else
			num_vlans += 1;
	}

	return num_vlans;
}

static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				 u32 filter_mask)
{
	int num_vlans;

	if (!vg)
		return 0;

	if (filter_mask & RTEXT_FILTER_BRVLAN)
		return vg->num_vlans;

	rcu_read_lock();
	num_vlans = __get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	return num_vlans;
}

static size_t br_get_link_af_size_filtered(const struct net_device *dev,
					   u32 filter_mask)
{
	struct net_bridge_vlan_group *vg = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br;
	int num_vlan_infos;
	size_t vinfo_sz = 0;

	rcu_read_lock();
	if (br_port_exists(dev)) {
		p = br_port_get_rcu(dev);
		vg = nbp_vlan_group_rcu(p);
	} else if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	if (p && (p->flags & BR_VLAN_TUNNEL))
		vinfo_sz += br_get_vlan_tunnel_info_size(vg);

	/* Each VLAN is returned in bridge_vlan_info along with flags */
	vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));

	return vinfo_sz;
}

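/* worst-case attribute space needed for one port's IFLA_PROTINFO nest */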
static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_BCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
		+ nla_total_size(1)	/* IFLA_BRPORT_NEIGH_SUPPRESS */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
#endif
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_GROUP_FWD_MASK */
		+ 0;
}

static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
		+ nla_total_size(4) /* IFLA_MASTER */
		+ nla_total_size(4) /* IFLA_MTU */
		+ nla_total_size(4) /* IFLA_LINK */
		+ nla_total_size(1) /* IFLA_OPERSTATE */
		+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
		+ nla_total_size(br_get_link_af_size_filtered(dev,
							      filter_mask)); /* IFLA_AF_SPEC */
}

static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
	u64 timerval;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT,
		       !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
		       !!(p->flags & BR_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
		       !!(p->flags & BR_MCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
		       !!(p->flags & BR_BCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
		       !!(p->flags & BR_PROXYARP_WIFI)) ||
	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_root) ||
	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_bridge) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
		       p->topology_change_ack) ||
	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
							BR_VLAN_TUNNEL)) ||
	    nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
	    nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
		       !!(p->flags & BR_NEIGH_SUPPRESS)))
		return -EMSGSIZE;

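	/* STP port timers are reported in clock_t units; the PAD attribute
	 * keeps the u64 payloads naturally aligned
	 */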
	timerval = br_timer_value(&p->message_age_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->forward_delay_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->hold_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
		       p->multicast_router))
		return -EMSGSIZE;
#endif

	return 0;
}

static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
				    u16 vid_end, u16 flags)
{
	struct bridge_vlan_info vinfo;

	if ((vid_end - vid_start) > 0) {
		/* add range to skb */
		vinfo.vid = vid_start;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;

		vinfo.vid = vid_end;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	} else {
		vinfo.vid = vid_start;
		vinfo.flags = flags;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
					 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int err = 0;

	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
	 * and mark vlan info with begin and end flags
	 * if vlaninfo represents a range
	 */
	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
						       vid_range_end,
						       vid_range_flags);
			if (err)
				return err;
		}

initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		/* Call it once more to send any left over vlans */
		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
					       vid_range_end,
					       vid_range_flags);
		if (err)
			return err;
	}

	return 0;
}

static int br_fill_ifvlaninfo(struct sk_buff *skb,
			      struct net_bridge_vlan_group *vg)
{
	struct bridge_vlan_info vinfo;
	struct net_bridge_vlan *v;
	u16 pvid;

	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;

		vinfo.vid = v->vid;
		vinfo.flags = 0;
		if (v->vid == pvid)
			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev)
{
	struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;

	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		 event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest
			= nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);

		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	/* Check if the VID information is requested */
	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		struct net_bridge_vlan_group *vg;
		struct nlattr *af;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;
		}
		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);

		if (port && (port->flags & BR_VLAN_TUNNEL))
			err = br_fill_vlan_tunnel_info(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
		nla_nest_end(skb, af);
	}

done:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * Notify listeners of a change in port information
 */
void br_ifinfo_notify(int event, struct net_bridge_port *port)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;

	if (!port)
		return;

	net = dev_net(port->dev);
	br_debug(port->br, "port %u(%s) event %d\n",
		 (unsigned int)port->port_no, port->dev->name, event);

	skb = nlmsg_new(br_nlmsg_size(port->dev, filter), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, port->dev);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}


/*
 * Dump information about all ports, in response to GETLINK
 */
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
	       struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct net_bridge_port *port = br_port_get_rtnl(dev);

	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
			      filter_mask, dev);
}

static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
			int cmd, struct bridge_vlan_info *vinfo, bool *changed)
{
	bool curr_change;
	int err = 0;

	switch (cmd) {
	case RTM_SETLINK:
		if (p) {
			/* if the MASTER flag is set this will act on the global
			 * per-VLAN entry as well
			 */
			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
					   &curr_change);
		} else {
			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, vinfo->vid, vinfo->flags,
					  &curr_change);
		}
		if (curr_change)
			*changed = true;
		break;

	case RTM_DELLINK:
		if (p) {
			if (!nbp_vlan_delete(p, vinfo->vid))
				*changed = true;

			if ((vinfo->flags & BRIDGE_VLAN_INFO_MASTER) &&
			    !br_vlan_delete(p->br, vinfo->vid))
				*changed = true;
		} else if (!br_vlan_delete(br, vinfo->vid)) {
			*changed = true;
		}
		break;
	}

	return err;
}

static int br_process_vlan_info(struct net_bridge *br,
				struct net_bridge_port *p, int cmd,
				struct bridge_vlan_info *vinfo_curr,
				struct bridge_vlan_info **vinfo_last,
				bool *changed)
{
	if (!vinfo_curr->vid || vinfo_curr->vid >= VLAN_VID_MASK)
		return -EINVAL;

	if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
		/* check if we are already processing a range */
		if (*vinfo_last)
			return -EINVAL;
		*vinfo_last = vinfo_curr;
		/* don't allow range of pvids */
		if ((*vinfo_last)->flags & BRIDGE_VLAN_INFO_PVID)
			return -EINVAL;
		return 0;
	}

	if (*vinfo_last) {
		struct bridge_vlan_info tmp_vinfo;
		int v, err;

		if (!(vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END))
			return -EINVAL;

		if (vinfo_curr->vid <= (*vinfo_last)->vid)
			return -EINVAL;

		memcpy(&tmp_vinfo, *vinfo_last,
		       sizeof(struct bridge_vlan_info));
		for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
			tmp_vinfo.vid = v;
			err = br_vlan_info(br, p, cmd, &tmp_vinfo, changed);
			if (err)
				break;
		}
		*vinfo_last = NULL;

		return err;
	}

	return br_vlan_info(br, p, cmd, vinfo_curr, changed);
}

static int br_afspec(struct net_bridge *br,
		     struct net_bridge_port *p,
		     struct nlattr *af_spec,
		     int cmd, bool *changed)
{
	struct bridge_vlan_info *vinfo_curr = NULL;
	struct bridge_vlan_info *vinfo_last = NULL;
	struct nlattr *attr;
	struct vtunnel_info tinfo_last = {};
	struct vtunnel_info tinfo_curr = {};
	int err = 0, rem;

	nla_for_each_nested(attr, af_spec, rem) {
		err = 0;
		switch (nla_type(attr)) {
		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
			if (!p || !(p->flags & BR_VLAN_TUNNEL))
				return -EINVAL;
			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
			if (err)
				return err;
			err = br_process_vlan_tunnel_info(br, p, cmd,
							  &tinfo_curr,
							  &tinfo_last,
							  changed);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_VLAN_INFO:
			if (nla_len(attr) != sizeof(struct bridge_vlan_info))
				return -EINVAL;
			vinfo_curr = nla_data(attr);
			err = br_process_vlan_info(br, p, cmd, vinfo_curr,
						   &vinfo_last, changed);
			if (err)
				return err;
			break;
		}
	}

	return err;
}

static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]	= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]	= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE] = { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
};

/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
	if (state > BR_STATE_BLOCKING)
		return -EINVAL;

	/* if kernel STP is running, don't allow changes */
	if (p->br->stp_enabled == BR_KERNEL_STP)
		return -EBUSY;

	/* if device is not up, change is not allowed
	 * if link is not present, only allowable state is disabled
	 */
	if (!netif_running(p->dev) ||
	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
		return -ENETDOWN;

	br_set_state(p, state);
	br_port_state_selection(p->br);
	return 0;
}

/* Set/clear port flags based on attribute */
static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
			    int attrtype, unsigned long mask)
{
	unsigned long flags;
	int err;

	if (!tb[attrtype])
		return 0;

	if (nla_get_u8(tb[attrtype]))
		flags = p->flags | mask;
	else
		flags = p->flags & ~mask;

	err = br_switchdev_set_port_flag(p, flags, mask);
	if (err)
		return err;

	p->flags = flags;
	return 0;
}

/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
{
	unsigned long old_flags = p->flags;
	bool br_vlan_tunnel_old = false;
	int err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
	if (err)
		return err;

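	/* learning, flooding and proxy-ARP behaviour flags */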
	err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
	if (err)
		return err;

	br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
	err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
	if (err)
		return err;

	if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
		nbp_vlan_tunnel_info_flush(p);

	if (tb[IFLA_BRPORT_COST]) {
		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_PRIORITY]) {
		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_STATE]) {
		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_FLUSH])
		br_fdb_delete_by_port(p->br, p, 0, 0);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);

		err = br_multicast_set_port_router(p, mcast_router);
		if (err)
			return err;
	}
#endif

	if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_MACPAUSE)
			return -EINVAL;
		p->group_fwd_mask = fwd_mask;
	}

	err = br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS,
			       BR_NEIGH_SUPPRESS);
	if (err)
		return err;

	br_port_flags_change(p, old_flags ^ p->flags);
	return 0;
}

/* Change state and parameters on port. */
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *protinfo;
	struct nlattr *afspec;
	struct net_bridge_port *p;
	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
	bool changed = false;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!protinfo && !afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself if the AF_SPEC
	 * is set to see if someone is setting vlan info on the bridge
	 */
	if (!p && !afspec)
		return -EINVAL;

	if (p && protinfo) {
		if (protinfo->nla_type & NLA_F_NESTED) {
			err = nla_parse_nested(tb, IFLA_BRPORT_MAX, protinfo,
					       br_port_policy, NULL);
			if (err)
				return err;

			spin_lock_bh(&p->br->lock);
			err = br_setport(p, tb);
			spin_unlock_bh(&p->br->lock);
		} else {
			/* Binary compatibility with old RSTP */
			if (nla_len(protinfo) < sizeof(u8))
				return -EINVAL;

			spin_lock_bh(&p->br->lock);
			err = br_set_port_state(p, nla_get_u8(protinfo));
			spin_unlock_bh(&p->br->lock);
		}
		if (err)
			goto out;
		changed = true;
	}

	if (afspec) {
		err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
				afspec, RTM_SETLINK, &changed);
	}

	if (changed)
		br_ifinfo_notify(RTM_NEWLINK, p);
out:
	return err;
}

/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *afspec;
	struct net_bridge_port *p;
	bool changed = false;
	int err = 0;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself as well */
	if (!p && !(dev->priv_flags & IFF_EBRIDGE))
		return -EINVAL;

	err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
			afspec, RTM_DELLINK, &changed);
	if (changed)
		/* Send RTM_NEWLINK because userspace
		 * expects RTM_NEWLINK for vlan dels
		 */
		br_ifinfo_notify(RTM_NEWLINK, p);

	return err;
}

static int br_validate(struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
		case htons(ETH_P_8021Q):
		case htons(ETH_P_8021AD):
			break;
		default:
			return -EPROTONOSUPPORT;
		}
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		if (defpvid >= VLAN_VID_MASK)
			return -EINVAL;
	}
#endif

	return 0;
}

static int br_port_slave_changelink(struct net_device *brdev,
				    struct net_device *dev,
				    struct nlattr *tb[],
				    struct nlattr *data[],
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int ret;

	if (!data)
		return 0;

	spin_lock_bh(&br->lock);
	ret = br_setport(br_port_get_rtnl(dev), data);
	spin_unlock_bh(&br->lock);

	return ret;
}

static int br_port_fill_slave_info(struct sk_buff *skb,
				   const struct net_device *brdev,
				   const struct net_device *dev)
{
	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}

static size_t br_port_get_slave_size(const struct net_device *brdev,
				     const struct net_device *dev)
{
	return br_port_info_size();
}

static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
	[IFLA_BR_FORWARD_DELAY]	= { .type = NLA_U32 },
	[IFLA_BR_HELLO_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_MAX_AGE]	= { .type = NLA_U32 },
	[IFLA_BR_AGEING_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_STP_STATE]	= { .type = NLA_U32 },
	[IFLA_BR_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_PROTOCOL]	= { .type = NLA_U16 },
	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_ADDR]	= { .type = NLA_BINARY,
				    .len  = ETH_ALEN },
	[IFLA_BR_MCAST_ROUTER]	= { .type = NLA_U8 },
	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERIER]	= { .type = NLA_U8 },
	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
};

static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
			 struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BR_FORWARD_DELAY]) {
		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_HELLO_TIME]) {
		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_MAX_AGE]) {
		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_AGEING_TIME]) {
		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_STP_STATE]) {
		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);

		br_stp_set_enabled(br, stp_enabled);
	}

	if (data[IFLA_BR_PRIORITY]) {
		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);

		br_stp_set_bridge_priority(br, priority);
	}

	if (data[IFLA_BR_VLAN_FILTERING]) {
		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);

		err = __br_vlan_filter_toggle(br, vlan_filter);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);

		err = __br_vlan_set_proto(br, vlan_proto);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		err = __br_vlan_set_default_pvid(br, defpvid);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);

		err = br_vlan_set_stats(br, vlan_stats);
		if (err)
			return err;
	}
#endif

	if (data[IFLA_BR_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
			return -EINVAL;
		br->group_fwd_mask = fwd_mask;
	}

	if (data[IFLA_BR_GROUP_ADDR]) {
		u8 new_addr[ETH_ALEN];

		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
			return -EINVAL;
		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
		if (!is_link_local_ether_addr(new_addr))
			return -EINVAL;
		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
		    new_addr[5] == 3)		/* 802.1X PAE address */
			return -EINVAL;
		spin_lock_bh(&br->lock);
		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
		spin_unlock_bh(&br->lock);
		br->group_addr_set = true;
		br_recalculate_fwd_mask(br);
	}

	if (data[IFLA_BR_FDB_FLUSH])
		br_fdb_flush(br);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (data[IFLA_BR_MCAST_ROUTER]) {
		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);

		err = br_multicast_set_router(br, multicast_router);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_SNOOPING]) {
		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);

		err = br_multicast_toggle(br, mcast_snooping);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
		u8 val;

		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
		br->multicast_query_use_ifaddr = !!val;
	}

	if (data[IFLA_BR_MCAST_QUERIER]) {
		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);

		err = br_multicast_set_querier(br, mcast_querier);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_HASH_ELASTICITY]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_HASH_ELASTICITY]);

		br->hash_elasticity = val;
	}

	if (data[IFLA_BR_MCAST_HASH_MAX]) {
		u32 hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);

		err = br_multicast_set_hash_max(br, hash_max);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);

		br->multicast_last_member_count = val;
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);

		br->multicast_startup_query_count = val;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);

		br->multicast_last_member_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);

		br->multicast_membership_interval = clock_t_to_jiffies(val);
	}

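	/* the remaining multicast intervals are likewise supplied in clock_t
	 * units and stored internally as jiffies
	 */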
	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);

		br->multicast_querier_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);

		br->multicast_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);

		br->multicast_query_response_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);

		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
		__u8 mcast_stats;

		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
		br->multicast_stats_enabled = !!mcast_stats;
	}

	if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
		__u8 igmp_version;

		igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(br, igmp_version);
		if (err)
			return err;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (data[IFLA_BR_MCAST_MLD_VERSION]) {
		__u8 mld_version;

		mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(br, mld_version);
		if (err)
			return err;
	}
#endif
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);

		br->nf_call_iptables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);

		br->nf_call_ip6tables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);

		br->nf_call_arptables = val ? true : false;
	}
#endif

	return 0;
}

static int br_dev_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	if (tb[IFLA_ADDRESS]) {
		spin_lock_bh(&br->lock);
		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
		spin_unlock_bh(&br->lock);
	}

	err = register_netdevice(dev);
	if (err)
		return err;

	err = br_changelink(dev, tb, data, extack);
	if (err)
		unregister_netdevice(dev);
	return err;
}

static size_t br_get_size(const struct net_device *brdev)
{
	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_AGEING_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_STP_STATE */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_PRIORITY */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_FILTERING */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_VLAN_DEFAULT_PVID */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_ENABLED */
#endif
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_GROUP_FWD_MASK */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_ROOT_ID */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_BRIDGE_ID */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_ROOT_PORT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_ROOT_PATH_COST */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
	       nla_total_size(ETH_ALEN) +	/* IFLA_BR_GROUP_ADDR */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_ROUTER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_SNOOPING */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERY_USE_IFADDR */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERIER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_STATS_ENABLED */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_ELASTICITY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_MAX */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_LAST_MEMBER_CNT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_IGMP_VERSION */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_MLD_VERSION */
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IPTABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IP6TABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_ARPTABLES */
#endif
	       0;
}

static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
	struct net_bridge *br = netdev_priv(brdev);
	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
	u32 hello_time = jiffies_to_clock_t(br->hello_time);
	u32 age_time = jiffies_to_clock_t(br->max_age);
	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
	u32 stp_enabled = br->stp_enabled;
	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
	u8 vlan_enabled = br_vlan_enabled(br->dev);
	u64 clockval;

	clockval = br_timer_value(&br->hello_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->tcn_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->topology_change_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->gc_work.timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &br->bridge_id) ||
	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &br->designated_root) ||
	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
		       br->topology_change_detected) ||
	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, br->vlan_stats_enabled))
		return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
		       br->multicast_query_use_ifaddr) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
		       br->multicast_stats_enabled) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
			br->hash_elasticity) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
			br->multicast_last_member_count) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
			br->multicast_startup_query_count) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
		       br->multicast_igmp_version))
		return -EMSGSIZE;
#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
		       br->multicast_mld_version))
		return -EMSGSIZE;
#endif
	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
		       br->nf_call_iptables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
		       br->nf_call_ip6tables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
		       br->nf_call_arptables ? 1 : 0))
		return -EMSGSIZE;
#endif

	return 0;
}

static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int numvls = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return 0;
	}

	if (vg) {
		/* we need to count all, even placeholder entries */
		list_for_each_entry(v, &vg->vlan_list, vlist)
			numvls++;
	}

	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
	       nla_total_size(sizeof(struct br_mcast_stats)) +
	       nla_total_size(0);
}

static int br_fill_linkxstats(struct sk_buff *skb,
			      const struct net_device *dev,
			      int *prividx, int attr)
{
	struct nlattr *nla __maybe_unused;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	struct nlattr *nest;
	int vl_idx = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return -EINVAL;
	}

	nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
	if (!nest)
		return -EMSGSIZE;

	if (vg) {
		u16 pvid;

		pvid = br_get_pvid(vg);
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			struct bridge_vlan_xstats vxi;
			struct br_vlan_stats stats;

			if (++vl_idx < *prividx)
				continue;
			memset(&vxi, 0, sizeof(vxi));
			vxi.vid = v->vid;
			vxi.flags = v->flags;
			if (v->vid == pvid)
				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
			br_vlan_get_stats(v, &stats);
			vxi.rx_bytes = stats.rx_bytes;
			vxi.rx_packets = stats.rx_packets;
			vxi.tx_bytes = stats.tx_bytes;
			vxi.tx_packets = stats.tx_packets;

			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
				goto nla_put_failure;
		}
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (++vl_idx >= *prividx) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
					sizeof(struct br_mcast_stats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;
		br_multicast_get_stats(br, p, nla_data(nla));
	}
#endif
	nla_nest_end(skb, nest);
	*prividx = 0;

	return 0;

nla_put_failure:
	nla_nest_end(skb, nest);
	*prividx = vl_idx;

	return -EMSGSIZE;
}

static struct rtnl_af_ops br_af_ops __read_mostly = {
	.family			= AF_BRIDGE,
	.get_link_af_size	= br_get_link_af_size_filtered,
};

struct rtnl_link_ops br_link_ops __read_mostly = {
	.kind			= "bridge",
	.priv_size		= sizeof(struct net_bridge),
	.setup			= br_dev_setup,
	.maxtype		= IFLA_BR_MAX,
	.policy			= br_policy,
	.validate		= br_validate,
	.newlink		= br_dev_newlink,
	.changelink		= br_changelink,
	.dellink		= br_dev_delete,
	.get_size		= br_get_size,
	.fill_info		= br_fill_info,
	.fill_linkxstats	= br_fill_linkxstats,
	.get_linkxstats_size	= br_get_linkxstats_size,

	.slave_maxtype		= IFLA_BRPORT_MAX,
	.slave_policy		= br_port_policy,
	.slave_changelink	= br_port_slave_changelink,
	.get_slave_size		= br_port_get_slave_size,
	.fill_slave_info	= br_port_fill_slave_info,
};

int __init br_netlink_init(void)
{
	int err;

	br_mdb_init();
	rtnl_af_register(&br_af_ops);

	err = rtnl_link_register(&br_link_ops);
	if (err)
		goto out_af;

	return 0;

out_af:
	rtnl_af_unregister(&br_af_ops);
	br_mdb_uninit();
	return err;
}

void br_netlink_fini(void)
{
	br_mdb_uninit();
	rtnl_af_unregister(&br_af_ops);
	rtnl_link_unregister(&br_link_ops);
}